mirror of
https://github.com/docling-project/docling-serve.git
synced 2025-11-29 08:33:50 +00:00
Compare commits
159 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6b6dd8a0d0 | ||
|
|
513ae0c119 | ||
|
|
bde040661f | ||
|
|
496f7ec26b | ||
|
|
9d6def0ec8 | ||
|
|
a4fed2d965 | ||
|
|
b0360d723b | ||
|
|
4adc0dfa79 | ||
|
|
40c7f1bcd3 | ||
|
|
d64a2a974a | ||
|
|
0d4545a65a | ||
|
|
fe98338239 | ||
|
|
b844ce737e | ||
|
|
27fdd7b85a | ||
|
|
1df62adf01 | ||
|
|
e5449472b2 | ||
|
|
81f0a8ddf8 | ||
|
|
a69cc867f5 | ||
|
|
624f65d41b | ||
|
|
f02dbc0144 | ||
|
|
37fe02277b | ||
|
|
783ada0580 | ||
|
|
71edf41849 | ||
|
|
9a64410552 | ||
|
|
6e9aa8c759 | ||
|
|
885f319d3a | ||
|
|
d584895e11 | ||
|
|
d26e6637d8 | ||
|
|
7692eb2600 | ||
|
|
3bd7828570 | ||
|
|
8b470cba8e | ||
|
|
8048f4589a | ||
|
|
b3058e91e0 | ||
|
|
63da9eedeb | ||
|
|
b15dc2529f | ||
|
|
4c7207be00 | ||
|
|
db3fdb5bc1 | ||
|
|
fd1b987e8d | ||
|
|
ce15e0302b | ||
|
|
ecb1874a50 | ||
|
|
1333f71c9c | ||
|
|
ec594d84fe | ||
|
|
3771c1b554 | ||
|
|
24db461b14 | ||
|
|
8706706e87 | ||
|
|
766adb2481 | ||
|
|
8222cf8955 | ||
|
|
b922824e5b | ||
|
|
56e328baf7 | ||
|
|
daa924a77e | ||
|
|
e63197e89e | ||
|
|
767ce0982b | ||
|
|
bfde1a0991 | ||
|
|
eb3892ee14 | ||
|
|
93b84712b2 | ||
|
|
c45b937064 | ||
|
|
50e431f30f | ||
|
|
149a8cb1c0 | ||
|
|
5f9c20a985 | ||
|
|
80755a7d59 | ||
|
|
30aca92298 | ||
|
|
717fb3a8d8 | ||
|
|
873d05aefe | ||
|
|
196c5ce42a | ||
|
|
b5c5f47892 | ||
|
|
d5455b7f66 | ||
|
|
7a682494d6 | ||
|
|
524f6a8997 | ||
|
|
9ccf8e3b5e | ||
|
|
ffea34732b | ||
|
|
b299af002b | ||
|
|
c4c41f16df | ||
|
|
7066f3520a | ||
|
|
6a8190c315 | ||
|
|
060ecd8b0e | ||
|
|
32b8a809f3 | ||
|
|
de002dfcdc | ||
|
|
abe5aa03f5 | ||
|
|
3f090b7d15 | ||
|
|
21c1791e42 | ||
|
|
00be428490 | ||
|
|
3ff1b2f983 | ||
|
|
8406fb9b59 | ||
|
|
a2dcb0a20f | ||
|
|
36787bc061 | ||
|
|
509f4889f8 | ||
|
|
919cf5c041 | ||
|
|
35c2630c61 | ||
|
|
382d675631 | ||
|
|
c65f3c654c | ||
|
|
829effec1a | ||
|
|
494d66f992 | ||
|
|
14bafb2628 | ||
|
|
37e2e1ad09 | ||
|
|
71c5fae505 | ||
|
|
91956cbf4e | ||
|
|
4c9571a052 | ||
|
|
41624af09f | ||
|
|
26bef5bec0 | ||
|
|
40bb21d347 | ||
|
|
ee89ee4dae | ||
|
|
6b3d281f02 | ||
|
|
b598872e5c | ||
|
|
087417e5c2 | ||
|
|
57f9073bc0 | ||
|
|
525a43ff6f | ||
|
|
c1ce4719c9 | ||
|
|
5dfb75d3b9 | ||
|
|
420162e674 | ||
|
|
ff75bab21b | ||
|
|
7a0fabae07 | ||
|
|
9ffe49a359 | ||
|
|
68772bb6f0 | ||
|
|
20ec87a63a | ||
|
|
e30f458923 | ||
|
|
03e405638f | ||
|
|
fd8e40a008 | ||
|
|
422c402bab | ||
|
|
ea090288d3 | ||
|
|
07c48edd5d | ||
|
|
a212547d28 | ||
|
|
c76daac70c | ||
|
|
7994b19b9f | ||
|
|
ec57b528ed | ||
|
|
b92c5d8899 | ||
|
|
3c9825df30 | ||
|
|
8dd0e216fd | ||
|
|
d406802f9d | ||
|
|
a92ad48b28 | ||
|
|
da2b26099d | ||
|
|
98b46eda50 | ||
|
|
7e75919ae8 | ||
|
|
c95db36438 | ||
|
|
82f8900197 | ||
|
|
ed851c95fe | ||
|
|
05df0735d3 | ||
|
|
cad1053e36 | ||
|
|
7e6d9cdef3 | ||
|
|
343b985287 | ||
|
|
c430d9b1a1 | ||
|
|
63141f1cc7 | ||
|
|
d5557fad9f | ||
|
|
36967f7f61 | ||
|
|
3b54d9b6ef | ||
|
|
4877248368 | ||
|
|
ec33a61faa | ||
|
|
663e03303a | ||
|
|
c64a450bf9 | ||
|
|
ae3b4906f1 | ||
|
|
7a351fcdea | ||
|
|
1615f977a2 | ||
|
|
1bf487b18e | ||
|
|
be7e4162af | ||
|
|
de42baf8dc | ||
|
|
4da28565a7 | ||
|
|
2a78142b96 | ||
|
|
d0e8578931 | ||
|
|
c6539c42de | ||
|
|
ddf3144512 |
40
.dockerignore
Normal file
40
.dockerignore
Normal file
@@ -0,0 +1,40 @@
|
||||
# Ignore Python cache files
|
||||
__pycache__/
|
||||
**/__pycache__/
|
||||
*.pyc
|
||||
*.pyo
|
||||
*.pyd
|
||||
|
||||
# Ignore virtual environments
|
||||
env/
|
||||
venv/
|
||||
|
||||
# Ignore development artifacts
|
||||
*.log
|
||||
*.db
|
||||
*.sqlite3
|
||||
|
||||
# Ignore configuration and sensitive files
|
||||
**/.env
|
||||
*.env
|
||||
*.ini
|
||||
*.cfg
|
||||
|
||||
# Ignore IDE and editor settings
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# Ignore Git files
|
||||
.git/
|
||||
.gitignore
|
||||
|
||||
# Ignore Docker files themselves (optional if not needed in the image)
|
||||
.dockerignore
|
||||
Dockerfile*
|
||||
|
||||
# Ignore build artifacts (if applicable)
|
||||
build/
|
||||
dist/
|
||||
*.egg-info
|
||||
3
.env.example
Normal file
3
.env.example
Normal file
@@ -0,0 +1,3 @@
|
||||
TESSDATA_PREFIX=/usr/share/tesseract/tessdata/
|
||||
UVICORN_WORKERS=2
|
||||
UVICORN_RELOAD=True
|
||||
7
.flake8
7
.flake8
@@ -1,7 +0,0 @@
|
||||
[flake8]
|
||||
max-line-length = 88
|
||||
exclude = test/*
|
||||
max-complexity = 18
|
||||
docstring-convention = google
|
||||
ignore = W503,E203
|
||||
classmethod-decorators = classmethod,validator
|
||||
12
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
12
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
<!-- Thank you for contributing to Docling! -->
|
||||
|
||||
<!-- STEPS TO FOLLOW:
|
||||
1. Add a description of the changes (frequently the same as the commit description)
|
||||
2. Enter the issue number next to "Resolves #" below (if there is no tracking issue resolved, **remove that section**)
|
||||
3. Make sure the PR title follows the **Commit Message Formatting**: https://www.conventionalcommits.org/en/v1.0.0/#summary.
|
||||
-->
|
||||
|
||||
<!-- Uncomment this section with the issue number if an issue is being resolved
|
||||
**Issue resolved by this Pull Request:**
|
||||
Resolves #
|
||||
--->
|
||||
23
.github/SECURITY.md
vendored
Normal file
23
.github/SECURITY.md
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
# Security and Disclosure Information Policy for the Docling Project
|
||||
|
||||
The Docling team and community take security bugs seriously. We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions.
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
If you think you've identified a security issue in an Docling project repository, please DO NOT report the issue publicly via the GitHub issue tracker, etc.
|
||||
|
||||
Instead, send an email with as many details as possible to [deepsearch-core@zurich.ibm.com](mailto:deepsearch-core@zurich.ibm.com). This is a private mailing list for the maintainers team.
|
||||
|
||||
Please do not create a public issue.
|
||||
|
||||
## Security Vulnerability Response
|
||||
|
||||
Each report is acknowledged and analyzed by the core maintainers within 3 working days.
|
||||
|
||||
Any vulnerability information shared with core maintainers stays within the Docling project and will not be disseminated to other projects unless it is necessary to get the issue fixed.
|
||||
|
||||
After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.
|
||||
|
||||
## Security Alerts
|
||||
|
||||
We will send announcements of security vulnerabilities and steps to remediate on the [Docling announcements](https://github.com/docling-project/docling/discussions/categories/announcements).
|
||||
19
.github/actions/setup-poetry/action.yml
vendored
19
.github/actions/setup-poetry/action.yml
vendored
@@ -1,19 +0,0 @@
|
||||
name: 'Set up Poetry and install'
|
||||
description: 'Set up a specific version of Poetry and install dependencies using caching.'
|
||||
inputs:
|
||||
python-version:
|
||||
description: "Version range or exact version of Python or PyPy to use, using SemVer's version range syntax."
|
||||
default: '3.11'
|
||||
runs:
|
||||
using: 'composite'
|
||||
steps:
|
||||
- name: Install poetry
|
||||
run: pipx install poetry==1.8.3
|
||||
shell: bash
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ inputs.python-version }}
|
||||
cache: 'poetry'
|
||||
- name: Install dependencies
|
||||
run: poetry install --all-extras
|
||||
shell: bash
|
||||
2
.github/dco.yml
vendored
Normal file
2
.github/dco.yml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
allowRemediationCommits:
|
||||
individual: true
|
||||
9
.github/mergify.yml
vendored
Normal file
9
.github/mergify.yml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
merge_protections:
|
||||
- name: Enforce conventional commit
|
||||
description: Make sure that we follow https://www.conventionalcommits.org/en/v1.0.0/
|
||||
if:
|
||||
- base = main
|
||||
success_conditions:
|
||||
- "title ~=
|
||||
^(fix|feat|docs|style|refactor|perf|test|build|ci|chore|revert)(?:\\(.+\
|
||||
\\))?(!)?:"
|
||||
76
.github/scripts/release.sh
vendored
Executable file
76
.github/scripts/release.sh
vendored
Executable file
@@ -0,0 +1,76 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e # trigger failure on error - do not remove!
|
||||
set -x # display command on output
|
||||
|
||||
## debug
|
||||
# TARGET_VERSION="1.2.x"
|
||||
|
||||
if [ -z "${TARGET_VERSION}" ]; then
|
||||
>&2 echo "No TARGET_VERSION specified"
|
||||
exit 1
|
||||
fi
|
||||
CHGLOG_FILE="${CHGLOG_FILE:-CHANGELOG.md}"
|
||||
|
||||
# Update package version
|
||||
uvx --from=toml-cli toml set --toml-path=pyproject.toml project.version "${TARGET_VERSION}"
|
||||
uv lock --upgrade-package docling-serve
|
||||
|
||||
# Extract all docling packages and versions from uv.lock
|
||||
DOCVERSIONS=$(uvx --with toml python3 - <<'PY'
|
||||
import toml
|
||||
data = toml.load("uv.lock")
|
||||
for pkg in data.get("package", []):
|
||||
if pkg["name"].startswith("docling"):
|
||||
print(f"{pkg['name']} {pkg['version']}")
|
||||
PY
|
||||
)
|
||||
|
||||
# Format docling versions list without trailing newline
|
||||
DOCLING_VERSIONS="### Docling libraries included in this release:"
|
||||
while IFS= read -r line; do
|
||||
DOCLING_VERSIONS+="
|
||||
- $line"
|
||||
done <<< "$DOCVERSIONS"
|
||||
|
||||
# Collect release notes
|
||||
REL_NOTES=$(mktemp)
|
||||
uv run --no-sync semantic-release changelog --unreleased >> "${REL_NOTES}"
|
||||
|
||||
# Strip trailing blank lines from release notes and append docling versions
|
||||
{
|
||||
sed -e :a -e '/^\n*$/{$d;N;};/\n$/ba' "${REL_NOTES}"
|
||||
printf "\n"
|
||||
printf "%s" "${DOCLING_VERSIONS}"
|
||||
printf "\n"
|
||||
} > "${REL_NOTES}.tmp" && mv "${REL_NOTES}.tmp" "${REL_NOTES}"
|
||||
|
||||
# Update changelog
|
||||
TMP_CHGLOG=$(mktemp)
|
||||
TARGET_TAG_NAME="v${TARGET_VERSION}"
|
||||
RELEASE_URL="$(gh repo view --json url -q ".url")/releases/tag/${TARGET_TAG_NAME}"
|
||||
## debug
|
||||
#RELEASE_URL="myrepo/releases/tag/${TARGET_TAG_NAME}"
|
||||
|
||||
# Strip leading blank lines from existing changelog to avoid multiple blank lines when appending
|
||||
EXISTING_CL=$(sed -e :a -e '/^\n*$/{$d;N;};/\n$/ba' "${CHGLOG_FILE}")
|
||||
|
||||
{
|
||||
printf "## [${TARGET_TAG_NAME}](${RELEASE_URL}) - $(date -Idate)\n\n"
|
||||
cat "${REL_NOTES}"
|
||||
printf "\n"
|
||||
printf "%s\n" "${EXISTING_CL}"
|
||||
} >> "${TMP_CHGLOG}"
|
||||
|
||||
mv "${TMP_CHGLOG}" "${CHGLOG_FILE}"
|
||||
|
||||
# Push changes
|
||||
git config --global user.name 'github-actions[bot]'
|
||||
git config --global user.email 'github-actions[bot]@users.noreply.github.com'
|
||||
git add pyproject.toml uv.lock "${CHGLOG_FILE}"
|
||||
COMMIT_MSG="chore: bump version to ${TARGET_VERSION} [skip ci]"
|
||||
git commit -m "${COMMIT_MSG}"
|
||||
git push origin main
|
||||
|
||||
# Create GitHub release (incl. Git tag)
|
||||
gh release create "${TARGET_TAG_NAME}" -F "${REL_NOTES}"
|
||||
39
.github/styles/config/vocabularies/Docling/accept.txt
vendored
Normal file
39
.github/styles/config/vocabularies/Docling/accept.txt
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
[Dd]ocling
|
||||
precommit
|
||||
asgi
|
||||
async
|
||||
(?i)urls
|
||||
uvicorn
|
||||
[Ww]ebserver
|
||||
RQ
|
||||
(?i)url
|
||||
keyfile
|
||||
[Ww]ebsocket(s?)
|
||||
[Kk]ubernetes
|
||||
UI
|
||||
(?i)vllm
|
||||
APIs
|
||||
[Ss]ubprocesses
|
||||
(?i)api
|
||||
Kubeflow
|
||||
(?i)Jobkit
|
||||
(?i)cpu
|
||||
(?i)PyTorch
|
||||
(?i)CUDA
|
||||
(?i)NVIDIA
|
||||
(?i)ROCm
|
||||
(?i)env
|
||||
Gradio
|
||||
Podman
|
||||
bool
|
||||
Ollama
|
||||
inbody
|
||||
LGTMs
|
||||
Dolfi
|
||||
Lysak
|
||||
Nikos
|
||||
Nassar
|
||||
Panos
|
||||
Vagenas
|
||||
Staar
|
||||
Livathinos
|
||||
11
.github/vale.ini
vendored
Normal file
11
.github/vale.ini
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
StylesPath = styles
|
||||
MinAlertLevel = suggestion
|
||||
; Packages = write-good, proselint
|
||||
|
||||
Vocab = Docling
|
||||
|
||||
[*.md]
|
||||
BasedOnStyles = Vale
|
||||
|
||||
[CHANGELOG.md]
|
||||
BasedOnStyles =
|
||||
2
.github/workflows/actionlint.yml
vendored
2
.github/workflows/actionlint.yml
vendored
@@ -13,7 +13,7 @@ jobs:
|
||||
actionlint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v5
|
||||
- name: Download actionlint
|
||||
id: get_actionlint
|
||||
run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash)
|
||||
|
||||
59
.github/workflows/cd.yml
vendored
Normal file
59
.github/workflows/cd.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
name: "Run CD"
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
code-checks:
|
||||
uses: ./.github/workflows/job-checks.yml
|
||||
pre-release-check:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
TARGET_TAG_V: ${{ steps.version_check.outputs.TRGT_VERSION }}
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0 # for fetching tags, required for semantic-release
|
||||
- name: Install uv and set the python version
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
enable-cache: true
|
||||
- name: Install dependencies
|
||||
run: uv sync --only-dev
|
||||
- name: Check version of potential release
|
||||
id: version_check
|
||||
run: |
|
||||
TRGT_VERSION=$(uv run --no-sync semantic-release print-version)
|
||||
echo "TRGT_VERSION=${TRGT_VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "${TRGT_VERSION}"
|
||||
- name: Check notes of potential release
|
||||
run: uv run --no-sync semantic-release changelog --unreleased
|
||||
release:
|
||||
needs: [code-checks, pre-release-check]
|
||||
if: needs.pre-release-check.outputs.TARGET_TAG_V != ''
|
||||
environment: auto-release
|
||||
runs-on: ubuntu-latest
|
||||
concurrency: release
|
||||
steps:
|
||||
- uses: actions/create-github-app-token@v1
|
||||
id: app-token
|
||||
with:
|
||||
app-id: ${{ vars.CI_APP_ID }}
|
||||
private-key: ${{ secrets.CI_PRIVATE_KEY }}
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
token: ${{ steps.app-token.outputs.token }}
|
||||
fetch-depth: 0 # for fetching tags, required for semantic-release
|
||||
- name: Install uv and set the python version
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
enable-cache: true
|
||||
- name: Install dependencies
|
||||
run: uv sync --only-dev
|
||||
- name: Run release script
|
||||
env:
|
||||
GH_TOKEN: ${{ steps.app-token.outputs.token }}
|
||||
TARGET_VERSION: ${{ needs.pre-release-check.outputs.TARGET_TAG_V }}
|
||||
CHGLOG_FILE: CHANGELOG.md
|
||||
run: ./.github/scripts/release.sh
|
||||
shell: bash
|
||||
34
.github/workflows/checks.yml
vendored
34
.github/workflows/checks.yml
vendored
@@ -1,34 +0,0 @@
|
||||
name: Run linter checks
|
||||
on:
|
||||
push:
|
||||
branches: ["main"]
|
||||
pull_request:
|
||||
branches: ["main"]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
py-lint:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ['3.11']
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup-poetry
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Run styling check
|
||||
run: poetry run pre-commit run --all-files
|
||||
|
||||
markdown-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: markdownlint-cli2-action
|
||||
uses: DavidAnson/markdownlint-cli2-action@v16
|
||||
with:
|
||||
globs: "**/*.md"
|
||||
|
||||
53
.github/workflows/ci-images-dryrun.yml
vendored
Normal file
53
.github/workflows/ci-images-dryrun.yml
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
name: Dry run docling-serve image building
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build_image:
|
||||
name: Build ${{ matrix.spec.name }} container image
|
||||
strategy:
|
||||
matrix:
|
||||
spec:
|
||||
- name: docling-project/docling-serve
|
||||
build_args: |
|
||||
UV_SYNC_EXTRA_ARGS=--no-extra flash-attn
|
||||
platforms: linux/amd64, linux/arm64
|
||||
- name: docling-project/docling-serve-cpu
|
||||
build_args: |
|
||||
UV_SYNC_EXTRA_ARGS=--no-group pypi --group cpu --no-extra flash-attn
|
||||
platforms: linux/amd64, linux/arm64
|
||||
# - name: docling-project/docling-serve-cu124
|
||||
# build_args: |
|
||||
# UV_SYNC_EXTRA_ARGS=--no-group pypi --group cu124
|
||||
# platforms: linux/amd64
|
||||
- name: docling-project/docling-serve-cu126
|
||||
build_args: |
|
||||
UV_SYNC_EXTRA_ARGS=--no-group pypi --group cu126
|
||||
platforms: linux/amd64
|
||||
- name: docling-project/docling-serve-cu128
|
||||
build_args: |
|
||||
UV_SYNC_EXTRA_ARGS=--no-group pypi --group cu128
|
||||
platforms: linux/amd64
|
||||
# - name: docling-project/docling-serve-rocm
|
||||
# build_args: |
|
||||
# UV_SYNC_EXTRA_ARGS=--no-group pypi --group rocm --no-extra flash-attn
|
||||
# platforms: linux/amd64
|
||||
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
attestations: write
|
||||
id-token: write
|
||||
|
||||
uses: ./.github/workflows/job-image.yml
|
||||
with:
|
||||
publish: false
|
||||
build_args: ${{ matrix.spec.build_args }}
|
||||
ghcr_image_name: ${{ matrix.spec.name }}
|
||||
quay_image_name: ""
|
||||
platforms: ${{ matrix.spec.platforms }}
|
||||
25
.github/workflows/ci.yml
vendored
Normal file
25
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
name: "Run CI"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["main"]
|
||||
pull_request:
|
||||
branches: ["main"]
|
||||
|
||||
jobs:
|
||||
code-checks:
|
||||
# if: ${{ github.event_name == 'push' || (github.event.pull_request.head.repo.full_name != 'docling-project/docling-serve' && github.event.pull_request.head.repo.full_name != 'docling-project/docling-serve') }}
|
||||
uses: ./.github/workflows/job-checks.yml
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
attestations: write
|
||||
id-token: write
|
||||
|
||||
build-images:
|
||||
uses: ./.github/workflows/ci-images-dryrun.yml
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
attestations: write
|
||||
id-token: write
|
||||
192
.github/workflows/dco-advisor.yml
vendored
Normal file
192
.github/workflows/dco-advisor.yml
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
name: DCO Advisor Bot
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, reopened, synchronize]
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
dco_advisor:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Handle DCO check result
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
const pr = context.payload.pull_request || context.payload.check_run?.pull_requests?.[0];
|
||||
if (!pr) return;
|
||||
|
||||
const prNumber = pr.number;
|
||||
const baseRef = pr.base.ref;
|
||||
const headSha =
|
||||
context.payload.check_run?.head_sha ||
|
||||
pr.head?.sha;
|
||||
const username = pr.user.login;
|
||||
|
||||
console.log("HEAD SHA:", headSha);
|
||||
|
||||
const sleep = ms => new Promise(resolve => setTimeout(resolve, ms));
|
||||
|
||||
// Poll until DCO check has a conclusion (max 6 attempts, 30s)
|
||||
let dcoCheck = null;
|
||||
for (let attempt = 0; attempt < 6; attempt++) {
|
||||
const { data: checks } = await github.rest.checks.listForRef({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
ref: headSha
|
||||
});
|
||||
|
||||
|
||||
console.log("All check runs:");
|
||||
checks.check_runs.forEach(run => {
|
||||
console.log(`- ${run.name} (${run.status}/${run.conclusion}) @ ${run.head_sha}`);
|
||||
});
|
||||
|
||||
dcoCheck = checks.check_runs.find(run =>
|
||||
run.name.toLowerCase().includes("dco") &&
|
||||
!run.name.toLowerCase().includes("dco_advisor") &&
|
||||
run.head_sha === headSha
|
||||
);
|
||||
|
||||
|
||||
if (dcoCheck?.conclusion) break;
|
||||
console.log(`Waiting for DCO check... (${attempt + 1})`);
|
||||
await sleep(5000); // wait 5 seconds
|
||||
}
|
||||
|
||||
if (!dcoCheck || !dcoCheck.conclusion) {
|
||||
console.log("DCO check did not complete in time.");
|
||||
return;
|
||||
}
|
||||
|
||||
const isFailure = ["failure", "action_required"].includes(dcoCheck.conclusion);
|
||||
console.log(`DCO check conclusion for ${headSha}: ${dcoCheck.conclusion} (treated as ${isFailure ? "failure" : "success"})`);
|
||||
|
||||
// Parse DCO output for commit SHAs and author
|
||||
let badCommits = [];
|
||||
let authorName = "";
|
||||
let authorEmail = "";
|
||||
let moreInfo = `More info: [DCO check report](${dcoCheck?.html_url})`;
|
||||
|
||||
if (isFailure) {
|
||||
const { data: commits } = await github.rest.pulls.listCommits({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
pull_number: prNumber,
|
||||
});
|
||||
|
||||
for (const commit of commits) {
|
||||
const commitMessage = commit.commit.message;
|
||||
const signoffMatch = commitMessage.match(/^Signed-off-by:\s+.+<.+>$/m);
|
||||
if (!signoffMatch) {
|
||||
console.log(`Bad commit found ${commit.sha}`)
|
||||
badCommits.push({
|
||||
sha: commit.sha,
|
||||
authorName: commit.commit.author.name,
|
||||
authorEmail: commit.commit.author.email,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If multiple authors are present, you could adapt the message accordingly
|
||||
// For now, we'll just use the first one
|
||||
if (badCommits.length > 0) {
|
||||
authorName = badCommits[0].authorName;
|
||||
authorEmail = badCommits[0].authorEmail;
|
||||
}
|
||||
|
||||
// Generate remediation commit message if needed
|
||||
let remediationSnippet = "";
|
||||
if (badCommits.length && authorEmail) {
|
||||
remediationSnippet = `git commit --allow-empty -s -m "DCO Remediation Commit for ${authorName} <${authorEmail}>\n\n` +
|
||||
badCommits.map(c => `I, ${c.authorName} <${c.authorEmail}>, hereby add my Signed-off-by to this commit: ${c.sha}`).join('\n') +
|
||||
`"`;
|
||||
} else {
|
||||
remediationSnippet = "# Unable to auto-generate remediation message. Please check the DCO check details.";
|
||||
}
|
||||
|
||||
// Build comment
|
||||
const commentHeader = '<!-- dco-advice-bot -->';
|
||||
let body = "";
|
||||
|
||||
if (isFailure) {
|
||||
body = [
|
||||
commentHeader,
|
||||
'❌ **DCO Check Failed**',
|
||||
'',
|
||||
`Hi @${username}, your pull request has failed the Developer Certificate of Origin (DCO) check.`,
|
||||
'',
|
||||
'This repository supports **remediation commits**, so you can fix this without rewriting history — but you must follow the required message format.',
|
||||
'',
|
||||
'---',
|
||||
'',
|
||||
'### 🛠 Quick Fix: Add a remediation commit',
|
||||
'Run this command:',
|
||||
'',
|
||||
'```bash',
|
||||
remediationSnippet,
|
||||
'git push',
|
||||
'```',
|
||||
'',
|
||||
'---',
|
||||
'',
|
||||
'<details>',
|
||||
'<summary>🔧 Advanced: Sign off each commit directly</summary>',
|
||||
'',
|
||||
'**For the latest commit:**',
|
||||
'```bash',
|
||||
'git commit --amend --signoff',
|
||||
'git push --force-with-lease',
|
||||
'```',
|
||||
'',
|
||||
'**For multiple commits:**',
|
||||
'```bash',
|
||||
`git rebase --signoff origin/${baseRef}`,
|
||||
'git push --force-with-lease',
|
||||
'```',
|
||||
'',
|
||||
'</details>',
|
||||
'',
|
||||
moreInfo
|
||||
].join('\n');
|
||||
} else {
|
||||
body = [
|
||||
commentHeader,
|
||||
'✅ **DCO Check Passed**',
|
||||
'',
|
||||
`Thanks @${username}, all your commits are properly signed off. 🎉`
|
||||
].join('\n');
|
||||
}
|
||||
|
||||
// Get existing comments on the PR
|
||||
const { data: comments } = await github.rest.issues.listComments({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: prNumber
|
||||
});
|
||||
|
||||
// Look for a previous bot comment
|
||||
const existingComment = comments.find(c =>
|
||||
c.body.includes("<!-- dco-advice-bot -->")
|
||||
);
|
||||
|
||||
if (existingComment) {
|
||||
await github.rest.issues.updateComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
comment_id: existingComment.id,
|
||||
body: body
|
||||
});
|
||||
} else {
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: prNumber,
|
||||
body: body
|
||||
});
|
||||
}
|
||||
105
.github/workflows/images-dryrun.yml
vendored
105
.github/workflows/images-dryrun.yml
vendored
@@ -1,105 +0,0 @@
|
||||
name: Dry run docling-serve image building
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches: ["main"]
|
||||
|
||||
env:
|
||||
GHCR_REGISTRY: ghcr.io
|
||||
GHCR_DOCLING_SERVE_CPU_IMAGE_NAME: ds4sd/docling-serve-cpu
|
||||
GHCR_DOCLING_SERVE_GPU_IMAGE_NAME: ds4sd/docling-serve
|
||||
|
||||
jobs:
|
||||
build_cpu_image:
|
||||
name: Build docling-serve "CPU only" container image
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
attestations: write
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-buildx-
|
||||
|
||||
- name: Extract metadata (tags, labels) for docling-serve (CPU only) ghcr image
|
||||
id: ghcr_serve_cpu_meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.GHCR_REGISTRY }}/${{ env.GHCR_DOCLING_SERVE_CPU_IMAGE_NAME }}
|
||||
|
||||
- name: Build docling-serve-cpu image
|
||||
id: build-serve-cpu-ghcr
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: false
|
||||
tags: ${{ steps.ghcr_serve_cpu_meta.outputs.tags }}
|
||||
labels: ${{ steps.ghcr_serve_cpu_meta.outputs.labels }}
|
||||
platforms: linux/amd64, linux/arm64
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
file: Containerfile
|
||||
build-args: |
|
||||
--build-arg CPU_ONLY=true
|
||||
|
||||
- name: Remove Local Docker Images
|
||||
run: |
|
||||
docker image prune -af
|
||||
|
||||
build_gpu_image:
|
||||
name: Build docling-serve (with GPU support) container image
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
attestations: write
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-buildx-
|
||||
|
||||
- name: Extract metadata (tags, labels) for docling-serve (GPU) ghcr image
|
||||
id: ghcr_serve_gpu_meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.GHCR_REGISTRY }}/${{ env.GHCR_DOCLING_SERVE_GPU_IMAGE_NAME }}
|
||||
|
||||
- name: Build docling-serve (GPU) image
|
||||
id: build-serve-gpu-ghcr
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: false
|
||||
tags: ${{ steps.ghcr_serve_gpu_meta.outputs.tags }}
|
||||
labels: ${{ steps.ghcr_serve_gpu_meta.outputs.labels }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
file: Containerfile
|
||||
build-args: |
|
||||
--build-arg CPU_ONLY=false
|
||||
224
.github/workflows/images.yml
vendored
224
.github/workflows/images.yml
vendored
@@ -4,193 +4,55 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
tags:
|
||||
- 'v*'
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
env:
|
||||
GHCR_REGISTRY: ghcr.io
|
||||
GHCR_DOCLING_SERVE_CPU_IMAGE_NAME: ds4sd/docling-serve-cpu
|
||||
GHCR_DOCLING_SERVE_GPU_IMAGE_NAME: ds4sd/docling-serve
|
||||
QUAY_REGISTRY: quay.io
|
||||
QUAY_DOCLING_SERVE_CPU_IMAGE_NAME: ds4sd/docling-serve-cpu
|
||||
QUAY_DOCLING_SERVE_GPU_IMAGE_NAME: ds4sd/docling-serve
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build_and_publish_cpu_images:
|
||||
name: Push docling-serve "CPU only" container image to GHCR and QUAY
|
||||
runs-on: ubuntu-latest
|
||||
environment: registry-creds
|
||||
build_and_publish_images:
|
||||
name: Build and push ${{ matrix.spec.name }} container image to GHCR and QUAY
|
||||
strategy:
|
||||
matrix:
|
||||
spec:
|
||||
- name: docling-project/docling-serve
|
||||
build_args: |
|
||||
UV_SYNC_EXTRA_ARGS=--no-extra flash-attn
|
||||
platforms: linux/amd64, linux/arm64
|
||||
- name: docling-project/docling-serve-cpu
|
||||
build_args: |
|
||||
UV_SYNC_EXTRA_ARGS=--no-group pypi --group cpu --no-extra flash-attn
|
||||
platforms: linux/amd64, linux/arm64
|
||||
# - name: docling-project/docling-serve-cu124
|
||||
# build_args: |
|
||||
# UV_SYNC_EXTRA_ARGS=--no-group pypi --group cu124
|
||||
# platforms: linux/amd64
|
||||
- name: docling-project/docling-serve-cu126
|
||||
build_args: |
|
||||
UV_SYNC_EXTRA_ARGS=--no-group pypi --group cu126
|
||||
platforms: linux/amd64
|
||||
- name: docling-project/docling-serve-cu128
|
||||
build_args: |
|
||||
UV_SYNC_EXTRA_ARGS=--no-group pypi --group cu128
|
||||
platforms: linux/amd64
|
||||
# - name: docling-project/docling-serve-rocm
|
||||
# build_args: |
|
||||
# UV_SYNC_EXTRA_ARGS=--no-group pypi --group rocm --no-extra flash-attn
|
||||
# platforms: linux/amd64
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
attestations: write
|
||||
id-token: write
|
||||
secrets: inherit
|
||||
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Log in to the GHCR container image registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.GHCR_REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Log in to the Quay container image registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.QUAY_REGISTRY }}
|
||||
username: ${{ secrets.QUAY_USERNAME }}
|
||||
password: ${{ secrets.QUAY_TOKEN }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-buildx-
|
||||
|
||||
- name: Extract metadata (tags, labels) for docling-serve (CPU only) ghcr image
|
||||
id: ghcr_serve_cpu_meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.GHCR_REGISTRY }}/${{ env.GHCR_DOCLING_SERVE_CPU_IMAGE_NAME }}
|
||||
|
||||
- name: Build and push docling-serve-cpu image to ghcr.io
|
||||
id: push-serve-cpu-ghcr
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ steps.ghcr_serve_cpu_meta.outputs.tags }}
|
||||
labels: ${{ steps.ghcr_serve_cpu_meta.outputs.labels }}
|
||||
platforms: linux/amd64, linux/arm64
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
file: Containerfile
|
||||
build-args: |
|
||||
--build-arg CPU_ONLY=true
|
||||
|
||||
- name: Generate artifact attestation
|
||||
uses: actions/attest-build-provenance@v1
|
||||
with:
|
||||
subject-name: ${{ env.GHCR_REGISTRY }}/${{ env.GHCR_DOCLING_SERVE_CPU_IMAGE_NAME}}
|
||||
subject-digest: ${{ steps.push-serve-cpu-ghcr.outputs.digest }}
|
||||
push-to-registry: true
|
||||
|
||||
- name: Extract metadata (tags, labels) for docling-serve (CPU only) quay image
|
||||
id: quay_serve_cpu_meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.QUAY_REGISTRY }}/${{ env.QUAY_DOCLING_SERVE_CPU_IMAGE_NAME }}
|
||||
|
||||
- name: Build and push docling-serve-cpu image to quay.io
|
||||
id: push-serve-cpu-quay
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ steps.quay_serve_cpu_meta.outputs.tags }}
|
||||
labels: ${{ steps.quay_serve_cpu_meta.outputs.labels }}
|
||||
platforms: linux/amd64, linux/arm64
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
file: Containerfile
|
||||
build-args: |
|
||||
--build-arg CPU_ONLY=true
|
||||
- name: Remove Local Docker Images
|
||||
run: |
|
||||
docker image prune -af
|
||||
|
||||
build_and_publish_gpu_images:
|
||||
name: Push docling-serve (with GPU support) container image to GHCR and QUAY
|
||||
runs-on: ubuntu-latest
|
||||
environment: registry-creds
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
attestations: write
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Log in to the GHCR container image registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.GHCR_REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Log in to the Quay container image registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.QUAY_REGISTRY }}
|
||||
username: ${{ secrets.QUAY_USERNAME }}
|
||||
password: ${{ secrets.QUAY_TOKEN }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-buildx-
|
||||
|
||||
- name: Extract metadata (tags, labels) for docling-serve (GPU) ghcr image
|
||||
id: ghcr_serve_gpu_meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.GHCR_REGISTRY }}/${{ env.GHCR_DOCLING_SERVE_GPU_IMAGE_NAME }}
|
||||
|
||||
- name: Build and push docling-serve (GPU) image to ghcr.io
|
||||
id: push-serve-gpu-ghcr
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ steps.ghcr_serve_gpu_meta.outputs.tags }}
|
||||
labels: ${{ steps.ghcr_serve_gpu_meta.outputs.labels }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
file: Containerfile
|
||||
build-args: |
|
||||
--build-arg CPU_ONLY=false
|
||||
|
||||
- name: Generate artifact attestation
|
||||
uses: actions/attest-build-provenance@v1
|
||||
with:
|
||||
subject-name: ${{ env.GHCR_REGISTRY }}/${{ env.GHCR_DOCLING_SERVE_GPU_IMAGE_NAME}}
|
||||
subject-digest: ${{ steps.push-serve-gpu-ghcr.outputs.digest }}
|
||||
push-to-registry: true
|
||||
|
||||
- name: Extract metadata (tags, labels) for docling-serve (GPU) quay image
|
||||
id: quay_serve_gpu_meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.QUAY_REGISTRY }}/${{ env.QUAY_DOCLING_SERVE_GPU_IMAGE_NAME }}
|
||||
|
||||
- name: Build and push docling-serve (GPU) image to quay.io
|
||||
id: push-serve-gpu-quay
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ steps.quay_serve_gpu_meta.outputs.tags }}
|
||||
labels: ${{ steps.quay_serve_gpu_meta.outputs.labels }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
file: Containerfile
|
||||
build-args: |
|
||||
--build-arg CPU_ONLY=false
|
||||
uses: ./.github/workflows/job-image.yml
|
||||
with:
|
||||
publish: true
|
||||
environment: registry-creds
|
||||
build_args: ${{ matrix.spec.build_args }}
|
||||
ghcr_image_name: ${{ matrix.spec.name }}
|
||||
quay_image_name: ${{ matrix.spec.name }}
|
||||
platforms: ${{ matrix.spec.platforms }}
|
||||
|
||||
29
.github/workflows/job-build.yml
vendored
Normal file
29
.github/workflows/job-build.yml
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
name: Run checks
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
build-package:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ['3.12']
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- name: Install uv and set the python version
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
enable-cache: true
|
||||
- name: Install dependencies
|
||||
run: uv sync --all-extras --no-extra flash-attn
|
||||
- name: Build package
|
||||
run: uv build
|
||||
- name: Check content of wheel
|
||||
run: unzip -l dist/*.whl
|
||||
- name: Store the distribution packages
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: python-package-distributions
|
||||
path: dist/
|
||||
68
.github/workflows/job-checks.yml
vendored
Normal file
68
.github/workflows/job-checks.yml
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
name: Run checks
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
py-lint:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ['3.12']
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- name: Install uv and set the python version
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
enable-cache: true
|
||||
|
||||
- name: pre-commit cache key
|
||||
run: echo "PY=$(python -VV | sha256sum | cut -d' ' -f1)" >> "$GITHUB_ENV"
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pre-commit
|
||||
key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: uv sync --frozen --all-extras --no-extra flash-attn
|
||||
|
||||
- name: Run styling check
|
||||
run: uv run pre-commit run --all-files
|
||||
|
||||
build-package:
|
||||
uses: ./.github/workflows/job-build.yml
|
||||
|
||||
test-package:
|
||||
needs:
|
||||
- build-package
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ['3.12']
|
||||
steps:
|
||||
- name: Download all the dists
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: python-package-distributions
|
||||
path: dist/
|
||||
- name: Install uv and set the python version
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
enable-cache: true
|
||||
- name: Create virtual environment
|
||||
run: uv venv
|
||||
- name: Install package
|
||||
run: uv pip install dist/*.whl
|
||||
- name: Create the server
|
||||
run: .venv/bin/python -c 'from docling_serve.app import create_app; create_app()'
|
||||
|
||||
# markdown-lint:
|
||||
# runs-on: ubuntu-latest
|
||||
# steps:
|
||||
# - uses: actions/checkout@v5
|
||||
# - name: markdownlint-cli2-action
|
||||
# uses: DavidAnson/markdownlint-cli2-action@v16
|
||||
# with:
|
||||
# globs: "**/*.md"
|
||||
232
.github/workflows/job-image.yml
vendored
Normal file
232
.github/workflows/job-image.yml
vendored
Normal file
@@ -0,0 +1,232 @@
|
||||
name: Build docling-serve container image
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
build_args:
|
||||
type: string
|
||||
description: "Extra build arguments for the build."
|
||||
default: ""
|
||||
ghcr_image_name:
|
||||
type: string
|
||||
description: "Name of the image for GHCR."
|
||||
quay_image_name:
|
||||
type: string
|
||||
description: "Name of the image Quay."
|
||||
platforms:
|
||||
type: string
|
||||
description: "Platform argument for building images."
|
||||
default: linux/amd64, linux/arm64
|
||||
publish:
|
||||
type: boolean
|
||||
description: "If true, the images will be published."
|
||||
default: false
|
||||
environment:
|
||||
type: string
|
||||
description: "GH Action environment"
|
||||
default: ""
|
||||
|
||||
env:
|
||||
GHCR_REGISTRY: ghcr.io
|
||||
QUAY_REGISTRY: quay.io
|
||||
|
||||
jobs:
|
||||
image:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
attestations: write
|
||||
id-token: write
|
||||
environment: ${{ inputs.environment }}
|
||||
|
||||
steps:
|
||||
- name: Free up space in github runner
|
||||
# Free space as indicated here : https://github.com/actions/runner-images/issues/2840#issuecomment-790492173
|
||||
run: |
|
||||
df -h
|
||||
sudo rm -rf "/usr/local/share/boost"
|
||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||
sudo rm -rf /usr/share/dotnet /opt/ghc /usr/local/lib/android /usr/local/share/powershell /usr/share/swift /usr/local/.ghcup
|
||||
# shellcheck disable=SC2046
|
||||
sudo docker rmi "$(docker image ls -aq)" >/dev/null 2>&1 || true
|
||||
df -h
|
||||
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Log in to the GHCR container image registry
|
||||
if: ${{ inputs.publish }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.GHCR_REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Log in to the Quay container image registry
|
||||
if: ${{ inputs.publish }}
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.QUAY_REGISTRY }}
|
||||
username: ${{ secrets.QUAY_USERNAME }}
|
||||
password: ${{ secrets.QUAY_TOKEN }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: /tmp/.buildx-cache
|
||||
key: ${{ runner.os }}-buildx-${{ github.sha }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-buildx-
|
||||
|
||||
- name: Extract metadata (tags, labels) for docling-serve ghcr image
|
||||
id: ghcr_meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.GHCR_REGISTRY }}/${{ inputs.ghcr_image_name }}
|
||||
|
||||
# # Local test
|
||||
# - name: Set metadata outputs for local testing ## comment out Free up space, Log in to cr, Cache Docker, Extract metadata, and quay blocks and run act
|
||||
# id: ghcr_meta
|
||||
# run: |
|
||||
# echo "tags=ghcr.io/docling-project/docling-serve:pr-123" >> $GITHUB_OUTPUT
|
||||
# echo "labels=org.opencontainers.image.source=https://github.com/docling-project/docling-serve" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build and push image to ghcr.io
|
||||
id: ghcr_push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: ${{ inputs.publish }} # set 'false' for local test
|
||||
tags: ${{ steps.ghcr_meta.outputs.tags }}
|
||||
labels: ${{ steps.ghcr_meta.outputs.labels }}
|
||||
platforms: ${{ inputs.platforms }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
file: Containerfile
|
||||
build-args: ${{ inputs.build_args }}
|
||||
##
|
||||
## This stage runs after the build, so it leverages all build cache
|
||||
##
|
||||
- name: Export built image for testing
|
||||
id: ghcr_export_built_image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: false
|
||||
load: true
|
||||
tags: ${{ env.GHCR_REGISTRY }}/${{ inputs.ghcr_image_name }}:${{ github.sha }}-test
|
||||
labels: |
|
||||
org.opencontainers.image.title=docling-serve
|
||||
org.opencontainers.image.test=true
|
||||
platforms: linux/amd64 # when 'load' is true, we can't use a list ${{ inputs.platforms }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
file: Containerfile
|
||||
build-args: ${{ inputs.build_args }}
|
||||
|
||||
- name: Test image
|
||||
if: steps.ghcr_export_built_image.outcome == 'success'
|
||||
run: |
|
||||
set -e
|
||||
|
||||
IMAGE_TAG="${{ env.GHCR_REGISTRY }}/${{ inputs.ghcr_image_name }}:${{ github.sha }}-test"
|
||||
echo "Testing local image: $IMAGE_TAG"
|
||||
|
||||
# Remove existing container if any
|
||||
docker rm -f docling-serve-test-container 2>/dev/null || true
|
||||
|
||||
echo "Starting container..."
|
||||
docker run -d -p 5001:5001 --name docling-serve-test-container "$IMAGE_TAG"
|
||||
|
||||
echo "Waiting 15s for container to boot..."
|
||||
sleep 15
|
||||
|
||||
# Health check
|
||||
echo "Checking service health..."
|
||||
for i in {1..20}; do
|
||||
HEALTH_RESPONSE=$(curl -s http://localhost:5001/health || true)
|
||||
echo "Health check response [$i]: $HEALTH_RESPONSE"
|
||||
|
||||
if echo "$HEALTH_RESPONSE" | grep -q '"status":"ok"'; then
|
||||
echo "Service is healthy!"
|
||||
|
||||
# Install pytest and dependencies
|
||||
echo "Installing pytest and dependencies..."
|
||||
pip install uv
|
||||
uv venv --allow-existing
|
||||
source .venv/bin/activate
|
||||
uv sync --all-extras --no-extra flash-attn
|
||||
|
||||
# Run pytest tests
|
||||
echo "Running tests..."
|
||||
# Test import
|
||||
python -c 'from docling_serve.app import create_app; create_app()'
|
||||
|
||||
# Run pytest and check result directly
|
||||
if ! pytest -sv -k "test_convert_url" tests/test_1-url-async.py \
|
||||
--disable-warnings; then
|
||||
echo "Tests failed!"
|
||||
docker logs docling-serve-test-container
|
||||
docker rm -f docling-serve-test-container
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Tests passed successfully!"
|
||||
break
|
||||
else
|
||||
echo "Waiting for service... [$i/20]"
|
||||
sleep 3
|
||||
fi
|
||||
done
|
||||
|
||||
# Final health check if service didn't pass earlier
|
||||
if ! echo "$HEALTH_RESPONSE" | grep -q '"status":"ok"'; then
|
||||
echo "Service did not become healthy in time."
|
||||
docker logs docling-serve-test-container
|
||||
docker rm -f docling-serve-test-container
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Cleanup
|
||||
echo "Cleaning up test container..."
|
||||
docker rm -f docling-serve-test-container
|
||||
echo "Cleaning up test image..."
|
||||
docker rmi "$IMAGE_TAG"
|
||||
|
||||
- name: Generate artifact attestation
|
||||
if: ${{ inputs.publish }}
|
||||
uses: actions/attest-build-provenance@v1
|
||||
with:
|
||||
subject-name: ${{ env.GHCR_REGISTRY }}/${{ inputs.ghcr_image_name }}
|
||||
subject-digest: ${{ steps.ghcr_push.outputs.digest }}
|
||||
push-to-registry: true
|
||||
|
||||
- name: Extract metadata (tags, labels) for docling-serve quay image
|
||||
if: ${{ inputs.publish }}
|
||||
id: quay_meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.QUAY_REGISTRY }}/${{ inputs.quay_image_name }}
|
||||
|
||||
- name: Build and push image to quay.io
|
||||
if: ${{ inputs.publish }}
|
||||
# id: push-serve-cpu-quay
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: ${{ inputs.publish }}
|
||||
tags: ${{ steps.quay_meta.outputs.tags }}
|
||||
labels: ${{ steps.quay_meta.outputs.labels }}
|
||||
platforms: ${{ inputs.platforms}}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
file: Containerfile
|
||||
build-args: ${{ inputs.build_args }}
|
||||
|
||||
- name: Remove Local Docker Images
|
||||
run: |
|
||||
docker image prune -af
|
||||
34
.github/workflows/pypi.yml
vendored
Normal file
34
.github/workflows/pypi.yml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
name: "Build and publish package"
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
|
||||
build-package:
|
||||
uses: ./.github/workflows/job-build.yml
|
||||
|
||||
build-and-publish:
|
||||
needs:
|
||||
- build-package
|
||||
runs-on: ubuntu-latest
|
||||
environment:
|
||||
name: pypi
|
||||
url: https://pypi.org/p/docling-serve # Replace <package-name> with your PyPI project name
|
||||
permissions:
|
||||
id-token: write # IMPORTANT: mandatory for trusted publishing
|
||||
steps:
|
||||
- name: Download all the dists
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: python-package-distributions
|
||||
path: dist/
|
||||
- name: Publish distribution 📦 to PyPI
|
||||
uses: pypa/gh-action-pypi-publish@release/v1
|
||||
with:
|
||||
# currently not working with reusable workflows
|
||||
attestations: false
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -1,5 +1,7 @@
|
||||
model_artifacts/
|
||||
scratch/
|
||||
.md-lint
|
||||
actionlint
|
||||
|
||||
# Created by https://www.toptal.com/developers/gitignore/api/python,macos,virtualenv,pycharm,visualstudiocode,emacs,vim,jupyternotebooks
|
||||
# Edit at https://www.toptal.com/developers/gitignore?templates=python,macos,virtualenv,pycharm,visualstudiocode,emacs,vim,jupyternotebooks
|
||||
@@ -442,3 +444,8 @@ pip-selfcheck.json
|
||||
# Makefile
|
||||
.action-lint
|
||||
.markdown-lint
|
||||
|
||||
cookies.txt
|
||||
|
||||
# Examples
|
||||
/examples/splitted_pdf/*
|
||||
@@ -2,5 +2,9 @@ config:
|
||||
line-length: false
|
||||
no-emphasis-as-header: false
|
||||
first-line-heading: false
|
||||
MD033:
|
||||
allowed_elements: ["details", "summary", "br", "a", "b", "p", "img"]
|
||||
MD024:
|
||||
siblings_only: true
|
||||
globs:
|
||||
- "**/*.md"
|
||||
|
||||
@@ -1,41 +1,39 @@
|
||||
fail_fast: true
|
||||
repos:
|
||||
- repo: local
|
||||
- repo: https://github.com/astral-sh/ruff-pre-commit
|
||||
rev: v0.9.6
|
||||
hooks:
|
||||
- id: system
|
||||
name: Black
|
||||
entry: poetry run black docling_serve tests
|
||||
pass_filenames: false
|
||||
language: system
|
||||
files: '\.py$'
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: system
|
||||
name: isort
|
||||
entry: poetry run isort docling_serve tests
|
||||
pass_filenames: false
|
||||
language: system
|
||||
files: '\.py$'
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: system
|
||||
name: flake8
|
||||
entry: poetry run flake8 docling_serve
|
||||
pass_filenames: false
|
||||
language: system
|
||||
files: '\.py$'
|
||||
# Run the Ruff formatter.
|
||||
- id: ruff-format
|
||||
name: "Ruff formatter"
|
||||
args: [--config=pyproject.toml]
|
||||
files: '^(docling_serve|tests|examples).*\.(py|ipynb)$'
|
||||
# Run the Ruff linter.
|
||||
- id: ruff
|
||||
name: "Ruff linter"
|
||||
args: [--exit-non-zero-on-fix, --fix, --config=pyproject.toml]
|
||||
files: '^(docling_serve|tests|examples).*\.(py|ipynb)$'
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: system
|
||||
name: MyPy
|
||||
entry: poetry run mypy docling_serve
|
||||
entry: uv run --no-sync mypy docling_serve
|
||||
pass_filenames: false
|
||||
language: system
|
||||
files: '\.py$'
|
||||
- repo: local
|
||||
- repo: https://github.com/errata-ai/vale
|
||||
rev: v3.12.0 # Use latest stable version
|
||||
hooks:
|
||||
- id: system
|
||||
name: Poetry check
|
||||
entry: poetry check --lock
|
||||
- id: vale
|
||||
name: vale sync
|
||||
pass_filenames: false
|
||||
language: system
|
||||
args: [sync, "--config=.github/vale.ini"]
|
||||
- id: vale
|
||||
name: Spell and Style Check with Vale
|
||||
args: ["--config=.github/vale.ini"]
|
||||
files: \.md$
|
||||
- repo: https://github.com/astral-sh/uv-pre-commit
|
||||
# uv version, https://github.com/astral-sh/uv-pre-commit/releases
|
||||
rev: 0.8.3
|
||||
hooks:
|
||||
- id: uv-lock
|
||||
|
||||
1
.python-version
Normal file
1
.python-version
Normal file
@@ -0,0 +1 @@
|
||||
3.12
|
||||
362
CHANGELOG.md
Normal file
362
CHANGELOG.md
Normal file
@@ -0,0 +1,362 @@
|
||||
## [v1.5.1](https://github.com/docling-project/docling-serve/releases/tag/v1.5.1) - 2025-09-17
|
||||
|
||||
### Fix
|
||||
|
||||
* Remove old dependencies, fixes in docling-parse and more minor dependencies upgrade ([#362](https://github.com/docling-project/docling-serve/issues/362)) ([`513ae0c`](https://github.com/docling-project/docling-serve/commit/513ae0c119b66d3b17cf9a5d371a0f7971f43be7))
|
||||
* Updates rapidocr deps ([#361](https://github.com/docling-project/docling-serve/issues/361)) ([`bde0406`](https://github.com/docling-project/docling-serve/commit/bde040661fb65c67699326cd6281c0e6232e26f2))
|
||||
|
||||
### Docling libraries included in this release:
|
||||
- docling 2.52.0
|
||||
- docling-core 2.48.1
|
||||
- docling-ibm-models 3.9.1
|
||||
- docling-jobkit 1.5.0
|
||||
- docling-mcp 1.2.0
|
||||
- docling-parse 4.5.0
|
||||
- docling-serve 1.5.1
|
||||
|
||||
## [v1.5.0](https://github.com/docling-project/docling-serve/releases/tag/v1.5.0) - 2025-09-09
|
||||
|
||||
### Feature
|
||||
|
||||
* Add chunking endpoints ([#353](https://github.com/docling-project/docling-serve/issues/353)) ([`9d6def0`](https://github.com/docling-project/docling-serve/commit/9d6def0ec8b1804ad31aa71defa17658d73d29a1))
|
||||
|
||||
### Docling libraries included in this release:
|
||||
- docling 2.46.0
|
||||
- docling 2.51.0
|
||||
- docling-core 2.47.0
|
||||
- docling-ibm-models 3.9.1
|
||||
- docling-jobkit 1.5.0
|
||||
- docling-mcp 1.2.0
|
||||
- docling-parse 4.4.0
|
||||
- docling-serve 1.5.0
|
||||
|
||||
## [v1.4.1](https://github.com/docling-project/docling-serve/releases/tag/v1.4.1) - 2025-09-08
|
||||
|
||||
### Fix
|
||||
|
||||
* Trigger fix after ci fixes ([#355](https://github.com/docling-project/docling-serve/issues/355)) ([`b0360d7`](https://github.com/docling-project/docling-serve/commit/b0360d723bff202dcf44a25a3173ec1995945fc2))
|
||||
|
||||
### Docling libraries included in this release:
|
||||
- docling 2.46.0
|
||||
- docling 2.51.0
|
||||
- docling-core 2.47.0
|
||||
- docling-ibm-models 3.9.1
|
||||
- docling-jobkit 1.4.1
|
||||
- docling-mcp 1.2.0
|
||||
- docling-parse 4.4.0
|
||||
- docling-serve 1.4.1
|
||||
|
||||
## [v1.4.0](https://github.com/docling-project/docling-serve/releases/tag/v1.4.0) - 2025-09-05
|
||||
|
||||
### Feature
|
||||
|
||||
* **docling:** Perfomance improvements in parsing, new layout model, fixes in html processing ([#352](https://github.com/docling-project/docling-serve/issues/352)) ([`d64a2a9`](https://github.com/docling-project/docling-serve/commit/d64a2a974a276c7ae3b105c448fd79f77a653d20))

### Fix

* Upgrade to latest docling version with fixes ([#335](https://github.com/docling-project/docling-serve/issues/335)) ([`e544947`](https://github.com/docling-project/docling-serve/commit/e5449472b2a3e71796f41c8a58c251d8229305c1))

### Documentation

* Add split processing example ([#303](https://github.com/docling-project/docling-serve/issues/303)) ([`0d4545a`](https://github.com/docling-project/docling-serve/commit/0d4545a65a5a941fc1fdefda57e39cfb1ea106ab))
* Document DOCLING_NUM_THREADS environment variable ([#341](https://github.com/docling-project/docling-serve/issues/341)) ([`27fdd7b`](https://github.com/docling-project/docling-serve/commit/27fdd7b85ab18b3eece428366f46dc5cf0995e38))
* Fix parameters typo ([#333](https://github.com/docling-project/docling-serve/issues/333)) ([`81f0a8d`](https://github.com/docling-project/docling-serve/commit/81f0a8ddf80a532042d550ae4568f891458b45e7))
* Describe how to use Docling MCP ([#332](https://github.com/docling-project/docling-serve/issues/332)) ([`a69cc86`](https://github.com/docling-project/docling-serve/commit/a69cc867f5a3fb76648803ca866d65cc3a75c6b8))

### Docling libraries included in this release:

- docling 2.46.0
- docling 2.51.0
- docling-core 2.47.0
- docling-ibm-models 3.9.1
- docling-jobkit 1.4.1
- docling-mcp 1.2.0
- docling-parse 4.4.0
- docling-serve 1.4.0

## [v1.3.1](https://github.com/docling-project/docling-serve/releases/tag/v1.3.1) - 2025-08-21

### Fix

* Configuration and performance fixes via upgrade of packages ([#328](https://github.com/docling-project/docling-serve/issues/328)) ([`f02dbc0`](https://github.com/docling-project/docling-serve/commit/f02dbc01449fe1caf3fb4a73c0a5f4adf8265faf))

### Documentation

* Fix parameter in api key docs ([#323](https://github.com/docling-project/docling-serve/issues/323)) ([`37fe022`](https://github.com/docling-project/docling-serve/commit/37fe02277b3e2358eced28e15b4360e7c82d3b43))

## [v1.3.0](https://github.com/docling-project/docling-serve/releases/tag/v1.3.0) - 2025-08-14

### Feature

* Add configuration option for apikey security ([#322](https://github.com/docling-project/docling-serve/issues/322)) ([`9a64410`](https://github.com/docling-project/docling-serve/commit/9a644105523d312431993ded8dd88e064550a5db))
* Add RQ engine ([#315](https://github.com/docling-project/docling-serve/issues/315)) ([`885f319`](https://github.com/docling-project/docling-serve/commit/885f319d3a3488a4090869560447437a4104f14e))
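
For illustration, a minimal sketch of calling a secured instance. The setting name `DOCLING_SERVE_API_KEY` and the `X-Api-Key` header are assumptions here (the `DOCLING_SERVE_` prefix is used by other settings in this repo); check the configuration docs for the exact names:

```bash
# Hypothetical names: DOCLING_SERVE_API_KEY and X-Api-Key are assumptions,
# not spelled out in this changelog entry; see docs/configuration.md.
DOCLING_SERVE_API_KEY=my-secret docling-serve run &

curl -X 'POST' 'http://localhost:5001/v1/convert/source' \
  -H 'X-Api-Key: my-secret' \
  -H 'Content-Type: application/json' \
  -d '{"sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}]}'
```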

### Documentation

* Example of docling-serve deployment in the RQ engine mode ([#321](https://github.com/docling-project/docling-serve/issues/321)) ([`71edf41`](https://github.com/docling-project/docling-serve/commit/71edf4184960d8664ef9da20617e2d0f91793d36))
* Handling models in docling-serve ([#319](https://github.com/docling-project/docling-serve/issues/319)) ([`6e9aa8c`](https://github.com/docling-project/docling-serve/commit/6e9aa8c759220458281c7fe4c87443ac41023eee))
* Add Gradio cache usage ([#312](https://github.com/docling-project/docling-serve/issues/312)) ([`d584895`](https://github.com/docling-project/docling-serve/commit/d584895e1108d71a0f45deadcd3c669eb0a58133))

## [v1.2.2](https://github.com/docling-project/docling-serve/releases/tag/v1.2.2) - 2025-08-13

### Fix

* Update of transformers module to 4.55.1 ([#316](https://github.com/docling-project/docling-serve/issues/316)) ([`7692eb2`](https://github.com/docling-project/docling-serve/commit/7692eb26006fd4deaa021180c99e23a1b65de506))

## [v1.2.1](https://github.com/docling-project/docling-serve/releases/tag/v1.2.1) - 2025-08-13

### Fix

* Handling of vlm model options and update deps ([#314](https://github.com/docling-project/docling-serve/issues/314)) ([`8b470cb`](https://github.com/docling-project/docling-serve/commit/8b470cba8ef500c271eb84c8368c8a1a1a5a6d6a))
* Add missing response type in sync endpoints ([#309](https://github.com/docling-project/docling-serve/issues/309)) ([`8048f45`](https://github.com/docling-project/docling-serve/commit/8048f4589a91de2b2b391ab33a326efd1b29f25b))

### Documentation

* Update readme to use v1 ([#306](https://github.com/docling-project/docling-serve/issues/306)) ([`b3058e9`](https://github.com/docling-project/docling-serve/commit/b3058e91e0c56e27110eb50f22cbdd89640bf398))
* Update deployment examples to use v1 API ([#308](https://github.com/docling-project/docling-serve/issues/308)) ([`63da9ee`](https://github.com/docling-project/docling-serve/commit/63da9eedebae3ad31d04e65635e573194e413793))
* Fix typo in v1 migration instructions ([#307](https://github.com/docling-project/docling-serve/issues/307)) ([`b15dc25`](https://github.com/docling-project/docling-serve/commit/b15dc2529f78d68a475e5221c37408c3f77d8588))

## [v1.2.0](https://github.com/docling-project/docling-serve/releases/tag/v1.2.0) - 2025-08-07

### Feature

* Workers without shared models and convert params ([#304](https://github.com/docling-project/docling-serve/issues/304)) ([`db3fdb5`](https://github.com/docling-project/docling-serve/commit/db3fdb5bc1a0ae250afd420d737abc4071a7546c))
* Add rocm image build support and fix cuda ([#292](https://github.com/docling-project/docling-serve/issues/292)) ([`fd1b987`](https://github.com/docling-project/docling-serve/commit/fd1b987e8dc174f1a6013c003dde33e9acbae39a))
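
The per-worker model handling pairs with the CLI's `--workers` option (visible in `docling_serve/__main__.py` later in this compare). A minimal sketch:

```bash
# Run two uvicorn worker processes. With multiple workers the CLI warns that
# --artifacts-path / --enable-ui are ignored and must be set via env variables.
DOCLING_SERVE_ENABLE_UI=1 docling-serve run --workers 2
```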

## [v1.1.0](https://github.com/docling-project/docling-serve/releases/tag/v1.1.0) - 2025-07-30

### Feature

* Add docling-mcp in the distribution ([#290](https://github.com/docling-project/docling-serve/issues/290)) ([`ecb1874`](https://github.com/docling-project/docling-serve/commit/ecb1874a507bef83d102e0e031e49fed34298637))
* Add 3.0 openapi endpoint ([#287](https://github.com/docling-project/docling-serve/issues/287)) ([`ec594d8`](https://github.com/docling-project/docling-serve/commit/ec594d84fe36df23e7d010a2fcf769856c43600b))
* Add new source and target ([#270](https://github.com/docling-project/docling-serve/issues/270)) ([`3771c1b`](https://github.com/docling-project/docling-serve/commit/3771c1b55403bd51966d07d8f760d5c4fbcc1760))

### Fix

* Referenced paths relative to zip root ([#289](https://github.com/docling-project/docling-serve/issues/289)) ([`1333f71`](https://github.com/docling-project/docling-serve/commit/1333f71c9c6495342b2169d574e921f828446f15))

## [v1.0.1](https://github.com/docling-project/docling-serve/releases/tag/v1.0.1) - 2025-07-21

### Fix

* Docling update v2.42.0 ([#277](https://github.com/docling-project/docling-serve/issues/277)) ([`8706706`](https://github.com/docling-project/docling-serve/commit/8706706e8797b0a06ec4baa7cf87988311be68b6))

### Documentation

* Typo in README ([#276](https://github.com/docling-project/docling-serve/issues/276)) ([`766adb2`](https://github.com/docling-project/docling-serve/commit/766adb248113c7bd5144d14b3c82929a2ad29f8e))

## [v1.0.0](https://github.com/docling-project/docling-serve/releases/tag/v1.0.0) - 2025-07-14

### Feature

* V1 api with list of sources and target ([#249](https://github.com/docling-project/docling-serve/issues/249)) ([`56e328b`](https://github.com/docling-project/docling-serve/commit/56e328baf76b4bb0476fc6ca820b52034e4f97bf))
* Use orchestrators from jobkit ([#248](https://github.com/docling-project/docling-serve/issues/248)) ([`daa924a`](https://github.com/docling-project/docling-serve/commit/daa924a77e56d063ef17347dfd8a838872a70529))

### Breaking

* v1 api with list of sources and target ([#249](https://github.com/docling-project/docling-serve/issues/249)) ([`56e328b`](https://github.com/docling-project/docling-serve/commit/56e328baf76b4bb0476fc6ca820b52034e4f97bf))
* use orchestrators from jobkit ([#248](https://github.com/docling-project/docling-serve/issues/248)) ([`daa924a`](https://github.com/docling-project/docling-serve/commit/daa924a77e56d063ef17347dfd8a838872a70529))
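
The breaking change is the request shape: the v1 API takes a list of sources (and optionally a target) instead of a single source object, as the README example later in this compare shows. A minimal before/after sketch:

```bash
# v0 shape (removed): a single http_source object posted to /convert
#   {"http_source": {"url": "https://arxiv.org/pdf/2206.01062"}}
# v1 shape: a list of sources posted to /v1/convert/source
curl -X 'POST' 'http://localhost:5001/v1/convert/source' \
  -H 'Content-Type: application/json' \
  -d '{"sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}]}'
```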

## [v0.16.1](https://github.com/docling-project/docling-serve/releases/tag/v0.16.1) - 2025-07-07

### Fix

* Upgrade deps, including docling v2.40.0 with locks in models init ([#264](https://github.com/docling-project/docling-serve/issues/264)) ([`bfde1a0`](https://github.com/docling-project/docling-serve/commit/bfde1a0991c2da53b72c4f131ff74fa10f6340de))
* Missing tesseract osd ([#263](https://github.com/docling-project/docling-serve/issues/263)) ([`eb3892e`](https://github.com/docling-project/docling-serve/commit/eb3892ee141eb2c941d580b095d8a266f2d2610c))
* Properly load models at boot ([#244](https://github.com/docling-project/docling-serve/issues/244)) ([`149a8cb`](https://github.com/docling-project/docling-serve/commit/149a8cb1c0a16c1e0b7d17f40b88b4d6e8f0109d))

### Documentation

* Fix typo ([#259](https://github.com/docling-project/docling-serve/issues/259)) ([`93b8471`](https://github.com/docling-project/docling-serve/commit/93b84712b2c6d180908a197847b52b217a7ff05f))
* Change the doc example ([#258](https://github.com/docling-project/docling-serve/issues/258)) ([`c45b937`](https://github.com/docling-project/docling-serve/commit/c45b93706466a073ab4a5c75aa8a267110873e26))
* Update typo ([#247](https://github.com/docling-project/docling-serve/issues/247)) ([`50e431f`](https://github.com/docling-project/docling-serve/commit/50e431f30fbffa33f43727417fe746d20cbb9d6b))

## [v0.16.0](https://github.com/docling-project/docling-serve/releases/tag/v0.16.0) - 2025-06-25

### Feature

* Package updates and more cuda images ([#229](https://github.com/docling-project/docling-serve/issues/229)) ([`30aca92`](https://github.com/docling-project/docling-serve/commit/30aca92298ab0d86bb4debcfcacb2dd8b9040a27))

### Documentation

* Update example resources and improve README ([#231](https://github.com/docling-project/docling-serve/issues/231)) ([`80755a7`](https://github.com/docling-project/docling-serve/commit/80755a7d5955f7d0c53df8e558fdd852dd1f5b75))

## [v0.15.0](https://github.com/docling-project/docling-serve/releases/tag/v0.15.0) - 2025-06-17

### Feature

* Use redocs and scalar as api docs ([#228](https://github.com/docling-project/docling-serve/issues/228)) ([`873d05a`](https://github.com/docling-project/docling-serve/commit/873d05aefe141c63b9c1cf53b23b4fa8c96de05d))

### Fix

* "tesserocr" instead of "tesseract_cli" in usage docs ([#223](https://github.com/docling-project/docling-serve/issues/223)) ([`196c5ce`](https://github.com/docling-project/docling-serve/commit/196c5ce42a04d77234a4212c3d9b9772d2c2073e))

## [v0.14.0](https://github.com/docling-project/docling-serve/releases/tag/v0.14.0) - 2025-06-17

### Feature

* Read supported file extensions from docling ([#214](https://github.com/docling-project/docling-serve/issues/214)) ([`524f6a8`](https://github.com/docling-project/docling-serve/commit/524f6a8997b86d2f869ca491ec8fb40585b42ca4))

### Fix

* Typo in Headline ([#220](https://github.com/docling-project/docling-serve/issues/220)) ([`d5455b7`](https://github.com/docling-project/docling-serve/commit/d5455b7f66de39ea1f8b8927b5968d2baa23ca88))

## [v0.13.0](https://github.com/docling-project/docling-serve/releases/tag/v0.13.0) - 2025-06-04

### Feature

* Upgrade docling to 2.36 ([#212](https://github.com/docling-project/docling-serve/issues/212)) ([`ffea347`](https://github.com/docling-project/docling-serve/commit/ffea34732b24fdd438fabd6df02d3d9ce66b4534))

## [v0.12.0](https://github.com/docling-project/docling-serve/releases/tag/v0.12.0) - 2025-06-03

### Feature

* Export annotations in markdown and html (Docling upgrade) ([#202](https://github.com/docling-project/docling-serve/issues/202)) ([`c4c41f1`](https://github.com/docling-project/docling-serve/commit/c4c41f16dff83c5d2a0b8a4c625b5de19b36b7c5))

### Fix

* Processing complex params in multipart-form ([#210](https://github.com/docling-project/docling-serve/issues/210)) ([`7066f35`](https://github.com/docling-project/docling-serve/commit/7066f3520a88c07df1c80a0cc6c4339eaac4d6a7))

### Documentation

* Add openshift replicasets examples ([#209](https://github.com/docling-project/docling-serve/issues/209)) ([`6a8190c`](https://github.com/docling-project/docling-serve/commit/6a8190c315792bd1e0e2b0af310656baaa5551e5))

## [v0.11.0](https://github.com/docling-project/docling-serve/releases/tag/v0.11.0) - 2025-05-23

### Feature

* Page break placeholder in markdown exports options ([#194](https://github.com/docling-project/docling-serve/issues/194)) ([`32b8a80`](https://github.com/docling-project/docling-serve/commit/32b8a809f348bf9fbde657f93589a56935d3749d))
* Clear results registry ([#192](https://github.com/docling-project/docling-serve/issues/192)) ([`de002df`](https://github.com/docling-project/docling-serve/commit/de002dfcdc111c942a08b156c84b7fa22b3fbaf3))
* Upgrade to Docling 2.33.0 ([#198](https://github.com/docling-project/docling-serve/issues/198)) ([`abe5aa0`](https://github.com/docling-project/docling-serve/commit/abe5aa03f54d44ecf5c6d76e3258028997a53e68))
* Api to trigger offloading the models ([#188](https://github.com/docling-project/docling-serve/issues/188)) ([`00be428`](https://github.com/docling-project/docling-serve/commit/00be4284904d55b78c75c5475578ef11c2ade94c))
* Figure annotations @ docling components 0.0.7 ([#181](https://github.com/docling-project/docling-serve/issues/181)) ([`3ff1b2f`](https://github.com/docling-project/docling-serve/commit/3ff1b2f9834aca37472a895a0e3da47560457d77))

### Fix

* Usage of hashlib for FIPS ([#171](https://github.com/docling-project/docling-serve/issues/171)) ([`8406fb9`](https://github.com/docling-project/docling-serve/commit/8406fb9b59d83247b8379974cabed497703dfc4d))

### Documentation

* Example and instructions on how to load model weights to persistent volume ([#197](https://github.com/docling-project/docling-serve/issues/197)) ([`3f090b7`](https://github.com/docling-project/docling-serve/commit/3f090b7d15eaf696611d89bbbba5b98569610828))
* Async api usage and fixes ([#195](https://github.com/docling-project/docling-serve/issues/195)) ([`21c1791`](https://github.com/docling-project/docling-serve/commit/21c1791e427f5b1946ed46c68dfda03c957dca8f))

## [v0.10.1](https://github.com/docling-project/docling-serve/releases/tag/v0.10.1) - 2025-04-30

### Fix

* Avoid missing specialized keys in the options hash ([#166](https://github.com/docling-project/docling-serve/issues/166)) ([`36787bc`](https://github.com/docling-project/docling-serve/commit/36787bc0616356a6199da618d8646de51636b34e))
* Allow users to set the area threshold for picture descriptions ([#165](https://github.com/docling-project/docling-serve/issues/165)) ([`509f488`](https://github.com/docling-project/docling-serve/commit/509f4889f8ed4c0f0ce25bec4126ef1f1199797c))
* Expose max wait time in sync endpoints ([#164](https://github.com/docling-project/docling-serve/issues/164)) ([`919cf5c`](https://github.com/docling-project/docling-serve/commit/919cf5c0414f2f11eb8012f451fed7a8f582b7ad))
* Add flash-attn for cuda images ([#161](https://github.com/docling-project/docling-serve/issues/161)) ([`35c2630`](https://github.com/docling-project/docling-serve/commit/35c2630c613cf229393fc67b6938152b063ff498))

## [v0.10.0](https://github.com/docling-project/docling-serve/releases/tag/v0.10.0) - 2025-04-28

### Feature

* Add support for file upload and return as file in async endpoints ([#152](https://github.com/docling-project/docling-serve/issues/152)) ([`c65f3c6`](https://github.com/docling-project/docling-serve/commit/c65f3c654c76c6b64b6aada1f0a153d74789d629))
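
A minimal sketch of the upload side of this feature. The `/v1/convert/file` path and the `files` multipart field name are assumptions here, not spelled out in this entry; verify against the API docs of a running server:

```bash
# Hypothetical endpoint/field names; check /docs on a running instance.
curl -X 'POST' 'http://localhost:5001/v1/convert/file' \
  -F 'files=@document.pdf;type=application/pdf'
```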

### Documentation

* Fix new default pdf_backend ([#158](https://github.com/docling-project/docling-serve/issues/158)) ([`829effe`](https://github.com/docling-project/docling-serve/commit/829effec1a1b80320ccaf2c501be8015169b6fa3))
* Fixing small typo in docs ([#155](https://github.com/docling-project/docling-serve/issues/155)) ([`14bafb2`](https://github.com/docling-project/docling-serve/commit/14bafb26286b94f80b56846c50d6e9a6d99a9763))

## [v0.9.0](https://github.com/docling-project/docling-serve/releases/tag/v0.9.0) - 2025-04-25

### Feature

* Expose picture description options ([#148](https://github.com/docling-project/docling-serve/issues/148)) ([`4c9571a`](https://github.com/docling-project/docling-serve/commit/4c9571a052d5ec0044e49225bc5615e13cdb0a56))
* Add parameters for Kubeflow pipeline engine (WIP) ([#107](https://github.com/docling-project/docling-serve/issues/107)) ([`26bef5b`](https://github.com/docling-project/docling-serve/commit/26bef5bec060f0afd8d358816b68c3f2c0dd4bc2))

### Fix

* Produce image artifacts in referenced mode ([#151](https://github.com/docling-project/docling-serve/issues/151)) ([`71c5fae`](https://github.com/docling-project/docling-serve/commit/71c5fae505366459fd481d2ecdabc5ebed94d49c))

### Documentation

* Vlm and picture description options ([#149](https://github.com/docling-project/docling-serve/issues/149)) ([`91956cb`](https://github.com/docling-project/docling-serve/commit/91956cbf4e91cf82bb4d54ace397cdbbfaf594ba))

## [v0.8.0](https://github.com/docling-project/docling-serve/releases/tag/v0.8.0) - 2025-04-22

### Feature

* Add option for vlm pipeline ([#143](https://github.com/docling-project/docling-serve/issues/143)) ([`ee89ee4`](https://github.com/docling-project/docling-serve/commit/ee89ee4daee5e916bd6a3bdb452f78934cd03f60))
* Expose more conversion options ([#142](https://github.com/docling-project/docling-serve/issues/142)) ([`6b3d281`](https://github.com/docling-project/docling-serve/commit/6b3d281f02905c195ab75f25bb39f5c4d4e7b680))
* **UI:** Change UI to use async endpoints ([#131](https://github.com/docling-project/docling-serve/issues/131)) ([`b598872`](https://github.com/docling-project/docling-serve/commit/b598872e5c48928ac44417a11bb7acc0e5c3f0c6))

### Fix

* **UI:** Use https when calling the api ([#139](https://github.com/docling-project/docling-serve/issues/139)) ([`57f9073`](https://github.com/docling-project/docling-serve/commit/57f9073bc0daf72428b068ea28e2bec7cd76c37b))
* Fix permissions in docker image ([#136](https://github.com/docling-project/docling-serve/issues/136)) ([`c1ce471`](https://github.com/docling-project/docling-serve/commit/c1ce4719c933179ba3c59d73d0584853bbd6fa6a))
* Picture caption visuals ([#129](https://github.com/docling-project/docling-serve/issues/129)) ([`5dfb75d`](https://github.com/docling-project/docling-serve/commit/5dfb75d3b9a7022d1daad12edbb8ec7bbf9aa264))

### Documentation

* Fix required permissions for oauth2-proxy requests ([#141](https://github.com/docling-project/docling-serve/issues/141)) ([`087417e`](https://github.com/docling-project/docling-serve/commit/087417e5c2387d4ed95500222058f34d8a8702aa))
* Update deployment examples ([#135](https://github.com/docling-project/docling-serve/issues/135)) ([`525a43f`](https://github.com/docling-project/docling-serve/commit/525a43ff6f04b7cc80f9dd6a0e653a8d8c4ab317))
* Fix image tag ([#124](https://github.com/docling-project/docling-serve/issues/124)) ([`420162e`](https://github.com/docling-project/docling-serve/commit/420162e674cc38b4c3c13673ffbee4c20a1b15f1))

## [v0.7.0](https://github.com/docling-project/docling-serve/releases/tag/v0.7.0) - 2025-03-31

### Feature

* Expose TLS settings and example deploy with oauth-proxy ([#112](https://github.com/docling-project/docling-serve/issues/112)) ([`7a0faba`](https://github.com/docling-project/docling-serve/commit/7a0fabae07020c2659dbb22c3b0359909051a74c))
* Offline static files ([#109](https://github.com/docling-project/docling-serve/issues/109)) ([`68772bb`](https://github.com/docling-project/docling-serve/commit/68772bb6f0a87b71094a08ff851f5754c6ca6163))
* Update to Docling 2.28 ([#106](https://github.com/docling-project/docling-serve/issues/106)) ([`20ec87a`](https://github.com/docling-project/docling-serve/commit/20ec87a63a99145bc0ad7931549af8a0c30db641))
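
The TLS settings surface as `--ssl-certfile` and `--ssl-keyfile` options on the CLI shown later in this compare (see `docling_serve/__main__.py`). A minimal sketch, with placeholder paths:

```bash
# Serve over HTTPS using a local certificate/key pair (paths are placeholders).
docling-serve run --ssl-certfile ./cert.pem --ssl-keyfile ./key.pem
```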

### Fix

* Move ARGs to prevent cache invalidation ([#104](https://github.com/docling-project/docling-serve/issues/104)) ([`e30f458`](https://github.com/docling-project/docling-serve/commit/e30f458923d34c169db7d5a5c296848716e8cac4))

## [v0.6.0](https://github.com/docling-project/docling-serve/releases/tag/v0.6.0) - 2025-03-17

### Feature

* Expose options for new features ([#92](https://github.com/docling-project/docling-serve/issues/92)) ([`ec57b52`](https://github.com/docling-project/docling-serve/commit/ec57b528ed3f8e7b9604ff4cdf06da3d52c714dd))

### Fix

* Allow changes in CORS settings ([#100](https://github.com/docling-project/docling-serve/issues/100)) ([`422c402`](https://github.com/docling-project/docling-serve/commit/422c402bab7f05e46274ede11f234a19a62e093e))
* Avoid exploding options cache using lru and expose size parameter ([#101](https://github.com/docling-project/docling-serve/issues/101)) ([`ea09028`](https://github.com/docling-project/docling-serve/commit/ea090288d3eec4ea8fbdcd32a6a497a99c89189d))
* Increase timeout_keep_alive and allow parameter changes ([#98](https://github.com/docling-project/docling-serve/issues/98)) ([`07c48ed`](https://github.com/docling-project/docling-serve/commit/07c48edd5d9437219d9623e3d05bc5166c5bb85a))
* Add warning when using incompatible parameters ([#99](https://github.com/docling-project/docling-serve/issues/99)) ([`a212547`](https://github.com/docling-project/docling-serve/commit/a212547d28d6588c65e52000dc7bc04f3f77e69e))
* **ui:** Use --port parameter and avoid failing when image is not found ([#97](https://github.com/docling-project/docling-serve/issues/97)) ([`c76daac`](https://github.com/docling-project/docling-serve/commit/c76daac70c87da412f791666881e48b74688b060))

### Documentation

* Simplify README and move details to docs ([#102](https://github.com/docling-project/docling-serve/issues/102)) ([`fd8e40a`](https://github.com/docling-project/docling-serve/commit/fd8e40a00849771263d9b75b9a56f6caeccb8517))

## [v0.5.1](https://github.com/docling-project/docling-serve/releases/tag/v0.5.1) - 2025-03-10

### Fix

* Submodules in wheels ([#85](https://github.com/docling-project/docling-serve/issues/85)) ([`a92ad48`](https://github.com/docling-project/docling-serve/commit/a92ad48b287bfcb134011dc0fc3f91ee04e067ee))

## [v0.5.0](https://github.com/docling-project/docling-serve/releases/tag/v0.5.0) - 2025-03-07

### Feature

* Async api ([#60](https://github.com/docling-project/docling-serve/issues/60)) ([`82f8900`](https://github.com/docling-project/docling-serve/commit/82f890019745859699c1b01f9ccfb64cb7e37906))
* Display version in fastapi docs ([#78](https://github.com/docling-project/docling-serve/issues/78)) ([`ed851c9`](https://github.com/docling-project/docling-serve/commit/ed851c95fee5f59305ddc3dcd5c09efce618470b))
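
A sketch of the submit/poll/fetch pattern the async API introduced. The route names below follow the current v1 API and are assumptions here (the async-usage docs referenced earlier in this changelog have the authoritative paths):

```bash
# Hypothetical route names for the poll-based flow; check /docs for exact paths.
TASK_ID=$(curl -s -X 'POST' 'http://localhost:5001/v1/convert/source/async' \
  -H 'Content-Type: application/json' \
  -d '{"sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}]}' \
  | python3 -c 'import json,sys; print(json.load(sys.stdin)["task_id"])')

curl -s "http://localhost:5001/v1/status/poll/${TASK_ID}"   # poll until success
curl -s "http://localhost:5001/v1/result/${TASK_ID}"        # fetch the result
```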

### Fix

* Remove uv from image, merge ARG and ENV declarations ([#57](https://github.com/docling-project/docling-serve/issues/57)) ([`c95db36`](https://github.com/docling-project/docling-serve/commit/c95db3643807a4dfb96d93c8e10d6eb486c49a30))
* **docs:** Remove comma in convert/source curl example ([#73](https://github.com/docling-project/docling-serve/issues/73)) ([`05df073`](https://github.com/docling-project/docling-serve/commit/05df0735d35a589bdc2a11fcdd764a10f700cb6f))

## [v0.4.0](https://github.com/docling-project/docling-serve/releases/tag/v0.4.0) - 2025-02-26

### Feature

* New container images ([#68](https://github.com/docling-project/docling-serve/issues/68)) ([`7e6d9cd`](https://github.com/docling-project/docling-serve/commit/7e6d9cdef398df70a5b4d626aeb523c428c10d56))
* Render DoclingDocument with npm docling-components in the example UI ([#65](https://github.com/docling-project/docling-serve/issues/65)) ([`c430d9b`](https://github.com/docling-project/docling-serve/commit/c430d9b1a162ab29104d86ebaa1ac5a5488b1f09))

## [v0.3.0](https://github.com/docling-project/docling-serve/releases/tag/v0.3.0) - 2025-02-19

### Feature

* Add new docling-serve cli ([#50](https://github.com/docling-project/docling-serve/issues/50)) ([`ec33a61`](https://github.com/docling-project/docling-serve/commit/ec33a61faa7846b9b7998fbf557ebe39a3b800f6))

### Fix

* Set DOCLING_SERVE_ARTIFACTS_PATH in images ([#53](https://github.com/docling-project/docling-serve/issues/53)) ([`4877248`](https://github.com/docling-project/docling-serve/commit/487724836896576ca4f98e84abf15fd1c383bec8))
* Set root UI path when behind proxy ([#38](https://github.com/docling-project/docling-serve/issues/38)) ([`c64a450`](https://github.com/docling-project/docling-serve/commit/c64a450bf9ba9947ab180e92bef2763ff710b210))
* Support python 3.13 and docling updates and switch to uv ([#48](https://github.com/docling-project/docling-serve/issues/48)) ([`ae3b490`](https://github.com/docling-project/docling-serve/commit/ae3b4906f1c0829b1331ea491f3518741cabff71))

--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,13 +3,13 @@
 Our project welcomes external contributions. If you have an itch, please feel
 free to scratch it.
 
-To contribute code or documentation, please submit a [pull request](https://github.com/DS4SD/docling-serve/pulls).
+To contribute code or documentation, please submit a [pull request](https://github.com/docling-project/docling-serve/pulls).
 
 A good way to familiarize yourself with the codebase and contribution process is
-to look for and tackle low-hanging fruit in the [issue tracker](https://github.com/DS4SD/docling-serve/issues).
+to look for and tackle low-hanging fruit in the [issue tracker](https://github.com/docling-project/docling-serve/issues).
 Before embarking on a more ambitious contribution, please quickly [get in touch](#communication) with us.
 
-For general questions or support requests, please refer to the [discussion section](https://github.com/DS4SD/docling-serve/discussions).
+For general questions or support requests, please refer to the [discussion section](https://github.com/docling-project/docling-serve/discussions).
 
 **Note: We appreciate your effort, and want to avoid a situation where a contribution
 requires extensive rework (by you or by us), sits in backlog for a long time, or
@@ -17,14 +17,14 @@ cannot be accepted at all!**
 
 ### Proposing new features
 
-If you would like to implement a new feature, please [raise an issue](https://github.com/DS4SD/docling-serve/issues)
+If you would like to implement a new feature, please [raise an issue](https://github.com/docling-project/docling-serve/issues)
 before sending a pull request so the feature can be discussed. This is to avoid
 you wasting your valuable time working on a feature that the project developers
 are not interested in accepting into the code base.
 
 ### Fixing bugs
 
-If you would like to fix a bug, please [raise an issue](https://github.com/DS4SD/docling-serve/issues) before sending a
+If you would like to fix a bug, please [raise an issue](https://github.com/docling-project/docling-serve/issues) before sending a
 pull request so it can be tracked.
 
 ### Merge approval
@@ -73,7 +73,7 @@ git commit -s
 
 ## Communication
 
-Please feel free to connect with us using the [discussion section](https://github.com/DS4SD/docling-serve/discussions).
+Please feel free to connect with us using the [discussion section](https://github.com/docling-project/docling-serve/discussions).
 
 ## Developing
 
@@ -142,8 +142,7 @@ poetry add NAME
 
 We use the following tools to enforce code style:
 
-- iSort, to sort imports
-- Black, to format code
+- ruff, to sort imports and format code
 
 We run a series of checks on the code base on every commit, using `pre-commit`. To install the hooks, run:
 
@@ -157,4 +156,4 @@ To run the checks on-demand, run:
 pre-commit run --all-files
 ```
 
-Note: Checks like `Black` and `isort` will "fail" if they modify files. This is because `pre-commit` doesn't like to see files modified by their Hooks. In these cases, `git add` the modified files and `git commit` again.
+Note: Formatting checks like `ruff` will "fail" if they modify files. This is because `pre-commit` doesn't like to see files modified by their Hooks. In these cases, `git add` the modified files and `git commit` again.

--- a/Containerfile
+++ b/Containerfile
@@ -1,32 +1,80 @@
-FROM python:3.11-slim-bookworm
-
-ARG CPU_ONLY=false
-WORKDIR /docling-serve
-
-RUN apt-get update \
-    && apt-get install -y libgl1 libglib2.0-0 curl wget git \
-    && apt-get clean
-
-RUN pip install --no-cache-dir poetry
-
-COPY pyproject.toml poetry.lock README.md /docling-serve/
-
-RUN if [ "$CPU_ONLY" = "true" ]; then \
-    poetry install --no-root --with cpu; \
-    else \
-    poetry install --no-root; \
-    fi
-
-ENV HF_HOME=/tmp/
-ENV TORCH_HOME=/tmp/
-
-RUN poetry run python -c 'from docling.pipeline.standard_pdf_pipeline import StandardPdfPipeline; artifacts_path = StandardPdfPipeline.download_models_hf(force=True);'
-
-# On container environments, always set a thread budget to avoid undesired thread congestion.
-ENV OMP_NUM_THREADS=4
-
-COPY ./docling_serve /docling-serve/docling_serve
-
-EXPOSE 5001
-
-CMD ["poetry", "run", "uvicorn", "--port", "5001", "--host", "0.0.0.0", "docling_serve.app:app"]
+ARG BASE_IMAGE=quay.io/sclorg/python-312-c9s:c9s
+
+ARG UV_VERSION=0.8.3
+
+ARG UV_SYNC_EXTRA_ARGS=""
+
+FROM ${BASE_IMAGE} AS docling-base
+
+###################################################################################################
+# OS Layer                                                                                        #
+###################################################################################################
+
+USER 0
+
+RUN --mount=type=bind,source=os-packages.txt,target=/tmp/os-packages.txt \
+    dnf -y install --best --nodocs --setopt=install_weak_deps=False dnf-plugins-core && \
+    dnf config-manager --best --nodocs --setopt=install_weak_deps=False --save && \
+    dnf config-manager --enable crb && \
+    dnf -y update && \
+    dnf install -y $(cat /tmp/os-packages.txt) && \
+    dnf -y clean all && \
+    rm -rf /var/cache/dnf
+
+RUN /usr/bin/fix-permissions /opt/app-root/src/.cache
+
+ENV TESSDATA_PREFIX=/usr/share/tesseract/tessdata/
+
+FROM ghcr.io/astral-sh/uv:${UV_VERSION} AS uv_stage
+
+###################################################################################################
+# Docling layer                                                                                   #
+###################################################################################################
+
+FROM docling-base
+
+USER 1001
+
+WORKDIR /opt/app-root/src
+
+ENV \
+    OMP_NUM_THREADS=4 \
+    LANG=en_US.UTF-8 \
+    LC_ALL=en_US.UTF-8 \
+    PYTHONIOENCODING=utf-8 \
+    UV_COMPILE_BYTECODE=1 \
+    UV_LINK_MODE=copy \
+    UV_PROJECT_ENVIRONMENT=/opt/app-root \
+    DOCLING_SERVE_ARTIFACTS_PATH=/opt/app-root/src/.cache/docling/models
+
+ARG UV_SYNC_EXTRA_ARGS
+
+RUN --mount=from=uv_stage,source=/uv,target=/bin/uv \
+    --mount=type=cache,target=/opt/app-root/src/.cache/uv,uid=1001 \
+    --mount=type=bind,source=uv.lock,target=uv.lock \
+    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
+    umask 002 && \
+    UV_SYNC_ARGS="--frozen --no-install-project --no-dev --all-extras" && \
+    uv sync ${UV_SYNC_ARGS} ${UV_SYNC_EXTRA_ARGS} --no-extra flash-attn && \
+    FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE uv sync ${UV_SYNC_ARGS} ${UV_SYNC_EXTRA_ARGS} --no-build-isolation-package=flash-attn
+
+ARG MODELS_LIST="layout tableformer picture_classifier easyocr"
+
+RUN echo "Downloading models..." && \
+    HF_HUB_DOWNLOAD_TIMEOUT="90" \
+    HF_HUB_ETAG_TIMEOUT="90" \
+    docling-tools models download -o "${DOCLING_SERVE_ARTIFACTS_PATH}" ${MODELS_LIST} && \
+    chown -R 1001:0 ${DOCLING_SERVE_ARTIFACTS_PATH} && \
+    chmod -R g=u ${DOCLING_SERVE_ARTIFACTS_PATH}
+
+COPY --chown=1001:0 ./docling_serve ./docling_serve
+
+RUN --mount=from=uv_stage,source=/uv,target=/bin/uv \
+    --mount=type=cache,target=/opt/app-root/src/.cache/uv,uid=1001 \
+    --mount=type=bind,source=uv.lock,target=uv.lock \
+    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
+    umask 002 && uv sync --frozen --no-dev --all-extras ${UV_SYNC_EXTRA_ARGS}
+
+EXPOSE 5001
+
+CMD ["docling-serve", "run"]

--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -1,11 +1,11 @@
 # MAINTAINERS
 
-- Christoph Auer - [@cau-git](https://github.com/cau-git)
-- Michele Dolfi - [@dolfim-ibm](https://github.com/dolfim-ibm)
-- Maxim Lysak - [@maxmnemonic](https://github.com/maxmnemonic)
-- Nikos Livathinos - [@nikos-livathinos](https://github.com/nikos-livathinos)
-- Ahmed Nassar - [@nassarofficial](https://github.com/nassarofficial)
-- Panos Vagenas - [@vagenas](https://github.com/vagenas)
-- Peter Staar - [@PeterStaar-IBM](https://github.com/PeterStaar-IBM)
+- Christoph Auer - [`@cau-git`](https://github.com/cau-git)
+- Michele Dolfi - [`@dolfim-ibm`](https://github.com/dolfim-ibm)
+- Maxim Lysak - [`@maxmnemonic`](https://github.com/maxmnemonic)
+- Nikos Livathinos - [`@nikos-livathinos`](https://github.com/nikos-livathinos)
+- Ahmed Nassar - [`@nassarofficial`](https://github.com/nassarofficial)
+- Panos Vagenas - [`@vagenas`](https://github.com/vagenas)
+- Peter Staar - [`@PeterStaar-IBM`](https://github.com/PeterStaar-IBM)
 
 Maintainers can be contacted at [deepsearch-core@zurich.ibm.com](mailto:deepsearch-core@zurich.ibm.com).

--- a/Makefile
+++ b/Makefile
@@ -17,6 +17,7 @@ else
 endif
 
 TAG=$(shell git rev-parse HEAD)
+BRANCH_TAG=$(shell git rev-parse --abbrev-ref HEAD)
 
 action-lint-file:
 	$(CMD_PREFIX) touch .action-lint
@@ -24,19 +25,47 @@ action-lint-file:
 md-lint-file:
 	$(CMD_PREFIX) touch .markdown-lint
 
-.PHONY: docling-serve-cpu-image
-docling-serve-cpu-image: Containerfile ## Build docling-serve "cpu only" continaer image
-	$(ECHO_PREFIX) printf " %-12s Containerfile\n" "[docling-serve CPU ONLY]"
-	$(CMD_PREFIX) docker build --build-arg CPU_ONLY=true -f Containerfile --platform linux/amd64 -t ghcr.io/ds4sd/docling-serve-cpu:$(TAG) .
-	$(CMD_PREFIX) docker tag ghcr.io/ds4sd/docling-serve-cpu:$(TAG) ghcr.io/ds4sd/docling-serve-cpu:main
-	$(CMD_PREFIX) docker tag ghcr.io/ds4sd/docling-serve-cpu:$(TAG) quay.io/ds4sd/docling-serve-cpu:main
-
-.PHONY: docling-serve-gpu-image
-docling-serve-gpu-image: Containerfile ## Build docling-serve continaer image with GPU support
-	$(ECHO_PREFIX) printf " %-12s Containerfile\n" "[docling-serve with GPU]"
-	$(CMD_PREFIX) docker build --build-arg CPU_ONLY=false -f Containerfile --platform linux/amd64 -t ghcr.io/ds4sd/docling-serve:$(TAG) .
-	$(CMD_PREFIX) docker tag ghcr.io/ds4sd/docling-serve:$(TAG) ghcr.io/ds4sd/docling-serve:main
-	$(CMD_PREFIX) docker tag ghcr.io/ds4sd/docling-serve:$(TAG) quay.io/ds4sd/docling-serve:main
+.PHONY: docling-serve-image
+docling-serve-image: Containerfile ## Build docling-serve container image
+	$(ECHO_PREFIX) printf " %-12s Containerfile\n" "[docling-serve]"
+	$(CMD_PREFIX) docker build --load -f Containerfile -t ghcr.io/docling-project/docling-serve:$(TAG) .
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve:$(TAG) ghcr.io/docling-project/docling-serve:$(BRANCH_TAG)
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve:$(TAG) quay.io/docling-project/docling-serve:$(BRANCH_TAG)
+
+.PHONY: docling-serve-cpu-image
+docling-serve-cpu-image: Containerfile ## Build docling-serve "cpu only" container image
+	$(ECHO_PREFIX) printf " %-12s Containerfile\n" "[docling-serve CPU]"
+	$(CMD_PREFIX) docker build --load --build-arg "UV_SYNC_EXTRA_ARGS=--no-group pypi --group cpu --no-extra flash-attn" -f Containerfile -t ghcr.io/docling-project/docling-serve-cpu:$(TAG) .
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve-cpu:$(TAG) ghcr.io/docling-project/docling-serve-cpu:$(BRANCH_TAG)
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve-cpu:$(TAG) quay.io/docling-project/docling-serve-cpu:$(BRANCH_TAG)
+
+.PHONY: docling-serve-cu124-image
+docling-serve-cu124-image: Containerfile ## Build docling-serve container image with CUDA 12.4 support
+	$(ECHO_PREFIX) printf " %-12s Containerfile\n" "[docling-serve with Cuda 12.4]"
+	$(CMD_PREFIX) docker build --load --build-arg "UV_SYNC_EXTRA_ARGS=--no-group pypi --group cu124" -f Containerfile --platform linux/amd64 -t ghcr.io/docling-project/docling-serve-cu124:$(TAG) .
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve-cu124:$(TAG) ghcr.io/docling-project/docling-serve-cu124:$(BRANCH_TAG)
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve-cu124:$(TAG) quay.io/docling-project/docling-serve-cu124:$(BRANCH_TAG)
+
+.PHONY: docling-serve-cu126-image
+docling-serve-cu126-image: Containerfile ## Build docling-serve container image with CUDA 12.6 support
+	$(ECHO_PREFIX) printf " %-12s Containerfile\n" "[docling-serve with Cuda 12.6]"
+	$(CMD_PREFIX) docker build --load --build-arg "UV_SYNC_EXTRA_ARGS=--no-group pypi --group cu126" -f Containerfile --platform linux/amd64 -t ghcr.io/docling-project/docling-serve-cu126:$(TAG) .
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve-cu126:$(TAG) ghcr.io/docling-project/docling-serve-cu126:$(BRANCH_TAG)
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve-cu126:$(TAG) quay.io/docling-project/docling-serve-cu126:$(BRANCH_TAG)
+
+.PHONY: docling-serve-cu128-image
+docling-serve-cu128-image: Containerfile ## Build docling-serve container image with CUDA 12.8 support
+	$(ECHO_PREFIX) printf " %-12s Containerfile\n" "[docling-serve with Cuda 12.8]"
+	$(CMD_PREFIX) docker build --load --build-arg "UV_SYNC_EXTRA_ARGS=--no-group pypi --group cu128" -f Containerfile --platform linux/amd64 -t ghcr.io/docling-project/docling-serve-cu128:$(TAG) .
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve-cu128:$(TAG) ghcr.io/docling-project/docling-serve-cu128:$(BRANCH_TAG)
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve-cu128:$(TAG) quay.io/docling-project/docling-serve-cu128:$(BRANCH_TAG)
+
+.PHONY: docling-serve-rocm-image
+docling-serve-rocm-image: Containerfile ## Build docling-serve container image with ROCm support
+	$(ECHO_PREFIX) printf " %-12s Containerfile\n" "[docling-serve with ROCm 6.3]"
+	$(CMD_PREFIX) docker build --load --build-arg "UV_SYNC_EXTRA_ARGS=--no-group pypi --group rocm --no-extra flash-attn" -f Containerfile --platform linux/amd64 -t ghcr.io/docling-project/docling-serve-rocm:$(TAG) .
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve-rocm:$(TAG) ghcr.io/docling-project/docling-serve-rocm:$(BRANCH_TAG)
+	$(CMD_PREFIX) docker tag ghcr.io/docling-project/docling-serve-rocm:$(TAG) quay.io/docling-project/docling-serve-rocm:$(BRANCH_TAG)
 
 .PHONY: action-lint
 action-lint: .action-lint ## Lint GitHub Action workflows
@@ -59,17 +88,50 @@ action-lint: .action-lint ## Lint GitHub Action workflows
 md-lint: .md-lint ## Lint markdown files
 .md-lint: $(wildcard */**/*.md) | md-lint-file
 	$(ECHO_PREFIX) printf " %-12s ./...\n" "[MD LINT]"
-	$(CMD_PREFIX) docker run --rm -v $$(pwd):/workdir davidanson/markdownlint-cli2:v0.14.0 "**/*.md"
+	$(CMD_PREFIX) docker run --rm -v $$(pwd):/workdir davidanson/markdownlint-cli2:v0.16.0 "**/*.md" "#.venv"
 	$(CMD_PREFIX) touch $@
 
 .PHONY: py-Lint
 py-lint: ## Lint Python files
 	$(ECHO_PREFIX) printf " %-12s ./...\n" "[PY LINT]"
-	$(CMD_PREFIX) if ! which poetry $(PIPE_DEV_NULL) ; then \
-		echo "Please install poetry." ; \
-		echo "pip install poetry" ; \
+	$(CMD_PREFIX) if ! which uv $(PIPE_DEV_NULL) ; then \
+		echo "Please install uv." ; \
 		exit 1 ; \
 	fi
-	$(CMD_PREFIX) poetry install --all-extras
-	$(CMD_PREFIX) poetry run pre-commit run --all-files
+	$(CMD_PREFIX) uv sync --extra ui
+	$(CMD_PREFIX) uv run pre-commit run --all-files
+
+.PHONY: run-docling-cpu
+run-docling-cpu: ## Run the docling-serve container with CPU support and assign a container name
+	$(ECHO_PREFIX) printf " %-12s Removing existing container if it exists...\n" "[CLEANUP]"
+	$(CMD_PREFIX) docker rm -f docling-serve-cpu 2>/dev/null || true
+	$(ECHO_PREFIX) printf " %-12s Running docling-serve container with CPU support on port 5001...\n" "[RUN CPU]"
+	$(CMD_PREFIX) docker run -it --name docling-serve-cpu -p 5001:5001 ghcr.io/docling-project/docling-serve-cpu:main
+
+.PHONY: run-docling-cu124
+run-docling-cu124: ## Run the docling-serve container with GPU support and assign a container name
+	$(ECHO_PREFIX) printf " %-12s Removing existing container if it exists...\n" "[CLEANUP]"
+	$(CMD_PREFIX) docker rm -f docling-serve-cu124 2>/dev/null || true
+	$(ECHO_PREFIX) printf " %-12s Running docling-serve container with GPU support on port 5001...\n" "[RUN CUDA 12.4]"
+	$(CMD_PREFIX) docker run -it --name docling-serve-cu124 -p 5001:5001 ghcr.io/docling-project/docling-serve-cu124:main
+
+.PHONY: run-docling-cu126
+run-docling-cu126: ## Run the docling-serve container with GPU support and assign a container name
+	$(ECHO_PREFIX) printf " %-12s Removing existing container if it exists...\n" "[CLEANUP]"
+	$(CMD_PREFIX) docker rm -f docling-serve-cu126 2>/dev/null || true
+	$(ECHO_PREFIX) printf " %-12s Running docling-serve container with GPU support on port 5001...\n" "[RUN CUDA 12.6]"
+	$(CMD_PREFIX) docker run -it --name docling-serve-cu126 -p 5001:5001 ghcr.io/docling-project/docling-serve-cu126:main
+
+.PHONY: run-docling-cu128
+run-docling-cu128: ## Run the docling-serve container with GPU support and assign a container name
+	$(ECHO_PREFIX) printf " %-12s Removing existing container if it exists...\n" "[CLEANUP]"
+	$(CMD_PREFIX) docker rm -f docling-serve-cu128 2>/dev/null || true
+	$(ECHO_PREFIX) printf " %-12s Running docling-serve container with GPU support on port 5001...\n" "[RUN CUDA 12.8]"
+	$(CMD_PREFIX) docker run -it --name docling-serve-cu128 -p 5001:5001 ghcr.io/docling-project/docling-serve-cu128:main
+
+.PHONY: run-docling-rocm
+run-docling-rocm: ## Run the docling-serve container with GPU support and assign a container name
+	$(ECHO_PREFIX) printf " %-12s Removing existing container if it exists...\n" "[CLEANUP]"
+	$(CMD_PREFIX) docker rm -f docling-serve-rocm 2>/dev/null || true
+	$(ECHO_PREFIX) printf " %-12s Running docling-serve container with GPU support on port 5001...\n" "[RUN ROCm 6.3]"
+	$(CMD_PREFIX) docker run -it --name docling-serve-rocm -p 5001:5001 ghcr.io/docling-project/docling-serve-rocm:main

--- a/README.md
+++ b/README.md
@@ -1,56 +1,122 @@
 <p align="center">
   <a href="https://github.com/docling-project/docling-serve">
     <img loading="lazy" alt="Docling" src="https://github.com/docling-project/docling-serve/raw/main/docs/assets/docling-serve-pic.png" width="30%"/>
   </a>
 </p>
 
 # Docling Serve
 
-Running [Docling](https://github.com/DS4SD/docling) as an API service.
+Running [Docling](https://github.com/docling-project/docling) as an API service.
 
-> [!NOTE]
-> This is an unstable draft implementation which will quickly evolve.
+📚 [Docling Serve documentation](./docs/README.md)
+
+- Learning how to [configure the webserver](./docs/configuration.md)
+- Get to know all [runtime options](./docs/usage.md) of the API
+- Explore useful [deployment examples](./docs/deployment.md)
+- And more
 
-## Development
+> [!NOTE]
+> **Migration to the `v1` API.** Docling Serve now has a stable v1 API. Read more on the [migration to v1](./docs/v1_migration.md).
 
-Install the dependencies
+## Getting started
 
-```sh
-# Install poetry if not already available
-curl -sSL https://install.python-poetry.org | python3 -
+Install the `docling-serve` package and run the server.
 
-# Install dependencies
-poetry install
+```bash
+# Using the python package
+pip install "docling-serve[ui]"
+docling-serve run --enable-ui
 
-# Run the server
-poetry run uvicorn docling_serve.app:app --reload
+# Using container images, e.g. with Podman
+podman run -p 5001:5001 -e DOCLING_SERVE_ENABLE_UI=1 quay.io/docling-project/docling-serve
 ```
 
-Example payload (http source):
+The server is available at
+
+- API <http://127.0.0.1:5001>
+- API documentation <http://127.0.0.1:5001/docs>
+- UI playground <http://127.0.0.1:5001/ui>
+
+
+
+Try it out with a simple conversion:
 
-```sh
+```bash
 curl -X 'POST' \
-  'http://127.0.0.1:8000/convert' \
+  'http://localhost:5001/v1/convert/source' \
   -H 'accept: application/json' \
   -H 'Content-Type: application/json' \
   -d '{
-  "http_source": {
-    "url": "https://arxiv.org/pdf/2206.01062"
-  }
-}'
+  "sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}]
+}'
 ```
 
-### Cuda GPU Support
+### Container Images
 
-For GPU support try the following:
+The following container images are available for running **Docling Serve** with different hardware and PyTorch configurations:
 
-```sh
-# Create a virtual env
-python3 -m venv venv
-
-# Activate the venv
-source venv/bin/active
-
-# Install torch with the special index
-pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu124
-
-# Install the package
-pip install -e .
-
-# Run the server
-poetry run uvicorn docling_serve.app:app --reload
-```
+#### 📦 Distributed Images
+
+| Image | Description | Architectures | Size |
+|-------|-------------|----------------|------|
+| [`ghcr.io/docling-project/docling-serve`](https://github.com/docling-project/docling-serve/pkgs/container/docling-serve) <br> [`quay.io/docling-project/docling-serve`](https://quay.io/repository/docling-project/docling-serve) | Base image with all packages installed from the official PyPI index. | `linux/amd64`, `linux/arm64` | 4.4 GB (arm64) <br> 8.7 GB (amd64) |
+| [`ghcr.io/docling-project/docling-serve-cpu`](https://github.com/docling-project/docling-serve/pkgs/container/docling-serve-cpu) <br> [`quay.io/docling-project/docling-serve-cpu`](https://quay.io/repository/docling-project/docling-serve-cpu) | CPU-only variant, using `torch` from the PyTorch CPU index. | `linux/amd64`, `linux/arm64` | 4.4 GB |
+| [`ghcr.io/docling-project/docling-serve-cu126`](https://github.com/docling-project/docling-serve/pkgs/container/docling-serve-cu126) <br> [`quay.io/docling-project/docling-serve-cu126`](https://quay.io/repository/docling-project/docling-serve-cu126) | CUDA 12.6 build with `torch` from the cu126 index. | `linux/amd64` | 10.0 GB |
+| [`ghcr.io/docling-project/docling-serve-cu128`](https://github.com/docling-project/docling-serve/pkgs/container/docling-serve-cu128) <br> [`quay.io/docling-project/docling-serve-cu128`](https://quay.io/repository/docling-project/docling-serve-cu128) | CUDA 12.8 build with `torch` from the cu128 index. | `linux/amd64` | 11.4 GB |
+
+#### 🚫 Not Distributed
+
+An image for AMD ROCm 6.3 (`docling-serve-rocm`) is supported but **not published** due to its large size.
+
+To build it locally:
+
+```bash
+git clone --branch main git@github.com:docling-project/docling-serve.git
+cd docling-serve/
+make docling-serve-rocm-image
+```
+
+For deployment using Docker Compose, see [docs/deployment.md](docs/deployment.md).
+
+Coming soon: `docling-serve-slim` images will reduce the size by skipping the model weights download.
+
+### Demonstration UI
+
+An easy to use UI is available at the `/ui` endpoint.
+
+
+
+
 
 ## Get help and support
 
 Please feel free to connect with us using the [discussion section](https://github.com/docling-project/docling/discussions).
 
 ## Contributing
 
 Please read [Contributing to Docling Serve](https://github.com/docling-project/docling-serve/blob/main/CONTRIBUTING.md) for details.
 
 ## References
 
 If you use Docling in your projects, please consider citing the following:
 
 ```bib
 @techreport{Docling,
   author = {Docling Contributors},
   month = {1},
   title = {Docling: An Efficient Open-Source Toolkit for AI-driven Document Conversion},
   url = {https://arxiv.org/abs/2501.17887},
   eprint = {2501.17887},
   doi = {10.48550/arXiv.2501.17887},
   version = {2.0.0},
   year = {2025}
 }
 ```
 
 ## License
 
 The Docling Serve codebase is under MIT license.
 
 ## IBM ❤️ Open Source AI
 
 Docling has been brought to you by IBM.

--- /dev/null
+++ b/docling_serve/__main__.py (new file)
@@ -0,0 +1,402 @@

import importlib.metadata
import logging
import platform
import sys
import warnings
from pathlib import Path
from typing import Annotated, Any, Optional, Union

import typer
import uvicorn
from rich.console import Console

from docling_serve.settings import docling_serve_settings, uvicorn_settings
from docling_serve.storage import get_scratch

warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic|torch")
warnings.filterwarnings(action="ignore", category=FutureWarning, module="easyocr")


err_console = Console(stderr=True)
console = Console()

app = typer.Typer(
    no_args_is_help=True,
    rich_markup_mode="rich",
)

logger = logging.getLogger(__name__)


def version_callback(value: bool) -> None:
    if value:
        docling_serve_version = importlib.metadata.version("docling_serve")
        docling_jobkit_version = importlib.metadata.version("docling-jobkit")
        docling_version = importlib.metadata.version("docling")
        docling_core_version = importlib.metadata.version("docling-core")
        docling_ibm_models_version = importlib.metadata.version("docling-ibm-models")
        docling_parse_version = importlib.metadata.version("docling-parse")
        platform_str = platform.platform()
        py_impl_version = sys.implementation.cache_tag
        py_lang_version = platform.python_version()
        console.print(f"Docling Serve version: {docling_serve_version}")
        console.print(f"Docling Jobkit version: {docling_jobkit_version}")
        console.print(f"Docling version: {docling_version}")
        console.print(f"Docling Core version: {docling_core_version}")
        console.print(f"Docling IBM Models version: {docling_ibm_models_version}")
        console.print(f"Docling Parse version: {docling_parse_version}")
        console.print(f"Python: {py_impl_version} ({py_lang_version})")
        console.print(f"Platform: {platform_str}")
        raise typer.Exit()


@app.callback()
def callback(
    version: Annotated[
        Union[bool, None],
        typer.Option(help="Show the version and exit.", callback=version_callback),
    ] = None,
    verbose: Annotated[
        int,
        typer.Option(
            "--verbose",
            "-v",
            count=True,
            help="Set the verbosity level. -v for info logging, -vv for debug logging.",
        ),
    ] = 0,
) -> None:
    if verbose == 0:
        logging.basicConfig(level=logging.WARNING)
    elif verbose == 1:
        logging.basicConfig(level=logging.INFO)
    elif verbose == 2:
        logging.basicConfig(level=logging.DEBUG)


def _run(
    *,
    command: str,
    # Docling serve parameters
    artifacts_path: Path | None,
    enable_ui: bool,
) -> None:
    server_type = "development" if command == "dev" else "production"

    console.print(f"Starting {server_type} server 🚀")

    run_subprocess = (
        uvicorn_settings.workers is not None and uvicorn_settings.workers > 1
    ) or uvicorn_settings.reload

    run_ssl = (
        uvicorn_settings.ssl_certfile is not None
        and uvicorn_settings.ssl_keyfile is not None
    )

    if run_subprocess and docling_serve_settings.artifacts_path != artifacts_path:
        err_console.print(
            "\n[yellow]:warning: The server will run with reload or multiple workers. \n"
            "The argument [bold]--artifacts-path[/bold] will be ignored, please set the value \n"
            "using the environment variable [bold]DOCLING_SERVE_ARTIFACTS_PATH[/bold].[/yellow]"
        )

    if run_subprocess and docling_serve_settings.enable_ui != enable_ui:
        err_console.print(
            "\n[yellow]:warning: The server will run with reload or multiple workers. \n"
            "The argument [bold]--enable-ui[/bold] will be ignored, please set the value \n"
            "using the environment variable [bold]DOCLING_SERVE_ENABLE_UI[/bold].[/yellow]"
        )

    # Propagate the settings to the app settings
    docling_serve_settings.artifacts_path = artifacts_path
    docling_serve_settings.enable_ui = enable_ui

    # Print documentation
    protocol = "https" if run_ssl else "http"
    url = f"{protocol}://{uvicorn_settings.host}:{uvicorn_settings.port}"
    url_docs = f"{url}/docs"
    url_scalar = f"{url}/scalar"
    url_ui = f"{url}/ui"

    console.print("")
    console.print(f"Server started at [link={url}]{url}[/]")
    console.print(f"Documentation at [link={url_docs}]{url_docs}[/]")
console.print(f"Scalar docs at [link={url_docs}]{url_scalar}[/]")
|
||||
if docling_serve_settings.enable_ui:
|
||||
console.print(f"UI at [link={url_ui}]{url_ui}[/]")
|
||||
|
||||
if command == "dev":
|
||||
console.print("")
|
||||
console.print(
|
||||
"Running in development mode, for production use: "
|
||||
"[bold]docling-serve run[/]",
|
||||
)
|
||||
|
||||
console.print("")
|
||||
console.print("Logs:")
|
||||
|
||||
# Launch the server
|
||||
uvicorn.run(
|
||||
app="docling_serve.app:create_app",
|
||||
factory=True,
|
||||
host=uvicorn_settings.host,
|
||||
port=uvicorn_settings.port,
|
||||
reload=uvicorn_settings.reload,
|
||||
workers=uvicorn_settings.workers,
|
||||
root_path=uvicorn_settings.root_path,
|
||||
proxy_headers=uvicorn_settings.proxy_headers,
|
||||
timeout_keep_alive=uvicorn_settings.timeout_keep_alive,
|
||||
ssl_certfile=uvicorn_settings.ssl_certfile,
|
||||
ssl_keyfile=uvicorn_settings.ssl_keyfile,
|
||||
ssl_keyfile_password=uvicorn_settings.ssl_keyfile_password,
|
||||
)
|
||||
|
||||
|
||||
@app.command()
|
||||
def dev(
|
||||
*,
|
||||
# uvicorn options
|
||||
host: Annotated[
|
||||
str,
|
||||
typer.Option(
|
||||
help=(
|
||||
"The host to serve on. For local development in localhost "
|
||||
"use [blue]127.0.0.1[/blue]. To enable public access, "
|
||||
"e.g. in a container, use all the IP addresses "
|
||||
"available with [blue]0.0.0.0[/blue]."
|
||||
)
|
||||
),
|
||||
] = "127.0.0.1",
|
||||
port: Annotated[
|
||||
int,
|
||||
typer.Option(help="The port to serve on."),
|
||||
] = uvicorn_settings.port,
|
||||
reload: Annotated[
|
||||
bool,
|
||||
typer.Option(
|
||||
help=(
|
||||
"Enable auto-reload of the server when (code) files change. "
|
||||
"This is [bold]resource intensive[/bold], "
|
||||
"use it only during development."
|
||||
)
|
||||
),
|
||||
] = True,
|
||||
root_path: Annotated[
|
||||
str,
|
||||
typer.Option(
|
||||
help=(
|
||||
"The root path is used to tell your app that it is being served "
|
||||
"to the outside world with some [bold]path prefix[/bold] "
|
||||
"set up in some termination proxy or similar."
|
||||
)
|
||||
),
|
||||
] = uvicorn_settings.root_path,
|
||||
proxy_headers: Annotated[
|
||||
bool,
|
||||
typer.Option(
|
||||
help=(
|
||||
"Enable/Disable X-Forwarded-Proto, X-Forwarded-For, "
|
||||
"X-Forwarded-Port to populate remote address info."
|
||||
)
|
||||
),
|
||||
] = uvicorn_settings.proxy_headers,
|
||||
timeout_keep_alive: Annotated[
|
||||
int, typer.Option(help="Timeout for the server response.")
|
||||
] = uvicorn_settings.timeout_keep_alive,
|
||||
ssl_certfile: Annotated[
|
||||
Optional[Path], typer.Option(help="SSL certificate file")
|
||||
] = uvicorn_settings.ssl_certfile,
|
||||
ssl_keyfile: Annotated[
|
||||
Optional[Path], typer.Option(help="SSL key file")
|
||||
] = uvicorn_settings.ssl_keyfile,
|
||||
ssl_keyfile_password: Annotated[
|
||||
Optional[str], typer.Option(help="SSL keyfile password")
|
||||
] = uvicorn_settings.ssl_keyfile_password,
|
||||
# docling options
|
||||
artifacts_path: Annotated[
|
||||
Optional[Path],
|
||||
typer.Option(
|
||||
help=(
|
||||
"If set to a valid directory, "
|
||||
"the model weights will be loaded from this path."
|
||||
)
|
||||
),
|
||||
] = docling_serve_settings.artifacts_path,
|
||||
enable_ui: Annotated[bool, typer.Option(help="Enable the development UI.")] = True,
|
||||
) -> Any:
|
||||
"""
|
||||
Run a [bold]Docling Serve[/bold] app in [yellow]development[/yellow] mode. 🧪
|
||||
|
||||
This is equivalent to [bold]docling-serve run[/bold] but with [bold]reload[/bold]
|
||||
enabled and listening on the [blue]127.0.0.1[/blue] address.
|
||||
|
||||
Options can be set also with the corresponding ENV variable, with the exception
|
||||
of --enable-ui, --host and --reload.
|
||||
"""
|
||||
|
||||
uvicorn_settings.host = host
|
||||
uvicorn_settings.port = port
|
||||
uvicorn_settings.reload = reload
|
||||
uvicorn_settings.root_path = root_path
|
||||
uvicorn_settings.proxy_headers = proxy_headers
|
||||
uvicorn_settings.timeout_keep_alive = timeout_keep_alive
|
||||
uvicorn_settings.ssl_certfile = ssl_certfile
|
||||
uvicorn_settings.ssl_keyfile = ssl_keyfile
|
||||
uvicorn_settings.ssl_keyfile_password = ssl_keyfile_password
|
||||
|
||||
_run(
|
||||
command="dev",
|
||||
artifacts_path=artifacts_path,
|
||||
enable_ui=enable_ui,
|
||||
)
|
||||
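For reference, a minimal sketch of exercising the dev command programmatically with Typer's test runner. The docling_serve.__main__ import path is an assumption, inferred from the "python -m docling_serve" entry point noted at the end of this file; invoking --help avoids actually starting a server.

from typer.testing import CliRunner

from docling_serve.__main__ import app  # assumed module path for this CLI

runner = CliRunner()
# Print the generated help text for the dev command without launching uvicorn.
result = runner.invoke(app, ["dev", "--help"])
print(result.output)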
|
||||
|
||||
@app.command()
|
||||
def run(
|
||||
*,
|
||||
host: Annotated[
|
||||
str,
|
||||
typer.Option(
|
||||
help=(
|
||||
"The host to serve on. For local development in localhost "
|
||||
"use [blue]127.0.0.1[/blue]. To enable public access, "
|
||||
"e.g. in a container, use all the IP addresses "
|
||||
"available with [blue]0.0.0.0[/blue]."
|
||||
)
|
||||
),
|
||||
] = uvicorn_settings.host,
|
||||
port: Annotated[
|
||||
int,
|
||||
typer.Option(help="The port to serve on."),
|
||||
] = uvicorn_settings.port,
|
||||
reload: Annotated[
|
||||
bool,
|
||||
typer.Option(
|
||||
help=(
|
||||
"Enable auto-reload of the server when (code) files change. "
|
||||
"This is [bold]resource intensive[/bold], "
|
||||
"use it only during development."
|
||||
)
|
||||
),
|
||||
] = uvicorn_settings.reload,
|
||||
workers: Annotated[
|
||||
Union[int, None],
|
||||
typer.Option(
|
||||
help=(
|
||||
"Use multiple worker processes. "
|
||||
"Mutually exclusive with the --reload flag."
|
||||
)
|
||||
),
|
||||
] = uvicorn_settings.workers,
|
||||
root_path: Annotated[
|
||||
str,
|
||||
typer.Option(
|
||||
help=(
|
||||
"The root path is used to tell your app that it is being served "
|
||||
"to the outside world with some [bold]path prefix[/bold] "
|
||||
"set up in some termination proxy or similar."
|
||||
)
|
||||
),
|
||||
] = uvicorn_settings.root_path,
|
||||
proxy_headers: Annotated[
|
||||
bool,
|
||||
typer.Option(
|
||||
help=(
|
||||
"Enable/Disable X-Forwarded-Proto, X-Forwarded-For, "
|
||||
"X-Forwarded-Port to populate remote address info."
|
||||
)
|
||||
),
|
||||
] = uvicorn_settings.proxy_headers,
|
||||
timeout_keep_alive: Annotated[
|
||||
int, typer.Option(help="Timeout for the server response.")
|
||||
] = uvicorn_settings.timeout_keep_alive,
|
||||
ssl_certfile: Annotated[
|
||||
Optional[Path], typer.Option(help="SSL certificate file")
|
||||
] = uvicorn_settings.ssl_certfile,
|
||||
ssl_keyfile: Annotated[
|
||||
Optional[Path], typer.Option(help="SSL key file")
|
||||
] = uvicorn_settings.ssl_keyfile,
|
||||
ssl_keyfile_password: Annotated[
|
||||
Optional[str], typer.Option(help="SSL keyfile password")
|
||||
] = uvicorn_settings.ssl_keyfile_password,
|
||||
# docling options
|
||||
artifacts_path: Annotated[
|
||||
Optional[Path],
|
||||
typer.Option(
|
||||
help=(
|
||||
"If set to a valid directory, "
|
||||
"the model weights will be loaded from this path."
|
||||
)
|
||||
),
|
||||
] = docling_serve_settings.artifacts_path,
|
||||
enable_ui: Annotated[
|
||||
bool, typer.Option(help="Enable the development UI.")
|
||||
] = docling_serve_settings.enable_ui,
|
||||
) -> Any:
|
||||
"""
|
||||
Run a [bold]Docling Serve[/bold] app in [green]production[/green] mode. 🚀
|
||||
|
||||
This is equivalent to [bold]docling-serve dev[/bold] but with [bold]reload[/bold]
|
||||
disabled and listening on the [blue]0.0.0.0[/blue] address.
|
||||
|
||||
Options can be set also with the corresponding ENV variable, e.g. UVICORN_PORT
|
||||
or DOCLING_SERVE_ENABLE_UI.
|
||||
"""
|
||||
|
||||
uvicorn_settings.host = host
|
||||
uvicorn_settings.port = port
|
||||
uvicorn_settings.reload = reload
|
||||
uvicorn_settings.workers = workers
|
||||
uvicorn_settings.root_path = root_path
|
||||
uvicorn_settings.proxy_headers = proxy_headers
|
||||
uvicorn_settings.timeout_keep_alive = timeout_keep_alive
|
||||
uvicorn_settings.ssl_certfile = ssl_certfile
|
||||
uvicorn_settings.ssl_keyfile = ssl_keyfile
|
||||
uvicorn_settings.ssl_keyfile_password = ssl_keyfile_password
|
||||
|
||||
_run(
|
||||
command="run",
|
||||
artifacts_path=artifacts_path,
|
||||
enable_ui=enable_ui,
|
||||
)
|
||||
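As the docstring above notes, every option can also be supplied through environment variables. A small sketch of that mapping, based on the settings classes later in this diff; the artifacts path below is a placeholder value.

import os

# UVICORN_* variables configure the server, DOCLING_SERVE_* the application.
os.environ["UVICORN_PORT"] = "8080"
os.environ["DOCLING_SERVE_ENABLE_UI"] = "true"
os.environ["DOCLING_SERVE_ARTIFACTS_PATH"] = "/models/docling"  # placeholder path

# The settings objects read the environment at import time, so set the
# variables before importing them.
from docling_serve.settings import docling_serve_settings, uvicorn_settings

print(uvicorn_settings.port, docling_serve_settings.enable_ui)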
|
||||
|
||||
@app.command()
|
||||
def rq_worker() -> Any:
|
||||
"""
|
||||
Run the [bold]Docling JobKit[/bold] RQ worker.
|
||||
"""
|
||||
from docling_jobkit.convert.manager import DoclingConverterManagerConfig
|
||||
from docling_jobkit.orchestrators.rq.orchestrator import RQOrchestratorConfig
|
||||
from docling_jobkit.orchestrators.rq.worker import run_worker
|
||||
|
||||
rq_config = RQOrchestratorConfig(
|
||||
redis_url=docling_serve_settings.eng_rq_redis_url,
|
||||
results_prefix=docling_serve_settings.eng_rq_results_prefix,
|
||||
sub_channel=docling_serve_settings.eng_rq_sub_channel,
|
||||
scratch_dir=get_scratch(),
|
||||
)
|
||||
|
||||
cm_config = DoclingConverterManagerConfig(
|
||||
artifacts_path=docling_serve_settings.artifacts_path,
|
||||
options_cache_size=docling_serve_settings.options_cache_size,
|
||||
enable_remote_services=docling_serve_settings.enable_remote_services,
|
||||
allow_external_plugins=docling_serve_settings.allow_external_plugins,
|
||||
max_num_pages=docling_serve_settings.max_num_pages,
|
||||
max_file_size=docling_serve_settings.max_file_size,
|
||||
)
|
||||
|
||||
run_worker(
|
||||
rq_config=rq_config,
|
||||
cm_config=cm_config,
|
||||
)
|
||||
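A sketch of the environment an RQ worker deployment would need before starting; the Redis URL is a placeholder. The same DOCLING_SERVE_* variables are read by the API server, so both processes can point at the same Redis instance.

import os

os.environ["DOCLING_SERVE_ENG_KIND"] = "rq"
os.environ["DOCLING_SERVE_ENG_RQ_REDIS_URL"] = "redis://localhost:6379/0"  # placeholder

from docling_serve.settings import AsyncEngine, docling_serve_settings

# The settings validator requires a Redis URL whenever the RQ engine is selected.
assert docling_serve_settings.eng_kind == AsyncEngine.RQ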
|
||||
|
||||
def main() -> None:
|
||||
app()
|
||||
|
||||
|
||||
# Launch the CLI when calling python -m docling_serve
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
docling_serve/app.py (1193 lines): file diff suppressed because it is too large.

docling_serve/auth.py (new file, 56 lines):
from typing import Any

from fastapi import HTTPException, Request, status
from fastapi.security import APIKeyHeader
from pydantic import BaseModel


class AuthenticationResult(BaseModel):
    valid: bool
    errors: list[str] = []
    detail: Any | None = None


class APIKeyAuth(APIKeyHeader):
    """
    FastAPI dependency which evaluates a static API Key.
    """

    def __init__(
        self,
        api_key: str,
        header_name: str = "X-Api-Key",
        fail_on_unauthorized: bool = True,
    ) -> None:
        self.api_key = api_key
        self.header_name = header_name
        super().__init__(name=self.header_name, auto_error=False)

    async def _validate_api_key(self, header_api_key: str | None):
        if header_api_key is None:
            return AuthenticationResult(
                valid=False, errors=[f"Missing header {self.header_name}."]
            )

        header_api_key = header_api_key.strip()

        # Otherwise check the apikey
        if header_api_key == self.api_key or self.api_key == "":
            return AuthenticationResult(
                valid=True,
                detail=header_api_key,
            )
        else:
            return AuthenticationResult(
                valid=False,
                errors=["The provided API Key is invalid."],
            )

    async def __call__(self, request: Request) -> AuthenticationResult:  # type: ignore
        header_api_key = await super().__call__(request=request)
        result = await self._validate_api_key(header_api_key)
        if self.api_key and not result.valid:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED, detail=result.detail
            )
        return result
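A minimal sketch (assumed wiring, not part of this diff) of how APIKeyAuth can protect a FastAPI route; the key value is a placeholder.

from fastapi import Depends, FastAPI

from docling_serve.auth import APIKeyAuth, AuthenticationResult

api = FastAPI()
require_api_key = APIKeyAuth(api_key="my-secret-key")  # placeholder key


@api.get("/protected")
async def protected(auth: AuthenticationResult = Depends(require_api_key)):
    # With a non-empty api_key, missing or invalid headers raise 401 before this runs.
    return {"authenticated": auth.valid}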
docling_serve/datamodel/__init__.py (new empty file)

docling_serve/datamodel/convert.py (new file, 40 lines):
# Define the input options for the API
from typing import Annotated

from pydantic import Field

from docling.datamodel.pipeline_options import (
    EasyOcrOptions,
)
from docling.models.factories import get_ocr_factory
from docling_jobkit.datamodel.convert import ConvertDocumentsOptions

from docling_serve.settings import docling_serve_settings

ocr_factory = get_ocr_factory(
    allow_external_plugins=docling_serve_settings.allow_external_plugins
)
ocr_engines_enum = ocr_factory.get_enum()


class ConvertDocumentsRequestOptions(ConvertDocumentsOptions):
    ocr_engine: Annotated[  # type: ignore
        ocr_engines_enum,
        Field(
            description=(
                "The OCR engine to use. String. "
                f"Allowed values: {', '.join([v.value for v in ocr_engines_enum])}. "
                "Optional, defaults to easyocr."
            ),
            examples=[EasyOcrOptions.kind],
        ),
    ] = ocr_engines_enum(EasyOcrOptions.kind)  # type: ignore

    document_timeout: Annotated[
        float,
        Field(
            description="The timeout for processing each document, in seconds.",
            gt=0,
            le=docling_serve_settings.max_document_timeout,
        ),
    ] = docling_serve_settings.max_document_timeout
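A small sketch of building the request options above; the field values are only examples, and the remaining fields fall back to the defaults inherited from ConvertDocumentsOptions.

from docling_serve.datamodel.convert import (
    ConvertDocumentsRequestOptions,
    ocr_engines_enum,
)

opts = ConvertDocumentsRequestOptions(
    ocr_engine=ocr_engines_enum("easyocr"),  # look up the enum member by its kind string
    document_timeout=120.0,
)
print(opts.model_dump(mode="json"))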
docling_serve/datamodel/requests.py (new file, 125 lines):
|
||||
import enum
|
||||
from functools import cache
|
||||
from typing import Annotated, Generic, Literal
|
||||
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
from pydantic_core import PydanticCustomError
|
||||
from typing_extensions import Self, TypeVar
|
||||
|
||||
from docling_jobkit.datamodel.chunking import (
|
||||
BaseChunkerOptions,
|
||||
)
|
||||
from docling_jobkit.datamodel.http_inputs import FileSource, HttpSource
|
||||
from docling_jobkit.datamodel.s3_coords import S3Coordinates
|
||||
from docling_jobkit.datamodel.task_targets import (
|
||||
InBodyTarget,
|
||||
S3Target,
|
||||
TaskTarget,
|
||||
ZipTarget,
|
||||
)
|
||||
|
||||
from docling_serve.datamodel.convert import ConvertDocumentsRequestOptions
|
||||
from docling_serve.settings import AsyncEngine, docling_serve_settings
|
||||
|
||||
## Sources
|
||||
|
||||
|
||||
class FileSourceRequest(FileSource):
|
||||
kind: Literal["file"] = "file"
|
||||
|
||||
|
||||
class HttpSourceRequest(HttpSource):
|
||||
kind: Literal["http"] = "http"
|
||||
|
||||
|
||||
class S3SourceRequest(S3Coordinates):
|
||||
kind: Literal["s3"] = "s3"
|
||||
|
||||
|
||||
## Multipart targets
|
||||
class TargetName(str, enum.Enum):
|
||||
INBODY = InBodyTarget().kind
|
||||
ZIP = ZipTarget().kind
|
||||
|
||||
|
||||
## Aliases
|
||||
SourceRequestItem = Annotated[
|
||||
FileSourceRequest | HttpSourceRequest | S3SourceRequest, Field(discriminator="kind")
|
||||
]
|
||||
|
||||
|
||||
## Complete Source request
|
||||
class ConvertDocumentsRequest(BaseModel):
|
||||
options: ConvertDocumentsRequestOptions = ConvertDocumentsRequestOptions()
|
||||
sources: list[SourceRequestItem]
|
||||
target: TaskTarget = InBodyTarget()
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_s3_source_and_target(self) -> Self:
|
||||
for source in self.sources:
|
||||
if isinstance(source, S3SourceRequest):
|
||||
if docling_serve_settings.eng_kind != AsyncEngine.KFP:
|
||||
raise PydanticCustomError(
|
||||
"error source", 'source kind "s3" requires engine kind "KFP"'
|
||||
)
|
||||
if self.target.kind != "s3":
|
||||
raise PydanticCustomError(
|
||||
"error source", 'source kind "s3" requires target kind "s3"'
|
||||
)
|
||||
if isinstance(self.target, S3Target):
|
||||
for source in self.sources:
|
||||
if isinstance(source, S3SourceRequest):
|
||||
return self
|
||||
raise PydanticCustomError(
|
||||
"error target", 'target kind "s3" requires source kind "s3"'
|
||||
)
|
||||
return self
|
||||
|
||||
|
||||
## Source chunking requests
|
||||
|
||||
|
||||
class BaseChunkDocumentsRequest(BaseModel):
|
||||
convert_options: Annotated[
|
||||
ConvertDocumentsRequestOptions, Field(description="Conversion options.")
|
||||
] = ConvertDocumentsRequestOptions()
|
||||
sources: Annotated[
|
||||
list[SourceRequestItem],
|
||||
Field(description="List of input document sources to process."),
|
||||
]
|
||||
include_converted_doc: Annotated[
|
||||
bool,
|
||||
Field(
|
||||
description="If true, the output will include both the chunks and the converted document."
|
||||
),
|
||||
] = False
|
||||
target: Annotated[
|
||||
TaskTarget, Field(description="Specification for the type of output target.")
|
||||
] = InBodyTarget()
|
||||
|
||||
|
||||
ChunkingOptT = TypeVar("ChunkingOptT", bound=BaseChunkerOptions)
|
||||
|
||||
|
||||
class GenericChunkDocumentsRequest(BaseChunkDocumentsRequest, Generic[ChunkingOptT]):
|
||||
chunking_options: ChunkingOptT
|
||||
|
||||
|
||||
@cache
|
||||
def make_request_model(
|
||||
opt_type: type[ChunkingOptT],
|
||||
) -> type[GenericChunkDocumentsRequest[ChunkingOptT]]:
|
||||
"""
|
||||
Dynamically create (and cache) a subclass of GenericChunkDocumentsRequest[opt_type]
|
||||
with chunking_options having a default factory.
|
||||
"""
|
||||
return type(
|
||||
f"{opt_type.__name__}DocumentsRequest",
|
||||
(GenericChunkDocumentsRequest[opt_type],), # type: ignore[valid-type]
|
||||
{
|
||||
"__annotations__": {"chunking_options": opt_type},
|
||||
"chunking_options": Field(
|
||||
default_factory=opt_type, description="Options specific to the chunker."
|
||||
),
|
||||
},
|
||||
)
|
||||
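Sketch of how make_request_model is meant to be used: it produces a cached request class whose chunking_options field defaults to the given options type. HybridChunkerOptions is assumed here as an example BaseChunkerOptions subclass from docling-jobkit; the URL is the sample document used elsewhere in this diff.

from docling_jobkit.datamodel.chunking import HybridChunkerOptions  # assumed class name

HybridChunkRequest = make_request_model(HybridChunkerOptions)

req = HybridChunkRequest(
    sources=[HttpSourceRequest(url="https://arxiv.org/pdf/2501.17887")],
)
print(type(req).__name__)    # HybridChunkerOptionsDocumentsRequest
print(req.chunking_options)  # default-constructed HybridChunkerOptions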
docling_serve/datamodel/responses.py (new file, 67 lines):
import enum
from typing import Optional

from pydantic import BaseModel

from docling.datamodel.document import ConversionStatus, ErrorItem
from docling.utils.profiling import ProfilingItem
from docling_jobkit.datamodel.result import (
    ChunkedDocumentResultItem,
    ExportDocumentResponse,
    ExportResult,
)
from docling_jobkit.datamodel.task_meta import TaskProcessingMeta, TaskType


# Status
class HealthCheckResponse(BaseModel):
    status: str = "ok"


class ClearResponse(BaseModel):
    status: str = "ok"


class ConvertDocumentResponse(BaseModel):
    document: ExportDocumentResponse
    status: ConversionStatus
    errors: list[ErrorItem] = []
    processing_time: float
    timings: dict[str, ProfilingItem] = {}


class PresignedUrlConvertDocumentResponse(BaseModel):
    processing_time: float
    num_converted: int
    num_succeeded: int
    num_failed: int


class ConvertDocumentErrorResponse(BaseModel):
    status: ConversionStatus


class ChunkDocumentResponse(BaseModel):
    chunks: list[ChunkedDocumentResultItem]
    documents: list[ExportResult]
    processing_time: float


class TaskStatusResponse(BaseModel):
    task_id: str
    task_type: TaskType
    task_status: str
    task_position: Optional[int] = None
    task_meta: Optional[TaskProcessingMeta] = None


class MessageKind(str, enum.Enum):
    CONNECTION = "connection"
    UPDATE = "update"
    ERROR = "error"


class WebsocketMessage(BaseModel):
    message: MessageKind
    task: Optional[TaskStatusResponse] = None
    error: Optional[str] = None
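For illustration, the error payload a websocket client would receive, serialized from the models above.

msg = WebsocketMessage(message=MessageKind.ERROR, error="Task not found")
print(msg.model_dump_json())
# -> {"message":"error","task":null,"error":"Task not found"}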
docling_serve/gradio_ui.py (new file, 906 lines):
|
||||
import base64
|
||||
import importlib
|
||||
import itertools
|
||||
import json
|
||||
import logging
|
||||
import ssl
|
||||
import tempfile
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import certifi
|
||||
import gradio as gr
|
||||
import httpx
|
||||
|
||||
from docling.datamodel.base_models import FormatToExtensions
|
||||
from docling.datamodel.pipeline_options import (
|
||||
PdfBackend,
|
||||
ProcessingPipeline,
|
||||
TableFormerMode,
|
||||
TableStructureOptions,
|
||||
)
|
||||
|
||||
from docling_serve.helper_functions import _to_list_of_strings
|
||||
from docling_serve.settings import docling_serve_settings, uvicorn_settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
############################
|
||||
# Path of static artifacts #
|
||||
############################
|
||||
|
||||
logo_path = "https://raw.githubusercontent.com/docling-project/docling/refs/heads/main/docs/assets/logo.svg"
|
||||
js_components_url = "https://unpkg.com/@docling/docling-components@0.0.7"
|
||||
if (
|
||||
docling_serve_settings.static_path is not None
|
||||
and docling_serve_settings.static_path.is_dir()
|
||||
):
|
||||
logo_path = str(docling_serve_settings.static_path / "logo.svg")
|
||||
js_components_url = "/static/docling-components.js"
|
||||
|
||||
|
||||
##############################
|
||||
# Head JS for web components #
|
||||
##############################
|
||||
head = f"""
|
||||
<script src="{js_components_url}" type="module"></script>
|
||||
"""
|
||||
|
||||
#################
|
||||
# CSS and theme #
|
||||
#################
|
||||
|
||||
css = """
|
||||
#logo {
|
||||
border-style: none;
|
||||
background: none;
|
||||
box-shadow: none;
|
||||
min-width: 80px;
|
||||
}
|
||||
#dark_mode_column {
|
||||
display: flex;
|
||||
align-content: flex-end;
|
||||
}
|
||||
#title {
|
||||
text-align: left;
|
||||
display:block;
|
||||
height: auto;
|
||||
padding-top: 5px;
|
||||
line-height: 0;
|
||||
}
|
||||
.title-text h1 > p, .title-text p {
|
||||
margin-top: 0px !important;
|
||||
margin-bottom: 2px !important;
|
||||
}
|
||||
#custom-container {
|
||||
border: 0.909091px solid;
|
||||
padding: 10px;
|
||||
border-radius: 4px;
|
||||
}
|
||||
#custom-container h4 {
|
||||
font-size: 14px;
|
||||
}
|
||||
#file_input_zone {
|
||||
height: 140px;
|
||||
}
|
||||
|
||||
docling-img {
|
||||
gap: 1rem;
|
||||
}
|
||||
|
||||
docling-img::part(page) {
|
||||
box-shadow: 0 0.5rem 1rem 0 rgba(0, 0, 0, 0.2);
|
||||
}
|
||||
"""
|
||||
|
||||
theme = gr.themes.Default(
|
||||
text_size="md",
|
||||
spacing_size="md",
|
||||
font=[
|
||||
gr.themes.GoogleFont("Red Hat Display"),
|
||||
"ui-sans-serif",
|
||||
"system-ui",
|
||||
"sans-serif",
|
||||
],
|
||||
font_mono=[
|
||||
gr.themes.GoogleFont("Red Hat Mono"),
|
||||
"ui-monospace",
|
||||
"Consolas",
|
||||
"monospace",
|
||||
],
|
||||
)
|
||||
|
||||
#############
|
||||
# Variables #
|
||||
#############
|
||||
|
||||
gradio_output_dir = None # Will be set by FastAPI when mounted
|
||||
file_output_path = None # Will be set when a new file is generated
|
||||
|
||||
#############
|
||||
# Functions #
|
||||
#############
|
||||
|
||||
|
||||
def get_api_endpoint() -> str:
|
||||
protocol = "http"
|
||||
if uvicorn_settings.ssl_keyfile is not None:
|
||||
protocol = "https"
|
||||
return f"{protocol}://{docling_serve_settings.api_host}:{uvicorn_settings.port}"
|
||||
|
||||
|
||||
def get_ssl_context() -> ssl.SSLContext:
|
||||
ctx = ssl.create_default_context(cafile=certifi.where())
|
||||
kube_sa_ca_cert_path = Path(
|
||||
"/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"
|
||||
)
|
||||
if (
|
||||
uvicorn_settings.ssl_keyfile is not None
|
||||
and ".svc." in docling_serve_settings.api_host
|
||||
and kube_sa_ca_cert_path.exists()
|
||||
):
|
||||
ctx.load_verify_locations(cafile=kube_sa_ca_cert_path)
|
||||
return ctx
|
||||
|
||||
|
||||
def health_check():
|
||||
response = httpx.get(f"{get_api_endpoint()}/health")
|
||||
if response.status_code == 200:
|
||||
return "Healthy"
|
||||
return "Unhealthy"
|
||||
|
||||
|
||||
def set_options_visibility(x):
|
||||
return gr.Accordion("Options", open=x)
|
||||
|
||||
|
||||
def set_outputs_visibility_direct(x, y):
|
||||
content = gr.Row(visible=x)
|
||||
file = gr.Row(visible=y)
|
||||
return content, file
|
||||
|
||||
|
||||
def set_task_id_visibility(x):
|
||||
task_id_row = gr.Row(visible=x)
|
||||
return task_id_row
|
||||
|
||||
|
||||
def set_outputs_visibility_process(x):
|
||||
content = gr.Row(visible=not x)
|
||||
file = gr.Row(visible=x)
|
||||
return content, file
|
||||
|
||||
|
||||
def set_download_button_label(label_text: gr.State):
|
||||
return gr.DownloadButton(label=str(label_text), scale=1)
|
||||
|
||||
|
||||
def clear_outputs():
|
||||
task_id_rendered = ""
|
||||
markdown_content = ""
|
||||
json_content = ""
|
||||
json_rendered_content = ""
|
||||
html_content = ""
|
||||
text_content = ""
|
||||
doctags_content = ""
|
||||
|
||||
return (
|
||||
task_id_rendered,
|
||||
markdown_content,
|
||||
markdown_content,
|
||||
json_content,
|
||||
json_rendered_content,
|
||||
html_content,
|
||||
html_content,
|
||||
text_content,
|
||||
doctags_content,
|
||||
)
|
||||
|
||||
|
||||
def clear_url_input():
|
||||
return ""
|
||||
|
||||
|
||||
def clear_file_input():
|
||||
return None
|
||||
|
||||
|
||||
def auto_set_return_as_file(
|
||||
url_input_value: str,
|
||||
file_input_value: Optional[list[str]],
|
||||
image_export_mode_value: str,
|
||||
):
|
||||
# If more than one input source is provided, return as file
|
||||
if (
|
||||
(len(url_input_value.split(",")) > 1)
|
||||
or (file_input_value and len(file_input_value) > 1)
|
||||
or (image_export_mode_value == "referenced")
|
||||
):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def change_ocr_lang(ocr_engine):
|
||||
if ocr_engine == "easyocr":
|
||||
return "en,fr,de,es"
|
||||
elif ocr_engine == "tesseract_cli":
|
||||
return "eng,fra,deu,spa"
|
||||
elif ocr_engine == "tesseract":
|
||||
return "eng,fra,deu,spa"
|
||||
elif ocr_engine == "rapidocr":
|
||||
return "english,chinese"
|
||||
|
||||
|
||||
def wait_task_finish(auth: str, task_id: str, return_as_file: bool):
|
||||
conversion_success = False
|
||||
task_finished = False
|
||||
task_status = ""
|
||||
|
||||
headers = {}
|
||||
if docling_serve_settings.api_key:
|
||||
headers["X-Api-Key"] = str(auth)
|
||||
|
||||
ssl_ctx = get_ssl_context()
|
||||
while not task_finished:
|
||||
try:
|
||||
response = httpx.get(
|
||||
f"{get_api_endpoint()}/v1/status/poll/{task_id}?wait=5",
|
||||
headers=headers,
|
||||
verify=ssl_ctx,
|
||||
timeout=15,
|
||||
)
|
||||
task_status = response.json()["task_status"]
|
||||
if task_status == "success":
|
||||
conversion_success = True
|
||||
task_finished = True
|
||||
|
||||
if task_status in ("failure", "revoked"):
|
||||
conversion_success = False
|
||||
task_finished = True
|
||||
raise RuntimeError(f"Task failed with status {task_status!r}")
|
||||
time.sleep(5)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing file(s): {e}")
|
||||
conversion_success = False
|
||||
task_finished = True
|
||||
raise gr.Error(f"Error processing file(s): {e}", print_exception=False)
|
||||
|
||||
if conversion_success:
|
||||
try:
|
||||
response = httpx.get(
|
||||
f"{get_api_endpoint()}/v1/result/{task_id}",
|
||||
headers=headers,
|
||||
timeout=15,
|
||||
verify=ssl_ctx,
|
||||
)
|
||||
output = response_to_output(response, return_as_file)
|
||||
return output
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting task result: {e}")
|
||||
|
||||
raise gr.Error(
|
||||
f"Error getting task result, conversion finished with status: {task_status}"
|
||||
)
|
||||
|
||||
|
||||
def process_url(
|
||||
auth,
|
||||
input_sources,
|
||||
to_formats,
|
||||
image_export_mode,
|
||||
pipeline,
|
||||
ocr,
|
||||
force_ocr,
|
||||
ocr_engine,
|
||||
ocr_lang,
|
||||
pdf_backend,
|
||||
table_mode,
|
||||
abort_on_error,
|
||||
return_as_file,
|
||||
do_code_enrichment,
|
||||
do_formula_enrichment,
|
||||
do_picture_classification,
|
||||
do_picture_description,
|
||||
):
|
||||
target = {"kind": "zip" if return_as_file else "inbody"}
|
||||
parameters = {
|
||||
"sources": [
|
||||
{"kind": "http", "url": source} for source in input_sources.split(",")
|
||||
],
|
||||
"options": {
|
||||
"to_formats": to_formats,
|
||||
"image_export_mode": image_export_mode,
|
||||
"pipeline": pipeline,
|
||||
"ocr": ocr,
|
||||
"force_ocr": force_ocr,
|
||||
"ocr_engine": ocr_engine,
|
||||
"ocr_lang": _to_list_of_strings(ocr_lang),
|
||||
"pdf_backend": pdf_backend,
|
||||
"table_mode": table_mode,
|
||||
"abort_on_error": abort_on_error,
|
||||
"do_code_enrichment": do_code_enrichment,
|
||||
"do_formula_enrichment": do_formula_enrichment,
|
||||
"do_picture_classification": do_picture_classification,
|
||||
"do_picture_description": do_picture_description,
|
||||
},
|
||||
"target": target,
|
||||
}
|
||||
if (
|
||||
not parameters["sources"]
|
||||
or len(parameters["sources"]) == 0
|
||||
or parameters["sources"][0]["url"] == ""
|
||||
):
|
||||
logger.error("No input sources provided.")
|
||||
raise gr.Error("No input sources provided.", print_exception=False)
|
||||
|
||||
headers = {}
|
||||
if docling_serve_settings.api_key:
|
||||
headers["X-Api-Key"] = str(auth)
|
||||
|
||||
print(f"{headers=}")
|
||||
try:
|
||||
ssl_ctx = get_ssl_context()
|
||||
response = httpx.post(
|
||||
f"{get_api_endpoint()}/v1/convert/source/async",
|
||||
json=parameters,
|
||||
headers=headers,
|
||||
verify=ssl_ctx,
|
||||
timeout=60,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing URL: {e}")
|
||||
raise gr.Error(f"Error processing URL: {e}", print_exception=False)
|
||||
if response.status_code != 200:
|
||||
data = response.json()
|
||||
error_message = data.get("detail", "An unknown error occurred.")
|
||||
logger.error(f"Error processing file: {error_message}")
|
||||
raise gr.Error(f"Error processing file: {error_message}", print_exception=False)
|
||||
|
||||
task_id_rendered = response.json()["task_id"]
|
||||
return task_id_rendered
|
||||
|
||||
|
||||
def file_to_base64(file):
|
||||
with open(file.name, "rb") as f:
|
||||
encoded_string = base64.b64encode(f.read()).decode("utf-8")
|
||||
return encoded_string
|
||||
|
||||
|
||||
def process_file(
|
||||
auth,
|
||||
files,
|
||||
to_formats,
|
||||
image_export_mode,
|
||||
pipeline,
|
||||
ocr,
|
||||
force_ocr,
|
||||
ocr_engine,
|
||||
ocr_lang,
|
||||
pdf_backend,
|
||||
table_mode,
|
||||
abort_on_error,
|
||||
return_as_file,
|
||||
do_code_enrichment,
|
||||
do_formula_enrichment,
|
||||
do_picture_classification,
|
||||
do_picture_description,
|
||||
):
|
||||
if not files or len(files) == 0:
|
||||
logger.error("No files provided.")
|
||||
raise gr.Error("No files provided.", print_exception=False)
|
||||
files_data = [
|
||||
{"kind": "file", "base64_string": file_to_base64(file), "filename": file.name}
|
||||
for file in files
|
||||
]
|
||||
target = {"kind": "zip" if return_as_file else "inbody"}
|
||||
|
||||
parameters = {
|
||||
"sources": files_data,
|
||||
"options": {
|
||||
"to_formats": to_formats,
|
||||
"image_export_mode": image_export_mode,
|
||||
"pipeline": pipeline,
|
||||
"ocr": ocr,
|
||||
"force_ocr": force_ocr,
|
||||
"ocr_engine": ocr_engine,
|
||||
"ocr_lang": _to_list_of_strings(ocr_lang),
|
||||
"pdf_backend": pdf_backend,
|
||||
"table_mode": table_mode,
|
||||
"abort_on_error": abort_on_error,
|
||||
"return_as_file": return_as_file,
|
||||
"do_code_enrichment": do_code_enrichment,
|
||||
"do_formula_enrichment": do_formula_enrichment,
|
||||
"do_picture_classification": do_picture_classification,
|
||||
"do_picture_description": do_picture_description,
|
||||
},
|
||||
"target": target,
|
||||
}
|
||||
|
||||
headers = {}
|
||||
if docling_serve_settings.api_key:
|
||||
headers["X-Api-Key"] = str(auth)
|
||||
|
||||
try:
|
||||
ssl_ctx = get_ssl_context()
|
||||
response = httpx.post(
|
||||
f"{get_api_endpoint()}/v1/convert/source/async",
|
||||
json=parameters,
|
||||
headers=headers,
|
||||
verify=ssl_ctx,
|
||||
timeout=60,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing file(s): {e}")
|
||||
raise gr.Error(f"Error processing file(s): {e}", print_exception=False)
|
||||
if response.status_code != 200:
|
||||
data = response.json()
|
||||
error_message = data.get("detail", "An unknown error occurred.")
|
||||
logger.error(f"Error processing file: {error_message}")
|
||||
raise gr.Error(f"Error processing file: {error_message}", print_exception=False)
|
||||
|
||||
task_id_rendered = response.json()["task_id"]
|
||||
return task_id_rendered
|
||||
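Outside the UI, the same flow can be driven with a few lines of httpx: submit an asynchronous conversion, poll its status, then fetch the result. The endpoints and payload shape mirror process_url and wait_task_finish above, while the base URL and API key are placeholders.

import httpx

base = "http://localhost:5001"             # placeholder, default port from the settings
headers = {"X-Api-Key": "my-secret-key"}   # only needed when an API key is configured

payload = {
    "sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}],
    "options": {"to_formats": ["md"]},
    "target": {"kind": "inbody"},
}
task = httpx.post(f"{base}/v1/convert/source/async", json=payload, headers=headers).json()

status = "pending"
while status not in ("success", "failure", "revoked"):
    status = httpx.get(
        f"{base}/v1/status/poll/{task['task_id']}?wait=5", headers=headers, timeout=15
    ).json()["task_status"]

result = httpx.get(f"{base}/v1/result/{task['task_id']}", headers=headers, timeout=15)
print(result.json()["document"]["md_content"])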
|
||||
|
||||
def response_to_output(response, return_as_file):
|
||||
markdown_content = ""
|
||||
json_content = ""
|
||||
json_rendered_content = ""
|
||||
html_content = ""
|
||||
text_content = ""
|
||||
doctags_content = ""
|
||||
download_button = gr.DownloadButton(visible=False, label="Download Output", scale=1)
|
||||
if return_as_file:
|
||||
filename = (
|
||||
response.headers.get("Content-Disposition").split("filename=")[1].strip('"')
|
||||
)
|
||||
tmp_output_dir = Path(tempfile.mkdtemp(dir=gradio_output_dir, prefix="ui_"))
|
||||
file_output_path = f"{tmp_output_dir}/{filename}"
|
||||
# logger.info(f"Saving file to: {file_output_path}")
|
||||
with open(file_output_path, "wb") as f:
|
||||
f.write(response.content)
|
||||
download_button = gr.DownloadButton(
|
||||
visible=True, label=f"Download {filename}", scale=1, value=file_output_path
|
||||
)
|
||||
else:
|
||||
full_content = response.json()
|
||||
markdown_content = full_content.get("document").get("md_content")
|
||||
json_content = json.dumps(
|
||||
full_content.get("document").get("json_content"), indent=2
|
||||
)
|
||||
# Embed document JSON and trigger load at client via an image.
|
||||
json_rendered_content = f"""
|
||||
<docling-img id="dclimg" pagenumbers><docling-tooltip></docling-tooltip></docling-img>
|
||||
<script id="dcljson" type="application/json" onload="document.getElementById('dclimg').src = JSON.parse(document.getElementById('dcljson').textContent);">{json_content}</script>
|
||||
<img src onerror="document.getElementById('dclimg').src = JSON.parse(document.getElementById('dcljson').textContent);" />
|
||||
"""
|
||||
html_content = full_content.get("document").get("html_content")
|
||||
text_content = full_content.get("document").get("text_content")
|
||||
doctags_content = full_content.get("document").get("doctags_content")
|
||||
return (
|
||||
markdown_content,
|
||||
markdown_content,
|
||||
json_content,
|
||||
json_rendered_content,
|
||||
html_content,
|
||||
html_content,
|
||||
text_content,
|
||||
doctags_content,
|
||||
download_button,
|
||||
)
|
||||
|
||||
|
||||
############
|
||||
# UI Setup #
|
||||
############
|
||||
|
||||
with gr.Blocks(
|
||||
head=head,
|
||||
css=css,
|
||||
theme=theme,
|
||||
title="Docling Serve",
|
||||
delete_cache=(3600, 36000),  # Every hour, delete all files older than 10 hours
|
||||
) as ui:
|
||||
# Constants stored in states to be able to pass them as inputs to functions
|
||||
processing_text = gr.State("Processing your document(s), please wait...")
|
||||
true_bool = gr.State(True)
|
||||
false_bool = gr.State(False)
|
||||
|
||||
# Banner
|
||||
with gr.Row(elem_id="check_health"):
|
||||
# Logo
|
||||
with gr.Column(scale=1, min_width=90):
|
||||
try:
|
||||
gr.Image(
|
||||
logo_path,
|
||||
height=80,
|
||||
width=80,
|
||||
show_download_button=False,
|
||||
show_label=False,
|
||||
show_fullscreen_button=False,
|
||||
container=False,
|
||||
elem_id="logo",
|
||||
scale=0,
|
||||
)
|
||||
except Exception:
|
||||
logger.warning("Logo not found.")
|
||||
|
||||
# Title
|
||||
with gr.Column(scale=1, min_width=200):
|
||||
gr.Markdown(
|
||||
f"# Docling Serve \n(docling version: "
|
||||
f"{importlib.metadata.version('docling')})",
|
||||
elem_id="title",
|
||||
elem_classes=["title-text"],
|
||||
)
|
||||
# Dark mode button
|
||||
with gr.Column(scale=16, elem_id="dark_mode_column"):
|
||||
dark_mode_btn = gr.Button("Dark/Light Mode", scale=0)
|
||||
dark_mode_btn.click(
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
js="""() => {
|
||||
if (document.querySelectorAll('.dark').length) {
|
||||
document.querySelectorAll('.dark').forEach(
|
||||
el => el.classList.remove('dark')
|
||||
);
|
||||
} else {
|
||||
document.querySelector('body').classList.add('dark');
|
||||
}
|
||||
}""",
|
||||
show_api=False,
|
||||
)
|
||||
|
||||
# URL Processing Tab
|
||||
with gr.Tab("Convert URL"):
|
||||
with gr.Row():
|
||||
with gr.Column(scale=4):
|
||||
url_input = gr.Textbox(
|
||||
label="URL Input Source",
|
||||
placeholder="https://arxiv.org/pdf/2501.17887",
|
||||
)
|
||||
with gr.Column(scale=1):
|
||||
url_process_btn = gr.Button("Process URL", scale=1)
|
||||
url_reset_btn = gr.Button("Reset", scale=1)
|
||||
|
||||
# File Processing Tab
|
||||
with gr.Tab("Convert File"):
|
||||
with gr.Row():
|
||||
with gr.Column(scale=4):
|
||||
file_input = gr.File(
|
||||
elem_id="file_input_zone",
|
||||
label="Upload File",
|
||||
file_types=[
|
||||
f".{v}"
|
||||
for v in itertools.chain.from_iterable(
|
||||
FormatToExtensions.values()
|
||||
)
|
||||
],
|
||||
file_count="multiple",
|
||||
scale=4,
|
||||
)
|
||||
with gr.Column(scale=1):
|
||||
file_process_btn = gr.Button("Process File", scale=1)
|
||||
file_reset_btn = gr.Button("Reset", scale=1)
|
||||
|
||||
# Auth
|
||||
with gr.Row(visible=bool(docling_serve_settings.api_key)):
|
||||
with gr.Column():
|
||||
auth = gr.Textbox(
|
||||
label="Authentication",
|
||||
placeholder="API Key",
|
||||
type="password",
|
||||
)
|
||||
|
||||
# Options
|
||||
with gr.Accordion("Options") as options:
|
||||
with gr.Row():
|
||||
with gr.Column(scale=1):
|
||||
to_formats = gr.CheckboxGroup(
|
||||
[
|
||||
("Docling (JSON)", "json"),
|
||||
("Markdown", "md"),
|
||||
("HTML", "html"),
|
||||
("Plain Text", "text"),
|
||||
("Doc Tags", "doctags"),
|
||||
],
|
||||
label="To Formats",
|
||||
value=["json", "md"],
|
||||
)
|
||||
with gr.Column(scale=1):
|
||||
image_export_mode = gr.Radio(
|
||||
[
|
||||
("Embedded", "embedded"),
|
||||
("Placeholder", "placeholder"),
|
||||
("Referenced", "referenced"),
|
||||
],
|
||||
label="Image Export Mode",
|
||||
value="embedded",
|
||||
)
|
||||
|
||||
with gr.Row():
|
||||
with gr.Column(scale=1, min_width=200):
|
||||
pipeline = gr.Radio(
|
||||
[(v.value.capitalize(), v.value) for v in ProcessingPipeline],
|
||||
label="Pipeline type",
|
||||
value=ProcessingPipeline.STANDARD.value,
|
||||
)
|
||||
with gr.Row():
|
||||
with gr.Column(scale=1, min_width=200):
|
||||
ocr = gr.Checkbox(label="Enable OCR", value=True)
|
||||
force_ocr = gr.Checkbox(label="Force OCR", value=False)
|
||||
with gr.Column(scale=1):
|
||||
ocr_engine = gr.Radio(
|
||||
[
|
||||
("EasyOCR", "easyocr"),
|
||||
("Tesseract", "tesseract"),
|
||||
("RapidOCR", "rapidocr"),
|
||||
],
|
||||
label="OCR Engine",
|
||||
value="easyocr",
|
||||
)
|
||||
with gr.Column(scale=1, min_width=200):
|
||||
ocr_lang = gr.Textbox(
|
||||
label="OCR Language (beware of the format)", value="en,fr,de,es"
|
||||
)
|
||||
ocr_engine.change(change_ocr_lang, inputs=[ocr_engine], outputs=[ocr_lang])
|
||||
with gr.Row():
|
||||
with gr.Column(scale=4):
|
||||
pdf_backend = gr.Radio(
|
||||
[v.value for v in PdfBackend],
|
||||
label="PDF Backend",
|
||||
value=PdfBackend.DLPARSE_V4.value,
|
||||
)
|
||||
with gr.Column(scale=2):
|
||||
table_mode = gr.Radio(
|
||||
[(v.value.capitalize(), v.value) for v in TableFormerMode],
|
||||
label="Table Mode",
|
||||
value=TableStructureOptions().mode.value,
|
||||
)
|
||||
with gr.Column(scale=1):
|
||||
abort_on_error = gr.Checkbox(label="Abort on Error", value=False)
|
||||
return_as_file = gr.Checkbox(label="Return as File", value=False)
|
||||
with gr.Row():
|
||||
with gr.Column():
|
||||
do_code_enrichment = gr.Checkbox(
|
||||
label="Enable code enrichment", value=False
|
||||
)
|
||||
do_formula_enrichment = gr.Checkbox(
|
||||
label="Enable formula enrichment", value=False
|
||||
)
|
||||
with gr.Column():
|
||||
do_picture_classification = gr.Checkbox(
|
||||
label="Enable picture classification", value=False
|
||||
)
|
||||
do_picture_description = gr.Checkbox(
|
||||
label="Enable picture description", value=False
|
||||
)
|
||||
|
||||
# Task id output
|
||||
with gr.Row(visible=False) as task_id_output:
|
||||
task_id_rendered = gr.Textbox(label="Task id", interactive=False)
|
||||
|
||||
# Document output
|
||||
with gr.Row(visible=False) as content_output:
|
||||
with gr.Tab("Docling (JSON)"):
|
||||
output_json = gr.Code(language="json", wrap_lines=True, show_label=False)
|
||||
with gr.Tab("Docling-Rendered"):
|
||||
output_json_rendered = gr.HTML(label="Response")
|
||||
with gr.Tab("Markdown"):
|
||||
output_markdown = gr.Code(
|
||||
language="markdown", wrap_lines=True, show_label=False
|
||||
)
|
||||
with gr.Tab("Markdown-Rendered"):
|
||||
output_markdown_rendered = gr.Markdown(label="Response")
|
||||
with gr.Tab("HTML"):
|
||||
output_html = gr.Code(language="html", wrap_lines=True, show_label=False)
|
||||
with gr.Tab("HTML-Rendered"):
|
||||
output_html_rendered = gr.HTML(label="Response")
|
||||
with gr.Tab("Text"):
|
||||
output_text = gr.Code(wrap_lines=True, show_label=False)
|
||||
with gr.Tab("DocTags"):
|
||||
output_doctags = gr.Code(wrap_lines=True, show_label=False)
|
||||
|
||||
# File download output
|
||||
with gr.Row(visible=False) as file_output:
|
||||
download_file_btn = gr.DownloadButton(label="Placeholder", scale=1)
|
||||
|
||||
##############
|
||||
# UI Actions #
|
||||
##############
|
||||
|
||||
# Handle Return as File
|
||||
url_input.change(
|
||||
auto_set_return_as_file,
|
||||
inputs=[url_input, file_input, image_export_mode],
|
||||
outputs=[return_as_file],
|
||||
)
|
||||
file_input.change(
|
||||
auto_set_return_as_file,
|
||||
inputs=[url_input, file_input, image_export_mode],
|
||||
outputs=[return_as_file],
|
||||
)
|
||||
image_export_mode.change(
|
||||
auto_set_return_as_file,
|
||||
inputs=[url_input, file_input, image_export_mode],
|
||||
outputs=[return_as_file],
|
||||
)
|
||||
|
||||
# URL processing
|
||||
url_process_btn.click(
|
||||
set_options_visibility, inputs=[false_bool], outputs=[options]
|
||||
).then(
|
||||
set_download_button_label, inputs=[processing_text], outputs=[download_file_btn]
|
||||
).then(
|
||||
clear_outputs,
|
||||
inputs=None,
|
||||
outputs=[
|
||||
task_id_rendered,
|
||||
output_markdown,
|
||||
output_markdown_rendered,
|
||||
output_json,
|
||||
output_json_rendered,
|
||||
output_html,
|
||||
output_html_rendered,
|
||||
output_text,
|
||||
output_doctags,
|
||||
],
|
||||
).then(
|
||||
set_task_id_visibility,
|
||||
inputs=[true_bool],
|
||||
outputs=[task_id_output],
|
||||
).then(
|
||||
process_url,
|
||||
inputs=[
|
||||
auth,
|
||||
url_input,
|
||||
to_formats,
|
||||
image_export_mode,
|
||||
pipeline,
|
||||
ocr,
|
||||
force_ocr,
|
||||
ocr_engine,
|
||||
ocr_lang,
|
||||
pdf_backend,
|
||||
table_mode,
|
||||
abort_on_error,
|
||||
return_as_file,
|
||||
do_code_enrichment,
|
||||
do_formula_enrichment,
|
||||
do_picture_classification,
|
||||
do_picture_description,
|
||||
],
|
||||
outputs=[
|
||||
task_id_rendered,
|
||||
],
|
||||
).then(
|
||||
set_outputs_visibility_process,
|
||||
inputs=[return_as_file],
|
||||
outputs=[content_output, file_output],
|
||||
).then(
|
||||
wait_task_finish,
|
||||
inputs=[auth, task_id_rendered, return_as_file],
|
||||
outputs=[
|
||||
output_markdown,
|
||||
output_markdown_rendered,
|
||||
output_json,
|
||||
output_json_rendered,
|
||||
output_html,
|
||||
output_html_rendered,
|
||||
output_text,
|
||||
output_doctags,
|
||||
download_file_btn,
|
||||
],
|
||||
)
|
||||
|
||||
url_reset_btn.click(
|
||||
clear_outputs,
|
||||
inputs=None,
|
||||
outputs=[
|
||||
output_markdown,
|
||||
output_markdown_rendered,
|
||||
output_json,
|
||||
output_json_rendered,
|
||||
output_html,
|
||||
output_html_rendered,
|
||||
output_text,
|
||||
output_doctags,
|
||||
],
|
||||
).then(set_options_visibility, inputs=[true_bool], outputs=[options]).then(
|
||||
set_outputs_visibility_direct,
|
||||
inputs=[false_bool, false_bool],
|
||||
outputs=[content_output, file_output],
|
||||
).then(set_task_id_visibility, inputs=[false_bool], outputs=[task_id_output]).then(
|
||||
clear_url_input, inputs=None, outputs=[url_input]
|
||||
)
|
||||
|
||||
# File processing
|
||||
file_process_btn.click(
|
||||
set_options_visibility, inputs=[false_bool], outputs=[options]
|
||||
).then(
|
||||
set_download_button_label, inputs=[processing_text], outputs=[download_file_btn]
|
||||
).then(
|
||||
clear_outputs,
|
||||
inputs=None,
|
||||
outputs=[
|
||||
task_id_rendered,
|
||||
output_markdown,
|
||||
output_markdown_rendered,
|
||||
output_json,
|
||||
output_json_rendered,
|
||||
output_html,
|
||||
output_html_rendered,
|
||||
output_text,
|
||||
output_doctags,
|
||||
],
|
||||
).then(
|
||||
set_task_id_visibility,
|
||||
inputs=[true_bool],
|
||||
outputs=[task_id_output],
|
||||
).then(
|
||||
process_file,
|
||||
inputs=[
|
||||
auth,
|
||||
file_input,
|
||||
to_formats,
|
||||
image_export_mode,
|
||||
pipeline,
|
||||
ocr,
|
||||
force_ocr,
|
||||
ocr_engine,
|
||||
ocr_lang,
|
||||
pdf_backend,
|
||||
table_mode,
|
||||
abort_on_error,
|
||||
return_as_file,
|
||||
do_code_enrichment,
|
||||
do_formula_enrichment,
|
||||
do_picture_classification,
|
||||
do_picture_description,
|
||||
],
|
||||
outputs=[
|
||||
task_id_rendered,
|
||||
],
|
||||
).then(
|
||||
set_outputs_visibility_process,
|
||||
inputs=[return_as_file],
|
||||
outputs=[content_output, file_output],
|
||||
).then(
|
||||
wait_task_finish,
|
||||
inputs=[auth, task_id_rendered, return_as_file],
|
||||
outputs=[
|
||||
output_markdown,
|
||||
output_markdown_rendered,
|
||||
output_json,
|
||||
output_json_rendered,
|
||||
output_html,
|
||||
output_html_rendered,
|
||||
output_text,
|
||||
output_doctags,
|
||||
download_file_btn,
|
||||
],
|
||||
)
|
||||
|
||||
file_reset_btn.click(
|
||||
clear_outputs,
|
||||
inputs=None,
|
||||
outputs=[
|
||||
output_markdown,
|
||||
output_markdown_rendered,
|
||||
output_json,
|
||||
output_json_rendered,
|
||||
output_html,
|
||||
output_html_rendered,
|
||||
output_text,
|
||||
output_doctags,
|
||||
],
|
||||
).then(set_options_visibility, inputs=[true_bool], outputs=[options]).then(
|
||||
set_outputs_visibility_direct,
|
||||
inputs=[false_bool, false_bool],
|
||||
outputs=[content_output, file_output],
|
||||
).then(set_task_id_visibility, inputs=[false_bool], outputs=[task_id_output]).then(
|
||||
clear_file_input, inputs=None, outputs=[file_input]
|
||||
)
|
||||
docling_serve/helper_functions.py (new file, 129 lines):
|
||||
import inspect
|
||||
import json
|
||||
import re
|
||||
from typing import Union, get_args, get_origin
|
||||
|
||||
from fastapi import Depends, Form
|
||||
from pydantic import BaseModel, TypeAdapter
|
||||
|
||||
|
||||
def is_pydantic_model(type_):
|
||||
try:
|
||||
if inspect.isclass(type_) and issubclass(type_, BaseModel):
|
||||
return True
|
||||
|
||||
origin = get_origin(type_)
|
||||
if origin is Union:
|
||||
args = get_args(type_)
|
||||
return any(
|
||||
inspect.isclass(arg) and issubclass(arg, BaseModel)
|
||||
for arg in args
|
||||
if arg is not type(None)
|
||||
)
|
||||
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
|
||||
# Adapted from
|
||||
# https://github.com/fastapi/fastapi/discussions/8971#discussioncomment-7892972
|
||||
def FormDepends(
|
||||
cls: type[BaseModel], prefix: str = "", excluded_fields: list[str] = []
|
||||
):
|
||||
new_parameters = []
|
||||
|
||||
for field_name, model_field in cls.model_fields.items():
|
||||
if field_name in excluded_fields:
|
||||
continue
|
||||
|
||||
annotation = model_field.annotation
|
||||
description = model_field.description
|
||||
default = (
|
||||
Form(..., description=description, examples=model_field.examples)
|
||||
if model_field.is_required()
|
||||
else Form(
|
||||
model_field.default,
|
||||
examples=model_field.examples,
|
||||
description=description,
|
||||
)
|
||||
)
|
||||
|
||||
# Flatten nested Pydantic models by accepting them as JSON strings
|
||||
if is_pydantic_model(annotation):
|
||||
annotation = str
|
||||
default = Form(
|
||||
None
|
||||
if model_field.default is None
|
||||
else json.dumps(model_field.default.model_dump(mode="json")),
|
||||
description=description,
|
||||
examples=None
|
||||
if not model_field.examples
|
||||
else [
|
||||
json.dumps(ex.model_dump(mode="json"))
|
||||
for ex in model_field.examples
|
||||
],
|
||||
)
|
||||
|
||||
new_parameters.append(
|
||||
inspect.Parameter(
|
||||
name=f"{prefix}{field_name}",
|
||||
kind=inspect.Parameter.POSITIONAL_ONLY,
|
||||
default=default,
|
||||
annotation=annotation,
|
||||
)
|
||||
)
|
||||
|
||||
async def as_form_func(**data):
|
||||
newdata = {}
|
||||
for field_name, model_field in cls.model_fields.items():
|
||||
if field_name in excluded_fields:
|
||||
continue
|
||||
value = data.get(f"{prefix}{field_name}")
|
||||
newdata[field_name] = value
|
||||
annotation = model_field.annotation
|
||||
|
||||
# Parse nested models from JSON string
|
||||
if value is not None and is_pydantic_model(annotation):
|
||||
try:
|
||||
validator = TypeAdapter(annotation)
|
||||
newdata[field_name] = validator.validate_json(value)
|
||||
except Exception as e:
|
||||
raise ValueError(f"Invalid JSON for field '{field_name}': {e}")
|
||||
|
||||
return cls(**newdata)
|
||||
|
||||
sig = inspect.signature(as_form_func)
|
||||
sig = sig.replace(parameters=new_parameters)
|
||||
as_form_func.__signature__ = sig # type: ignore
|
||||
|
||||
return Depends(as_form_func)
|
||||
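A sketch (assumed route, not part of this diff) of what FormDepends enables: declaring a Pydantic model once and receiving it as individual multipart form fields.

from fastapi import FastAPI
from pydantic import BaseModel

api = FastAPI()


class DemoOptions(BaseModel):  # hypothetical model for illustration
    ocr: bool = True
    ocr_lang: str = "en"


@api.post("/demo")
async def demo(options: DemoOptions = FormDepends(DemoOptions)):
    # Each model field is exposed as a separate form parameter in the OpenAPI schema.
    return options.model_dump()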
|
||||
|
||||
def _to_list_of_strings(input_value: Union[str, list[str]]) -> list[str]:
|
||||
def split_and_strip(value: str) -> list[str]:
|
||||
if re.search(r"[;,]", value):
|
||||
return [item.strip() for item in re.split(r"[;,]", value)]
|
||||
else:
|
||||
return [value.strip()]
|
||||
|
||||
if isinstance(input_value, str):
|
||||
return split_and_strip(input_value)
|
||||
elif isinstance(input_value, list):
|
||||
result = []
|
||||
for item in input_value:
|
||||
result.extend(split_and_strip(str(item)))
|
||||
return result
|
||||
else:
|
||||
raise ValueError("Invalid input: must be a string or a list of strings.")
|
||||
|
||||
|
||||
# Helper functions to parse inputs coming as Form objects
|
||||
def _str_to_bool(value: Union[str, bool]) -> bool:
|
||||
if isinstance(value, bool):
|
||||
return value # Already a boolean, return as-is
|
||||
if isinstance(value, str):
|
||||
value = value.strip().lower() # Normalize input
|
||||
return value in ("true", "1", "yes")
|
||||
return False # Default to False if none of the above matches
|
||||
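A few concrete calls showing what the parsing helpers above accept and return.

assert _to_list_of_strings("en,fr; de") == ["en", "fr", "de"]
assert _to_list_of_strings(["en,fr", "de"]) == ["en", "fr", "de"]
assert _str_to_bool("Yes") is True
assert _str_to_bool("0") is False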
docling_serve/orchestrator_factory.py (new file, 69 lines):
|
||||
from functools import lru_cache
|
||||
|
||||
from docling_jobkit.orchestrators.base_orchestrator import BaseOrchestrator
|
||||
|
||||
from docling_serve.settings import AsyncEngine, docling_serve_settings
|
||||
from docling_serve.storage import get_scratch
|
||||
|
||||
|
||||
@lru_cache
|
||||
def get_async_orchestrator() -> BaseOrchestrator:
|
||||
if docling_serve_settings.eng_kind == AsyncEngine.LOCAL:
|
||||
from docling_jobkit.convert.manager import (
|
||||
DoclingConverterManager,
|
||||
DoclingConverterManagerConfig,
|
||||
)
|
||||
from docling_jobkit.orchestrators.local.orchestrator import (
|
||||
LocalOrchestrator,
|
||||
LocalOrchestratorConfig,
|
||||
)
|
||||
|
||||
local_config = LocalOrchestratorConfig(
|
||||
num_workers=docling_serve_settings.eng_loc_num_workers,
|
||||
shared_models=docling_serve_settings.eng_loc_share_models,
|
||||
scratch_dir=get_scratch(),
|
||||
)
|
||||
|
||||
cm_config = DoclingConverterManagerConfig(
|
||||
artifacts_path=docling_serve_settings.artifacts_path,
|
||||
options_cache_size=docling_serve_settings.options_cache_size,
|
||||
enable_remote_services=docling_serve_settings.enable_remote_services,
|
||||
allow_external_plugins=docling_serve_settings.allow_external_plugins,
|
||||
max_num_pages=docling_serve_settings.max_num_pages,
|
||||
max_file_size=docling_serve_settings.max_file_size,
|
||||
)
|
||||
cm = DoclingConverterManager(config=cm_config)
|
||||
|
||||
return LocalOrchestrator(config=local_config, converter_manager=cm)
|
||||
elif docling_serve_settings.eng_kind == AsyncEngine.RQ:
|
||||
from docling_jobkit.orchestrators.rq.orchestrator import (
|
||||
RQOrchestrator,
|
||||
RQOrchestratorConfig,
|
||||
)
|
||||
|
||||
rq_config = RQOrchestratorConfig(
|
||||
redis_url=docling_serve_settings.eng_rq_redis_url,
|
||||
results_prefix=docling_serve_settings.eng_rq_results_prefix,
|
||||
sub_channel=docling_serve_settings.eng_rq_sub_channel,
|
||||
scratch_dir=get_scratch(),
|
||||
)
|
||||
|
||||
return RQOrchestrator(config=rq_config)
|
||||
elif docling_serve_settings.eng_kind == AsyncEngine.KFP:
|
||||
from docling_jobkit.orchestrators.kfp.orchestrator import (
|
||||
KfpOrchestrator,
|
||||
KfpOrchestratorConfig,
|
||||
)
|
||||
|
||||
kfp_config = KfpOrchestratorConfig(
|
||||
endpoint=docling_serve_settings.eng_kfp_endpoint,
|
||||
token=docling_serve_settings.eng_kfp_token,
|
||||
ca_cert_path=docling_serve_settings.eng_kfp_ca_cert_path,
|
||||
self_callback_endpoint=docling_serve_settings.eng_kfp_self_callback_endpoint,
|
||||
self_callback_token_path=docling_serve_settings.eng_kfp_self_callback_token_path,
|
||||
self_callback_ca_cert_path=docling_serve_settings.eng_kfp_self_callback_ca_cert_path,
|
||||
)
|
||||
|
||||
return KfpOrchestrator(config=kfp_config)
|
||||
|
||||
raise RuntimeError(f"Engine {docling_serve_settings.eng_kind} not recognized.")
|
||||
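Usage note: with the default settings (eng_kind = local) the factory returns a LocalOrchestrator, and because of the lru_cache decorator every caller shares the same instance.

orchestrator = get_async_orchestrator()
assert orchestrator is get_async_orchestrator()  # cached singleton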
docling_serve/response_preparation.py (new file, 82 lines):
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
from fastapi import BackgroundTasks, Response
|
||||
|
||||
from docling_jobkit.datamodel.result import (
|
||||
ChunkedDocumentResult,
|
||||
DoclingTaskResult,
|
||||
ExportResult,
|
||||
RemoteTargetResult,
|
||||
ZipArchiveResult,
|
||||
)
|
||||
from docling_jobkit.orchestrators.base_orchestrator import (
|
||||
BaseOrchestrator,
|
||||
)
|
||||
|
||||
from docling_serve.datamodel.responses import (
|
||||
ChunkDocumentResponse,
|
||||
ConvertDocumentResponse,
|
||||
PresignedUrlConvertDocumentResponse,
|
||||
)
|
||||
from docling_serve.settings import docling_serve_settings
|
||||
|
||||
_log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def prepare_response(
|
||||
task_id: str,
|
||||
task_result: DoclingTaskResult,
|
||||
orchestrator: BaseOrchestrator,
|
||||
background_tasks: BackgroundTasks,
|
||||
):
|
||||
response: (
|
||||
Response
|
||||
| ConvertDocumentResponse
|
||||
| PresignedUrlConvertDocumentResponse
|
||||
| ChunkDocumentResponse
|
||||
)
|
||||
if isinstance(task_result.result, ExportResult):
|
||||
response = ConvertDocumentResponse(
|
||||
document=task_result.result.content,
|
||||
status=task_result.result.status,
|
||||
processing_time=task_result.processing_time,
|
||||
timings=task_result.result.timings,
|
||||
errors=task_result.result.errors,
|
||||
)
|
||||
elif isinstance(task_result.result, ZipArchiveResult):
|
||||
response = Response(
|
||||
content=task_result.result.content,
|
||||
media_type="application/zip",
|
||||
headers={
|
||||
"Content-Disposition": 'attachment; filename="converted_docs.zip"'
|
||||
},
|
||||
)
|
||||
elif isinstance(task_result.result, RemoteTargetResult):
|
||||
response = PresignedUrlConvertDocumentResponse(
|
||||
processing_time=task_result.processing_time,
|
||||
num_converted=task_result.num_converted,
|
||||
num_succeeded=task_result.num_succeeded,
|
||||
num_failed=task_result.num_failed,
|
||||
)
|
||||
elif isinstance(task_result.result, ChunkedDocumentResult):
|
||||
response = ChunkDocumentResponse(
|
||||
chunks=task_result.result.chunks,
|
||||
documents=task_result.result.documents,
|
||||
processing_time=task_result.processing_time,
|
||||
)
|
||||
else:
|
||||
raise ValueError("Unknown result type")
|
||||
|
||||
if docling_serve_settings.single_use_results:
|
||||
|
||||
async def _remove_task_impl():
|
||||
await asyncio.sleep(docling_serve_settings.result_removal_delay)
|
||||
await orchestrator.delete_task(task_id=task_id)
|
||||
|
||||
async def _remove_task():
|
||||
asyncio.create_task(_remove_task_impl()) # noqa: RUF006
|
||||
|
||||
background_tasks.add_task(_remove_task)
|
||||
|
||||
return response
|
||||
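A rough sketch of where prepare_response sits in an endpoint. The orchestrator call used to fetch the finished result is an assumed method name from docling-jobkit, shown only to illustrate the flow; the actual route lives in the suppressed app.py diff.

from fastapi import APIRouter, BackgroundTasks

router = APIRouter()


@router.get("/v1/result/{task_id}")
async def task_result_endpoint(task_id: str, background_tasks: BackgroundTasks):
    orchestrator = get_async_orchestrator()  # factory shown earlier in this diff
    task_result = await orchestrator.task_result(task_id=task_id)  # assumed method name
    return await prepare_response(
        task_id=task_id,
        task_result=task_result,
        orchestrator=orchestrator,
        background_tasks=background_tasks,
    )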
docling_serve/settings.py (modified, 6 -> 105 lines):
|
||||
import enum
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Optional, Union
|
||||
|
||||
from pydantic import AnyUrl, model_validator
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
from typing_extensions import Self
|
||||
|
||||
|
||||
# Note: this diff removes the previous `class Settings(BaseSettings)` with
# `model_config = SettingsConfigDict(env_prefix="DOCLING_")` and replaces it
# with the settings classes below.
class UvicornSettings(BaseSettings):
model_config = SettingsConfigDict(
env_prefix="UVICORN_", env_file=".env", extra="allow"
)
host: str = "0.0.0.0"
|
||||
port: int = 5001
|
||||
reload: bool = False
|
||||
root_path: str = ""
|
||||
proxy_headers: bool = True
|
||||
timeout_keep_alive: int = 60
|
||||
ssl_certfile: Optional[Path] = None
|
||||
ssl_keyfile: Optional[Path] = None
|
||||
ssl_keyfile_password: Optional[str] = None
|
||||
workers: Union[int, None] = None
|
||||
|
||||
|
||||
class AsyncEngine(str, enum.Enum):
|
||||
LOCAL = "local"
|
||||
KFP = "kfp"
|
||||
RQ = "rq"
|
||||
|
||||
|
||||
class DoclingServeSettings(BaseSettings):
|
||||
model_config = SettingsConfigDict(
|
||||
env_prefix="DOCLING_SERVE_",
|
||||
env_file=".env",
|
||||
env_parse_none_str="",
|
||||
extra="allow",
|
||||
)
|
||||
|
||||
enable_ui: bool = False
|
||||
api_host: str = "localhost"
|
||||
artifacts_path: Optional[Path] = None
|
||||
static_path: Optional[Path] = None
|
||||
scratch_path: Optional[Path] = None
|
||||
single_use_results: bool = True
|
||||
result_removal_delay: float = 300 # 5 minutes
|
||||
load_models_at_boot: bool = True
|
||||
options_cache_size: int = 2
|
||||
enable_remote_services: bool = False
|
||||
allow_external_plugins: bool = False
|
||||
|
||||
api_key: str = ""
|
||||
|
||||
max_document_timeout: float = 3_600 * 24 * 7 # 7 days
|
||||
max_num_pages: int = sys.maxsize
|
||||
max_file_size: int = sys.maxsize
|
||||
|
||||
max_sync_wait: int = 120 # 2 minutes
|
||||
|
||||
cors_origins: list[str] = ["*"]
|
||||
cors_methods: list[str] = ["*"]
|
||||
cors_headers: list[str] = ["*"]
|
||||
|
||||
eng_kind: AsyncEngine = AsyncEngine.LOCAL
|
||||
# Local engine
|
||||
eng_loc_num_workers: int = 2
|
||||
eng_loc_share_models: bool = False
|
||||
# RQ engine
|
||||
eng_rq_redis_url: str = ""
|
||||
eng_rq_results_prefix: str = "docling:results"
|
||||
eng_rq_sub_channel: str = "docling:updates"
|
||||
# KFP engine
|
||||
eng_kfp_endpoint: Optional[AnyUrl] = None
|
||||
eng_kfp_token: Optional[str] = None
|
||||
eng_kfp_ca_cert_path: Optional[str] = None
|
||||
eng_kfp_self_callback_endpoint: Optional[str] = None
|
||||
eng_kfp_self_callback_token_path: Optional[Path] = None
|
||||
eng_kfp_self_callback_ca_cert_path: Optional[Path] = None
|
||||
|
||||
eng_kfp_experimental: bool = False
|
||||
|
||||
@model_validator(mode="after")
|
||||
def engine_settings(self) -> Self:
|
||||
# Validate KFP engine settings
|
||||
if self.eng_kind == AsyncEngine.KFP:
|
||||
if self.eng_kfp_endpoint is None:
|
||||
raise ValueError("KFP endpoint is required when using the KFP engine.")
|
||||
|
||||
if self.eng_kind == AsyncEngine.KFP:
|
||||
if not self.eng_kfp_experimental:
|
||||
raise ValueError(
|
||||
"KFP is not yet working. To enable the development version, you must set DOCLING_SERVE_ENG_KFP_EXPERIMENTAL=true."
|
||||
)
|
||||
|
||||
if self.eng_kind == AsyncEngine.RQ:
|
||||
if not self.eng_rq_redis_url:
|
||||
raise ValueError("RQ Redis url is required when using the RQ engine.")
|
||||
|
||||
return self
|
||||
|
||||
|
||||
uvicorn_settings = UvicornSettings()
|
||||
docling_serve_settings = DoclingServeSettings()
|
||||
|
||||
docling_serve/storage.py (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
import tempfile
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
|
||||
from docling_serve.settings import docling_serve_settings
|
||||
|
||||
|
||||
@lru_cache
|
||||
def get_scratch() -> Path:
|
||||
scratch_dir = (
|
||||
docling_serve_settings.scratch_path
|
||||
if docling_serve_settings.scratch_path is not None
|
||||
else Path(tempfile.mkdtemp(prefix="docling_"))
|
||||
)
|
||||
scratch_dir.mkdir(exist_ok=True, parents=True)
|
||||
return scratch_dir
|
||||
docling_serve/websocket_notifier.py (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
from fastapi import WebSocket
|
||||
|
||||
from docling_jobkit.datamodel.task_meta import TaskStatus
|
||||
from docling_jobkit.orchestrators.base_notifier import BaseNotifier
|
||||
from docling_jobkit.orchestrators.base_orchestrator import BaseOrchestrator
|
||||
|
||||
from docling_serve.datamodel.responses import (
|
||||
MessageKind,
|
||||
TaskStatusResponse,
|
||||
WebsocketMessage,
|
||||
)
|
||||
|
||||
|
||||
class WebsocketNotifier(BaseNotifier):
|
||||
def __init__(self, orchestrator: BaseOrchestrator):
|
||||
super().__init__(orchestrator)
|
||||
self.task_subscribers: dict[str, set[WebSocket]] = {}
|
||||
|
||||
async def add_task(self, task_id: str):
|
||||
self.task_subscribers[task_id] = set()
|
||||
|
||||
async def remove_task(self, task_id: str):
|
||||
if task_id in self.task_subscribers:
|
||||
for websocket in self.task_subscribers[task_id]:
|
||||
await websocket.close()
|
||||
|
||||
del self.task_subscribers[task_id]
|
||||
|
||||
async def notify_task_subscribers(self, task_id: str):
|
||||
if task_id not in self.task_subscribers:
|
||||
raise RuntimeError(f"Task {task_id} does not have a subscribers list.")
|
||||
|
||||
task = await self.orchestrator.get_raw_task(task_id=task_id)
|
||||
task_queue_position = await self.orchestrator.get_queue_position(task_id)
|
||||
msg = TaskStatusResponse(
|
||||
task_id=task.task_id,
|
||||
task_type=task.task_type,
|
||||
task_status=task.task_status,
|
||||
task_position=task_queue_position,
|
||||
task_meta=task.processing_meta,
|
||||
)
|
||||
for websocket in self.task_subscribers[task_id]:
|
||||
await websocket.send_text(
|
||||
WebsocketMessage(message=MessageKind.UPDATE, task=msg).model_dump_json()
|
||||
)
|
||||
if task.is_completed():
|
||||
await websocket.close()
|
||||
|
||||
async def notify_queue_positions(self):
|
||||
for task_id in self.task_subscribers.keys():
|
||||
# notify only pending tasks
|
||||
if self.orchestrator.tasks[task_id].task_status != TaskStatus.PENDING:
|
||||
continue
|
||||
|
||||
await self.notify_task_subscribers(task_id)
|
||||
docs/README.md (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
# Docling Serve documentation
|
||||
|
||||
These documentation pages cover the webserver configuration, runtime options, and deployment examples, as well as development best practices.
|
||||
|
||||
- [Configuration](./configuration.md)
|
||||
- [Handling models](./models.md)
|
||||
- [Usage](./usage.md)
|
||||
- [Deployment](./deployment.md)
|
||||
- [MCP](./mcp.md)
|
||||
- [Development](./development.md)
|
||||
- [`v1` migration](./v1_migration.md)
|
||||
docs/assets/docling-serve-pic.png (new binary file, 504 KiB; not shown)
docs/configuration.md (new file, 101 lines)
@@ -0,0 +1,101 @@
|
||||
# Configuration
|
||||
|
||||
The `docling-serve` executable allows configuring the server via command-line
|
||||
options as well as environment variables.
|
||||
Configurations are divided between the settings used for the `uvicorn` ASGI
|
||||
server and the actual app-specific configurations.
|
||||
|
||||
> [!WARNING]
|
||||
> When the server is running with `reload` or with multiple `workers`, uvicorn
|
||||
> will spawn multiple subprocesses. This invalidates all the values configured
|
||||
> via the CLI options. Please use environment variables in this
|
||||
> type of deployment.
|
||||
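For example, the server can be configured purely via environment variables. This is a minimal sketch with illustrative values; any option from the tables below can be set the same way.

```sh
export UVICORN_PORT=5001
export UVICORN_WORKERS=2
export DOCLING_SERVE_ENABLE_UI=true
docling-serve run
```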
|
||||
## Webserver configuration
|
||||
|
||||
The following table shows the options which are propagated directly to the
|
||||
`uvicorn` webserver runtime.
|
||||
|
||||
| CLI option | ENV | Default | Description |
|
||||
| -----------|-----|---------|-------------|
|
||||
| `--host` | `UVICORN_HOST` | `0.0.0.0` for `run`, `localhost` for `dev` | The host to serve on. |
|
||||
| `--port` | `UVICORN_PORT` | `5001` | The port to serve on. |
|
||||
| `--reload` | `UVICORN_RELOAD` | `false` for `run`, `true` for `dev` | Enable auto-reload of the server when (code) files change. |
|
||||
| `--workers` | `UVICORN_WORKERS` | `1` | Use multiple worker processes. |
|
||||
| `--root-path` | `UVICORN_ROOT_PATH` | `""` | The root path is used to tell your app that it is being served to the outside world behind a proxy, under a given path prefix. |
|
||||
| `--proxy-headers` | `UVICORN_PROXY_HEADERS` | `true` | Enable/Disable X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to populate remote address info. |
|
||||
| `--timeout-keep-alive` | `UVICORN_TIMEOUT_KEEP_ALIVE` | `60` | Timeout for the server response. |
|
||||
| `--ssl-certfile` | `UVICORN_SSL_CERTFILE` | | SSL certificate file. |
|
||||
| `--ssl-keyfile` | `UVICORN_SSL_KEYFILE` | | SSL key file. |
|
||||
| `--ssl-keyfile-password` | `UVICORN_SSL_KEYFILE_PASSWORD` | | SSL keyfile password. |
|
||||
|
||||
## Docling Serve configuration
|
||||
|
||||
The following table describes the options to configure the Docling Serve app.
|
||||
|
||||
| CLI option | ENV | Default | Description |
|
||||
| -----------|-----|---------|-------------|
|
||||
| `--artifacts-path` | `DOCLING_SERVE_ARTIFACTS_PATH` | unset | If set to a valid directory, the model weights will be loaded from this path |
|
||||
| | `DOCLING_SERVE_STATIC_PATH` | unset | If set to a valid directory, the static assets for the docs and UI will be loaded from this path |
|
||||
| | `DOCLING_SERVE_SCRATCH_PATH` | | If set, this directory will be used as the scratch workspace, e.g. for storing the results before they get requested. If unset, a temporary directory is created for this purpose. |
|
||||
| `--enable-ui` | `DOCLING_SERVE_ENABLE_UI` | `false` | Enable the demonstrator UI. |
|
||||
| | `DOCLING_SERVE_ENABLE_REMOTE_SERVICES` | `false` | Allow pipeline components making remote connections. For example, this is needed when using a vision-language model via APIs. |
|
||||
| | `DOCLING_SERVE_ALLOW_EXTERNAL_PLUGINS` | `false` | Allow the selection of third-party plugins. |
|
||||
| | `DOCLING_SERVE_SINGLE_USE_RESULTS` | `true` | If true, results can be accessed only once. If false, the results accumulate in the scratch directory. |
|
||||
| | `DOCLING_SERVE_RESULT_REMOVAL_DELAY` | `300` | When `DOCLING_SERVE_SINGLE_USE_RESULTS` is active, this is the delay before results are removed from the task registry. |
|
||||
| | `DOCLING_SERVE_MAX_DOCUMENT_TIMEOUT` | `604800` (7 days) | The maximum time for processing a document. |
|
||||
| | `DOCLING_NUM_THREADS` | `4` | Number of concurrent threads for processing a document. |
|
||||
| | `DOCLING_SERVE_MAX_NUM_PAGES` | | The maximum number of pages for a document to be processed. |
|
||||
| | `DOCLING_SERVE_MAX_FILE_SIZE` | | The maximum file size for a document to be processed. |
|
||||
| | `DOCLING_SERVE_MAX_SYNC_WAIT` | `120` | Max number of seconds a synchronous endpoint is waiting for the task completion. |
|
||||
| | `DOCLING_SERVE_LOAD_MODELS_AT_BOOT` | `True` | If enabled, the models for the default options will be loaded at boot. |
|
||||
| | `DOCLING_SERVE_OPTIONS_CACHE_SIZE` | `2` | How many DocumentConverter objects (including their loaded models) to keep in the cache. |
|
||||
| | `DOCLING_SERVE_CORS_ORIGINS` | `["*"]` | A list of origins that should be permitted to make cross-origin requests. |
|
||||
| | `DOCLING_SERVE_CORS_METHODS` | `["*"]` | A list of HTTP methods that should be allowed for cross-origin requests. |
|
||||
| | `DOCLING_SERVE_CORS_HEADERS` | `["*"]` | A list of HTTP request headers that should be supported for cross-origin requests. |
|
||||
| | `DOCLING_SERVE_API_KEY` | | If specified, all the API requests must contain the header `X-Api-Key` with this value. |
|
||||
| | `DOCLING_SERVE_ENG_KIND` | `local` | The compute engine to use for the async tasks. Possible values are `local`, `rq` and `kfp`. See below for more configurations of the engines. |
|
||||
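As an illustrative sketch (the key and delay values are placeholders), several of these app-level settings can be combined, for example to protect the API with a key and delay the removal of single-use results:

```sh
export DOCLING_SERVE_API_KEY="my-secret-key"
export DOCLING_SERVE_SINGLE_USE_RESULTS=true
export DOCLING_SERVE_RESULT_REMOVAL_DELAY=600  # seconds
docling-serve run
```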
|
||||
### Compute engine
|
||||
|
||||
Docling Serve can be deployed with one of several compute engines.
|
||||
The selected compute engine will be running all the async jobs.
|
||||
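For instance, a minimal sketch of switching to the RQ engine (the Redis URL is a placeholder):

```sh
export DOCLING_SERVE_ENG_KIND=rq
export DOCLING_SERVE_ENG_RQ_REDIS_URL="redis://localhost:6373/"
docling-serve run
```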
|
||||
#### Local engine
|
||||
|
||||
The following table describes the options to configure the Docling Serve local engine.
|
||||
|
||||
| ENV | Default | Description |
|
||||
|-----|---------|-------------|
|
||||
| `DOCLING_SERVE_ENG_LOC_NUM_WORKERS` | 2 | Number of workers/threads processing the incoming tasks. |
|
||||
| `DOCLING_SERVE_ENG_LOC_SHARE_MODELS` | False | If true, each process will share the same models among all thread workers. Otherwise, one instance of the models is allocated for each worker thread. |
|
||||
|
||||
#### RQ engine
|
||||
|
||||
The following table describes the options to configure the Docling Serve RQ engine.
|
||||
|
||||
| ENV | Default | Description |
|
||||
|-----|---------|-------------|
|
||||
| `DOCLING_SERVE_ENG_RQ_REDIS_URL` | (required) | The connection Redis url, e.g. `redis://localhost:6373/` |
|
||||
| `DOCLING_SERVE_ENG_RQ_RESULTS_PREFIX` | `docling:results` | The prefix used for storing the results in Redis. |
|
||||
| `DOCLING_SERVE_ENG_RQ_SUB_CHANNEL` | `docling:updates` | The channel name used for communicating updates between the workers and the orchestrator. |
|
||||
|
||||
#### KFP engine
|
||||
|
||||
The following table describes the options to configure the Docling Serve KFP engine.
|
||||
|
||||
| ENV | Default | Description |
|
||||
|-----|---------|-------------|
|
||||
| `DOCLING_SERVE_ENG_KFP_ENDPOINT` | | Must be set to the Kubeflow Pipeline endpoint. When using the in-cluster deployment, make sure to use the cluster endpoint, e.g. `https://NAME.NAMESPACE.svc.cluster.local:8888` |
|
||||
| `DOCLING_SERVE_ENG_KFP_TOKEN` | | The authentication token for KFP. For in-cluster deployment, the app will automatically load the ServiceAccount token. |
|
||||
| `DOCLING_SERVE_ENG_KFP_CA_CERT_PATH` | | Path to the CA certificates for the KFP endpoint. For in-cluster deployment, the app will automatically load the internal CA. |
|
||||
| `DOCLING_SERVE_ENG_KFP_SELF_CALLBACK_ENDPOINT` | | If set, it enables internal callbacks providing status updates of the KFP job. Usually something like `https://NAME.NAMESPACE.svc.cluster.local:5001/v1/callback/task/progress`. |
|
||||
| `DOCLING_SERVE_ENG_KFP_SELF_CALLBACK_TOKEN_PATH` | | The token used for authenticating the progress callback. For cluster-internal workloads, use `/run/secrets/kubernetes.io/serviceaccount/token`. |
|
||||
| `DOCLING_SERVE_ENG_KFP_SELF_CALLBACK_CA_CERT_PATH` | | The CA certificate for the progress callback. For cluster-internal workloads, use `/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt`. |
|
||||
|
||||
#### Gradio UI
|
||||
|
||||
When using the Gradio UI with the option to output the conversion as a file, Gradio uses a cache to prevent files from being overwritten ([more info here](https://www.gradio.app/guides/file-access#the-gradio-cache)). The cache is cleaned once per hour, removing files older than 10 hours. If files need to remain available for download from the UI for longer than 10 hours, there are two options:
|
||||
|
||||
- Increase the maximum file age used by the cache cleanup [here](https://github.com/docling-project/docling-serve/blob/main/docling_serve/gradio_ui.py#L483) to the desired value;
|
||||
- Or manage the cleanup manually by pointing Gradio's temporary directory to the same absolute path as `DOCLING_SERVE_SCRATCH_PATH`. This is done by setting the environment variable `GRADIO_TEMP_DIR`, either via the command line (`export GRADIO_TEMP_DIR="<same_path_as_scratch>"`) or in the `Dockerfile` (`ENV GRADIO_TEMP_DIR="<same_path_as_scratch>"`), and then setting the cache cleanup to `None` [here](https://github.com/docling-project/docling-serve/blob/main/docling_serve/gradio_ui.py#L483) (see the sketch below). The cleanup of `DOCLING_SERVE_SCRATCH_PATH` will then also clean the Gradio temporary directory. If you later revert this change, remember to remove the `GRADIO_TEMP_DIR` environment variable, otherwise files may not be available for download.
|
||||
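A minimal sketch of the second option, assuming an illustrative scratch path; both variables must point to the same absolute directory:

```sh
export DOCLING_SERVE_SCRATCH_PATH="/opt/app-root/scratch"
export GRADIO_TEMP_DIR="/opt/app-root/scratch"
docling-serve run
```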
docs/deploy-examples/compose-amd.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
# AMD ROCm deployment
|
||||
|
||||
services:
|
||||
docling-serve:
|
||||
image: ghcr.io/docling-project/docling-serve-rocm:main
|
||||
container_name: docling-serve
|
||||
ports:
|
||||
- "5001:5001"
|
||||
environment:
|
||||
DOCLING_SERVE_ENABLE_UI: "true"
|
||||
ROCR_VISIBLE_DEVICES: "0" # https://rocm.docs.amd.com/en/latest/conceptual/gpu-isolation.html#rocr-visible-devices
|
||||
## This section is for compatibility with older cards
|
||||
# HSA_OVERRIDE_GFX_VERSION: "11.0.0"
|
||||
# HSA_ENABLE_SDMA: "0"
|
||||
devices:
|
||||
- /dev/kfd:/dev/kfd
|
||||
- /dev/dri:/dev/dri
|
||||
group_add:
|
||||
- 44 # video group GID from host
|
||||
- 992 # render group GID from host
|
||||
restart: always
|
||||
docs/deploy-examples/compose-nvidia.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
# NVIDIA CUDA deployment
|
||||
|
||||
services:
|
||||
docling-serve:
|
||||
image: ghcr.io/docling-project/docling-serve-cu126:main
|
||||
container_name: docling-serve
|
||||
ports:
|
||||
- "5001:5001"
|
||||
environment:
|
||||
DOCLING_SERVE_ENABLE_UI: "true"
|
||||
NVIDIA_VISIBLE_DEVICES: "all" # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/docker-specialized.html
|
||||
# deploy: # This section is for compatibility with Swarm
|
||||
# resources:
|
||||
# reservations:
|
||||
# devices:
|
||||
# - driver: nvidia
|
||||
# count: all
|
||||
# capabilities: [gpu]
|
||||
runtime: nvidia
|
||||
restart: always
|
||||
docs/deploy-examples/docling-model-cache-deployment.yaml (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: api
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 250m
|
||||
memory: 1Gi
|
||||
env:
|
||||
- name: DOCLING_SERVE_ENABLE_UI
|
||||
value: 'true'
|
||||
- name: DOCLING_SERVE_ARTIFACTS_PATH
|
||||
value: '/modelcache'
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 5001
|
||||
protocol: TCP
|
||||
imagePullPolicy: Always
|
||||
image: 'ghcr.io/docling-project/docling-serve-cpu'
|
||||
volumeMounts:
|
||||
- name: docling-model-cache
|
||||
mountPath: /modelcache
|
||||
volumes:
|
||||
- name: docling-model-cache
|
||||
persistentVolumeClaim:
|
||||
claimName: docling-model-cache-pvc
|
||||
docs/deploy-examples/docling-model-cache-job.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: docling-model-cache-load
|
||||
spec:
|
||||
selector: {}
|
||||
template:
|
||||
metadata:
|
||||
name: docling-model-load
|
||||
spec:
|
||||
containers:
|
||||
- name: loader
|
||||
image: ghcr.io/docling-project/docling-serve-cpu:main
|
||||
command:
|
||||
- docling-tools
|
||||
- models
|
||||
- download
|
||||
- '--output-dir=/modelcache'
|
||||
- 'layout'
|
||||
- 'tableformer'
|
||||
- 'code_formula'
|
||||
- 'picture_classifier'
|
||||
- 'smolvlm'
|
||||
- 'granite_vision'
|
||||
- 'easyocr'
|
||||
volumeMounts:
|
||||
- name: docling-model-cache
|
||||
mountPath: /modelcache
|
||||
volumes:
|
||||
- name: docling-model-cache
|
||||
persistentVolumeClaim:
|
||||
claimName: docling-model-cache-pvc
|
||||
restartPolicy: Never
|
||||
docs/deploy-examples/docling-model-cache-pvc.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: docling-model-cache-pvc
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
docs/deploy-examples/docling-serve-oauth.yaml (new file, 192 lines)
@@ -0,0 +1,192 @@
|
||||
# This example deployment configures Docling Serve with an OAuth-Proxy sidecar and TLS termination
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
annotations:
|
||||
serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"docling-serve"}}'
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: docling-serve-oauth
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:auth-delegator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: docling-serve
|
||||
namespace: docling
|
||||
---
|
||||
apiVersion: route.openshift.io/v1
|
||||
kind: Route
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
to:
|
||||
kind: Service
|
||||
name: docling-serve
|
||||
port:
|
||||
targetPort: oauth
|
||||
tls:
|
||||
termination: Reencrypt
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
annotations:
|
||||
service.alpha.openshift.io/serving-cert-secret-name: docling-serve-tls
|
||||
spec:
|
||||
ports:
|
||||
- name: oauth
|
||||
port: 8443
|
||||
targetPort: oauth
|
||||
- name: http
|
||||
port: 5001
|
||||
targetPort: http
|
||||
selector:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
serviceAccountName: docling-serve
|
||||
containers:
|
||||
- name: api
|
||||
resources:
|
||||
limits:
|
||||
cpu: 2000m
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 800m
|
||||
memory: 1Gi
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: http
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 10
|
||||
timeoutSeconds: 2
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: http
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 3
|
||||
timeoutSeconds: 4
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: DOCLING_SERVE_ENABLE_UI
|
||||
value: 'true'
|
||||
- name: DOCLING_SERVE_API_HOST
|
||||
value: 'docling-serve.$(NAMESPACE).svc.cluster.local'
|
||||
- name: UVICORN_SSL_CERTFILE
|
||||
value: '/etc/tls/private/tls.crt'
|
||||
- name: UVICORN_SSL_KEYFILE
|
||||
value: '/etc/tls/private/tls.key'
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 5001
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: proxy-tls
|
||||
mountPath: /etc/tls/private
|
||||
imagePullPolicy: Always
|
||||
image: 'ghcr.io/docling-project/docling-serve-cpu:fix-ui-with-https'
|
||||
- name: oauth-proxy
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 256Mi
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /oauth/healthz
|
||||
port: oauth
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /oauth/healthz
|
||||
port: oauth
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
ports:
|
||||
- name: oauth
|
||||
containerPort: 8443
|
||||
protocol: TCP
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name: proxy-tls
|
||||
mountPath: /etc/tls/private
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
image: 'registry.redhat.io/openshift4/ose-oauth-proxy:v4.13'
|
||||
args:
|
||||
- '--https-address=:8443'
|
||||
- '--provider=openshift'
|
||||
- '--openshift-service-account=docling-serve'
|
||||
- '--upstream=https://docling-serve.$(NAMESPACE).svc.cluster.local:5001'
|
||||
- '--upstream-ca=/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt'
|
||||
- '--tls-cert=/etc/tls/private/tls.crt'
|
||||
- '--tls-key=/etc/tls/private/tls.key'
|
||||
- '--cookie-secret=SECRET'
|
||||
- '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"docling-serve","namespace":"$(NAMESPACE)"}}'
|
||||
- '--openshift-sar={"namespace":"$(NAMESPACE)","resource":"routes","resourceName":"docling-serve","verb":"get","resourceAPIGroup":"route.openshift.io"}'
|
||||
- '--skip-auth-regex=''(^/health|^/docs)'''
|
||||
volumes:
|
||||
- name: proxy-tls
|
||||
secret:
|
||||
secretName: docling-serve-tls
|
||||
defaultMode: 420
|
||||
@@ -0,0 +1,76 @@
|
||||
# This example deployment configures Docling Serve with a Route + Sticky sessions, a Service and cpu image
|
||||
---
|
||||
kind: Route
|
||||
apiVersion: route.openshift.io/v1
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
annotations:
|
||||
haproxy.router.openshift.io/disable_cookies: "false" # this annotation enables the sticky sessions
|
||||
spec:
|
||||
path: /
|
||||
to:
|
||||
kind: Service
|
||||
name: docling-serve
|
||||
port:
|
||||
targetPort: http
|
||||
tls:
|
||||
termination: edge
|
||||
insecureEdgeTerminationPolicy: Redirect
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
port: 5001
|
||||
targetPort: http
|
||||
selector:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: api
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 250m
|
||||
memory: 1Gi
|
||||
env:
|
||||
- name: DOCLING_SERVE_ENABLE_UI
|
||||
value: 'true'
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 5001
|
||||
protocol: TCP
|
||||
imagePullPolicy: Always
|
||||
image: 'ghcr.io/docling-project/docling-serve'
|
||||
docs/deploy-examples/docling-serve-rq-workers.yaml (new file, 192 lines)
@@ -0,0 +1,192 @@
|
||||
# This example deployment configures Docling Serve with a Service and RQ workers
|
||||
|
||||
# Create following secret
|
||||
# kubectl create secret generic docling-serve-rq-secrets --from-literal=REDIS_PASSWORD=myredispassword --from-literal=RQ_REDIS_URL=redis://:myredispassword@docling-serve-redis-service:6373/
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
port: 5001
|
||||
targetPort: http
|
||||
selector:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: api
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1
|
||||
memory: 8Gi
|
||||
requests:
|
||||
cpu: 250m
|
||||
memory: 1Gi
|
||||
env:
|
||||
- name: DOCLING_SERVE_ENABLE_UI
|
||||
value: 'true'
|
||||
- name: DOCLING_SERVE_ENG_KIND
|
||||
value: 'rq'
|
||||
- name: DOCLING_SERVE_ENG_RQ_REDIS_URL
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: docling-serve-rq-secrets
|
||||
key: RQ_REDIS_URL
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 5001
|
||||
protocol: TCP
|
||||
imagePullPolicy: Always
|
||||
image: 'ghcr.io/docling-project/docling-serve-cpu'
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: docling-serve-rq-workers
|
||||
labels:
|
||||
app: docling-serve-rq-workers
|
||||
component: docling-serve-rq-worker
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: docling-serve-rq-workers
|
||||
component: docling-serve-rq-worker
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: docling-serve-rq-workers
|
||||
component: docling-serve-rq-worker
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: worker
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1
|
||||
memory: 4Gi
|
||||
requests:
|
||||
cpu: 250m
|
||||
memory: 1Gi
|
||||
env:
|
||||
- name: DOCLING_SERVE_ENG_KIND
|
||||
value: 'rq'
|
||||
- name: DOCLING_SERVE_ENG_RQ_REDIS_URL
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: docling-serve-rq-secrets
|
||||
key: RQ_REDIS_URL
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 5001
|
||||
protocol: TCP
|
||||
imagePullPolicy: Always
|
||||
image: 'ghcr.io/docling-project/docling-serve-cpu'
|
||||
command: ["docling-serve"]
|
||||
args: ["rq-worker"]
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: docling-serve-redis
|
||||
labels:
|
||||
app: docling-serve-redis
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: docling-serve-redis
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: docling-serve-redis
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
containers:
|
||||
- name: redis
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 250m
|
||||
memory: 100Mi
|
||||
image: redis:latest
|
||||
command: ["redis-server"]
|
||||
args:
|
||||
- "--port"
|
||||
- "6373"
|
||||
- "--dir"
|
||||
- "/mnt/redis/data"
|
||||
- "--appendonly"
|
||||
- "yes"
|
||||
- "--requirepass"
|
||||
- "$(REDIS_PASSWORD)"
|
||||
ports:
|
||||
- containerPort: 6373
|
||||
env:
|
||||
- name: REDIS_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: docling-serve-rq-secrets
|
||||
key: REDIS_PASSWORD
|
||||
volumeMounts:
|
||||
- name: redis-data
|
||||
mountPath: /mnt/redis/data
|
||||
securityContext:
|
||||
fsGroup: 1004
|
||||
runAsNonRoot: true
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
volumes:
|
||||
- name: redis-data
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
sizeLimit: 2Gi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: docling-serve-redis-service
|
||||
labels:
|
||||
app: docling-serve-redis
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- name: redis-service
|
||||
protocol: TCP
|
||||
port: 6373
|
||||
targetPort: 6373
|
||||
selector:
|
||||
app: docling-serve-redis
|
||||
docs/deploy-examples/docling-serve-simple.yaml (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
# This example deployment configures Docling Serve with a Service and cuda image
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
port: 5001
|
||||
targetPort: http
|
||||
selector:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: docling-serve
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: docling-serve
|
||||
component: docling-serve-api
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
containers:
|
||||
- name: api
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1
|
||||
memory: 4Gi
|
||||
nvidia.com/gpu: 1 # Limit to one GPU
|
||||
requests:
|
||||
cpu: 250m
|
||||
memory: 1Gi
|
||||
nvidia.com/gpu: 1 # Limit to one GPU
|
||||
env:
|
||||
- name: DOCLING_SERVE_ENABLE_UI
|
||||
value: 'true'
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 5001
|
||||
protocol: TCP
|
||||
imagePullPolicy: Always
|
||||
image: 'ghcr.io/docling-project/docling-serve-cu124'
|
||||
docs/deployment.md (new file, 330 lines)
@@ -0,0 +1,330 @@
|
||||
# Deployment Examples
|
||||
|
||||
This document provides deployment examples for running the application in different environments.
|
||||
|
||||
Choose the deployment option that best fits your setup.
|
||||
|
||||
- **[Local GPU NVIDIA](#local-gpu-nvidia)**: For deploying the application locally on a machine with a supported NVIDIA GPU (using Docker Compose).
|
||||
- **[Local GPU AMD](#local-gpu-amd)**: For deploying the application locally on a machine with a supported AMD GPU (using Docker Compose).
|
||||
- **[OpenShift](#openshift)**: For deploying the application on an OpenShift cluster, designed for cloud-native environments.
|
||||
|
||||
---
|
||||
|
||||
## Local GPU NVIDIA
|
||||
|
||||
### Docker compose
|
||||
|
||||
Manifest example: [compose-nvidia.yaml](./deploy-examples/compose-nvidia.yaml)
|
||||
|
||||
This deployment has the following features:
|
||||
|
||||
- NVIDIA CUDA enabled
|
||||
|
||||
Install the app with:
|
||||
|
||||
```sh
|
||||
docker compose -f docs/deploy-examples/compose-nvidia.yaml up -d
|
||||
```
|
||||
|
||||
For using the API:
|
||||
|
||||
```sh
|
||||
# Make a test query
|
||||
curl -X 'POST' \
|
||||
"localhost:5001/v1/convert/source/async" \
|
||||
-H "accept: application/json" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}]
|
||||
}'
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary><b>Requirements</b></summary>
|
||||
|
||||
- debian/ubuntu/rhel/fedora/opensuse
|
||||
- docker
|
||||
- nvidia drivers >=550.54.14
|
||||
- nvidia-container-toolkit
|
||||
|
||||
Docs:
|
||||
|
||||
- [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/supported-platforms.html)
|
||||
- [CUDA Toolkit Release Notes](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#id6)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><b>Steps</b></summary>
|
||||
|
||||
1. Check the driver version and which GPU you want to use (0/1/2/n), and update the [compose-nvidia.yaml](./deploy-examples/compose-nvidia.yaml) file or use `count: all`
|
||||
|
||||
```sh
|
||||
nvidia-smi
|
||||
```
|
||||
|
||||
2. Check if the NVIDIA Container Toolkit is installed/updated
|
||||
|
||||
```sh
|
||||
# debian
|
||||
dpkg -l | grep nvidia-container-toolkit
|
||||
```
|
||||
|
||||
```sh
|
||||
# rhel
|
||||
rpm -q nvidia-container-toolkit
|
||||
```
|
||||
|
||||
NVIDIA Container Toolkit install steps can be found here:
|
||||
|
||||
<https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html>
|
||||
|
||||
3. Check which runtime is being used by Docker
|
||||
|
||||
```sh
|
||||
# docker
|
||||
docker info | grep -i runtime
|
||||
```
|
||||
|
||||
4. If the default Docker runtime changes back from 'nvidia' to 'default' after restarting the Docker service (optional):
|
||||
|
||||
Backup the daemon.json file:
|
||||
|
||||
```sh
|
||||
sudo cp /etc/docker/daemon.json /etc/docker/daemon.json.bak
|
||||
```
|
||||
|
||||
Update the daemon.json file:
|
||||
|
||||
```sh
|
||||
echo '{
|
||||
"runtimes": {
|
||||
"nvidia": {
|
||||
"path": "nvidia-container-runtime"
|
||||
}
|
||||
},
|
||||
"default-runtime": "nvidia"
|
||||
}' | sudo tee /etc/docker/daemon.json > /dev/null
|
||||
```
|
||||
|
||||
Restart the Docker service:
|
||||
|
||||
```sh
|
||||
sudo systemctl restart docker
|
||||
```
|
||||
|
||||
Confirm 'nvidia' is the default runtime used by Docker by repeating step 3.
|
||||
|
||||
5. Run the container:
|
||||
|
||||
```sh
|
||||
docker compose -f docs/deploy-examples/compose-nvidia.yaml up -d
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## Local GPU AMD
|
||||
|
||||
### Docker compose
|
||||
|
||||
Manifest example: [compose-amd.yaml](./deploy-examples/compose-amd.yaml)
|
||||
|
||||
This deployment has the following features:
|
||||
|
||||
- AMD ROCm enabled
|
||||
|
||||
Install the app with:
|
||||
|
||||
```sh
|
||||
docker compose -f docs/deploy-examples/compose-amd.yaml up -d
|
||||
```
|
||||
|
||||
For using the API:
|
||||
|
||||
```sh
|
||||
# Make a test query
|
||||
curl -X 'POST' \
|
||||
"localhost:5001/v1/convert/source/async" \
|
||||
-H "accept: application/json" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}]
|
||||
}'
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary><b>Requirements</b></summary>
|
||||
|
||||
- debian/ubuntu/rhel/fedora/opensuse
|
||||
- docker
|
||||
- AMDGPU driver >=6.3
|
||||
- AMD ROCm >=6.3
|
||||
|
||||
Docs:
|
||||
|
||||
- [AMD ROCm installation](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/install/quick-start.html)
|
||||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><b>Steps</b></summary>
|
||||
|
||||
1. Check the driver version and which GPU you want to use (0/1/2/n), and update the [compose-amd.yaml](./deploy-examples/compose-amd.yaml) file accordingly
|
||||
|
||||
```sh
|
||||
rocm-smi --showdriverversion
|
||||
rocminfo | grep -i "ROCm version"
|
||||
```
|
||||
|
||||
2. Find both video group GID and render group GID from host (and update [compose-amd.yaml](./deploy-examples/compose-amd.yaml) file)
|
||||
|
||||
```sh
|
||||
getent group video
|
||||
getent group render
|
||||
```
|
||||
|
||||
3. Build the image locally (and update [compose-amd.yaml](./deploy-examples/compose-amd.yaml) file)
|
||||
|
||||
```sh
|
||||
make docling-serve-rocm-image
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## OpenShift
|
||||
|
||||
### Simple deployment
|
||||
|
||||
Manifest example: [docling-serve-simple.yaml](./deploy-examples/docling-serve-simple.yaml)
|
||||
|
||||
This deployment example has the following features:
|
||||
|
||||
- Deployment configuration
|
||||
- Service configuration
|
||||
- NVIDIA CUDA enabled
|
||||
|
||||
Install the app with:
|
||||
|
||||
```sh
|
||||
oc apply -f docs/deploy-examples/docling-serve-simple.yaml
|
||||
```
|
||||
|
||||
For using the API:
|
||||
|
||||
```sh
|
||||
# Port-forward the service
|
||||
oc port-forward svc/docling-serve 5001:5001
|
||||
|
||||
# Make a test query
|
||||
curl -X 'POST' \
|
||||
"localhost:5001/v1/convert/source/async" \
|
||||
-H "accept: application/json" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}]
|
||||
}'
|
||||
```
|
||||
|
||||
### Multiple workers with RQ
|
||||
|
||||
Manifest example: [`docling-serve-rq-workers.yaml`](./deploy-examples/docling-serve-rq-workers.yaml)
|
||||
|
||||
This deployment example has the following features:
|
||||
|
||||
- Deployment configuration
|
||||
- Service configuration
|
||||
- Redis deployment
|
||||
- Multiple (2 by default) worker Pods
|
||||
|
||||
Install the app with:
|
||||
|
||||
- create k8s secret:
|
||||
|
||||
```sh
|
||||
kubectl create secret generic docling-serve-rq-secrets --from-literal=REDIS_PASSWORD=myredispassword --from-literal=RQ_REDIS_URL=redis://:myredispassword@docling-serve-redis-service:6373/
|
||||
```
|
||||
|
||||
- apply deployment manifest:
|
||||
|
||||
```sh
|
||||
oc apply -f docs/deploy-examples/docling-serve-rq-workers.yaml
|
||||
```
|
||||
|
||||
### Secure deployment with `oauth-proxy`
|
||||
|
||||
Manifest example: [docling-serve-oauth.yaml](./deploy-examples/docling-serve-oauth.yaml)
|
||||
|
||||
This deployment has the following features:
|
||||
|
||||
- TLS encryption between all components (using the cluster-internal CA authority).
|
||||
- Authentication via a secure `oauth-proxy` sidecar.
|
||||
- Expose the service using a secure OpenShift `Route`
|
||||
|
||||
Install the app with:
|
||||
|
||||
```sh
|
||||
oc apply -f docs/deploy-examples/docling-serve-oauth.yaml
|
||||
```
|
||||
|
||||
For using the API:
|
||||
|
||||
```sh
|
||||
# Retrieve the endpoint
|
||||
DOCLING_NAME=docling-serve
|
||||
DOCLING_ROUTE="https://$(oc get routes ${DOCLING_NAME} --template={{.spec.host}})"
|
||||
|
||||
# Retrieve the authentication token
|
||||
OCP_AUTH_TOKEN=$(oc whoami --show-token)
|
||||
|
||||
# Make a test query
|
||||
curl -X 'POST' \
|
||||
"${DOCLING_ROUTE}/v1/convert/source/async" \
|
||||
-H "Authorization: Bearer ${OCP_AUTH_TOKEN}" \
|
||||
-H "accept: application/json" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}]
|
||||
}'
|
||||
```
|
||||
|
||||
### ReplicaSets with `sticky sessions`
|
||||
|
||||
Manifest example: [docling-serve-replicas-w-sticky-sessions.yaml](./deploy-examples/docling-serve-replicas-w-sticky-sessions.yaml)
|
||||
|
||||
This deployment has the following features:
|
||||
|
||||
- Deployment configuration with 3 replicas
|
||||
- Service configuration
|
||||
- Expose the service using an OpenShift `Route` and enable sticky sessions
|
||||
|
||||
Install the app with:
|
||||
|
||||
```sh
|
||||
oc apply -f docs/deploy-examples/docling-serve-replicas-w-sticky-sessions.yaml
|
||||
```
|
||||
|
||||
For using the API:
|
||||
|
||||
```sh
|
||||
# Retrieve the endpoint
|
||||
DOCLING_NAME=docling-serve
|
||||
DOCLING_ROUTE="https://$(oc get routes $DOCLING_NAME --template={{.spec.host}})"
|
||||
|
||||
# Make a test query, store the cookie and taskid
|
||||
task_id=$(curl -s -X 'POST' \
|
||||
"${DOCLING_ROUTE}/v1/convert/source/async" \
|
||||
-H "accept: application/json" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}]
|
||||
}' \
|
||||
-c cookies.txt | grep -oP '"task_id":"\K[^"]+')
|
||||
```
|
||||
|
||||
```sh
|
||||
# Grab the taskid and cookie to check the task status
|
||||
curl -v -X 'GET' \
|
||||
"${DOCLING_ROUTE}/v1/status/poll/$task_id?wait=0" \
|
||||
-H "accept: application/json" \
|
||||
-b "cookies.txt"
|
||||
```
|
||||
docs/development.md (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
# Development
|
||||
|
||||
## Install dependencies
|
||||
|
||||
### CPU only
|
||||
|
||||
```sh
|
||||
# Install uv if not already available
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
|
||||
# Install dependencies
|
||||
uv sync --extra cpu
|
||||
```
|
||||
|
||||
### Cuda GPU
|
||||
|
||||
For GPU support use the following command:
|
||||
|
||||
```sh
|
||||
# Install dependencies
|
||||
uv sync
|
||||
```
|
||||
|
||||
### Gradio UI and different OCR backends
|
||||
|
||||
The `/ui` endpoint (based on `gradio`) and different OCR backends can be enabled via package extras:
|
||||
|
||||
```sh
|
||||
# Enable ui and rapidocr
|
||||
uv sync --extra ui --extra rapidocr
|
||||
```
|
||||
|
||||
```sh
|
||||
# Enable tesserocr
|
||||
uv sync --extra tesserocr
|
||||
```
|
||||
|
||||
See the `[project.optional-dependencies]` section in `pyproject.toml` for the full list of extras, and run `uv run docling-serve --help` for the runtime options.
|
||||
|
||||
### Run the server
|
||||
|
||||
The `docling-serve` executable is a convenient script for launching the webserver both in
|
||||
development and production mode.
|
||||
|
||||
```sh
|
||||
# Run the server in development mode
|
||||
# - reload is enabled by default
|
||||
# - listening on the 127.0.0.1 address
|
||||
# - ui is enabled by default
|
||||
docling-serve dev
|
||||
|
||||
# Run the server in production mode
|
||||
# - reload is disabled by default
|
||||
# - listening on the 0.0.0.0 address
|
||||
# - ui is disabled by default
|
||||
docling-serve run
|
||||
```
|
||||
docs/examples.md (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
# Examples
|
||||
|
||||
## Split processing
|
||||
|
||||
The provided split-processing example demonstrates how to split a PDF into chunks of pages and send them for conversion. At the end, it concatenates all split pages into a single conversion `JSON`.
|
||||
|
||||
At the beginning of the file there are variables to be set (and modified), such as:
|
||||
| Variable | Description |
|
||||
| ---------|-------------|
|
||||
| `path_to_pdf`| Path to PDF file to be split |
|
||||
| `pages_per_file`| The number of pages per chunk to split PDF |
|
||||
| `base_url`| Base url of the `docling-serve` host |
|
||||
| `out_dir`| The output folder of each conversion `JSON` of split PDF and the final concatenated `JSON` |
|
||||
|
||||
The example follows this logic (a minimal sketch of the submission and polling steps is shown after the list):
|
||||
- Get the number of pages of the `PDF`
|
||||
- Based on the number of chunks of pages, send each chunk to conversion using `page_range` parameter
|
||||
- Wait for all conversions to finish
|
||||
- Get all conversion results
|
||||
- Save each conversion `JSON` result into a `JSON` file
|
||||
- Concatenate all `JSON` results into a single `JSON` using the `docling` concatenate method
|
||||
- Save concatenated `JSON` into a `JSON` file
|
||||
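As referenced above, here is a minimal sketch of submitting one chunk and polling its status. It uses only the `/v1/convert/source/async` and `/v1/status/poll` endpoints documented elsewhere in these docs; the document URL, the page range, and the `grep`-based task-id extraction are illustrative, and fetching and concatenating the results are left to the full example.

```sh
base_url="http://localhost:5001"

# Submit one chunk (pages 1-10) for asynchronous conversion
task_id=$(curl -s -X 'POST' \
  "${base_url}/v1/convert/source/async" \
  -H "Content-Type: application/json" \
  -d '{
        "options": {"to_formats": ["json"], "page_range": [1, 10]},
        "http_sources": [{"url": "https://arxiv.org/pdf/2501.17887"}]
      }' | grep -oP '"task_id":"\K[^"]+')

# Poll the task status until it reports completion
curl -s "${base_url}/v1/status/poll/${task_id}?wait=0"
```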
docs/mcp.md (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
# Docling MCP in Docling Serve
|
||||
|
||||
The `docling-serve` container image includes all MCP (Model Context Protocol) features starting from version v1.1.0 ([changelog](https://github.com/docling-project/docling-serve/blob/624f65d41b734e8b39ff267bc8bf6e766c376d6d/CHANGELOG.md)). To leverage these features, you simply need to use a different entrypoint: no custom image builds or additional installations are required. The image provides the `docling-mcp-server` executable, which enables MCP functionality out of the box.
|
||||
|
||||
Read more on [Docling MCP](https://github.com/docling-project/docling-mcp) in its dedicated repository.
|
||||
|
||||
## Launching the MCP Service
|
||||
|
||||
By default, the container runs `docling-serve run` and exposes port 5001. To start the MCP service, override the entrypoint and specify your desired port mapping. For example:
|
||||
|
||||
```sh
|
||||
podman run -p 8000:8000 quay.io/docling-project/docling-serve -- docling-mcp-server --transport streamable-http --port 8000 --host 0.0.0.0
|
||||
```
|
||||
|
||||
This command starts the MCP server on port 8000, accessible at `http://localhost:8000/mcp`. Adjust the port and host as needed. Key arguments for `docling-mcp-server` include `--transport streamable-http` (HTTP transport for client connections), `--port <PORT>`, and `--host <HOST>` (use `0.0.0.0` to accept connections from any interface).
|
||||
|
||||
## Configuring MCP Clients
|
||||
|
||||
Most MCP-compatible clients, such as LM Studio and Claude Desktop, allow you to specify custom MCP server endpoints. The standard configuration uses a JSON block to define available MCP servers. For example, to connect to the Docling MCP server running on port 8000:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"docling": {
|
||||
"url": "http://localhost:8000/mcp"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Insert this configuration in your client's settings where MCP servers are defined. Update the URL if you use a different port.
|
||||
|
||||
### LM Studio and Claude Desktop
|
||||
|
||||
Both LM Studio and Claude Desktop support MCP endpoints via configuration files or UI settings. Paste the above JSON block into the appropriate configuration section. For Claude Desktop, add the MCP server in the "Custom Model" or "MCP Server" section. For LM Studio, refer to its documentation for the location of the MCP server configuration.
|
||||
|
||||
### Other MCP Clients
|
||||
|
||||
Other clients, such as Continue Coding Assistant, also support custom MCP endpoints. Use the same configuration pattern: provide the MCP server URL ending with `/mcp` and ensure the port matches your container setup. See the [Docling MCP docs](https://github.com/docling-project/docling-mcp/tree/main/docs/integrations) for more details.
|
||||
docs/models.md (new file, 175 lines)
@@ -0,0 +1,175 @@
|
||||
# Handling Models in Docling Serve
|
||||
|
||||
When enabling steps in Docling Serve that require extra models (such as picture classification, picture description, table detection, code recognition, formula extraction, or vision-language modules), you must ensure those models are available in the runtime environment. The standard container image includes only the default models. Any additional models must be downloaded and made available before use. If required models are missing, Docling Serve will raise runtime errors rather than downloading them automatically. This default choice guarantees that the system does not call external services unexpectedly.
|
||||
|
||||
## Model Storage Location
|
||||
|
||||
Docling Serve loads models from the directory specified by the `DOCLING_SERVE_ARTIFACTS_PATH` environment variable. This path must be consistent across model download and runtime. When running with multiple workers or reload enabled, you must use the environment variable rather than the CLI argument for configuration [[source]](./configuration.md).
|
||||
|
||||
## Approaches for Making Extra Models Available
|
||||
|
||||
There are several ways to ensure required models are present:
|
||||
|
||||
### 1. Disable Local Models (Trigger Auto-Download)
|
||||
|
||||
You can configure the container to download all models at startup by clearing the artifacts path:
|
||||
|
||||
```sh
|
||||
podman run -d -p 5001:5001 --name docling-serve \
|
||||
-e DOCLING_SERVE_ARTIFACTS_PATH="" \
|
||||
-e DOCLING_SERVE_ENABLE_UI=true \
|
||||
quay.io/docling-project/docling-serve
|
||||
```
|
||||
|
||||
This approach is simple for local development but not recommended for production, as it increases startup time and depends on network availability.
|
||||
|
||||
### 2. Build a Custom Image with Pre-Downloaded Models
|
||||
|
||||
You can create a new image that includes the required models:
|
||||
|
||||
```Dockerfile
|
||||
FROM quay.io/docling-project/docling-serve
|
||||
RUN docling-tools models download smolvlm
|
||||
```
|
||||
|
||||
This method is suitable for production, as it ensures all models are present in the image and avoids runtime downloads.
|
||||
|
||||
### 3. Update the Entrypoint to Download Models Before Startup
|
||||
|
||||
You can override the entrypoint to download models before starting the service:
|
||||
|
||||
```sh
|
||||
podman run -p 5001:5001 -e DOCLING_SERVE_ENABLE_UI=true \
|
||||
quay.io/docling-project/docling-serve \
|
||||
  -- sh -c 'docling-tools models download smolvlm && exec docling-serve run'
|
||||
```
|
||||
|
||||
This is useful for environments where you want to keep the base image unchanged but still automate model preparation.
|
||||
|
||||
### 4. Mount a Volume with Pre-Downloaded Models
|
||||
|
||||
Download models locally and mount them into the container:
|
||||
|
||||
```sh
|
||||
# Download the models locally
|
||||
docling-tools models download --all -o models
|
||||
|
||||
# Start the container with the local models folder
|
||||
podman run -p 5001:5001 \
|
||||
-v $(pwd)/models:/opt/app-root/src/models \
|
||||
-e DOCLING_SERVE_ARTIFACTS_PATH="/opt/app-root/src/models" \
|
||||
-e DOCLING_SERVE_ENABLE_UI=true \
|
||||
quay.io/docling-project/docling-serve
|
||||
```
|
||||
|
||||
This approach is robust for both local and production deployments, especially when using persistent storage.
|
||||
|
||||
## Kubernetes/Cluster Deployments
|
||||
|
||||
For Kubernetes or OpenShift clusters, the recommended approach is to use a PersistentVolumeClaim (PVC) for model storage, a Kubernetes Job to download models, and mount the volume into the deployment. This ensures models persist across pod restarts and scale-out scenarios.
|
||||
|
||||
### Example: PersistentVolumeClaim
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: docling-model-cache-pvc
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
```
|
||||
|
||||
If you don't want to use the default storage class, set a custom storage class as follows:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
...
|
||||
storageClassName: <Storage Class Name>
|
||||
```
|
||||
|
||||
Manifest example: [docling-model-cache-pvc.yaml](./deploy-examples/docling-model-cache-pvc.yaml)
|
||||
|
||||
### Example: Model Download Job
|
||||
|
||||
```yaml
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: docling-model-cache-load
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: loader
|
||||
image: ghcr.io/docling-project/docling-serve-cpu:main
|
||||
command:
|
||||
- docling-tools
|
||||
- models
|
||||
- download
|
||||
- '--output-dir=/modelcache'
|
||||
- 'layout'
|
||||
- 'tableformer'
|
||||
- 'code_formula'
|
||||
- 'picture_classifier'
|
||||
- 'smolvlm'
|
||||
- 'granite_vision'
|
||||
- 'easyocr'
|
||||
volumeMounts:
|
||||
- name: docling-model-cache
|
||||
mountPath: /modelcache
|
||||
volumes:
|
||||
- name: docling-model-cache
|
||||
persistentVolumeClaim:
|
||||
claimName: docling-model-cache-pvc
|
||||
restartPolicy: Never
|
||||
```
|
||||
|
||||
The job will mount the previously created persistent volume and execute a command similar to how models are loaded locally:
|
||||
`docling-tools models download --output-dir <MOUNT-PATH> [LIST_OF_MODELS]`
|
||||
|
||||
In the manifest, the desired models are specified individually; alternatively, the `--all` parameter downloads all models.
|
||||
|
||||
Manifest example: [docling-model-cache-job.yaml](./deploy-examples/docling-model-cache-job.yaml)
|
||||
|
||||
### Example: Deployment with Mounted Volume
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: api
|
||||
env:
|
||||
- name: DOCLING_SERVE_ARTIFACTS_PATH
|
||||
value: '/modelcache'
|
||||
volumeMounts:
|
||||
- name: docling-model-cache
|
||||
mountPath: /modelcache
|
||||
volumes:
|
||||
- name: docling-model-cache
|
||||
persistentVolumeClaim:
|
||||
claimName: docling-model-cache-pvc
|
||||
```
|
||||
|
||||
The value of `DOCLING_SERVE_ARTIFACTS_PATH` must match the mount path where models are stored.
|
||||
|
||||
Now, when docling-serve is executing tasks, the underlying docling installation will load model weights from the mounted volume.
|
||||
|
||||
Manifest example: [docling-model-cache-deployment.yaml](./deploy-examples/docling-model-cache-deployment.yaml)
|
||||
|
||||
## Local Docker Execution
|
||||
|
||||
For local Docker or Podman execution, you can use any of the approaches above. Mounting a local directory with pre-downloaded models is the most reliable for repeated runs and avoids network dependencies.
|
||||
|
||||
## Troubleshooting and Best Practices
|
||||
|
||||
- If a required model is missing from the artifacts path, Docling Serve will raise a runtime error.
|
||||
- Always ensure the value of `DOCLING_SERVE_ARTIFACTS_PATH` matches the directory where models are stored and mounted.
|
||||
- For production and cluster environments, prefer persistent storage and pre-loading models via a dedicated job.
|
||||
|
||||
For more details and YAML manifest examples, see the [deployment documentation](./deployment.md).
|
||||
docs/usage.md (new file, 448 lines)
@@ -0,0 +1,448 @@
|
||||
# Usage
|
||||
|
||||
The API provides two endpoints: one for URLs, one for files. The latter makes it possible to send files directly in binary format instead of as base64-encoded strings.
|
||||
|
||||
## Common parameters
|
||||
|
||||
On top of the source of the file (see below), both endpoints support the same parameters, which are almost the same as those of the Docling CLI.
|
||||
|
||||
- `from_formats` (List[str]): Input format(s) to convert from. Allowed values: `docx`, `pptx`, `html`, `image`, `pdf`, `asciidoc`, `md`. Defaults to all formats.
|
||||
- `to_formats` (List[str]): Output format(s) to convert to. Allowed values: `md`, `json`, `html`, `text`, `doctags`. Defaults to `md`.
|
||||
- `pipeline` (str). The choice of which pipeline to use. Allowed values are `standard` and `vlm`. Defaults to `standard`.
|
||||
- `page_range` (tuple). If specified, only convert a range of pages. The page number starts at 1.
|
||||
- `do_ocr` (bool): If enabled, the bitmap content will be processed using OCR. Defaults to `True`.
|
||||
- `image_export_mode`: Image export mode for the document (only in case of JSON, Markdown or HTML). Allowed values: embedded, placeholder, referenced. Optional, defaults to `embedded`.
|
||||
- `force_ocr` (bool): If enabled, replace any existing text with OCR-generated text over the full content. Defaults to `False`.
|
||||
- `ocr_engine` (str): OCR engine to use. Allowed values: `easyocr`, `tesserocr`, `tesseract`, `rapidocr`, `ocrmac`. Defaults to `easyocr`. To use the `tesserocr` engine, `tesserocr` must be installed where docling-serve is running: `pip install tesserocr`
|
||||
- `ocr_lang` (List[str]): List of languages used by the OCR engine. Note that each OCR engine has different values for the language names. Defaults to empty.
|
||||
- `pdf_backend` (str): PDF backend to use. Allowed values: `pypdfium2`, `dlparse_v1`, `dlparse_v2`, `dlparse_v4`. Defaults to `dlparse_v4`.
|
||||
- `table_mode` (str): Table mode to use. Allowed values: `fast`, `accurate`. Defaults to `fast`.
|
||||
- `abort_on_error` (bool): If enabled, abort on error. Defaults to false.
|
||||
- `md_page_break_placeholder` (str): Add this placeholder between pages in the markdown output.
|
||||
- `do_table_structure` (bool): If enabled, the table structure will be extracted. Defaults to true.
|
||||
- `do_code_enrichment` (bool): If enabled, perform OCR code enrichment. Defaults to false.
|
||||
- `do_formula_enrichment` (bool): If enabled, perform formula OCR, return LaTeX code. Defaults to false.
|
||||
- `do_picture_classification` (bool): If enabled, classify pictures in documents. Defaults to false.
|
||||
- `do_picture_description` (bool): If enabled, describe pictures in documents. Defaults to false.
|
||||
- `picture_description_area_threshold` (float): Minimum percentage of the area for a picture to be processed with the models. Defaults to 0.05.
|
||||
- `picture_description_local` (dict): Options for running a local vision-language model in the picture description. The parameters refer to a model hosted on Hugging Face. This parameter is mutually exclusive with `picture_description_api`.
|
||||
- `picture_description_api` (dict): API details for using a vision-language model in the picture description. This parameter is mutually exclusive with `picture_description_local`.
|
||||
- `include_images` (bool): If enabled, images will be extracted from the document. Defaults to false.
|
||||
- `images_scale` (float): Scale factor for images. Defaults to 2.0.
|
||||
|
||||
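
For example, a minimal `options` object using a few of these parameters (the placeholder string is a hypothetical value) converts only pages 2 to 5 to Markdown; all omitted parameters keep their defaults:

```json
{
  "options": {
    "to_formats": ["md"],
    "page_range": [2, 5],
    "md_page_break_placeholder": "<!-- page break -->"
  }
}
```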

### Authentication

When authentication is activated (see the parameter `DOCLING_SERVE_API_KEY` in [configuration.md](./configuration.md)), all API requests **must** provide the header `X-Api-Key` with the correct secret key.
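
For example, assuming the server was started with `DOCLING_SERVE_API_KEY=example-key` (a placeholder value), a request would include the header as follows:

```sh
curl -X 'POST' \
  'http://localhost:5001/v1/convert/source' \
  -H 'accept: application/json' \
  -H 'Content-Type: application/json' \
  -H 'X-Api-Key: example-key' \
  -d '{"http_sources": [{"url": "https://arxiv.org/pdf/2206.01062"}]}'
```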

## Convert endpoints

### Source endpoint

The endpoint is `/v1/convert/source`, listening for POST requests with JSON payloads.

On top of the above parameters, you must send the URL(s) of the document(s) you want to process with either the `http_sources` or `file_sources` field.
The first fetches URL(s) (optionally with extra headers), the second allows you to provide documents as base64-encoded strings.
The `options` object is not required; it can be partially or completely omitted.

Simple payload example:

```json
{
  "http_sources": [{"url": "https://arxiv.org/pdf/2206.01062"}]
}
```

<details>

<summary>Complete payload example:</summary>

```json
{
  "options": {
    "from_formats": ["docx", "pptx", "html", "image", "pdf", "asciidoc", "md", "xlsx"],
    "to_formats": ["md", "json", "html", "text", "doctags"],
    "image_export_mode": "placeholder",
    "do_ocr": true,
    "force_ocr": false,
    "ocr_engine": "easyocr",
    "ocr_lang": ["en"],
    "pdf_backend": "dlparse_v2",
    "table_mode": "fast",
    "abort_on_error": false
  },
  "http_sources": [{"url": "https://arxiv.org/pdf/2206.01062"}]
}
```

</details>

<details>

<summary>CURL example:</summary>

```sh
curl -X 'POST' \
  'http://localhost:5001/v1/convert/source' \
  -H 'accept: application/json' \
  -H 'Content-Type: application/json' \
  -d '{
    "options": {
      "from_formats": ["docx", "pptx", "html", "image", "pdf", "asciidoc", "md", "xlsx"],
      "to_formats": ["md", "json", "html", "text", "doctags"],
      "image_export_mode": "placeholder",
      "do_ocr": true,
      "force_ocr": false,
      "ocr_engine": "easyocr",
      "ocr_lang": ["fr", "de", "es", "en"],
      "pdf_backend": "dlparse_v2",
      "table_mode": "fast",
      "abort_on_error": false,
      "do_table_structure": true,
      "include_images": true,
      "images_scale": 2
    },
    "http_sources": [{"url": "https://arxiv.org/pdf/2206.01062"}]
  }'
```

</details>

<details>
<summary>Python example:</summary>

```python
import httpx

async_client = httpx.AsyncClient(timeout=60.0)
url = "http://localhost:5001/v1/convert/source"
payload = {
    "options": {
        "from_formats": ["docx", "pptx", "html", "image", "pdf", "asciidoc", "md", "xlsx"],
        "to_formats": ["md", "json", "html", "text", "doctags"],
        "image_export_mode": "placeholder",
        "do_ocr": True,
        "force_ocr": False,
        "ocr_engine": "easyocr",
        "ocr_lang": ["en"],
        "pdf_backend": "dlparse_v2",
        "table_mode": "fast",
        "abort_on_error": False,
    },
    "http_sources": [{"url": "https://arxiv.org/pdf/2206.01062"}],
}

# Run inside an async function (e.g. via asyncio.run)
response = await async_client.post(url, json=payload)

data = response.json()
```

</details>

#### File as base64

The `file_sources` argument of the endpoint allows sending files as base64-encoded strings.
When your PDF or other file is too large, encoding it and passing it inline to curl can lead to an "Argument list too long" error on some systems. To avoid this, write the JSON request body to a file and have curl read from that file.

<details>
<summary>CURL steps:</summary>

```sh
# 1. Base64-encode the file
B64_DATA=$(base64 -w 0 /path/to/file/pdf-to-convert.pdf)

# 2. Build the JSON with your options
cat <<EOF > /tmp/request_body.json
{
  "options": {},
  "file_sources": [{
    "base64_string": "${B64_DATA}",
    "filename": "pdf-to-convert.pdf"
  }]
}
EOF

# 3. POST the request to the docling service
curl -X POST "localhost:5001/v1/convert/source" \
  -H "Content-Type: application/json" \
  -d @/tmp/request_body.json
```

</details>

### File endpoint

The endpoint is `/v1/convert/file`, listening for POST requests with form payloads (necessary as the files are sent as multipart/form-data). You can send one or multiple files.

<details>
<summary>CURL example:</summary>

```sh
curl -X 'POST' \
  'http://127.0.0.1:5001/v1/convert/file' \
  -H 'accept: application/json' \
  -H 'Content-Type: multipart/form-data' \
  -F 'ocr_engine=easyocr' \
  -F 'pdf_backend=dlparse_v2' \
  -F 'from_formats=pdf' \
  -F 'from_formats=docx' \
  -F 'force_ocr=false' \
  -F 'image_export_mode=embedded' \
  -F 'ocr_lang=en' \
  -F 'ocr_lang=pl' \
  -F 'table_mode=fast' \
  -F 'files=@2206.01062v1.pdf;type=application/pdf' \
  -F 'abort_on_error=false' \
  -F 'to_formats=md' \
  -F 'to_formats=text' \
  -F 'do_ocr=true'
```

</details>

<details>
<summary>Python example:</summary>

```python
import os

import httpx

async_client = httpx.AsyncClient(timeout=60.0)
url = "http://localhost:5001/v1/convert/file"
parameters = {
    "from_formats": ["docx", "pptx", "html", "image", "pdf", "asciidoc", "md", "xlsx"],
    "to_formats": ["md", "json", "html", "text", "doctags"],
    "image_export_mode": "placeholder",
    "do_ocr": True,
    "force_ocr": False,
    "ocr_engine": "easyocr",
    "ocr_lang": ["en"],
    "pdf_backend": "dlparse_v2",
    "table_mode": "fast",
    "abort_on_error": False,
}

current_dir = os.path.dirname(__file__)
file_path = os.path.join(current_dir, "2206.01062v1.pdf")

files = {
    "files": ("2206.01062v1.pdf", open(file_path, "rb"), "application/pdf"),
}

# Run inside an async function (e.g. via asyncio.run)
response = await async_client.post(url, files=files, data=parameters)
assert response.status_code == 200, "Response should be 200 OK"

data = response.json()
```

</details>

### Picture description options

When the picture description enrichment is activated, users may specify which model and which execution mode to use for this task. There are two choices for the execution mode: _local_ runs the vision-language model directly, while _api_ invokes an external API endpoint.

The local option is specified with:

```jsonc
{
  "picture_description_local": {
    "repo_id": "", // Repository id from the Hugging Face Hub.
    "generation_config": {"max_new_tokens": 200, "do_sample": false}, // HF generation config.
    "prompt": "Describe this image in a few sentences." // Prompt used when calling the vision-language model.
  }
}
```

The possible values for `generation_config` are documented in the [Hugging Face text generation docs](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.GenerationConfig).

The api option is specified with:

```jsonc
{
  "picture_description_api": {
    "url": "", // Endpoint which accepts OpenAI-API compatible requests.
    "headers": {}, // Headers used for calling the API endpoint, e.g. authentication headers.
    "params": {}, // Model parameters.
    "timeout": 20, // Timeout for the API request.
    "prompt": "Describe this image in a few sentences." // Prompt used when calling the vision-language model.
  }
}
```

Example URLs are:

- `http://localhost:8000/v1/chat/completions` for the local vLLM API, with example `picture_description_api` values:
  - the `HuggingFaceTB/SmolVLM-256M-Instruct` model

    ```json
    {
      "url": "http://localhost:8000/v1/chat/completions",
      "params": {
        "model": "HuggingFaceTB/SmolVLM-256M-Instruct",
        "max_completion_tokens": 200
      }
    }
    ```

  - the `ibm-granite/granite-vision-3.2-2b` model

    ```json
    {
      "url": "http://localhost:8000/v1/chat/completions",
      "params": {
        "model": "ibm-granite/granite-vision-3.2-2b",
        "max_completion_tokens": 200
      }
    }
    ```

- `http://localhost:11434/v1/chat/completions` for the local Ollama API, with example `picture_description_api` values:
  - the `granite3.2-vision:2b` model

    ```json
    {
      "url": "http://localhost:11434/v1/chat/completions",
      "params": {
        "model": "granite3.2-vision:2b"
      }
    }
    ```

Note that when using `picture_description_api`, the server must be launched with `DOCLING_SERVE_ENABLE_REMOTE_SERVICES=true`.
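
Putting it together, a complete request enabling the API-based picture description might look like the following sketch (the model and endpoint values are examples taken from above):

```json
{
  "options": {
    "do_picture_description": true,
    "picture_description_api": {
      "url": "http://localhost:11434/v1/chat/completions",
      "params": {"model": "granite3.2-vision:2b"},
      "timeout": 60,
      "prompt": "Describe this image in a few sentences."
    }
  },
  "http_sources": [{"url": "https://arxiv.org/pdf/2206.01062"}]
}
```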

## Response format

The response can be a JSON document or a file.

- If you process only one file, the response will be a JSON document with the following format:

```jsonc
{
  "document": {
    "md_content": "",
    "json_content": {},
    "html_content": "",
    "text_content": "",
    "doctags_content": ""
  },
  "status": "<success|partial_success|skipped|failure>",
  "processing_time": 0.0,
  "timings": {},
  "errors": []
}
```

Depending on the values you set in `to_formats`, the different items will be populated with their respective results or left empty; see the snippet after this list.

`processing_time` is the Docling processing time in seconds, and `timings` (when enabled in the backend) provides the detailed timing of all the internal Docling components.

- If you set the parameter `target` to the zip mode, the response will be a zip file.
- If multiple files are generated (multiple inputs, or one input with multiple outputs and the zip target mode), the response will be a zip file.
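
A minimal client-side sketch for reading the single-file JSON response (assuming `md` was among the requested formats):

```python
data = response.json()

if data["status"] in ("success", "partial_success"):
    markdown = data["document"]["md_content"]  # populated when "md" is requested
    print(f"Converted in {data['processing_time']:.1f}s, got {len(markdown)} characters of Markdown")
else:
    print("Conversion failed:", data["errors"])
```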

## Asynchronous API

Both the `/v1/convert/source` and `/v1/convert/file` endpoints are available as asynchronous variants.
The advantage of the asynchronous endpoints is the possibility to interrupt the connection, check for progress updates, and fetch the result later.
This approach is more resilient against network instabilities and allows the client application logic to easily interleave conversion with other tasks.

Launch an asynchronous conversion with:

- `POST /v1/convert/source/async` when providing the input as sources.
- `POST /v1/convert/file/async` when providing the input as multipart-form files.

The response format is a task detail:

```jsonc
{
  "task_id": "<task_id>", // the task_id which can be used for the next operations
  "task_status": "pending|started|success|failure", // the task status
  "task_position": 1, // the position in the queue
  "task_meta": null // metadata, e.g. how many documents are in the total job and how many have been converted
}
```
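
For example, a submission with curl might look like the following sketch (same payload shape as the synchronous source endpoint above):

```sh
curl -X 'POST' \
  'http://localhost:5001/v1/convert/source/async' \
  -H 'Content-Type: application/json' \
  -d '{"http_sources": [{"url": "https://arxiv.org/pdf/2206.01062"}]}'
```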

### Polling status

To check the progress of the conversion task and wait for its completion, use the endpoint:

- `GET /v1/status/poll/{task_id}`

<details>
<summary>Example waiting loop:</summary>

```python
import time

import httpx

# ...
# response from the async task submission
task = response.json()

while task["task_status"] not in ("success", "failure"):
    response = httpx.get(f"{base_url}/status/poll/{task['task_id']}")
    task = response.json()

    time.sleep(5)
```

</details>

### Subscribe with websockets

Using websockets, the client application can be notified about updates of the conversion task.
To start the websocket connection, use the endpoint:

- `/v1/status/ws/{task_id}`

Websocket messages are JSON objects with the following structure:

```jsonc
{
  "message": "connection|update|error", // type of message being sent
  "task": {}, // the same content as the task description
  "error": "" // description of the error
}
```

<details>
<summary>Example websocket usage:</summary>

```python
import json

from websockets.sync.client import connect

uri = f"ws://{base_url}/v1/status/ws/{task['task_id']}"
with connect(uri) as websocket:
    for message in websocket:
        try:
            payload = json.loads(message)
            if payload["message"] == "error":
                break
            # stop once an update reports a terminal task status
            if payload["message"] == "update" and payload["task"]["task_status"] in ("success", "failure"):
                break
        except json.JSONDecodeError:
            break
```

</details>

### Fetch results

When the task is completed, the result can be fetched with the endpoint:

- `GET /v1/result/{task_id}`
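
A minimal sketch of fetching and reading the result (the response body has the same shape as the synchronous response):

```python
result_resp = httpx.get(f"{base_url}/result/{task['task_id']}")
result = result_resp.json()

print(result["document"]["md_content"])
```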
80 docs/v1_migration.md Normal file
@@ -0,0 +1,80 @@

# Migration to the `v1` API

Docling Serve moved from the initial prototype `v1alpha` API to the stable `v1` API.
This page provides simple instructions to upgrade your application to the new API.

## API changes

The breaking changes introduced in the `v1` release of Docling Serve are designed to provide a stable schema which
allows the project to add new capabilities, such as new types of input sources and targets, as well as the definition of callbacks for event-driven applications.

### Endpoint names

All endpoints are renamed from `/v1alpha/` to `/v1/`, e.g. `/v1alpha/convert/source` becomes `/v1/convert/source`.

### Sources

When using the `/v1/convert/source` endpoint, input documents have to be specified with the `sources: []` argument, which replaces the usage of `file_sources` and `http_sources`.

Old version:

```jsonc
{
  "options": {}, // conversion options
  "file_sources": [ // input documents provided as base64-encoded strings
    {"base64_string": "abc123...", "filename": "file.pdf"}
  ],
  "http_sources": [ // input documents provided as http urls
    {"url": "https://..."}
  ]
}
```

New version:

```jsonc
{
  "options": {}, // conversion options
  "sources": [
    // input document provided as base64-encoded string
    {"kind": "file", "base64_string": "abc123...", "filename": "file.pdf"},
    // input document provided as http url
    {"kind": "http", "url": "https://..."}
  ]
}
```
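
If your client builds payloads programmatically, a small helper along these lines (a sketch, not part of the API) can translate the old fields into the new `sources` list:

```python
def migrate_sources(payload: dict) -> dict:
    """Convert v1alpha file_sources/http_sources into the v1 sources list."""
    sources = [
        {"kind": "file", **src} for src in payload.pop("file_sources", [])
    ] + [
        {"kind": "http", **src} for src in payload.pop("http_sources", [])
    ]
    payload["sources"] = sources
    return payload
```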

### Targets

To switch between response types, e.g. from the in-body JSON response to the zip archive response, users have to specify the `target` argument, which replaces the usage of `options.return_as_file`.

Old version:

```jsonc
{
  "options": {
    "return_as_file": true // <-- to be removed
  },
  // ...
}
```

New version:

```jsonc
{
  "options": {},
  "target": {"kind": "zip"}, // <-- add this
  // ...
}
```

## Continue with the old API

If you are not able to apply the changes above to your application, please consider pinning the previous `v0.x` container images, e.g.

```sh
podman run -p 5001:5001 -e DOCLING_SERVE_ENABLE_UI=1 quay.io/docling-project/docling-serve:v0.16.1
```

_Note that the old prototype API will not be supported in new `v1.x` versions._
124 examples/split_processing.py Normal file
@@ -0,0 +1,124 @@
import json
import time
from pathlib import Path

import httpx
from pydantic import BaseModel
from pypdf import PdfReader

from docling_core.types.doc.document import DoclingDocument

# Variables to use
path_to_pdf = Path("./tests/2206.01062v1.pdf")
pages_per_file = 4
base_url = "http://localhost:5001/v1"
out_dir = Path("examples/splitted_pdf/")


class ConvertedSplittedPdf(BaseModel):
    task_id: str
    conversion_finished: bool = False
    result: dict | None = None


def get_task_result(task_id: str):
    response = httpx.get(
        f"{base_url}/result/{task_id}",
        timeout=15,
    )
    return response.json()


def check_task_status(task_id: str):
    response = httpx.get(f"{base_url}/status/poll/{task_id}", timeout=15)
    task = response.json()
    task_status = task["task_status"]

    task_finished = False
    if task_status == "success":
        task_finished = True

    if task_status in ("failure", "revoked"):
        raise RuntimeError("A conversion failed")

    if not task_finished:
        # wait before the next poll, but don't sleep once the task is done
        time.sleep(5)

    return task_finished


def post_file(file_path: Path, start_page: int, end_page: int):
    payload = {
        "to_formats": ["json"],
        "image_export_mode": "placeholder",
        "ocr": False,
        "abort_on_error": False,
        "page_range": [start_page, end_page],
    }

    files = {
        "files": (file_path.name, file_path.open("rb"), "application/pdf"),
    }
    response = httpx.post(
        f"{base_url}/convert/file/async",
        files=files,
        data=payload,
        timeout=15,
    )

    task = response.json()

    return task["task_id"]


def main():
    filename = path_to_pdf

    # make sure the output directory exists before saving results
    out_dir.mkdir(parents=True, exist_ok=True)

    splitted_pdfs: list[ConvertedSplittedPdf] = []

    with open(filename, "rb") as input_pdf_file:
        pdf_reader = PdfReader(input_pdf_file)
        total_pages = len(pdf_reader.pages)

    for start_page in range(0, total_pages, pages_per_file):
        task_id = post_file(
            filename, start_page + 1, min(start_page + pages_per_file, total_pages)
        )
        splitted_pdfs.append(ConvertedSplittedPdf(task_id=task_id))

    all_files_converted = False
    while not all_files_converted:
        found_conversion_running = False
        for splitted_pdf in splitted_pdfs:
            if not splitted_pdf.conversion_finished:
                found_conversion_running = True
                print("checking conversion status...")
                splitted_pdf.conversion_finished = check_task_status(
                    splitted_pdf.task_id
                )
        if not found_conversion_running:
            all_files_converted = True

    for splitted_pdf in splitted_pdfs:
        splitted_pdf.result = get_task_result(splitted_pdf.task_id)

    files = []
    for i, splitted_pdf in enumerate(splitted_pdfs):
        json_content = json.dumps(
            splitted_pdf.result.get("document").get("json_content"), indent=2
        )
        doc = DoclingDocument.model_validate_json(json_content)
        filename = f"{out_dir}/splitted_json_{i}.json"
        doc.save_as_json(filename=filename)
        files.append(filename)

    docs = [DoclingDocument.load_from_json(filename=f) for f in files]
    concate_doc = DoclingDocument.concatenate(docs=docs)

    exp_json_file = Path(f"{out_dir}/concatenated.json")
    concate_doc.save_as_json(exp_json_file)

    print("Finished")


if __name__ == "__main__":
    main()
BIN img/fastapi-ui.png Normal file
Binary file not shown.
After Width: | Height: | Size: 226 KiB
BIN img/ui-input.png Normal file
Binary file not shown.
After Width: | Height: | Size: 64 KiB
BIN img/ui-output.png Normal file
Binary file not shown.
After Width: | Height: | Size: 124 KiB
7 os-packages.txt Normal file
@@ -0,0 +1,7 @@
tesseract
tesseract-devel
tesseract-langpack-eng
tesseract-osd
leptonica-devel
libglvnd-glx
glib2
4430 poetry.lock generated
File diff suppressed because it is too large.
343 pyproject.toml
@@ -1,117 +1,267 @@
[tool.poetry]
[project]
name = "docling-serve"
version = "0.1.0"
version = "1.5.1" # DO NOT EDIT, updated automatically
description = "Running Docling as a service"
license = "MIT"
license = {text = "MIT"}
authors = [
    "Michele Dolfi <dol@zurich.ibm.com>",
    "Christoph Auer <cau@zurich.ibm.com>",
    "Panos Vagenas <pva@zurich.ibm.com>",
    "Cesar Berrospi Ramis <ceb@zurich.ibm.com>",
    "Peter Staar <taa@zurich.ibm.com>",
    {name="Michele Dolfi", email="dol@zurich.ibm.com"},
    {name="Guillaume Moutier", email="gmoutier@redhat.com"},
    {name="Anil Vishnoi", email="avishnoi@redhat.com"},
    {name="Panos Vagenas", email="pva@zurich.ibm.com"},
    {name="Christoph Auer", email="cau@zurich.ibm.com"},
    {name="Peter Staar", email="taa@zurich.ibm.com"},
]
maintainers = [
    "Peter Staar <taa@zurich.ibm.com>",
    "Christoph Auer <cau@zurich.ibm.com>",
    "Michele Dolfi <dol@zurich.ibm.com>",
    "Cesar Berrospi Ramis <ceb@zurich.ibm.com>",
    "Panos Vagenas <pva@zurich.ibm.com>",
    {name="Michele Dolfi", email="dol@zurich.ibm.com"},
    {name="Anil Vishnoi", email="avishnoi@redhat.com"},
    {name="Panos Vagenas", email="pva@zurich.ibm.com"},
    {name="Christoph Auer", email="cau@zurich.ibm.com"},
    {name="Peter Staar", email="taa@zurich.ibm.com"},
]
readme = "README.md"
repository = "https://github.com/DS4SD/docling-serve"
homepage = "https://github.com/DS4SD/docling-serve"
classifiers = [
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    # "Development Status :: 5 - Production/Stable",
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Developers",
    "Typing :: Typed",
    "Programming Language :: Python :: 3"
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
]
requires-python = ">=3.10"
dependencies = [
    "docling~=2.38",
    "docling-core>=2.45.0",
    "docling-jobkit[kfp,rq,vlm]>=1.5.0,<2.0.0",
    "fastapi[standard]~=0.115",
    "httpx~=0.28",
    "pydantic~=2.10",
    "pydantic-settings~=2.4",
    "python-multipart>=0.0.14,<0.1.0",
    "typer~=0.12",
    "uvicorn[standard]>=0.29.0,<1.0.0",
    "websockets~=14.0",
    "scalar-fastapi>=1.0.3",
    "docling-mcp>=1.0.0",
]

[tool.poetry.dependencies]
python = "^3.9"
docling = "^2.10.0"
fastapi = {version = "^0.115.6", extras = ["standard"]}
uvicorn = "^0.32.1"
pydantic-settings = "^2.4.0"
httpx = "^0.28.1"
tesserocr = { version = "^2.7.1", optional = true }
rapidocr-onnxruntime = { version = "^1.4.0", optional = true, markers = "python_version < '3.13'" }
onnxruntime = [
    # 1.19.2 is the last version with python3.9 support,
    # see https://github.com/microsoft/onnxruntime/releases/tag/v1.20.0
    { version = ">=1.7.0,<1.20.0", optional = true, markers = "python_version < '3.10'" },
    { version = "^1.7.0", optional = true, markers = "python_version >= '3.10'" }
[project.optional-dependencies]
ui = [
    "gradio~=5.23.2",
]
tesserocr = [
    "tesserocr~=2.7"
]
rapidocr = [
    "rapidocr (>=3.3,<4.0.0) ; python_version < '3.14'",
    "onnxruntime (>=1.7.0,<2.0.0)",
    "modelscope>=1.29.0",
]
flash-attn = [
    "flash-attn~=2.8.2; sys_platform == 'linux' and platform_machine == 'x86_64'"
]

[dependency-groups]
dev = [
    "asgi-lifespan~=2.0",
    "mypy~=1.11",
    "pre-commit-uv~=4.1",
    "pypdf>=6.0.0",
    "pytest~=8.3",
    "pytest-asyncio~=0.24",
    "pytest-check~=2.4",
    "python-semantic-release~=7.32",
    "ruff>=0.9.6",
]

[tool.poetry.extras]
tesserocr = ["tesserocr"]
rapidocr = ["rapidocr-onnxruntime", "onnxruntime"]
pypi = [
    "torch>=2.7.1",
    "torchvision>=0.22.1",
]

cpu = [
    "torch>=2.7.1",
    "torchvision>=0.22.1",
]

[tool.poetry.group.pypi-torch]
optional = false
# cu124 = [
#     "torch>=2.6.0",
#     "torchvision>=0.21.0",
# ]

[tool.poetry.group.pypi-torch.dependencies]
cu126 = [
    "torch>=2.7.1",
    "torchvision>=0.22.1",
]

cu128 = [
    "torch>=2.7.1",
    "torchvision>=0.22.1",
]

rocm = [
    "torch>=2.7.1",
    "torchvision>=0.22.1",
    "pytorch-triton-rocm>=3.3.1 ; sys_platform == 'linux' and platform_machine == 'x86_64'",
]

[tool.uv]
package = true
default-groups = ["dev", "pypi"]
conflicts = [
    [
        { group = "pypi" },
        { group = "cpu" },
        # { group = "cu124" },
        { group = "cu126" },
        { group = "cu128" },
        { group = "rocm" },
    ],
]
environments = ["sys_platform != 'darwin' or platform_machine != 'x86_64'"]
override-dependencies = [
    "urllib3~=2.0"
]

[tool.uv.sources]
torch = [
    {version = "!=2.4.1+cpu" },
    { index = "pytorch-pypi", group = "pypi" },
    { index = "pytorch-cpu", group = "cpu" },
    # { index = "pytorch-cu124", group = "cu124", marker = "sys_platform == 'linux'" },
    { index = "pytorch-cu126", group = "cu126", marker = "sys_platform == 'linux'" },
    { index = "pytorch-cu128", group = "cu128", marker = "sys_platform == 'linux'" },
    { index = "pytorch-rocm", group = "rocm", marker = "sys_platform == 'linux'" },
]

torchvision = [
    {version = "!=0.19.1+cpu" },
    { index = "pytorch-pypi", group = "pypi" },
    { index = "pytorch-cpu", group = "cpu" },
    # { index = "pytorch-cu124", group = "cu124", marker = "sys_platform == 'linux'" },
    { index = "pytorch-cu126", group = "cu126", marker = "sys_platform == 'linux'" },
    { index = "pytorch-cu128", group = "cu128", marker = "sys_platform == 'linux'" },
    { index = "pytorch-rocm", group = "rocm", marker = "sys_platform == 'linux'" },
]

[tool.poetry.group.cpu]
optional = true

[tool.poetry.group.cpu.dependencies]
torch = [
    {markers = 'platform_machine=="x86_64" and sys_platform=="linux" and python_version == "3.10"', url="https://download.pytorch.org/whl/cpu/torch-2.4.1%2Bcpu-cp310-cp310-linux_x86_64.whl"},
    {markers = 'platform_machine=="x86_64" and sys_platform=="linux" and python_version == "3.11"', url="https://download.pytorch.org/whl/cpu/torch-2.4.1%2Bcpu-cp311-cp311-linux_x86_64.whl"},
    {markers = 'platform_machine=="x86_64" and sys_platform=="linux" and python_version == "3.12"', url="https://download.pytorch.org/whl/cpu/torch-2.4.1%2Bcpu-cp312-cp312-linux_x86_64.whl"},
]
torchvision = [
    {markers = 'platform_machine=="x86_64" and sys_platform=="linux" and python_version == "3.10"', url="https://download.pytorch.org/whl/cpu/torchvision-0.19.1%2Bcpu-cp310-cp310-linux_x86_64.whl"},
    {markers = 'platform_machine=="x86_64" and sys_platform=="linux" and python_version == "3.11"', url="https://download.pytorch.org/whl/cpu/torchvision-0.19.1%2Bcpu-cp311-cp311-linux_x86_64.whl"},
    {markers = 'platform_machine=="x86_64" and sys_platform=="linux" and python_version == "3.12"', url="https://download.pytorch.org/whl/cpu/torchvision-0.19.1%2Bcpu-cp312-cp312-linux_x86_64.whl"},
pytorch-triton-rocm = [
    { index = "pytorch-rocm", marker = "sys_platform == 'linux'" },
]

[tool.poetry.group.constraints.dependencies]
numpy = [
    { version = "^2.1.0", markers = 'python_version >= "3.13"' },
    { version = "^1.24.4", markers = 'python_version < "3.13"' },
]
# docling-jobkit = { git = "https://github.com/docling-project/docling-jobkit/", rev = "main" }
# docling-jobkit = { path = "../docling-jobkit", editable = true }

[tool.poetry.group.dev.dependencies]
black = "^24.8.0"
isort = "^5.13.2"
pre-commit = "^3.8.0"
autoflake = "^2.3.1"
flake8 = "^7.1.1"
pytest = "^8.3.2"
mypy = "^1.11.2"
[[tool.uv.index]]
name = "pytorch-pypi"
url = "https://pypi.org/simple"
explicit = true

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true

[tool.black]
# [[tool.uv.index]]
# name = "pytorch-cu124"
# url = "https://download.pytorch.org/whl/cu124"
# explicit = true

[[tool.uv.index]]
name = "pytorch-cu126"
url = "https://download.pytorch.org/whl/cu126"
explicit = true

[[tool.uv.index]]
name = "pytorch-cu128"
url = "https://download.pytorch.org/whl/cu128"
explicit = true

[[tool.uv.index]]
name = "pytorch-rocm"
url = "https://download.pytorch.org/whl/rocm6.3"
explicit = true

[tool.setuptools.packages.find]
include = ["docling_serve*"]
namespaces = true

[project.scripts]
docling-serve = "docling_serve.__main__:main"

[project.urls]
Homepage = "https://github.com/docling-project/docling-serve"
# Documentation = "https://ds4sd.github.io/docling"
Repository = "https://github.com/docling-project/docling-serve"
Issues = "https://github.com/docling-project/docling-serve/issues"
Changelog = "https://github.com/docling-project/docling-serve/blob/main/CHANGELOG.md"

[tool.ruff]
target-version = "py310"
line-length = 88
target-version = ["py310"]
include = '\.pyi?$'
respect-gitignore = true

[tool.isort]
profile = "black"
line_length = 88
py_version=311
# extend-exclude = [
#     "tests",
# ]

[tool.autoflake]
in-place = true
remove-all-unused-imports = true
remove-unused-variables = true
expand-star-imports = true
recursive = true
[tool.ruff.format]
skip-magic-trailing-comma = false

[tool.ruff.lint]
select = [
    # "B", # flake8-bugbear
    "C", # flake8-comprehensions
    "C9", # mccabe
    # "D", # flake8-docstrings
    "E", # pycodestyle errors (default)
    "F", # pyflakes (default)
    "I", # isort
    "PD", # pandas-vet
    "PIE", # pie
    # "PTH", # pathlib
    "Q", # flake8-quotes
    # "RET", # return
    "RUF", # Enable all ruff-specific checks
    # "SIM", # simplify
    "S307", # eval
    # "T20", # (disallow print statements) keep debugging statements out of the codebase
    "W", # pycodestyle warnings
    "ASYNC", # async
    "UP", # pyupgrade
]

ignore = [
    "E501", # Line too long, handled by ruff formatter
    "D107", # "Missing docstring in __init__",
    "F811", # "redefinition of the same function"
    "PL", # Pylint
    "RUF012", # Mutable Class Attributes
    "UP007", # Option and Union
]

#extend-select = []

[tool.ruff.lint.per-file-ignores]
"__init__.py" = ["E402", "F401"]
"tests/*.py" = ["ASYNC"] # Disable ASYNC check for tests

[tool.ruff.lint.mccabe]
max-complexity = 15

[tool.ruff.lint.isort.sections]
"docling" = ["docling", "docling_core", "docling_jobkit"]

[tool.ruff.lint.isort]
combine-as-imports = true
section-order = [
    "future",
    "standard-library",
    "third-party",
    "docling",
    "first-party",
    "local-folder",
]

[tool.mypy]
pretty = true
@@ -125,5 +275,36 @@ module = [
    "easyocr.*",
    "tesserocr.*",
    "rapidocr_onnxruntime.*",
    "requests.*",
    "kfp.*",
    "kfp_server_api.*",
    "mlx_vlm.*",
    "mlx.*",
    "scalar_fastapi.*",
]
ignore_missing_imports = true

[tool.pytest.ini_options]
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function"
minversion = "8.2"
testpaths = [
    "tests",
]
addopts = "-rA --color=yes --tb=short --maxfail=5"
markers = [
    "asyncio",
]

[tool.semantic_release]
# for default values check:
# https://github.com/python-semantic-release/python-semantic-release/blob/v7.32.2/semantic_release/defaults.cfg

version_source = "tag_only"
branch = "main"

# configure types which should trigger minor and patch version bumps respectively
# (note that they must be a subset of the configured allowed types):
parser_angular_allowed_types = "build,chore,ci,docs,feat,fix,perf,style,refactor,test"
parser_angular_minor_types = "feat"
parser_angular_patch_types = "fix,perf"
BIN tests/2206.01062v1.pdf Normal file
Binary file not shown.
BIN tests/2408.09869v5.pdf Normal file
Binary file not shown.
126 tests/test_1-file-all-outputs.py Normal file
@@ -0,0 +1,126 @@
import json
import os

import httpx
import pytest
import pytest_asyncio
from pytest_check import check

from docling_serve.settings import docling_serve_settings


@pytest_asyncio.fixture
async def async_client():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    async with httpx.AsyncClient(timeout=60.0, headers=headers) as client:
        yield client


@pytest.mark.asyncio
async def test_convert_file(async_client):
    """Test convert single file to all outputs"""
    url = "http://localhost:5001/v1/convert/file"
    options = {
        "from_formats": [
            "docx",
            "pptx",
            "html",
            "image",
            "pdf",
            "asciidoc",
            "md",
            "xlsx",
        ],
        "to_formats": ["md", "json", "html", "text", "doctags"],
        "image_export_mode": "placeholder",
        "ocr": True,
        "force_ocr": False,
        "ocr_engine": "easyocr",
        "ocr_lang": ["en"],
        "pdf_backend": "dlparse_v2",
        "table_mode": "fast",
        "abort_on_error": False,
    }

    current_dir = os.path.dirname(__file__)
    file_path = os.path.join(current_dir, "2206.01062v1.pdf")

    files = {
        "files": ("2206.01062v1.pdf", open(file_path, "rb"), "application/pdf"),
    }

    response = await async_client.post(url, files=files, data=options)
    assert response.status_code == 200, "Response should be 200 OK"

    data = response.json()

    # Response content checks
    # Helper function to safely slice strings
    def safe_slice(value, length=100):
        if isinstance(value, str):
            return value[:length]
        return str(value)  # Convert non-string values to string for debug purposes

    # Document check
    check.is_in(
        "document",
        data,
        msg=f"Response should contain 'document' key. Received keys: {list(data.keys())}",
    )
    # MD check
    check.is_in(
        "md_content",
        data.get("document", {}),
        msg=f"Response should contain 'md_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("md_content") is not None:
        check.is_in(
            "## DocLayNet: ",
            data["document"]["md_content"],
            msg=f"Markdown document should contain 'DocLayNet: '. Received: {safe_slice(data['document']['md_content'])}",
        )
    # JSON check
    check.is_in(
        "json_content",
        data.get("document", {}),
        msg=f"Response should contain 'json_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("json_content") is not None:
        check.is_in(
            '{"schema_name": "DoclingDocument"',
            json.dumps(data["document"]["json_content"]),
            msg=f'JSON document should contain \'{{\\n "schema_name": "DoclingDocument\'". Received: {safe_slice(data["document"]["json_content"])}',
        )
    # HTML check
    if data.get("document", {}).get("html_content") is not None:
        check.is_in(
            "<!DOCTYPE html>\n<html>\n<head>",
            data["document"]["html_content"],
            msg=f"HTML document should contain '<!DOCTYPE html>\\n<html>'. Received: {safe_slice(data['document']['html_content'])}",
        )
    # Text check
    check.is_in(
        "text_content",
        data.get("document", {}),
        msg=f"Response should contain 'text_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("text_content") is not None:
        check.is_in(
            "DocLayNet: A Large Human-Annotated Dataset",
            data["document"]["text_content"],
            msg=f"Text document should contain 'DocLayNet: A Large Human-Annotated Dataset'. Received: {safe_slice(data['document']['text_content'])}",
        )
    # DocTags check
    check.is_in(
        "doctags_content",
        data.get("document", {}),
        msg=f"Response should contain 'doctags_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("doctags_content") is not None:
        check.is_in(
            "<doctag><page_header><loc",
            data["document"]["doctags_content"],
            msg=f"DocTags document should contain '<doctag><page_header><loc'. Received: {safe_slice(data['document']['doctags_content'])}",
        )
75 tests/test_1-file-async.py Normal file
@@ -0,0 +1,75 @@
import json
import time
from pathlib import Path

import httpx
import pytest
import pytest_asyncio

from docling_serve.settings import docling_serve_settings


@pytest_asyncio.fixture
async def async_client():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    async with httpx.AsyncClient(timeout=60.0, headers=headers) as client:
        yield client


@pytest.mark.asyncio
async def test_convert_url(async_client):
    """Test convert URL to all outputs"""

    base_url = "http://localhost:5001/v1"
    payload = {
        "to_formats": ["md", "json", "html"],
        "image_export_mode": "placeholder",
        "ocr": False,
        "abort_on_error": False,
    }

    file_path = Path(__file__).parent / "2206.01062v1.pdf"
    files = {
        "files": (file_path.name, file_path.open("rb"), "application/pdf"),
    }

    for n in range(1):
        response = await async_client.post(
            f"{base_url}/convert/file/async", files=files, data=payload
        )
        assert response.status_code == 200, "Response should be 200 OK"

        task = response.json()

        print(json.dumps(task, indent=2))

        while task["task_status"] not in ("success", "failure"):
            response = await async_client.get(f"{base_url}/status/poll/{task['task_id']}")
            assert response.status_code == 200, "Response should be 200 OK"
            task = response.json()
            print(f"{task['task_status']=}")
            print(f"{task['task_position']=}")

            time.sleep(2)

        assert task["task_status"] == "success"
        print(f"Task completed with status {task['task_status']=}")

        result_resp = await async_client.get(f"{base_url}/result/{task['task_id']}")
        assert result_resp.status_code == 200, "Response should be 200 OK"
        result = result_resp.json()
        print("Got result.")

        assert "md_content" in result["document"]
        assert result["document"]["md_content"] is not None
        assert len(result["document"]["md_content"]) > 10

        assert "html_content" in result["document"]
        assert result["document"]["html_content"] is not None
        assert len(result["document"]["html_content"]) > 10

        assert "json_content" in result["document"]
        assert result["document"]["json_content"] is not None
        assert result["document"]["json_content"]["schema_name"] == "DoclingDocument"
127 tests/test_1-url-all-outputs.py Normal file
@@ -0,0 +1,127 @@
import json

import httpx
import pytest
import pytest_asyncio
from pytest_check import check

from docling_serve.settings import docling_serve_settings


@pytest_asyncio.fixture
async def async_client():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    async with httpx.AsyncClient(timeout=60.0, headers=headers) as client:
        yield client


@pytest.mark.asyncio
async def test_convert_url(async_client):
    """Test convert URL to all outputs"""
    url = "http://localhost:5001/v1/convert/source"
    payload = {
        "options": {
            "from_formats": [
                "docx",
                "pptx",
                "html",
                "image",
                "pdf",
                "asciidoc",
                "md",
                "xlsx",
            ],
            "to_formats": ["md", "json", "html", "text", "doctags"],
            "image_export_mode": "placeholder",
            "ocr": True,
            "force_ocr": False,
            "ocr_engine": "easyocr",
            "ocr_lang": ["en"],
            "pdf_backend": "dlparse_v2",
            "table_mode": "fast",
            "abort_on_error": False,
        },
        "sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2206.01062"}],
    }
    print(json.dumps(payload, indent=2))

    response = await async_client.post(url, json=payload)
    assert response.status_code == 200, "Response should be 200 OK"

    data = response.json()

    # Response content checks
    # Helper function to safely slice strings
    def safe_slice(value, length=100):
        if isinstance(value, str):
            return value[:length]
        return str(value)  # Convert non-string values to string for debug purposes

    # Document check
    check.is_in(
        "document",
        data,
        msg=f"Response should contain 'document' key. Received keys: {list(data.keys())}",
    )
    # MD check
    check.is_in(
        "md_content",
        data.get("document", {}),
        msg=f"Response should contain 'md_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("md_content") is not None:
        check.is_in(
            "## DocLayNet: ",
            data["document"]["md_content"],
            msg=f"Markdown document should contain 'DocLayNet: '. Received: {safe_slice(data['document']['md_content'])}",
        )
    # JSON check
    check.is_in(
        "json_content",
        data.get("document", {}),
        msg=f"Response should contain 'json_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("json_content") is not None:
        check.is_in(
            '{"schema_name": "DoclingDocument"',
            json.dumps(data["document"]["json_content"]),
            msg=f'JSON document should contain \'{{\\n "schema_name": "DoclingDocument\'". Received: {safe_slice(data["document"]["json_content"])}',
        )
    # HTML check
    check.is_in(
        "html_content",
        data.get("document", {}),
        msg=f"Response should contain 'html_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("html_content") is not None:
        check.is_in(
            "<!DOCTYPE html>\n<html>\n<head>",
            data["document"]["html_content"],
            msg=f"HTML document should contain '<!DOCTYPE html>\\n<html>'. Received: {safe_slice(data['document']['html_content'])}",
        )
    # Text check
    check.is_in(
        "text_content",
        data.get("document", {}),
        msg=f"Response should contain 'text_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("text_content") is not None:
        check.is_in(
            "DocLayNet: A Large Human-Annotated Dataset",
            data["document"]["text_content"],
            msg=f"Text document should contain 'DocLayNet: A Large Human-Annotated Dataset'. Received: {safe_slice(data['document']['text_content'])}",
        )
    # DocTags check
    check.is_in(
        "doctags_content",
        data.get("document", {}),
        msg=f"Response should contain 'doctags_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("doctags_content") is not None:
        check.is_in(
            "<doctag><page_header><loc",
            data["document"]["doctags_content"],
            msg=f"DocTags document should contain '<doctag><page_header><loc'. Received: {safe_slice(data['document']['doctags_content'])}",
        )
71 tests/test_1-url-async-ws.py Normal file
@@ -0,0 +1,71 @@
import base64
from pathlib import Path

import httpx
import pytest
import pytest_asyncio
from websockets.sync.client import connect

from docling_serve.settings import docling_serve_settings


@pytest_asyncio.fixture
async def async_client():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    async with httpx.AsyncClient(timeout=60.0, headers=headers) as client:
        yield client


@pytest.mark.asyncio
async def test_convert_url(async_client: httpx.AsyncClient):
    """Test convert URL to all outputs"""
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key

    doc_filename = Path("tests/2408.09869v5.pdf")
    encoded_doc = base64.b64encode(doc_filename.read_bytes()).decode()

    base_url = "http://localhost:5001/v1"
    payload = {
        "options": {
            "to_formats": ["md", "json"],
            "image_export_mode": "placeholder",
            "ocr": True,
            "abort_on_error": False,
            # "do_picture_description": True,
            # "picture_description_api": {
            #     "url": "http://localhost:11434/v1/chat/completions",
            #     "params": {
            #         "model": "granite3.2-vision:2b",
            #     }
            # },
            # "picture_description_local": {
            #     "repo_id": "HuggingFaceTB/SmolVLM-256M-Instruct",
            # },
        },
        # "sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2501.17887"}],
        "sources": [
            {
                "kind": "file",
                "base64_string": encoded_doc,
                "filename": doc_filename.name,
            }
        ],
    }
    # print(json.dumps(payload, indent=2))

    for n in range(5):
        response = await async_client.post(
            f"{base_url}/convert/source/async", json=payload
        )
        assert response.status_code == 200, "Response should be 200 OK"

        task = response.json()

        uri = f"ws://localhost:5001/v1/status/ws/{task['task_id']}?api_key={docling_serve_settings.api_key}"
        with connect(uri) as websocket:
            for message in websocket:
                print(message)
121 tests/test_1-url-async.py Normal file
@@ -0,0 +1,121 @@
import json
import random
import time

import httpx
import pytest
import pytest_asyncio

from docling_serve.settings import docling_serve_settings


@pytest_asyncio.fixture
async def async_client():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    async with httpx.AsyncClient(timeout=60.0, headers=headers) as client:
        yield client


@pytest.mark.asyncio
async def test_convert_url(async_client):
    """Test convert URL to all outputs"""

    example_docs = [
        "https://arxiv.org/pdf/2411.19710",
        "https://arxiv.org/pdf/2501.17887",
        "https://www.nature.com/articles/s41467-024-50779-y.pdf",
        "https://arxiv.org/pdf/2306.12802",
        "https://arxiv.org/pdf/2311.18481",
    ]

    base_url = "http://localhost:5001/v1"
    payload = {
        "options": {
            "to_formats": ["md", "json"],
            "image_export_mode": "placeholder",
            "ocr": True,
            "abort_on_error": False,
        },
        "sources": [{"kind": "http", "url": random.choice(example_docs)}],
    }
    print(json.dumps(payload, indent=2))

    for n in range(3):
        response = await async_client.post(
            f"{base_url}/convert/source/async", json=payload
        )
        assert response.status_code == 200, "Response should be 200 OK"

        task = response.json()

        print(json.dumps(task, indent=2))

        while task["task_status"] not in ("success", "failure"):
            response = await async_client.get(f"{base_url}/status/poll/{task['task_id']}")
            assert response.status_code == 200, "Response should be 200 OK"
            task = response.json()
            print(f"{task['task_status']=}")
            print(f"{task['task_position']=}")

            time.sleep(2)

        assert task["task_status"] == "success"


@pytest.mark.asyncio
@pytest.mark.parametrize("include_converted_doc", [False, True])
async def test_chunk_url(async_client, include_converted_doc: bool):
    """Test chunk URL"""

    example_docs = [
        "https://arxiv.org/pdf/2311.18481",
    ]

    base_url = "http://localhost:5001/v1"
    payload = {
        "sources": [{"kind": "http", "url": random.choice(example_docs)}],
        "include_converted_doc": include_converted_doc,
    }

    response = await async_client.post(
        f"{base_url}/chunk/hybrid/source/async", json=payload
    )
    assert response.status_code == 200, "Response should be 200 OK"

    task = response.json()

    print(json.dumps(task, indent=2))

    while task["task_status"] not in ("success", "failure"):
        response = await async_client.get(f"{base_url}/status/poll/{task['task_id']}")
        assert response.status_code == 200, "Response should be 200 OK"
        task = response.json()
        print(f"{task['task_status']=}")
        print(f"{task['task_position']=}")

        time.sleep(2)

    assert task["task_status"] == "success"

    result_resp = await async_client.get(f"{base_url}/result/{task['task_id']}")
    assert result_resp.status_code == 200, "Response should be 200 OK"
    result = result_resp.json()
    print("Got result.")

    assert "chunks" in result
    assert len(result["chunks"]) > 0

    assert "documents" in result
    assert len(result["documents"]) > 0
    assert result["documents"][0]["status"] == "success"

    if include_converted_doc:
        assert result["documents"][0]["content"]["json_content"] is not None
        assert (
            result["documents"][0]["content"]["json_content"]["schema_name"]
            == "DoclingDocument"
        )
    else:
        assert result["documents"][0]["content"]["json_content"] is None
75 tests/test_2-files-all-outputs.py Normal file
@@ -0,0 +1,75 @@
import os

import httpx
import pytest
import pytest_asyncio
from pytest_check import check

from docling_serve.settings import docling_serve_settings


@pytest_asyncio.fixture
async def async_client():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    async with httpx.AsyncClient(timeout=60.0, headers=headers) as client:
        yield client


@pytest.mark.asyncio
async def test_convert_file(async_client):
    """Test convert single file to all outputs"""
    url = "http://localhost:5001/v1/convert/file"
    options = {
        "from_formats": [
            "docx",
            "pptx",
            "html",
            "image",
            "pdf",
            "asciidoc",
            "md",
            "xlsx",
        ],
        "to_formats": ["md", "json", "html", "text", "doctags"],
        "image_export_mode": "placeholder",
        "ocr": True,
        "force_ocr": False,
        "ocr_engine": "easyocr",
        "ocr_lang": ["en"],
        "pdf_backend": "dlparse_v2",
        "table_mode": "fast",
        "abort_on_error": False,
    }

    current_dir = os.path.dirname(__file__)
    file_path = os.path.join(current_dir, "2206.01062v1.pdf")

    files = [
        ("files", ("2206.01062v1.pdf", open(file_path, "rb"), "application/pdf")),
        ("files", ("2408.09869v5.pdf", open(file_path, "rb"), "application/pdf")),
    ]

    response = await async_client.post(url, files=files, data=options)
    assert response.status_code == 200, "Response should be 200 OK"

    # Check for zip file attachment
    content_disposition = response.headers.get("content-disposition")

    with check:
        assert content_disposition is not None, (
            "Content-Disposition header should be present"
        )
    with check:
        assert "attachment" in content_disposition, "Response should be an attachment"
    with check:
        assert 'filename="converted_docs.zip"' in content_disposition, (
            "Attachment filename should be 'converted_docs.zip'"
        )

    content_type = response.headers.get("content-type")
    with check:
        assert content_type == "application/zip", (
            "Content-Type should be 'application/zip'"
        )
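The `files` list above is plain httpx multipart syntax: repeating the `files` field uploads several documents in one request, and the test asserts that the server then answers with a `converted_docs.zip` attachment. A trimmed sketch of the same call outside pytest (the second input path is an assumption; the test reuses the same PDF twice, and the zip response is assumed to hold for multiple inputs as the assertions above check):

import httpx

options = {"to_formats": ["md", "json"], "ocr": False}
files = [
    ("files", ("doc-a.pdf", open("tests/2206.01062v1.pdf", "rb"), "application/pdf")),
    ("files", ("doc-b.pdf", open("tests/2408.09869v5.pdf", "rb"), "application/pdf")),
]
resp = httpx.post(
    "http://localhost:5001/v1/convert/file", files=files, data=options, timeout=300.0
)
# With several inputs the server returns a converted_docs.zip attachment.
open("converted_docs.zip", "wb").write(resp.content)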
tests/test_2-urls-all-outputs.py (new file, 72 lines)
@@ -0,0 +1,72 @@
import httpx
import pytest
import pytest_asyncio
from pytest_check import check

from docling_serve.settings import docling_serve_settings


@pytest_asyncio.fixture
async def async_client():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    async with httpx.AsyncClient(timeout=60.0, headers=headers) as client:
        yield client


@pytest.mark.asyncio
async def test_convert_url(async_client):
    """Test convert URL to all outputs"""
    url = "http://localhost:5001/v1/convert/source"
    payload = {
        "options": {
            "from_formats": [
                "docx",
                "pptx",
                "html",
                "image",
                "pdf",
                "asciidoc",
                "md",
                "xlsx",
            ],
            "to_formats": ["md", "json", "html", "text", "doctags"],
            "image_export_mode": "placeholder",
            "ocr": True,
            "force_ocr": False,
            "ocr_engine": "easyocr",
            "ocr_lang": ["en"],
            "pdf_backend": "dlparse_v2",
            "table_mode": "fast",
            "abort_on_error": False,
        },
        "sources": [
            {"kind": "http", "url": "https://arxiv.org/pdf/2206.01062"},
            {"kind": "http", "url": "https://arxiv.org/pdf/2408.09869"},
        ],
        "target": {"kind": "zip"},
    }

    response = await async_client.post(url, json=payload)
    assert response.status_code == 200, "Response should be 200 OK"

    # Check for zip file attachment
    content_disposition = response.headers.get("content-disposition")

    with check:
        assert content_disposition is not None, (
            "Content-Disposition header should be present"
        )
    with check:
        assert "attachment" in content_disposition, "Response should be an attachment"
    with check:
        assert 'filename="converted_docs.zip"' in content_disposition, (
            "Attachment filename should be 'converted_docs.zip'"
        )

    content_type = response.headers.get("content-type")
    with check:
        assert content_type == "application/zip", (
            "Content-Type should be 'application/zip'"
        )
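The `"target": {"kind": "zip"}` entry in the payload is what makes the response an archive rather than a JSON body. A small sketch, not part of the commit, that unpacks such a response in memory (same endpoint and payload shape as the test; a single source is assumed to produce a zip just as two sources do above):

import io
import zipfile

import httpx

payload = {
    "options": {"to_formats": ["md"]},
    "sources": [{"kind": "http", "url": "https://arxiv.org/pdf/2206.01062"}],
    "target": {"kind": "zip"},
}
resp = httpx.post(
    "http://localhost:5001/v1/convert/source", json=payload, timeout=300.0
)
with zipfile.ZipFile(io.BytesIO(resp.content)) as zf:
    # List the converted artifacts contained in the attachment.
    print(zf.namelist())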
tests/test_2-urls-async-all-outputs.py (new file, 93 lines)
@@ -0,0 +1,93 @@
import json
import time

import httpx
import pytest
import pytest_asyncio
from pytest_check import check

from docling_serve.settings import docling_serve_settings


@pytest_asyncio.fixture
async def async_client():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    async with httpx.AsyncClient(timeout=60.0, headers=headers) as client:
        yield client


@pytest.mark.asyncio
async def test_convert_url(async_client):
    """Test convert URL to all outputs"""
    base_url = "http://localhost:5001/v1"
    payload = {
        "options": {
            "from_formats": [
                "docx",
                "pptx",
                "html",
                "image",
                "pdf",
                "asciidoc",
                "md",
                "xlsx",
            ],
            "to_formats": ["md", "json", "html", "text", "doctags"],
            "image_export_mode": "placeholder",
            "ocr": True,
            "force_ocr": False,
            "ocr_engine": "easyocr",
            "ocr_lang": ["en"],
            "pdf_backend": "dlparse_v2",
            "table_mode": "fast",
            "abort_on_error": False,
        },
        "sources": [
            {"kind": "http", "url": "https://arxiv.org/pdf/2206.01062"},
            {"kind": "http", "url": "https://arxiv.org/pdf/2408.09869"},
        ],
        "target": {"kind": "zip"},
    }

    response = await async_client.post(f"{base_url}/convert/source/async", json=payload)
    assert response.status_code == 200, "Response should be 200 OK"

    task = response.json()

    print(json.dumps(task, indent=2))

    while task["task_status"] not in ("success", "failure"):
        response = await async_client.get(f"{base_url}/status/poll/{task['task_id']}")
        assert response.status_code == 200, "Response should be 200 OK"
        task = response.json()
        print(f"{task['task_status']=}")
        print(f"{task['task_position']=}")

        time.sleep(2)

    assert task["task_status"] == "success"

    result_resp = await async_client.get(f"{base_url}/result/{task['task_id']}")
    assert result_resp.status_code == 200, "Response should be 200 OK"

    # Check for zip file attachment
    content_disposition = result_resp.headers.get("content-disposition")

    with check:
        assert content_disposition is not None, (
            "Content-Disposition header should be present"
        )
    with check:
        assert "attachment" in content_disposition, "Response should be an attachment"
    with check:
        assert 'filename="converted_docs.zip"' in content_disposition, (
            "Attachment filename should be 'converted_docs.zip'"
        )

    content_type = result_resp.headers.get("content-type")
    with check:
        assert content_type == "application/zip", (
            "Content-Type should be 'application/zip'"
        )
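The polling loop here repeats across the async tests. A small hypothetical helper, not in the commit, that factors it out and sleeps with asyncio so the event loop is not blocked the way time.sleep() blocks it above:

import asyncio

import httpx


async def wait_for_task(client: httpx.AsyncClient, base_url: str, task: dict) -> dict:
    """Poll /status/poll/{task_id} until the task finishes."""
    while task["task_status"] not in ("success", "failure"):
        await asyncio.sleep(2)  # yield to the event loop between polls
        resp = await client.get(f"{base_url}/status/poll/{task['task_id']}")
        resp.raise_for_status()
        task = resp.json()
    return task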
tests/test_fastapi_endpoints.py (new file, 206 lines)
@@ -0,0 +1,206 @@
import asyncio
import io
import json
import os
import zipfile

import pytest
import pytest_asyncio
from asgi_lifespan import LifespanManager
from httpx import ASGITransport, AsyncClient
from pytest_check import check

from docling_core.types.doc import DoclingDocument, PictureItem

from docling_serve.app import create_app
from docling_serve.settings import docling_serve_settings


@pytest.fixture(scope="session")
def event_loop():
    return asyncio.get_event_loop()


@pytest.fixture(scope="session")
def auth_headers():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    return headers


@pytest_asyncio.fixture(scope="session")
async def app():
    app = create_app()

    async with LifespanManager(app) as manager:
        print("Launching lifespan of app.")
        yield manager.app


@pytest_asyncio.fixture(scope="session")
async def client(app):
    async with AsyncClient(
        transport=ASGITransport(app=app), base_url="http://app.io"
    ) as client:
        print("Client is ready")
        yield client


@pytest.mark.asyncio
async def test_health(client: AsyncClient):
    response = await client.get("/health")
    assert response.status_code == 200
    assert response.json() == {"status": "ok"}


@pytest.mark.asyncio
async def test_convert_file(client: AsyncClient, auth_headers: dict):
    """Test convert single file to all outputs"""

    endpoint = "/v1/convert/file"
    options = {
        "from_formats": [
            "docx",
            "pptx",
            "html",
            "image",
            "pdf",
            "asciidoc",
            "md",
            "xlsx",
        ],
        "to_formats": ["md", "json", "html", "text", "doctags"],
        "image_export_mode": "placeholder",
        "ocr": True,
        "force_ocr": False,
        "ocr_engine": "easyocr",
        "ocr_lang": ["en"],
        "pdf_backend": "dlparse_v2",
        "table_mode": "fast",
        "abort_on_error": False,
    }

    current_dir = os.path.dirname(__file__)
    file_path = os.path.join(current_dir, "2206.01062v1.pdf")

    files = {
        "files": ("2206.01062v1.pdf", open(file_path, "rb"), "application/pdf"),
    }

    response = await client.post(
        endpoint, files=files, data=options, headers=auth_headers
    )
    assert response.status_code == 200, "Response should be 200 OK"

    data = response.json()

    # Response content checks
    # Helper function to safely slice strings
    def safe_slice(value, length=100):
        if isinstance(value, str):
            return value[:length]
        return str(value)  # Convert non-string values to string for debug purposes

    # Document check
    check.is_in(
        "document",
        data,
        msg=f"Response should contain 'document' key. Received keys: {list(data.keys())}",
    )
    # MD check
    check.is_in(
        "md_content",
        data.get("document", {}),
        msg=f"Response should contain 'md_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("md_content") is not None:
        check.is_in(
            "## DocLayNet: ",
            data["document"]["md_content"],
            msg=f"Markdown document should contain 'DocLayNet: '. Received: {safe_slice(data['document']['md_content'])}",
        )
    # JSON check
    check.is_in(
        "json_content",
        data.get("document", {}),
        msg=f"Response should contain 'json_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("json_content") is not None:
        check.is_in(
            '{"schema_name": "DoclingDocument"',
            json.dumps(data["document"]["json_content"]),
            msg=f'JSON document should contain \'{{\\n "schema_name": "DoclingDocument\'". Received: {safe_slice(data["document"]["json_content"])}',
        )
    # HTML check
    check.is_in(
        "html_content",
        data.get("document", {}),
        msg=f"Response should contain 'html_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("html_content") is not None:
        check.is_in(
            "<!DOCTYPE html>\n<html>\n<head>",
            data["document"]["html_content"],
            msg=f"HTML document should contain '<!DOCTYPE html>\n<html>\n<head>'. Received: {safe_slice(data['document']['html_content'])}",
        )
    # Text check
    check.is_in(
        "text_content",
        data.get("document", {}),
        msg=f"Response should contain 'text_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("text_content") is not None:
        check.is_in(
            "DocLayNet: A Large Human-Annotated Dataset",
            data["document"]["text_content"],
            msg=f"Text document should contain 'DocLayNet: A Large Human-Annotated Dataset'. Received: {safe_slice(data['document']['text_content'])}",
        )
    # DocTags check
    check.is_in(
        "doctags_content",
        data.get("document", {}),
        msg=f"Response should contain 'doctags_content' key. Received keys: {list(data.get('document', {}).keys())}",
    )
    if data.get("document", {}).get("doctags_content") is not None:
        check.is_in(
            "<doctag><page_header>",
            data["document"]["doctags_content"],
            msg=f"DocTags document should contain '<doctag><page_header>'. Received: {safe_slice(data['document']['doctags_content'])}",
        )


@pytest.mark.asyncio
async def test_referenced_artifacts(client: AsyncClient, auth_headers: dict):
    """Test that paths in the zip file are relative to the zip file root."""

    endpoint = "/v1/convert/file"
    options = {
        "to_formats": ["json"],
        "image_export_mode": "referenced",
        "target_type": "zip",
        "ocr": False,
    }

    current_dir = os.path.dirname(__file__)
    file_path = os.path.join(current_dir, "2206.01062v1.pdf")

    files = {
        "files": ("2206.01062v1.pdf", open(file_path, "rb"), "application/pdf"),
    }

    response = await client.post(
        endpoint, files=files, data=options, headers=auth_headers
    )
    assert response.status_code == 200, "Response should be 200 OK"

    with zipfile.ZipFile(io.BytesIO(response.content)) as zip_file:
        namelist = zip_file.namelist()
        for file in namelist:
            if file.endswith(".json"):
                doc = DoclingDocument.model_validate(json.loads(zip_file.read(file)))
                for item, _level in doc.iterate_items():
                    if isinstance(item, PictureItem):
                        assert item.image is not None
                        print(f"{item.image.uri}=")
                        assert str(item.image.uri) in namelist
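Unlike the earlier modules, this one never opens a socket: `ASGITransport` routes requests straight into the FastAPI application object, and `LifespanManager` runs its startup and shutdown hooks. A condensed sketch of the same in-process pattern, following the fixtures above (the base_url is arbitrary since no network is involved):

import pytest
from asgi_lifespan import LifespanManager
from httpx import ASGITransport, AsyncClient

from docling_serve.app import create_app


@pytest.mark.asyncio
async def test_health_inprocess():
    app = create_app()
    async with LifespanManager(app):  # run startup/shutdown events
        async with AsyncClient(
            transport=ASGITransport(app=app), base_url="http://test"
        ) as client:
            resp = await client.get("/health")
            assert resp.status_code == 200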
tests/test_file_opts.py (new file, 88 lines)
@@ -0,0 +1,88 @@
import asyncio
import json
import os

import pytest
import pytest_asyncio
from asgi_lifespan import LifespanManager
from httpx import ASGITransport, AsyncClient

from docling_core.types import DoclingDocument
from docling_core.types.doc.document import PictureDescriptionData

from docling_serve.app import create_app
from docling_serve.settings import docling_serve_settings


@pytest.fixture(scope="session")
def event_loop():
    return asyncio.get_event_loop()


@pytest.fixture(scope="session")
def auth_headers():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    return headers


@pytest_asyncio.fixture(scope="session")
async def app():
    app = create_app()

    async with LifespanManager(app) as manager:
        print("Launching lifespan of app.")
        yield manager.app


@pytest_asyncio.fixture(scope="session")
async def client(app):
    async with AsyncClient(
        transport=ASGITransport(app=app), base_url="http://app.io"
    ) as client:
        print("Client is ready")
        yield client


@pytest.mark.asyncio
async def test_convert_file(client: AsyncClient, auth_headers: dict):
    """Test convert single file to all outputs"""

    endpoint = "/v1/convert/file"
    options = {
        "to_formats": ["md", "json"],
        "image_export_mode": "placeholder",
        "ocr": False,
        "do_picture_description": True,
        "picture_description_api": json.dumps(
            {
                "url": "http://localhost:11434/v1/chat/completions",  # ollama
                "params": {"model": "granite3.2-vision:2b"},
                "timeout": 60,
                "prompt": "Describe this image in a few sentences. ",
            }
        ),
    }

    current_dir = os.path.dirname(__file__)
    file_path = os.path.join(current_dir, "2206.01062v1.pdf")

    files = {
        "files": ("2206.01062v1.pdf", open(file_path, "rb"), "application/pdf"),
    }

    response = await client.post(
        endpoint, files=files, data=options, headers=auth_headers
    )
    assert response.status_code == 200, "Response should be 200 OK"

    data = response.json()

    doc = DoclingDocument.model_validate(data["document"]["json_content"])

    for pic in doc.pictures:
        for ann in pic.annotations:
            if isinstance(ann, PictureDescriptionData):
                print(f"{pic.self_ref}")
                print(ann.text)
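Because `/v1/convert/file` is a multipart endpoint, nested option objects such as `picture_description_api` are serialized with `json.dumps` before being sent as plain form fields, exactly as the options dict above does. A trimmed sketch of just that part, against a running server instead of the in-process client (the Ollama URL and model name are the same assumptions the test makes):

import json

import httpx

options = {
    "to_formats": ["json"],
    "do_picture_description": True,
    # Nested objects travel as JSON strings inside the multipart form.
    "picture_description_api": json.dumps(
        {
            "url": "http://localhost:11434/v1/chat/completions",
            "params": {"model": "granite3.2-vision:2b"},
            "timeout": 60,
            "prompt": "Describe this image in a few sentences.",
        }
    ),
}
files = {"files": ("paper.pdf", open("tests/2206.01062v1.pdf", "rb"), "application/pdf")}
resp = httpx.post(
    "http://localhost:5001/v1/convert/file", files=files, data=options, timeout=600.0
)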
tests/test_results_clear.py (new file, 157 lines)
@@ -0,0 +1,157 @@
import asyncio
import base64
import json
from pathlib import Path

import pytest
import pytest_asyncio
from asgi_lifespan import LifespanManager
from httpx import ASGITransport, AsyncClient

from docling_serve.app import create_app
from docling_serve.settings import docling_serve_settings


@pytest.fixture(scope="session")
def event_loop():
    return asyncio.get_event_loop()


@pytest.fixture(scope="session")
def auth_headers():
    headers = {}
    if docling_serve_settings.api_key:
        headers["X-Api-Key"] = docling_serve_settings.api_key
    return headers


@pytest_asyncio.fixture(scope="session")
async def app():
    app = create_app()

    async with LifespanManager(app) as manager:
        print("Launching lifespan of app.")
        yield manager.app


@pytest_asyncio.fixture(scope="session")
async def client(app):
    async with AsyncClient(
        transport=ASGITransport(app=app), base_url="http://app.io"
    ) as client:
        print("Client is ready")
        yield client


async def convert_file(client: AsyncClient, auth_headers: dict):
    doc_filename = Path("tests/2408.09869v5.pdf")
    encoded_doc = base64.b64encode(doc_filename.read_bytes()).decode()

    payload = {
        "options": {
            "to_formats": ["json"],
        },
        "sources": [
            {
                "kind": "file",
                "base64_string": encoded_doc,
                "filename": doc_filename.name,
            }
        ],
    }

    response = await client.post(
        "/v1/convert/source/async", json=payload, headers=auth_headers
    )
    assert response.status_code == 200, "Response should be 200 OK"

    task = response.json()

    print(json.dumps(task, indent=2))

    while task["task_status"] not in ("success", "failure"):
        response = await client.get(
            f"/v1/status/poll/{task['task_id']}", headers=auth_headers
        )
        assert response.status_code == 200, "Response should be 200 OK"
        task = response.json()
        print(f"{task['task_status']=}")
        print(f"{task['task_position']=}")

        await asyncio.sleep(2)

    assert task["task_status"] == "success"

    return task


@pytest.mark.asyncio
async def test_clear_results(client: AsyncClient, auth_headers: dict):
    """Test removal of task."""

    # Set long delay deletion
    docling_serve_settings.result_removal_delay = 100

    # Convert and wait for completion
    task = await convert_file(client, auth_headers=auth_headers)

    # Get result once
    result_response = await client.get(
        f"/v1/result/{task['task_id']}", headers=auth_headers
    )
    assert result_response.status_code == 200, "Response should be 200 OK"
    print("Result 1 ok.")
    result = result_response.json()
    assert result["document"]["json_content"]["schema_name"] == "DoclingDocument"

    # Get result twice
    result_response = await client.get(
        f"/v1/result/{task['task_id']}", headers=auth_headers
    )
    assert result_response.status_code == 200, "Response should be 200 OK"
    print("Result 2 ok.")
    result = result_response.json()
    assert result["document"]["json_content"]["schema_name"] == "DoclingDocument"

    # Clear
    clear_response = await client.get(
        "/v1/clear/results?older_then=0", headers=auth_headers
    )
    assert clear_response.status_code == 200, "Response should be 200 OK"
    print("Clear ok.")

    # Get deleted result
    result_response = await client.get(
        f"/v1/result/{task['task_id']}", headers=auth_headers
    )
    assert result_response.status_code == 404, "Response should be removed"
    print("Result was no longer found.")


@pytest.mark.asyncio
async def test_delay_remove(client: AsyncClient, auth_headers: dict):
    """Test automatic removal of task with delay."""

    # Set short delay deletion
    docling_serve_settings.result_removal_delay = 5

    # Convert and wait for completion
    task = await convert_file(client, auth_headers=auth_headers)

    # Get result once
    result_response = await client.get(
        f"/v1/result/{task['task_id']}", headers=auth_headers
    )
    assert result_response.status_code == 200, "Response should be 200 OK"
    print("Result ok.")
    result = result_response.json()
    assert result["document"]["json_content"]["schema_name"] == "DoclingDocument"

    print("Sleeping to wait the automatic task deletion.")
    await asyncio.sleep(10)

    # Get deleted result
    result_response = await client.get(
        f"/v1/result/{task['task_id']}", headers=auth_headers
    )
    assert result_response.status_code == 404, "Response should be removed"
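These two tests document the result-retention behaviour: a finished result can be fetched repeatedly until either `/v1/clear/results` is called or `result_removal_delay` seconds elapse. A minimal client-side sketch of an explicit cleanup after reading a result, assuming a server on localhost:5001 and using the same endpoints and `older_then` query parameter as the test (the task_id placeholder comes from a previous async submission):

import httpx

base_url = "http://localhost:5001/v1"
with httpx.Client(timeout=60.0) as client:
    task_id = "..."  # id returned by a previous async submission
    result = client.get(f"{base_url}/result/{task_id}").json()
    # Drop all stored results older than 0 seconds, i.e. everything finished so far.
    client.get(f"{base_url}/clear/results", params={"older_then": 0})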