mirror of
https://github.com/docling-project/docling-serve.git
synced 2025-12-03 18:43:18 +00:00
api v1alpha1 (#17)
* api v1alpha1
* use actual types in request models and refactor
* make gradio optional and update README
* Run workflow jobs sequentially to avoid disk space outage (#19)
  GitHub Action runners are running out of space while building both images in parallel. This change builds the images sequentially and also cleans up the CPU image before starting to build the GPU image.
* Add github job to build image (and not publish) on PR creation (#20)
* add start_server script for local dev
* fix 3.12-only syntax
* fix more py3.10-11 compatibility
* rework output format and background tasks
* specify return schemas for openapi
* add processing time and update README
* lint markdown
* add MD033 to config
* use port 5000
* use port 5001 as default
* update deps
* refactor input request
* return docling document
* update new payload in README
* add base64 example
* wrap example in <details>
* rename /url to /source
* move main execution to __main__

Signed-off-by: Guillaume Moutier <gmoutier@redhat.com>
Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
Signed-off-by: Anil Vishnoi <vishnoianil@gmail.com>
Co-authored-by: Michele Dolfi <dol@zurich.ibm.com>
Co-authored-by: Anil Vishnoi <vishnoianil@gmail.com>
committed by GitHub
parent ddf3144512
commit c6539c42de
docling_serve/.env.example (Normal file, 3 lines)
@@ -0,0 +1,3 @@
TESSDATA_PREFIX=/usr/share/tesseract/tessdata/
UVICORN_WORKERS=2
RELOAD=True
docling_serve/__main__.py (Normal file, 20 lines)
@@ -0,0 +1,20 @@
import os

from docling_serve.app import app
from docling_serve.helper_functions import _str_to_bool

# Launch the FastAPI server
if __name__ == "__main__":
    from uvicorn import run

    port = int(os.getenv("PORT", "5001"))
    workers = int(os.getenv("UVICORN_WORKERS", "1"))
    reload = _str_to_bool(os.getenv("RELOAD", "False"))
    run(
        app,
        host="0.0.0.0",
        port=port,
        workers=workers,
        timeout_keep_alive=600,
        reload=reload,
    )
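Note that uvicorn only honors `reload` and `workers` when the application is passed as an import string rather than an object, so a sketch of an equivalent launch that actually enables those flags might look like this (assuming the module path `docling_serve.app:app`):

    # Sketch: pass an import string so reload/workers take effect
    # (uvicorn ignores them when given an app object instead).
    from uvicorn import run

    run(
        "docling_serve.app:app",
        host="0.0.0.0",
        port=5001,
        workers=2,
        timeout_keep_alive=600,
    )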
docling_serve/app.py (modified)
@@ -1,177 +1,83 @@
import base64
import hashlib
import logging
import os
import tempfile
from contextlib import asynccontextmanager
from enum import Enum
from io import BytesIO
from typing import Any, Dict, List, Optional, Tuple, Union
from pathlib import Path
from typing import Annotated, Any, Dict, List, Optional, Union

import httpx
from docling.datamodel.base_models import (
    ConversionStatus,
    DocumentStream,
    ErrorItem,
    InputFormat,
from docling.datamodel.base_models import DocumentStream, InputFormat
from docling.document_converter import DocumentConverter
from dotenv import load_dotenv
from fastapi import BackgroundTasks, FastAPI, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse
from pydantic import BaseModel

from docling_serve.docling_conversion import (
    ConvertDocumentFileSourcesRequest,
    ConvertDocumentsOptions,
    ConvertDocumentsRequest,
    convert_documents,
    converters,
    get_pdf_pipeline_opts,
)
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
    EasyOcrOptions,
    OcrOptions,
    PdfPipelineOptions,
    RapidOcrOptions,
    TesseractOcrOptions,
from docling_serve.helper_functions import FormDepends, _str_to_bool
from docling_serve.response_preparation import ConvertDocumentResponse, process_results

# Load local env vars if present
load_dotenv()

WITH_UI = _str_to_bool(os.getenv("WITH_UI", "False"))
if WITH_UI:
    import gradio as gr

    from docling_serve.gradio_ui import ui as gradio_ui

# Set up custom logging as it will be intermixed with FastAPI/Uvicorn's logging
class ColoredLogFormatter(logging.Formatter):
    COLOR_CODES = {
        logging.DEBUG: "\033[94m",  # Blue
        logging.INFO: "\033[92m",  # Green
        logging.WARNING: "\033[93m",  # Yellow
        logging.ERROR: "\033[91m",  # Red
        logging.CRITICAL: "\033[95m",  # Magenta
    }
    RESET_CODE = "\033[0m"

    def format(self, record):
        color = self.COLOR_CODES.get(record.levelno, "")
        record.levelname = f"{color}{record.levelname}{self.RESET_CODE}"
        return super().format(record)


logging.basicConfig(
    level=logging.INFO,  # Set the logging level
    format="%(levelname)s:\t%(asctime)s - %(name)s - %(message)s",
    datefmt="%H:%M:%S",
)
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.utils.profiling import ProfilingItem
from docling_core.types.doc import DoclingDocument, ImageRefMode
from docling_core.utils.file import resolve_remote_filename
from fastapi import FastAPI, HTTPException, Response
from pydantic import AnyHttpUrl, BaseModel


# TODO: import enum from Docling, once it is exposed
class OcrEngine(str, Enum):
    EASYOCR = "easyocr"
    TESSERACT = "tesseract"
    RAPIDOCR = "rapidocr"


class ConvertOptions(BaseModel):
    output_docling_document: bool = True
    output_markdown: bool = False
    output_html: bool = False
    do_ocr: bool = True
    ocr_engine: OcrEngine = OcrEngine.EASYOCR
    ocr_lang: Optional[List[str]] = None
    force_ocr: bool = False
    do_table_structure: bool = True
    include_images: bool = True
    images_scale: float = 2.0


class DocumentConvertBase(BaseModel):
    options: ConvertOptions = ConvertOptions()


class HttpSource(BaseModel):
    url: str
    headers: Dict[str, Any] = {}


class FileSource(BaseModel):
    base64_string: str
    filename: str


class ConvertDocumentHttpSourceRequest(DocumentConvertBase):
    http_source: HttpSource


class ConvertDocumentFileSourceRequest(DocumentConvertBase):
    file_source: FileSource


class DocumentResponse(BaseModel):
    markdown: Optional[str] = None
    docling_document: Optional[DoclingDocument] = None
    html: Optional[str] = None


class ConvertDocumentResponse(BaseModel):
    document: DocumentResponse
    status: ConversionStatus
    errors: List[ErrorItem] = []
    timings: Dict[str, ProfilingItem] = {}


class ConvertDocumentErrorResponse(BaseModel):
    status: ConversionStatus
    # errors: List[ErrorItem] = []


ConvertDocumentRequest = Union[
    ConvertDocumentFileSourceRequest, ConvertDocumentHttpSourceRequest
]


class MarkdownTextResponse(Response):
    media_type = "text/markdown"


class HealthCheckResponse(BaseModel):
    status: str = "ok"


def get_pdf_pipeline_opts(options: ConvertOptions) -> Tuple[PdfPipelineOptions, str]:

    if options.ocr_engine == OcrEngine.EASYOCR:
        try:
            import easyocr  # noqa: F401
        except ImportError:
            raise HTTPException(
                status_code=400,
                detail="The requested OCR engine"
                f" (ocr_engine={options.ocr_engine.value})"
                " is not available on this system. Please choose another OCR engine "
                "or contact your system administrator.",
            )
        ocr_options: OcrOptions = EasyOcrOptions(force_full_page_ocr=options.force_ocr)
    elif options.ocr_engine == OcrEngine.TESSERACT:
        try:
            import tesserocr  # noqa: F401
        except ImportError:
            raise HTTPException(
                status_code=400,
                detail="The requested OCR engine"
                f" (ocr_engine={options.ocr_engine.value})"
                " is not available on this system. Please choose another OCR engine "
                "or contact your system administrator.",
            )
        ocr_options = TesseractOcrOptions(force_full_page_ocr=options.force_ocr)
    elif options.ocr_engine == OcrEngine.RAPIDOCR:
        try:
            from rapidocr_onnxruntime import RapidOCR  # noqa: F401
        except ImportError:
            raise HTTPException(
                status_code=400,
                detail="The requested OCR engine"
                f" (ocr_engine={options.ocr_engine.value})"
                " is not available on this system. Please choose another OCR engine "
                "or contact your system administrator.",
            )
        ocr_options = RapidOcrOptions(force_full_page_ocr=options.force_ocr)
    else:
        raise RuntimeError(f"Unexpected OCR engine type {options.ocr_engine}")

    if options.ocr_lang is not None:
        ocr_options.lang = options.ocr_lang

    pipeline_options = PdfPipelineOptions(
        do_ocr=options.do_ocr,
        ocr_options=ocr_options,
        do_table_structure=options.do_table_structure,
        generate_page_images=options.include_images,
        generate_picture_images=options.include_images,
        images_scale=options.images_scale,
    )

    options_hash = hashlib.sha1(pipeline_options.model_dump_json().encode()).hexdigest()

    return pipeline_options, options_hash


converters: Dict[str, DocumentConverter] = {}

# Override the formatter with the custom ColoredLogFormatter
root_logger = logging.getLogger()  # Get the root logger
for handler in root_logger.handlers:  # Iterate through existing handlers
    if handler.formatter:
        handler.setFormatter(ColoredLogFormatter(handler.formatter._fmt))

_log = logging.getLogger(__name__)


# Context manager to initialize and clean up the lifespan of the FastAPI app
@asynccontextmanager
async def lifespan(app: FastAPI):
    # settings = Settings()

    # Converter with default options
    pipeline_options, options_hash = get_pdf_pipeline_opts(ConvertOptions())
    pdf_format_option, options_hash = get_pdf_pipeline_opts(ConvertDocumentsOptions())
    converters[options_hash] = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options),
            InputFormat.IMAGE: PdfFormatOption(pipeline_options=pipeline_options),
            InputFormat.PDF: pdf_format_option,
            InputFormat.IMAGE: pdf_format_option,
        }
    )
@@ -180,100 +86,139 @@ async def lifespan(app: FastAPI):
    yield

    converters.clear()
    if WITH_UI:
        gradio_ui.close()


##################################
# App creation and configuration #
##################################

app = FastAPI(
    title="Docling Serve",
    lifespan=lifespan,
)

origins = ["*"]
methods = ["*"]
headers = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=methods,
    allow_headers=headers,
)

# Mount the Gradio app
if WITH_UI:
    tmp_output_dir = Path(tempfile.mkdtemp())
    gradio_ui.gradio_output_dir = tmp_output_dir
    app = gr.mount_gradio_app(
        app, gradio_ui, path="/ui", allowed_paths=["./logo.png", tmp_output_dir]
    )


#############################
# API Endpoints definitions #
#############################


# Favicon
@app.get("/favicon.ico", include_in_schema=False)
async def favicon():
    response = RedirectResponse(url="https://ds4sd.github.io/docling/assets/logo.png")
    return response


# Status
class HealthCheckResponse(BaseModel):
    status: str = "ok"


@app.get("/health")
def health() -> HealthCheckResponse:
    return HealthCheckResponse()


def _convert_document(
    body: ConvertDocumentRequest,
) -> ConversionResult:

    filename: str
    buf: BytesIO

    if isinstance(body, ConvertDocumentFileSourceRequest):
        buf = BytesIO(base64.b64decode(body.file_source.base64_string))
        filename = body.file_source.filename
    elif isinstance(body, ConvertDocumentHttpSourceRequest):
        http_res = httpx.get(body.http_source.url, headers=body.http_source.headers)
        buf = BytesIO(http_res.content)
        filename = resolve_remote_filename(
            http_url=AnyHttpUrl(body.http_source.url),
            response_headers=dict(**http_res.headers),
        )

    doc_input = DocumentStream(name=filename, stream=buf)

    pipeline_options, options_hash = get_pdf_pipeline_opts(body.options)
    if options_hash not in converters:
        converters[options_hash] = DocumentConverter(
            format_options={
                InputFormat.PDF: PdfFormatOption(pipeline_options=pipeline_options),
                InputFormat.IMAGE: PdfFormatOption(pipeline_options=pipeline_options),
            }
        )

    result: ConversionResult = converters[options_hash].convert(doc_input)

    if result is None or result.status == ConversionStatus.SKIPPED:
        raise HTTPException(status_code=400, detail=result.errors)

    if result is None or result.status not in {
        ConversionStatus.SUCCESS,
    }:
        raise HTTPException(
            status_code=500, detail={"errors": result.errors, "status": result.status}
        )

    return result
# API readiness compatibility for OpenShift AI Workbench
@app.get("/api", include_in_schema=False)
def api_check() -> HealthCheckResponse:
    return HealthCheckResponse()


# Convert a document from URL(s)
@app.post(
    "/convert",
    "/v1alpha/convert/source",
    response_model=ConvertDocumentResponse,
    responses={
        200: {
            "content": {"application/zip": {}},
            # "description": "Return the JSON item or an image.",
        }
    },
)
def convert_document(
    body: ConvertDocumentRequest,
) -> ConvertDocumentResponse:
def process_url(
    background_tasks: BackgroundTasks, conversion_request: ConvertDocumentsRequest
):
    sources: List[Union[str, DocumentStream]] = []
    headers: Optional[Dict[str, Any]] = None
    if isinstance(conversion_request, ConvertDocumentFileSourcesRequest):
        for file_source in conversion_request.file_sources:
            sources.append(file_source.to_document_stream())
    else:
        for http_source in conversion_request.http_sources:
            sources.append(http_source.url)
            if headers is None and http_source.headers:
                headers = http_source.headers

    result = _convert_document(body=body)

    image_mode = (
        ImageRefMode.EMBEDDED
        if body.options.include_images
        else ImageRefMode.PLACEHOLDER
    )
    doc_resp = DocumentResponse()
    if body.options.output_docling_document:
        doc_resp.docling_document = result.document
    if body.options.output_markdown:
        doc_resp.markdown = result.document.export_to_markdown(image_mode=image_mode)
    if body.options.output_html:
        doc_resp.html = result.document.export_to_html(image_mode=image_mode)

    return ConvertDocumentResponse(
        document=doc_resp, status=result.status, timings=result.timings
    # Note: results are only an iterator->lazy evaluation
    results = convert_documents(
        sources=sources, options=conversion_request.options, headers=headers
    )

    # The real processing will happen here
    response = process_results(
        background_tasks=background_tasks,
        conversion_options=conversion_request.options,
        conv_results=results,
    )

@app.post("/convert/markdown", response_class=MarkdownTextResponse)
def convert_document_md(
    body: ConvertDocumentRequest,
) -> MarkdownTextResponse:
    result = _convert_document(body=body)
    image_mode = (
        ImageRefMode.EMBEDDED
        if body.options.include_images
        else ImageRefMode.PLACEHOLDER
    )
    return MarkdownTextResponse(
        result.document.export_to_markdown(image_mode=image_mode)
    return response


# Convert a document from file(s)
@app.post(
    "/v1alpha/convert/file",
    response_model=ConvertDocumentResponse,
    responses={
        200: {
            "content": {"application/zip": {}},
        }
    },
)
async def process_file(
    background_tasks: BackgroundTasks,
    files: List[UploadFile],
    options: Annotated[ConvertDocumentsOptions, FormDepends(ConvertDocumentsOptions)],
):

    _log.info(f"Received {len(files)} files for processing.")

    # Load the uploaded files to Docling DocumentStream
    file_sources = []
    for file in files:
        buf = BytesIO(file.file.read())
        name = file.filename if file.filename else "file.pdf"
        file_sources.append(DocumentStream(name=name, stream=buf))

    results = convert_documents(sources=file_sources, options=options)

    response = process_results(
        background_tasks=background_tasks,
        conversion_options=options,
        conv_results=results,
    )

    return response
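A hedged sketch of calling the new file endpoint from a client (this mirrors what the Gradio UI does below; the filename is illustrative, and options travel as form fields thanks to FormDepends(ConvertDocumentsOptions)):

    # Sketch: upload a PDF to /v1alpha/convert/file and print the Markdown.
    import requests

    with open("file.pdf", "rb") as f:  # illustrative filename
        resp = requests.post(
            "http://localhost:5001/v1alpha/convert/file",
            files={"files": ("file.pdf", f)},
            data={"to_formats": "md", "ocr_engine": "easyocr"},
        )
    print(resp.json()["document"]["md_content"])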
docling_serve/docling_conversion.py (Normal file, 400 lines)
@@ -0,0 +1,400 @@
import base64
import hashlib
import json
import logging
from io import BytesIO
from pathlib import Path
from typing import (
    Annotated,
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Optional,
    Tuple,
    Type,
    Union,
)

from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
from docling.backend.docling_parse_v2_backend import DoclingParseV2DocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
from docling.datamodel.base_models import DocumentStream, InputFormat, OutputFormat
from docling.datamodel.document import ConversionResult
from docling.datamodel.pipeline_options import (
    EasyOcrOptions,
    OcrEngine,
    OcrOptions,
    PdfBackend,
    PdfPipelineOptions,
    RapidOcrOptions,
    TableFormerMode,
    TesseractOcrOptions,
)
from docling.document_converter import DocumentConverter, FormatOption, PdfFormatOption
from docling_core.types.doc import ImageRefMode
from fastapi import HTTPException
from pydantic import BaseModel, Field

from docling_serve.helper_functions import _to_list_of_strings

_log = logging.getLogger(__name__)


# Define the input options for the API
class ConvertDocumentsOptions(BaseModel):
    from_formats: Annotated[
        List[InputFormat],
        Field(
            description=(
                "Input format(s) to convert from. String or list of strings. "
                f"Allowed values: {', '.join([v.value for v in InputFormat])}. "
                "Optional, defaults to all formats."
            ),
            examples=[[v.value for v in InputFormat]],
        ),
    ] = [v for v in InputFormat]

    to_formats: Annotated[
        List[OutputFormat],
        Field(
            description=(
                "Output format(s) to convert to. String or list of strings. "
                f"Allowed values: {', '.join([v.value for v in OutputFormat])}. "
                "Optional, defaults to Markdown."
            ),
            examples=[[OutputFormat.MARKDOWN]],
        ),
    ] = [OutputFormat.MARKDOWN]

    image_export_mode: Annotated[
        ImageRefMode,
        Field(
            description=(
                "Image export mode for the document (in case of JSON,"
                " Markdown or HTML). "
                f"Allowed values: {', '.join([v.value for v in ImageRefMode])}. "
                "Optional, defaults to Embedded."
            ),
            examples=[ImageRefMode.EMBEDDED.value],
            # pattern="embedded|placeholder|referenced",
        ),
    ] = ImageRefMode.EMBEDDED

    do_ocr: Annotated[
        bool,
        Field(
            description=(
                "If enabled, the bitmap content will be processed using OCR. "
                "Boolean. Optional, defaults to true"
            ),
            # examples=[True],
        ),
    ] = True

    force_ocr: Annotated[
        bool,
        Field(
            description=(
                "If enabled, replace existing text with OCR-generated "
                "text over content. Boolean. Optional, defaults to false."
            ),
            # examples=[False],
        ),
    ] = False

    # TODO: use a restricted list based on what is installed on the system
    ocr_engine: Annotated[
        OcrEngine,
        Field(
            description=(
                "The OCR engine to use. String. "
                "Allowed values: easyocr, tesseract, rapidocr. "
                "Optional, defaults to easyocr."
            ),
            examples=[OcrEngine.EASYOCR],
        ),
    ] = OcrEngine.EASYOCR

    ocr_lang: Annotated[
        Optional[List[str]],
        Field(
            description=(
                "List of languages used by the OCR engine. "
                "Note that each OCR engine has "
                "different values for the language names. String or list of strings. "
                "Optional, defaults to empty."
            ),
            examples=[["fr", "de", "es", "en"]],
        ),
    ] = None

    pdf_backend: Annotated[
        PdfBackend,
        Field(
            description=(
                "The PDF backend to use. String. "
                f"Allowed values: {', '.join([v.value for v in PdfBackend])}. "
                f"Optional, defaults to {PdfBackend.DLPARSE_V2.value}."
            ),
            examples=[PdfBackend.DLPARSE_V2],
        ),
    ] = PdfBackend.DLPARSE_V2

    table_mode: Annotated[
        TableFormerMode,
        Field(
            description=(
                "Mode to use for table structure. String. "
                f"Allowed values: {', '.join([v.value for v in TableFormerMode])}. "
                "Optional, defaults to fast."
            ),
            examples=[TableFormerMode.FAST],
            # pattern="fast|accurate",
        ),
    ] = TableFormerMode.FAST

    abort_on_error: Annotated[
        bool,
        Field(
            description=(
                "Abort on error if enabled. Boolean. Optional, defaults to false."
            ),
            # examples=[False],
        ),
    ] = False

    return_as_file: Annotated[
        bool,
        Field(
            description=(
                "Return the output as a zip file "
                "(will happen anyway if multiple files are generated). "
                "Boolean. Optional, defaults to false."
            ),
            examples=[False],
        ),
    ] = False

    do_table_structure: Annotated[
        bool,
        Field(
            description=(
                "If enabled, the table structure will be extracted. "
                "Boolean. Optional, defaults to true."
            ),
            examples=[True],
        ),
    ] = True

    include_images: Annotated[
        bool,
        Field(
            description=(
                "If enabled, images will be extracted from the document. "
                "Boolean. Optional, defaults to true."
            ),
            examples=[True],
        ),
    ] = True

    images_scale: Annotated[
        float,
        Field(
            description="Scale factor for images. Float. Optional, defaults to 2.0.",
            examples=[2.0],
        ),
    ] = 2.0


class DocumentsConvertBase(BaseModel):
    options: ConvertDocumentsOptions = ConvertDocumentsOptions()


class HttpSource(BaseModel):
    url: Annotated[
        str,
        Field(
            description="HTTP url to process",
            examples=["https://arxiv.org/pdf/2206.01062"],
        ),
    ]
    headers: Annotated[
        Dict[str, Any],
        Field(
            description="Additional headers used to fetch the urls, "
            "e.g. authorization, agent, etc"
        ),
    ] = {}


class FileSource(BaseModel):
    base64_string: Annotated[
        str,
        Field(
            description="Content of the file serialized in base64. "
            "For example it can be obtained via "
            "`base64 -w 0 /path/to/file/pdf-to-convert.pdf`."
        ),
    ]
    filename: Annotated[
        str,
        Field(description="Filename of the uploaded document", examples=["file.pdf"]),
    ]

    def to_document_stream(self) -> DocumentStream:
        buf = BytesIO(base64.b64decode(self.base64_string))
        return DocumentStream(stream=buf, name=self.filename)


class ConvertDocumentHttpSourcesRequest(DocumentsConvertBase):
    http_sources: List[HttpSource]


class ConvertDocumentFileSourcesRequest(DocumentsConvertBase):
    file_sources: List[FileSource]


ConvertDocumentsRequest = Union[
    ConvertDocumentFileSourcesRequest, ConvertDocumentHttpSourcesRequest
]


# Document converters will be preloaded and stored in a dictionary
converters: Dict[str, DocumentConverter] = {}


# Custom serializer for PdfFormatOption
# (model_dump_json does not work with some classes)
def _serialize_pdf_format_option(pdf_format_option: PdfFormatOption) -> str:
    data = pdf_format_option.model_dump()

    # pipeline_options are not fully serialized by model_dump, dedicated pass
    if pdf_format_option.pipeline_options:
        data["pipeline_options"] = pdf_format_option.pipeline_options.model_dump()

    # Replace `pipeline_cls` with a string representation
    data["pipeline_cls"] = repr(data["pipeline_cls"])

    # Replace `backend` with a string representation
    data["backend"] = repr(data["backend"])

    # Handle `device` in `accelerator_options`
    if "accelerator_options" in data and "device" in data["accelerator_options"]:
        data["accelerator_options"]["device"] = repr(
            data["accelerator_options"]["device"]
        )

    # Serialize the dictionary to JSON with sorted keys to have consistent hashes
    return json.dumps(data, sort_keys=True)


# Computes the PDF pipeline options and returns the PdfFormatOption and its hash
def get_pdf_pipeline_opts(
    request: ConvertDocumentsOptions,
) -> Tuple[PdfFormatOption, str]:

    if request.ocr_engine == OcrEngine.EASYOCR:
        try:
            import easyocr  # noqa: F401
        except ImportError:
            raise HTTPException(
                status_code=400,
                detail="The requested OCR engine"
                f" (ocr_engine={request.ocr_engine.value})"
                " is not available on this system. Please choose another OCR engine "
                "or contact your system administrator.",
            )
        ocr_options: OcrOptions = EasyOcrOptions(force_full_page_ocr=request.force_ocr)
    elif request.ocr_engine == OcrEngine.TESSERACT:
        try:
            import tesserocr  # noqa: F401
        except ImportError:
            raise HTTPException(
                status_code=400,
                detail="The requested OCR engine"
                f" (ocr_engine={request.ocr_engine.value})"
                " is not available on this system. Please choose another OCR engine "
                "or contact your system administrator.",
            )
        ocr_options = TesseractOcrOptions(force_full_page_ocr=request.force_ocr)
    elif request.ocr_engine == OcrEngine.RAPIDOCR:
        try:
            from rapidocr_onnxruntime import RapidOCR  # noqa: F401
        except ImportError:
            raise HTTPException(
                status_code=400,
                detail="The requested OCR engine"
                f" (ocr_engine={request.ocr_engine.value})"
                " is not available on this system. Please choose another OCR engine "
                "or contact your system administrator.",
            )
        ocr_options = RapidOcrOptions(force_full_page_ocr=request.force_ocr)
    else:
        raise RuntimeError(f"Unexpected OCR engine type {request.ocr_engine}")

    if request.ocr_lang is not None:
        if isinstance(request.ocr_lang, str):
            ocr_options.lang = _to_list_of_strings(request.ocr_lang)
        else:
            ocr_options.lang = request.ocr_lang

    pipeline_options = PdfPipelineOptions(
        do_ocr=request.do_ocr,
        ocr_options=ocr_options,
        do_table_structure=request.do_table_structure,
    )
    pipeline_options.table_structure_options.do_cell_matching = True  # do_cell_matching
    pipeline_options.table_structure_options.mode = TableFormerMode(request.table_mode)

    if request.image_export_mode != ImageRefMode.PLACEHOLDER:
        pipeline_options.generate_page_images = True
        if request.images_scale:
            pipeline_options.images_scale = request.images_scale

    if request.pdf_backend == PdfBackend.DLPARSE_V1:
        backend: Type[PdfDocumentBackend] = DoclingParseDocumentBackend
    elif request.pdf_backend == PdfBackend.DLPARSE_V2:
        backend = DoclingParseV2DocumentBackend
    elif request.pdf_backend == PdfBackend.PYPDFIUM2:
        backend = PyPdfiumDocumentBackend
    else:
        raise RuntimeError(f"Unexpected PDF backend type {request.pdf_backend}")

    pdf_format_option = PdfFormatOption(
        pipeline_options=pipeline_options,
        backend=backend,
    )

    serialized_data = _serialize_pdf_format_option(pdf_format_option)

    options_hash = hashlib.sha1(serialized_data.encode()).hexdigest()

    return pdf_format_option, options_hash


def convert_documents(
    sources: Iterable[Union[Path, str, DocumentStream]],
    options: ConvertDocumentsOptions,
    headers: Optional[Dict[str, Any]] = None,
):
    pdf_format_option, options_hash = get_pdf_pipeline_opts(options)

    if options_hash not in converters:
        format_options: Dict[InputFormat, FormatOption] = {
            InputFormat.PDF: pdf_format_option,
            InputFormat.IMAGE: pdf_format_option,
        }

        converters[options_hash] = DocumentConverter(format_options=format_options)
        _log.info(f"We now have {len(converters)} converters in memory.")

    results: Iterator[ConversionResult] = converters[options_hash].convert_all(
        sources,
        headers=headers,
    )

    return results
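For reference, a minimal sketch of driving this module directly rather than through the HTTP endpoints (the path "report.pdf" is a placeholder):

    # Sketch: convert a local PDF through the cached converters.
    import base64

    from docling_serve.docling_conversion import (
        ConvertDocumentsOptions,
        FileSource,
        convert_documents,
    )

    with open("report.pdf", "rb") as f:  # placeholder path
        source = FileSource(
            base64_string=base64.b64encode(f.read()).decode(),
            filename="report.pdf",
        )

    options = ConvertDocumentsOptions(to_formats=["md"], do_ocr=False)
    # convert_documents returns a lazy iterator; iterating triggers the work.
    for result in convert_documents([source.to_document_stream()], options):
        print(result.status)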
docling_serve/gradio_ui.py (Normal file, 635 lines)
@@ -0,0 +1,635 @@
import importlib.metadata
import json
import logging
import os
import tempfile
from pathlib import Path

import gradio as gr
import requests

from docling_serve.helper_functions import _to_list_of_strings

logger = logging.getLogger(__name__)

#################
# CSS and theme #
#################

css = """
#logo {
    border-style: none;
    background: none;
    box-shadow: none;
    min-width: 80px;
}
#dark_mode_column {
    display: flex;
    align-content: flex-end;
}
#title {
    text-align: left;
    display: block;
    height: auto;
    padding-top: 5px;
    line-height: 0;
}
.title-text h1 > p, .title-text p {
    margin-top: 0px !important;
    margin-bottom: 2px !important;
}
#custom-container {
    border: 0.909091px solid;
    padding: 10px;
    border-radius: 4px;
}
#custom-container h4 {
    font-size: 14px;
}
#file_input_zone {
    height: 140px;
}
"""

theme = gr.themes.Default(
    text_size="md",
    spacing_size="md",
    font=[
        gr.themes.GoogleFont("Red Hat Display"),
        "ui-sans-serif",
        "system-ui",
        "sans-serif",
    ],
    font_mono=[
        gr.themes.GoogleFont("Red Hat Mono"),
        "ui-monospace",
        "Consolas",
        "monospace",
    ],
)

#############
# Variables #
#############

gradio_output_dir = None  # Will be set by FastAPI when mounted
file_output_path = None  # Will be set when a new file is generated

#############
# Functions #
#############


def health_check():
    response = requests.get(f"http://localhost:{int(os.getenv('PORT', '5001'))}/health")
    if response.status_code == 200:
        return "Healthy"
    return "Unhealthy"


def set_options_visibility(x):
    return gr.Accordion("Options", open=x)


def set_outputs_visibility_direct(x, y):
    content = gr.Row(visible=x)
    file = gr.Row(visible=y)
    return content, file


def set_outputs_visibility_process(x):
    content = gr.Row(visible=not x)
    file = gr.Row(visible=x)
    return content, file


def set_download_button_label(label_text: gr.State):
    return gr.DownloadButton(label=str(label_text), scale=1)


def clear_outputs():
    markdown_content = ""
    json_content = ""
    html_content = ""
    text_content = ""
    doctags_content = ""

    return (
        markdown_content,
        markdown_content,
        json_content,
        html_content,
        html_content,
        text_content,
        doctags_content,
    )


def clear_url_input():
    return ""


def clear_file_input():
    return None


def auto_set_return_as_file(url_input, file_input, image_export_mode):
    # If more than one input source is provided, return as file
    if (
        (len(url_input.split(",")) > 1)
        or (file_input and len(file_input) > 1)
        or (image_export_mode == "referenced")
    ):
        return True
    else:
        return False


def change_ocr_lang(ocr_engine):
    if ocr_engine == "easyocr":
        return "en,fr,de,es"
    elif ocr_engine == "tesseract_cli":
        return "eng,fra,deu,spa"
    elif ocr_engine == "tesseract":
        return "eng,fra,deu,spa"
    elif ocr_engine == "rapidocr":
        return "english,chinese"


def process_url(
    input_sources,
    to_formats,
    image_export_mode,
    ocr,
    force_ocr,
    ocr_engine,
    ocr_lang,
    pdf_backend,
    table_mode,
    abort_on_error,
    return_as_file,
):
    parameters = {
        "http_sources": [{"url": source} for source in input_sources.split(",")],
        "options": {
            "to_formats": to_formats,
            "image_export_mode": image_export_mode,
            "ocr": ocr,
            "force_ocr": force_ocr,
            "ocr_engine": ocr_engine,
            "ocr_lang": _to_list_of_strings(ocr_lang),
            "pdf_backend": pdf_backend,
            "table_mode": table_mode,
            "abort_on_error": abort_on_error,
            "return_as_file": return_as_file,
        },
    }
    if (
        not parameters["http_sources"]
        or len(parameters["http_sources"]) == 0
        or parameters["http_sources"][0]["url"] == ""
    ):
        logger.error("No input sources provided.")
        raise gr.Error("No input sources provided.", print_exception=False)
    try:
        response = requests.post(
            f"http://localhost:{int(os.getenv('PORT', '5001'))}/v1alpha/convert/source",
            json=parameters,
        )
    except Exception as e:
        logger.error(f"Error processing URL: {e}")
        raise gr.Error(f"Error processing URL: {e}", print_exception=False)
    if response.status_code != 200:
        data = response.json()
        error_message = data.get("detail", "An unknown error occurred.")
        logger.error(f"Error processing file: {error_message}")
        raise gr.Error(f"Error processing file: {error_message}", print_exception=False)
    output = response_to_output(response, return_as_file)
    return output


def process_file(
    files,
    to_formats,
    image_export_mode,
    ocr,
    force_ocr,
    ocr_engine,
    ocr_lang,
    pdf_backend,
    table_mode,
    abort_on_error,
    return_as_file,
):
    if not files or len(files) == 0 or files[0] == "":
        logger.error("No files provided.")
        raise gr.Error("No files provided.", print_exception=False)
    files_data = [("files", (file.name, open(file.name, "rb"))) for file in files]

    parameters = {
        "to_formats": to_formats,
        "image_export_mode": image_export_mode,
        "ocr": str(ocr).lower(),
        "force_ocr": str(force_ocr).lower(),
        "ocr_engine": ocr_engine,
        "ocr_lang": _to_list_of_strings(ocr_lang),
        "pdf_backend": pdf_backend,
        "table_mode": table_mode,
        "abort_on_error": str(abort_on_error).lower(),
        "return_as_file": str(return_as_file).lower(),
    }

    try:
        response = requests.post(
            f"http://localhost:{int(os.getenv('PORT', '5001'))}/v1alpha/convert/file",
            files=files_data,
            data=parameters,
        )
    except Exception as e:
        logger.error(f"Error processing file(s): {e}")
        raise gr.Error(f"Error processing file(s): {e}", print_exception=False)
    if response.status_code != 200:
        data = response.json()
        error_message = data.get("detail", "An unknown error occurred.")
        logger.error(f"Error processing file: {error_message}")
        raise gr.Error(f"Error processing file: {error_message}", print_exception=False)
    output = response_to_output(response, return_as_file)
    return output


def response_to_output(response, return_as_file):
    markdown_content = ""
    json_content = ""
    html_content = ""
    text_content = ""
    doctags_content = ""
    download_button = gr.DownloadButton(visible=False, label="Download Output", scale=1)
    if return_as_file:
        filename = (
            response.headers.get("Content-Disposition").split("filename=")[1].strip('"')
        )
        tmp_output_dir = Path(tempfile.mkdtemp(dir=gradio_output_dir, prefix="ui_"))
        file_output_path = f"{tmp_output_dir}/{filename}"
        # logger.info(f"Saving file to: {file_output_path}")
        with open(file_output_path, "wb") as f:
            f.write(response.content)
        download_button = gr.DownloadButton(
            visible=True, label=f"Download {filename}", scale=1, value=file_output_path
        )
    else:
        full_content = response.json()
        markdown_content = full_content.get("document").get("md_content")
        json_content = json.dumps(
            full_content.get("document").get("json_content"), indent=2
        )
        html_content = full_content.get("document").get("html_content")
        text_content = full_content.get("document").get("text_content")
        doctags_content = full_content.get("document").get("doctags_content")
    return (
        markdown_content,
        markdown_content,
        json_content,
        html_content,
        html_content,
        text_content,
        doctags_content,
        download_button,
    )


############
# UI Setup #
############

with gr.Blocks(
    css=css,
    theme=theme,
    title="Docling Serve",
    delete_cache=(3600, 3600),  # Delete all files older than 1 hour every hour
) as ui:

    # Constants stored in states to be able to pass them as inputs to functions
    processing_text = gr.State("Processing your document(s), please wait...")
    true_bool = gr.State(True)
    false_bool = gr.State(False)

    # Banner
    with gr.Row(elem_id="check_health"):
        # Logo
        with gr.Column(scale=1, min_width=90):
            gr.Image(
                "https://ds4sd.github.io/docling/assets/logo.png",
                height=80,
                width=80,
                show_download_button=False,
                show_label=False,
                show_fullscreen_button=False,
                container=False,
                elem_id="logo",
                scale=0,
            )
        # Title
        with gr.Column(scale=1, min_width=200):
            gr.Markdown(
                f"# Docling Serve \n(docling version: "
                f"{importlib.metadata.version('docling')})",
                elem_id="title",
                elem_classes=["title-text"],
            )
        # Dark mode button
        with gr.Column(scale=16, elem_id="dark_mode_column"):
            dark_mode_btn = gr.Button("Dark/Light Mode", scale=0)
            dark_mode_btn.click(
                None,
                None,
                None,
                js="""() => {
                    if (document.querySelectorAll('.dark').length) {
                        document.querySelectorAll('.dark').forEach(
                            el => el.classList.remove('dark')
                        );
                    } else {
                        document.querySelector('body').classList.add('dark');
                    }
                }""",
                show_api=False,
            )

    # URL Processing Tab
    with gr.Tab("Convert URL(s)"):
        with gr.Row():
            with gr.Column(scale=4):
                url_input = gr.Textbox(
                    label="Input Sources (comma-separated URLs)",
                    placeholder="https://arxiv.org/pdf/2206.01062",
                )
            with gr.Column(scale=1):
                url_process_btn = gr.Button("Process URL(s)", scale=1)
                url_reset_btn = gr.Button("Reset", scale=1)

    # File Processing Tab
    with gr.Tab("Convert File(s)"):
        with gr.Row():
            with gr.Column(scale=4):
                file_input = gr.File(
                    elem_id="file_input_zone",
                    label="Upload Files",
                    file_types=[
                        ".pdf",
                        ".docx",
                        ".pptx",
                        ".html",
                        ".xlsx",
                        ".asciidoc",
                        ".txt",
                        ".md",
                        ".jpg",
                        ".jpeg",
                        ".png",
                        ".gif",
                    ],
                    file_count="multiple",
                    scale=4,
                )
            with gr.Column(scale=1):
                file_process_btn = gr.Button("Process File(s)", scale=1)
                file_reset_btn = gr.Button("Reset", scale=1)

    # Options
    with gr.Accordion("Options") as options:
        with gr.Row():
            with gr.Column(scale=1):
                to_formats = gr.CheckboxGroup(
                    [
                        ("Markdown", "md"),
                        ("Docling (JSON)", "json"),
                        ("HTML", "html"),
                        ("Plain Text", "text"),
                        ("Doc Tags", "doctags"),
                    ],
                    label="To Formats",
                    value=["md"],
                )
            with gr.Column(scale=1):
                image_export_mode = gr.Radio(
                    [
                        ("Embedded", "embedded"),
                        ("Placeholder", "placeholder"),
                        ("Referenced", "referenced"),
                    ],
                    label="Image Export Mode",
                    value="embedded",
                )
        with gr.Row():
            with gr.Column(scale=1, min_width=200):
                ocr = gr.Checkbox(label="Enable OCR", value=True)
                force_ocr = gr.Checkbox(label="Force OCR", value=False)
            with gr.Column(scale=1):
                ocr_engine = gr.Radio(
                    [
                        ("EasyOCR", "easyocr"),
                        ("Tesseract", "tesseract"),
                        ("RapidOCR", "rapidocr"),
                    ],
                    label="OCR Engine",
                    value="easyocr",
                )
            with gr.Column(scale=1, min_width=200):
                ocr_lang = gr.Textbox(
                    label="OCR Language (beware of the format)", value="en,fr,de,es"
                )
            ocr_engine.change(change_ocr_lang, inputs=[ocr_engine], outputs=[ocr_lang])
        with gr.Row():
            with gr.Column(scale=2):
                pdf_backend = gr.Radio(
                    ["pypdfium2", "dlparse_v1", "dlparse_v2"],
                    label="PDF Backend",
                    value="dlparse_v2",
                )
            with gr.Column(scale=2):
                table_mode = gr.Radio(
                    ["fast", "accurate"], label="Table Mode", value="fast"
                )
            with gr.Column(scale=1):
                abort_on_error = gr.Checkbox(label="Abort on Error", value=False)
                return_as_file = gr.Checkbox(label="Return as File", value=False)

    # Document output
    with gr.Row(visible=False) as content_output:
        with gr.Tab("Markdown"):
            output_markdown = gr.Code(
                language="markdown", wrap_lines=True, show_label=False
            )
        with gr.Tab("Markdown-Rendered"):
            output_markdown_rendered = gr.Markdown(label="Response")
        with gr.Tab("Docling (JSON)"):
            output_json = gr.Code(language="json", wrap_lines=True, show_label=False)
        with gr.Tab("HTML"):
            output_html = gr.Code(language="html", wrap_lines=True, show_label=False)
        with gr.Tab("HTML-Rendered"):
            output_html_rendered = gr.HTML(label="Response")
        with gr.Tab("Text"):
            output_text = gr.Code(wrap_lines=True, show_label=False)
        with gr.Tab("DocTags"):
            output_doctags = gr.Code(wrap_lines=True, show_label=False)

    # File download output
    with gr.Row(visible=False) as file_output:
        download_file_btn = gr.DownloadButton(label="Placeholder", scale=1)

    ##############
    # UI Actions #
    ##############

    # Handle Return as File
    url_input.change(
        auto_set_return_as_file,
        inputs=[url_input, file_input, image_export_mode],
        outputs=[return_as_file],
    )
    file_input.change(
        auto_set_return_as_file,
        inputs=[url_input, file_input, image_export_mode],
        outputs=[return_as_file],
    )
    image_export_mode.change(
        auto_set_return_as_file,
        inputs=[url_input, file_input, image_export_mode],
        outputs=[return_as_file],
    )

    # URL processing
    url_process_btn.click(
        set_options_visibility, inputs=[false_bool], outputs=[options]
    ).then(
        set_download_button_label, inputs=[processing_text], outputs=[download_file_btn]
    ).then(
        set_outputs_visibility_process,
        inputs=[return_as_file],
        outputs=[content_output, file_output],
    ).then(
        clear_outputs,
        inputs=None,
        outputs=[
            output_markdown,
            output_markdown_rendered,
            output_json,
            output_html,
            output_html_rendered,
            output_text,
            output_doctags,
        ],
    ).then(
        process_url,
        inputs=[
            url_input,
            to_formats,
            image_export_mode,
            ocr,
            force_ocr,
            ocr_engine,
            ocr_lang,
            pdf_backend,
            table_mode,
            abort_on_error,
            return_as_file,
        ],
        outputs=[
            output_markdown,
            output_markdown_rendered,
            output_json,
            output_html,
            output_html_rendered,
            output_text,
            output_doctags,
            download_file_btn,
        ],
    )

    url_reset_btn.click(
        clear_outputs,
        inputs=None,
        outputs=[
            output_markdown,
            output_markdown_rendered,
            output_json,
            output_html,
            output_html_rendered,
            output_text,
            output_doctags,
        ],
    ).then(set_options_visibility, inputs=[true_bool], outputs=[options]).then(
        set_outputs_visibility_direct,
        inputs=[false_bool, false_bool],
        outputs=[content_output, file_output],
    ).then(
        clear_url_input, inputs=None, outputs=[url_input]
    )

    # File processing
    file_process_btn.click(
        set_options_visibility, inputs=[false_bool], outputs=[options]
    ).then(
        set_download_button_label, inputs=[processing_text], outputs=[download_file_btn]
    ).then(
        set_outputs_visibility_process,
        inputs=[return_as_file],
        outputs=[content_output, file_output],
    ).then(
        clear_outputs,
        inputs=None,
        outputs=[
            output_markdown,
            output_markdown_rendered,
            output_json,
            output_html,
            output_html_rendered,
            output_text,
            output_doctags,
        ],
    ).then(
        process_file,
        inputs=[
            file_input,
            to_formats,
            image_export_mode,
            ocr,
            force_ocr,
            ocr_engine,
            ocr_lang,
            pdf_backend,
            table_mode,
            abort_on_error,
            return_as_file,
        ],
        outputs=[
            output_markdown,
            output_markdown_rendered,
            output_json,
            output_html,
            output_html_rendered,
            output_text,
            output_doctags,
            download_file_btn,
        ],
    )

    file_reset_btn.click(
        clear_outputs,
        inputs=None,
        outputs=[
            output_markdown,
            output_markdown_rendered,
            output_json,
            output_html,
            output_html_rendered,
            output_text,
            output_doctags,
        ],
    ).then(set_options_visibility, inputs=[true_bool], outputs=[options]).then(
        set_outputs_visibility_direct,
        inputs=[false_bool, false_bool],
        outputs=[content_output, file_output],
    ).then(
        clear_file_input, inputs=None, outputs=[file_input]
    )
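For local experimentation, the Blocks object can also be served on its own with gradio's standard launch API. A sketch (the docling-serve API must already be listening on localhost:5001 for the HTTP calls the UI makes):

    # Sketch: run the UI standalone instead of mounting it on FastAPI.
    from docling_serve.gradio_ui import ui

    ui.launch(server_name="0.0.0.0", server_port=7860)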
docling_serve/helper_functions.py (Normal file, 62 lines)
@@ -0,0 +1,62 @@
import inspect
import re
from typing import List, Type, Union

from fastapi import Depends, Form
from pydantic import BaseModel


# Adapted from
# https://github.com/fastapi/fastapi/discussions/8971#discussioncomment-7892972
def FormDepends(cls: Type[BaseModel]):
    new_parameters = []

    for field_name, model_field in cls.model_fields.items():
        new_parameters.append(
            inspect.Parameter(
                name=field_name,
                kind=inspect.Parameter.POSITIONAL_ONLY,
                default=(
                    Form(...)
                    if model_field.is_required()
                    else Form(model_field.default)
                ),
                annotation=model_field.annotation,
            )
        )

    async def as_form_func(**data):
        return cls(**data)

    sig = inspect.signature(as_form_func)
    sig = sig.replace(parameters=new_parameters)
    as_form_func.__signature__ = sig  # type: ignore
    return Depends(as_form_func)


def _to_list_of_strings(input_value: Union[str, List[str]]) -> List[str]:
    def split_and_strip(value: str) -> List[str]:
        if re.search(r"[;,]", value):
            return [item.strip() for item in re.split(r"[;,]", value)]
        else:
            return [value.strip()]

    if isinstance(input_value, str):
        return split_and_strip(input_value)
    elif isinstance(input_value, list):
        result = []
        for item in input_value:
            result.extend(split_and_strip(str(item)))
        return result
    else:
        raise ValueError("Invalid input: must be a string or a list of strings.")


# Helper functions to parse inputs coming as Form objects
def _str_to_bool(value: Union[str, bool]) -> bool:
    if isinstance(value, bool):
        return value  # Already a boolean, return as-is
    if isinstance(value, str):
        value = value.strip().lower()  # Normalize input
        return value in ("true", "1", "yes")
    return False  # Default to False if none of the above matches
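To illustrate how these helpers behave, a small sketch (the `Options` model and `/demo` route are illustrative, not part of the codebase):

    # Sketch: FormDepends turns a Pydantic model into form-field parameters,
    # so multipart endpoints can receive typed options alongside file uploads.
    from typing import Annotated

    from fastapi import FastAPI
    from pydantic import BaseModel

    from docling_serve.helper_functions import (
        FormDepends,
        _str_to_bool,
        _to_list_of_strings,
    )

    app = FastAPI()


    class Options(BaseModel):  # illustrative model, not part of the codebase
        do_ocr: bool = True


    @app.post("/demo")  # illustrative route
    async def demo(options: Annotated[Options, FormDepends(Options)]):
        return options

    # The parsing helpers accept loosely formatted form input:
    assert _to_list_of_strings("en, fr;de") == ["en", "fr", "de"]
    assert _str_to_bool("Yes") is True and _str_to_bool("0") is False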
docling_serve/response_preparation.py (Normal file, 248 lines)
@@ -0,0 +1,248 @@
import logging
import os
import shutil
import tempfile
import time
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Union

from docling.datamodel.base_models import OutputFormat
from docling.datamodel.document import ConversionResult, ConversionStatus, ErrorItem
from docling.utils.profiling import ProfilingItem
from docling_core.types.doc import DoclingDocument, ImageRefMode
from fastapi import BackgroundTasks, HTTPException
from fastapi.responses import FileResponse
from pydantic import BaseModel

from docling_serve.docling_conversion import ConvertDocumentsOptions

_log = logging.getLogger(__name__)


class DocumentResponse(BaseModel):
    filename: str
    md_content: Optional[str] = None
    json_content: Optional[DoclingDocument] = None
    html_content: Optional[str] = None
    text_content: Optional[str] = None
    doctags_content: Optional[str] = None


class ConvertDocumentResponse(BaseModel):
    document: DocumentResponse
    status: ConversionStatus
    errors: List[ErrorItem] = []
    processing_time: float
    timings: Dict[str, ProfilingItem] = {}


class ConvertDocumentErrorResponse(BaseModel):
    status: ConversionStatus


def _export_document_as_content(
    conv_res: ConversionResult,
    export_json: bool,
    export_html: bool,
    export_md: bool,
    export_txt: bool,
    export_doctags: bool,
    image_mode: ImageRefMode,
):

    document = DocumentResponse(filename=conv_res.input.file.name)

    if conv_res.status == ConversionStatus.SUCCESS:
        new_doc = conv_res.document._make_copy_with_refmode(Path(), image_mode)

        # Create the different formats
        if export_json:
            document.json_content = new_doc
        if export_html:
            document.html_content = new_doc.export_to_html(image_mode=image_mode)
        if export_txt:
            document.text_content = new_doc.export_to_markdown(
                strict_text=True, image_mode=image_mode
            )
        if export_md:
            document.md_content = new_doc.export_to_markdown(image_mode=image_mode)
        if export_doctags:
            document.doctags_content = new_doc.export_to_document_tokens()
    elif conv_res.status == ConversionStatus.SKIPPED:
        raise HTTPException(status_code=400, detail=conv_res.errors)
    else:
        raise HTTPException(status_code=500, detail=conv_res.errors)

    return document


def _export_documents_as_files(
    conv_results: Iterable[ConversionResult],
    output_dir: Path,
    export_json: bool,
    export_html: bool,
    export_md: bool,
    export_txt: bool,
    export_doctags: bool,
    image_export_mode: ImageRefMode,
):

    success_count = 0
    failure_count = 0

    for conv_res in conv_results:
        if conv_res.status == ConversionStatus.SUCCESS:
            success_count += 1
            doc_filename = conv_res.input.file.stem

            # Export JSON format:
            if export_json:
                fname = output_dir / f"{doc_filename}.json"
                _log.info(f"writing JSON output to {fname}")
                conv_res.document.save_as_json(
                    filename=fname, image_mode=image_export_mode
                )

            # Export HTML format:
            if export_html:
                fname = output_dir / f"{doc_filename}.html"
                _log.info(f"writing HTML output to {fname}")
                conv_res.document.save_as_html(
                    filename=fname, image_mode=image_export_mode
                )

            # Export Text format:
            if export_txt:
                fname = output_dir / f"{doc_filename}.txt"
                _log.info(f"writing TXT output to {fname}")
                conv_res.document.save_as_markdown(
                    filename=fname,
                    strict_text=True,
                    image_mode=ImageRefMode.PLACEHOLDER,
                )

            # Export Markdown format:
            if export_md:
                fname = output_dir / f"{doc_filename}.md"
                _log.info(f"writing Markdown output to {fname}")
                conv_res.document.save_as_markdown(
                    filename=fname, image_mode=image_export_mode
                )

            # Export Document Tags format:
            if export_doctags:
                fname = output_dir / f"{doc_filename}.doctags"
                _log.info(f"writing Doc Tags output to {fname}")
                conv_res.document.save_as_document_tokens(filename=fname)

        else:
            _log.warning(f"Document {conv_res.input.file} failed to convert.")
            failure_count += 1

    _log.info(
        f"Processed {success_count + failure_count} docs, "
        f"of which {failure_count} failed"
    )


def process_results(
    background_tasks: BackgroundTasks,
    conversion_options: ConvertDocumentsOptions,
    conv_results: Iterable[ConversionResult],
) -> Union[ConvertDocumentResponse, FileResponse]:

    # Let's start by processing the documents
    try:
        start_time = time.monotonic()

        # Convert the iterator to a list to count the number of results and get timings
        # As it's an iterator (lazy evaluation), it will also start the conversion
        conv_results = list(conv_results)

        processing_time = time.monotonic() - start_time

        _log.info(
            f"Processed {len(conv_results)} docs in {processing_time:.2f} seconds."
        )

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

    if len(conv_results) == 0:
        raise HTTPException(
            status_code=500, detail="No documents were generated by Docling."
        )

    # We have some results, let's prepare the response
    response: Union[FileResponse, ConvertDocumentResponse]

    # Booleans to know what to export
    export_json = OutputFormat.JSON in conversion_options.to_formats
    export_html = OutputFormat.HTML in conversion_options.to_formats
    export_md = OutputFormat.MARKDOWN in conversion_options.to_formats
    export_txt = OutputFormat.TEXT in conversion_options.to_formats
    export_doctags = OutputFormat.DOCTAGS in conversion_options.to_formats

    # Only 1 document was processed, and we are not returning it as a file
    if len(conv_results) == 1 and not conversion_options.return_as_file:
        conv_res = conv_results[0]
        document = _export_document_as_content(
            conv_res,
            export_json=export_json,
            export_html=export_html,
            export_md=export_md,
            export_txt=export_txt,
            export_doctags=export_doctags,
            image_mode=conversion_options.image_export_mode,
        )

        response = ConvertDocumentResponse(
            document=document,
            status=conv_res.status,
            processing_time=processing_time,
            timings=conv_res.timings,
        )

    # Multiple documents were processed, or we are forced returning as a file
    else:
        # Temporary directory to store the outputs
        work_dir = Path(tempfile.mkdtemp(prefix="docling_"))
        output_dir = work_dir / "output"
        output_dir.mkdir(parents=True, exist_ok=True)

        # Worker pid to use in archive identification as we may have multiple workers
        os.getpid()

        # Export the documents
        _export_documents_as_files(
            conv_results=conv_results,
            output_dir=output_dir,
            export_json=export_json,
            export_html=export_html,
            export_md=export_md,
            export_txt=export_txt,
            export_doctags=export_doctags,
            image_export_mode=conversion_options.image_export_mode,
        )

        files = os.listdir(output_dir)

        if len(files) == 0:
            raise HTTPException(status_code=500, detail="No documents were exported.")

        file_path = work_dir / "converted_docs.zip"
        shutil.make_archive(
            base_name=str(file_path.with_suffix("")),
            format="zip",
            root_dir=output_dir,
        )

        # Other cleanups after the response is sent
        # Output directory
        background_tasks.add_task(shutil.rmtree, work_dir, ignore_errors=True)

        response = FileResponse(
            file_path, filename=file_path.name, media_type="application/zip"
        )

    return response
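Putting the two response modes together, a hedged client-side sketch (the endpoint path and field names come from the code above; the sample URL is the one used in the model examples):

    # Sketch: request a conversion and handle both response modes.
    import requests

    payload = {
        "http_sources": [{"url": "https://arxiv.org/pdf/2206.01062"}],
        "options": {"to_formats": ["md", "html"], "return_as_file": True},
    }
    resp = requests.post(
        "http://localhost:5001/v1alpha/convert/source", json=payload
    )
    if resp.headers.get("content-type", "").startswith("application/zip"):
        with open("converted_docs.zip", "wb") as f:  # file mode -> zip archive
            f.write(resp.content)
    else:
        print(resp.json()["document"]["md_content"])  # single JSON response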