Compare commits

..

6 Commits

Author SHA1 Message Date
Alex
fcdb4fb5e8 feat: faster ebook parsing 2026-04-09 18:31:06 +01:00
Alex
e787c896eb upd Security.md 2026-04-08 12:49:20 +01:00
Alex
23aeaff5db Merge pull request #2362 from arc53/v1-mini-improvements
feat: history overwrite
2026-04-06 15:02:32 +01:00
Alex
689dd79597 fix: lang 2026-04-06 14:57:51 +01:00
Alex
0c15af90b1 feat: history overwrite 2026-04-06 14:42:01 +01:00
Alex
cdd6ff6557 chore: bump deps 2026-04-04 12:45:34 +01:00
25 changed files with 679 additions and 210 deletions

View File

@@ -2,9 +2,7 @@
## Supported Versions
Supported Versions:
Currently, we support security patches by committing changes and bumping the version published on GitHub.
Security patches target the latest release and the `main` branch. We recommend always running the most recent version.
## Reporting a Vulnerability
@@ -14,7 +12,11 @@ https://github.com/arc53/DocsGPT/security
Then click **Report a vulnerability**.
Alternatively:
Alternatively, email us at: security@arc53.com
security@arc53.com
We aim to acknowledge reports within 48 hours.
## Incident Handling
Arc53 maintains internal incident response procedures. If you believe an active exploit is occurring, include **URGENT** in your report subject line.

View File

@@ -112,6 +112,7 @@ class StreamProcessor:
self._required_tool_actions: Optional[Dict[str, Set[Optional[str]]]] = None
self.compressed_summary: Optional[str] = None
self.compressed_summary_tokens: int = 0
self._agent_data: Optional[Dict[str, Any]] = None
def initialize(self):
"""Initialize all required components for processing"""
@@ -359,22 +360,29 @@ class StreamProcessor:
return data
def _configure_source(self):
"""Configure the source based on agent data"""
api_key = self.data.get("api_key") or self.agent_key
"""Configure the source based on agent data.
if api_key:
agent_data = self._get_data_from_api_key(api_key)
The literal string ``"default"`` is a placeholder meaning "no
ingested source" and is normalized to an empty source so that no
retrieval is attempted.
"""
if self._agent_data:
agent_data = self._agent_data
if agent_data.get("sources") and len(agent_data["sources"]) > 0:
source_ids = [
source["id"] for source in agent_data["sources"] if source.get("id")
source["id"]
for source in agent_data["sources"]
if source.get("id") and source["id"] != "default"
]
if source_ids:
self.source = {"active_docs": source_ids}
else:
self.source = {}
self.all_sources = agent_data["sources"]
elif agent_data.get("source"):
self.all_sources = [
s for s in agent_data["sources"] if s.get("id") != "default"
]
elif agent_data.get("source") and agent_data["source"] != "default":
self.source = {"active_docs": agent_data["source"]}
self.all_sources = [
{
@@ -387,11 +395,24 @@ class StreamProcessor:
self.all_sources = []
return
if "active_docs" in self.data:
self.source = {"active_docs": self.data["active_docs"]}
active_docs = self.data["active_docs"]
if active_docs and active_docs != "default":
self.source = {"active_docs": active_docs}
else:
self.source = {}
return
self.source = {}
self.all_sources = []
def _has_active_docs(self) -> bool:
"""Return True if a real document source is configured for retrieval."""
active_docs = self.source.get("active_docs") if self.source else None
if not active_docs:
return False
if active_docs == "default":
return False
return True
def _resolve_agent_id(self) -> Optional[str]:
"""Resolve agent_id from request, then fall back to conversation context."""
request_agent_id = self.data.get("agent_id")
@@ -433,48 +454,39 @@ class StreamProcessor:
effective_key = self.data.get("api_key") or self.agent_key
if effective_key:
data_key = self._get_data_from_api_key(effective_key)
if data_key.get("_id"):
self.agent_id = str(data_key.get("_id"))
self._agent_data = self._get_data_from_api_key(effective_key)
if self._agent_data.get("_id"):
self.agent_id = str(self._agent_data.get("_id"))
self.agent_config.update(
{
"prompt_id": data_key.get("prompt_id", "default"),
"agent_type": data_key.get("agent_type", settings.AGENT_NAME),
"prompt_id": self._agent_data.get("prompt_id", "default"),
"agent_type": self._agent_data.get("agent_type", settings.AGENT_NAME),
"user_api_key": effective_key,
"json_schema": data_key.get("json_schema"),
"default_model_id": data_key.get("default_model_id", ""),
"models": data_key.get("models", []),
"json_schema": self._agent_data.get("json_schema"),
"default_model_id": self._agent_data.get("default_model_id", ""),
"models": self._agent_data.get("models", []),
"allow_system_prompt_override": self._agent_data.get(
"allow_system_prompt_override", False
),
}
)
# Set identity context
if self.data.get("api_key"):
# External API key: use the key owner's identity
self.initial_user_id = data_key.get("user")
self.decoded_token = {"sub": data_key.get("user")}
self.initial_user_id = self._agent_data.get("user")
self.decoded_token = {"sub": self._agent_data.get("user")}
elif self.is_shared_usage:
# Shared agent: keep the caller's identity
pass
else:
# Owner using their own agent
self.decoded_token = {"sub": data_key.get("user")}
self.decoded_token = {"sub": self._agent_data.get("user")}
if data_key.get("source"):
self.source = {"active_docs": data_key["source"]}
if data_key.get("workflow"):
self.agent_config["workflow"] = data_key["workflow"]
self.agent_config["workflow_owner"] = data_key.get("user")
if data_key.get("retriever"):
self.retriever_config["retriever_name"] = data_key["retriever"]
if data_key.get("chunks") is not None:
try:
self.retriever_config["chunks"] = int(data_key["chunks"])
except (ValueError, TypeError):
logger.warning(
f"Invalid chunks value: {data_key['chunks']}, using default value 2"
)
self.retriever_config["chunks"] = 2
if self._agent_data.get("workflow"):
self.agent_config["workflow"] = self._agent_data["workflow"]
self.agent_config["workflow_owner"] = self._agent_data.get("user")
else:
# No API key — default/workflow configuration
agent_type = settings.AGENT_NAME
@@ -497,14 +509,45 @@ class StreamProcessor:
)
def _configure_retriever(self):
"""Assemble retriever config with precedence: request > agent > default."""
doc_token_limit = calculate_doc_token_budget(model_id=self.model_id)
# Start with defaults
retriever_name = "classic"
chunks = 2
# Layer agent-level config (if present)
if self._agent_data:
if self._agent_data.get("retriever"):
retriever_name = self._agent_data["retriever"]
if self._agent_data.get("chunks") is not None:
try:
chunks = int(self._agent_data["chunks"])
except (ValueError, TypeError):
logger.warning(
f"Invalid agent chunks value: {self._agent_data['chunks']}, "
"using default value 2"
)
# Explicit request values win over agent config
if "retriever" in self.data:
retriever_name = self.data["retriever"]
if "chunks" in self.data:
try:
chunks = int(self.data["chunks"])
except (ValueError, TypeError):
logger.warning(
f"Invalid request chunks value: {self.data['chunks']}, "
"using default value 2"
)
self.retriever_config = {
"retriever_name": self.data.get("retriever", "classic"),
"chunks": int(self.data.get("chunks", 2)),
"retriever_name": retriever_name,
"chunks": chunks,
"doc_token_limit": doc_token_limit,
}
# isNoneDoc without an API key forces no retrieval
api_key = self.data.get("api_key") or self.agent_key
if not api_key and "isNoneDoc" in self.data and self.data["isNoneDoc"]:
self.retriever_config["chunks"] = 0
@@ -528,6 +571,9 @@ class StreamProcessor:
if self.data.get("isNoneDoc", False) and not self.agent_id:
logger.info("Pre-fetch skipped: isNoneDoc=True")
return None, None
if not self._has_active_docs():
logger.info("Pre-fetch skipped: no active docs configured")
return None, None
try:
retriever = self.create_retriever()
logger.info(
@@ -910,15 +956,23 @@ class StreamProcessor:
raw_prompt = get_prompt(prompt_id, self.prompts_collection)
self._prompt_content = raw_prompt
rendered_prompt = self.prompt_renderer.render_prompt(
prompt_content=raw_prompt,
user_id=self.initial_user_id,
request_id=self.data.get("request_id"),
passthrough_data=self.data.get("passthrough"),
docs=docs,
docs_together=docs_together,
tools_data=tools_data,
)
# Allow API callers to override the system prompt when the agent
# has opted in via allow_system_prompt_override.
if (
self.agent_config.get("allow_system_prompt_override", False)
and self.data.get("system_prompt_override")
):
rendered_prompt = self.data["system_prompt_override"]
else:
rendered_prompt = self.prompt_renderer.render_prompt(
prompt_content=raw_prompt,
user_id=self.initial_user_id,
request_id=self.data.get("request_id"),
passthrough_data=self.data.get("passthrough"),
docs=docs,
docs_together=docs_together,
tools_data=tools_data,
)
provider = (
get_provider_from_model_id(self.model_id)

View File

@@ -73,6 +73,7 @@ AGENT_TYPE_SCHEMAS = {
"token_limit",
"limited_request_mode",
"request_limit",
"allow_system_prompt_override",
"createdAt",
"updatedAt",
"lastUsedAt",
@@ -96,6 +97,7 @@ AGENT_TYPE_SCHEMAS = {
"token_limit",
"limited_request_mode",
"request_limit",
"allow_system_prompt_override",
"createdAt",
"updatedAt",
"lastUsedAt",
@@ -220,6 +222,12 @@ def build_agent_document(
base_doc["request_limit"] = int(
data.get("request_limit", settings.DEFAULT_AGENT_LIMITS["request_limit"])
)
if "allow_system_prompt_override" in allowed_fields:
base_doc["allow_system_prompt_override"] = (
data.get("allow_system_prompt_override") == "True"
if isinstance(data.get("allow_system_prompt_override"), str)
else bool(data.get("allow_system_prompt_override", False))
)
return {k: v for k, v in base_doc.items() if k in allowed_fields}
@@ -292,6 +300,9 @@ class GetAgent(Resource):
"default_model_id": agent.get("default_model_id", ""),
"folder_id": agent.get("folder_id"),
"workflow": agent.get("workflow"),
"allow_system_prompt_override": agent.get(
"allow_system_prompt_override", False
),
}
return make_response(jsonify(data), 200)
except Exception as e:
@@ -373,6 +384,9 @@ class GetAgents(Resource):
"default_model_id": agent.get("default_model_id", ""),
"folder_id": agent.get("folder_id"),
"workflow": agent.get("workflow"),
"allow_system_prompt_override": agent.get(
"allow_system_prompt_override", False
),
}
for agent in agents
if "source" in agent
@@ -450,6 +464,10 @@ class CreateAgent(Resource):
"folder_id": fields.String(
required=False, description="Folder ID to organize the agent"
),
"allow_system_prompt_override": fields.Boolean(
required=False,
description="Allow API callers to override the system prompt via the v1 endpoint",
),
},
)
@@ -674,6 +692,10 @@ class UpdateAgent(Resource):
"folder_id": fields.String(
required=False, description="Folder ID to organize the agent"
),
"allow_system_prompt_override": fields.Boolean(
required=False,
description="Allow API callers to override the system prompt via the v1 endpoint",
),
},
)
@@ -765,6 +787,7 @@ class UpdateAgent(Resource):
"default_model_id",
"folder_id",
"workflow",
"allow_system_prompt_override",
]
for field in allowed_fields:
@@ -983,6 +1006,13 @@ class UpdateAgent(Resource):
if workflow_error:
return workflow_error
update_fields[field] = workflow_id
elif field == "allow_system_prompt_override":
raw_value = data.get("allow_system_prompt_override", False)
update_fields[field] = (
raw_value == "True"
if isinstance(raw_value, str)
else bool(raw_value)
)
else:
value = data[field]
if field in ["name", "description", "prompt_id", "agent_type"]:

View File

@@ -138,10 +138,18 @@ def chat_completions():
if usage_error:
return usage_error
should_save_conversation = bool(internal_data.get("save_conversation", False))
if is_stream:
return Response(
_stream_response(
helper, question, agent, processor, model_name, continuation
helper,
question,
agent,
processor,
model_name,
continuation,
should_save_conversation,
),
mimetype="text/event-stream",
headers={
@@ -151,7 +159,13 @@ def chat_completions():
)
else:
return _non_stream_response(
helper, question, agent, processor, model_name, continuation
helper,
question,
agent,
processor,
model_name,
continuation,
should_save_conversation,
)
except ValueError as e:
@@ -181,6 +195,7 @@ def _stream_response(
processor: StreamProcessor,
model_name: str,
continuation: Optional[Dict],
should_save_conversation: bool,
) -> Generator[str, None, None]:
"""Generate translated SSE chunks for streaming response."""
completion_id = f"chatcmpl-{int(time.time())}"
@@ -193,6 +208,7 @@ def _stream_response(
decoded_token=processor.decoded_token,
agent_id=processor.agent_id,
model_id=processor.model_id,
should_save_conversation=should_save_conversation,
_continuation=continuation,
)
@@ -225,6 +241,7 @@ def _non_stream_response(
processor: StreamProcessor,
model_name: str,
continuation: Optional[Dict],
should_save_conversation: bool,
) -> Response:
"""Collect full response and return as single JSON."""
stream = helper.complete_stream(
@@ -235,6 +252,7 @@ def _non_stream_response(
decoded_token=processor.decoded_token,
agent_id=processor.agent_id,
model_id=processor.model_id,
should_save_conversation=should_save_conversation,
_continuation=continuation,
)

View File

@@ -80,6 +80,17 @@ def extract_conversation_id(messages: List[Dict]) -> Optional[str]:
return None
def extract_system_prompt(messages: List[Dict]) -> Optional[str]:
"""Extract the first system message content from the messages array.
Returns None if no system message is present.
"""
for msg in messages:
if msg.get("role") == "system":
return msg.get("content", "")
return None
def convert_history(messages: List[Dict]) -> List[Dict]:
"""Convert chat completions messages array to DocsGPT history format.
@@ -148,20 +159,27 @@ def translate_request(
break
history = convert_history(messages)
system_prompt_override = extract_system_prompt(messages)
docsgpt = data.get("docsgpt", {})
result = {
"question": question,
"api_key": api_key,
"history": json.dumps(history),
"save_conversation": True,
# Conversations are NOT persisted by default on the v1 endpoint.
# Callers opt in via ``docsgpt.save_conversation: true``.
"save_conversation": bool(docsgpt.get("save_conversation", False)),
}
if system_prompt_override is not None:
result["system_prompt_override"] = system_prompt_override
# Client tools
if data.get("tools"):
result["client_tools"] = data["tools"]
# DocsGPT extensions
docsgpt = data.get("docsgpt", {})
if docsgpt.get("attachments"):
result["attachments"] = docsgpt["attachments"]

View File

@@ -19,25 +19,10 @@ class EpubParser(BaseParser):
def parse_file(self, file: Path, errors: str = "ignore") -> str:
"""Parse file."""
try:
import ebooklib
from ebooklib import epub
from fast_ebook import epub
except ImportError:
raise ValueError("`EbookLib` is required to read Epub files.")
try:
import html2text
except ImportError:
raise ValueError("`html2text` is required to parse Epub files.")
raise ValueError("`fast-ebook` is required to read Epub files.")
text_list = []
book = epub.read_epub(file, options={"ignore_ncx": True})
# Iterate through all chapters.
for item in book.get_items():
# Chapters are typically located in epub documents items.
if item.get_type() == ebooklib.ITEM_DOCUMENT:
text_list.append(
html2text.html2text(item.get_content().decode("utf-8"))
)
text = "\n".join(text_list)
book = epub.read_epub(file)
text = book.to_markdown()
return text

View File

@@ -1,5 +1,5 @@
anthropic==0.88.0
boto3==1.42.24
boto3==1.42.83
beautifulsoup4==4.14.3
cel-python==0.5.0
celery==5.6.3
@@ -11,8 +11,8 @@ rapidocr>=1.4.0
onnxruntime>=1.19.0
docx2txt==0.9
ddgs>=8.0.0
ebooklib==0.20
elevenlabs==2.40.0
fast-ebook
elevenlabs==2.41.0
Flask==3.1.3
faiss-cpu==1.13.2
fastmcp==3.2.0
@@ -23,10 +23,9 @@ google-auth-httplib2==0.3.1
google-auth-oauthlib==1.3.1
gTTS==2.5.4
gunicorn==25.3.0
html2text==2025.4.15
jinja2==3.1.6
jiter==0.13.0
jmespath==1.0.1
jmespath==1.1.0
joblib==1.5.3
jsonpatch==1.33
jsonpointer==3.0.0
@@ -72,7 +71,7 @@ python-jose==3.5.0
python-pptx==1.0.2
redis==7.4.0
referencing>=0.28.0,<0.38.0
regex==2026.3.32
regex==2026.4.4
requests==2.33.1
retry==0.9.2
sentence-transformers==5.3.0

View File

@@ -7,6 +7,10 @@ export default {
"title": "🔌 Agent API",
"href": "/Agents/api"
},
"openai-compatible": {
"title": "🔄 OpenAI-Compatible API",
"href": "/Agents/openai-compatible"
},
"webhooks": {
"title": "🪝 Agent Webhooks",
"href": "/Agents/webhooks"

View File

@@ -15,6 +15,10 @@ DocsGPT Agents can be accessed programmatically through API endpoints. This page
When you use an agent `api_key`, DocsGPT loads that agent's configuration automatically (prompt, tools, sources, default model). You usually only need to send `question` and `api_key`.
<Callout type="info">
Looking to connect an existing OpenAI-compatible client (opencode, aider, the OpenAI SDKs, etc.) to a DocsGPT Agent? Use the [OpenAI-Compatible Chat Completions API](/Agents/openai-compatible) — it speaks the standard chat completions protocol so no adapter code is required.
</Callout>
## Base URL
<Callout type="info">

View File

@@ -111,6 +111,7 @@ Once an agent is created, you can:
* Modify any of its configuration settings (name, description, source, prompt, tools, type).
* **Generate a Public Link:** From the edit screen, you can create a shareable public link that allows others to import and use your agent.
* **Get a Webhook URL:** You can also obtain a Webhook URL for the agent. This allows external applications or services to trigger the agent and receive responses programmatically, enabling powerful integrations and automations.
* **Use it via API:** Every agent exposes an API key that can be used with the native [Agent API](/Agents/api) or the [OpenAI-Compatible API](/Agents/openai-compatible) so you can drop DocsGPT Agents into any tool that already speaks the chat completions protocol.
## Seeding Premade Agents from YAML

View File

@@ -0,0 +1,93 @@
---
title: OpenAI-Compatible API
description: Connect any OpenAI-compatible client to DocsGPT Agents via /v1/chat/completions.
---
import { Callout, Tabs } from 'nextra/components';
# OpenAI-Compatible API
DocsGPT exposes `/v1/chat/completions` following the standard chat completions protocol. Point any compatible client — **opencode**, **Aider**, **LibreChat** or the OpenAI SDKs — at your DocsGPT Agent by changing only the base URL and API key.
## Quick Start
<Tabs items={['Python', 'cURL']}>
<Tabs.Tab>
```python
from openai import OpenAI
client = OpenAI(
base_url="http://localhost:7091/v1", # or https://gptcloud.arc53.com/v1
api_key="your_agent_api_key",
)
response = client.chat.completions.create(
model="docsgpt-agent",
messages=[{"role": "user", "content": "Summarize our refund policy"}],
)
print(response.choices[0].message.content)
```
</Tabs.Tab>
<Tabs.Tab>
```bash
curl -X POST http://localhost:7091/v1/chat/completions \
-H "Authorization: Bearer your_agent_api_key" \
-H "Content-Type: application/json" \
-d '{"model":"docsgpt-agent","messages":[{"role":"user","content":"Summarize our refund policy"}]}'
```
</Tabs.Tab>
</Tabs>
The `model` field is accepted but ignored — the agent bound to your API key determines the model. The agent's prompt, sources, tools, and default model are loaded automatically.
## Base URL & Auth
| Environment | Base URL |
| --- | --- |
| Local | `http://localhost:7091/v1` |
| Cloud | `https://gptcloud.arc53.com/v1` |
Authenticate with `Authorization: Bearer <agent_api_key>`.
## Endpoints
| Method | Path | Description |
| --- | --- | --- |
| `POST` | `/v1/chat/completions` | Chat request (streaming or non-streaming) |
| `GET` | `/v1/models` | List agents available to your key |
## Streaming
Set `"stream": true`. You'll receive SSE chunks with `choices[0].delta.content`. DocsGPT-specific events (sources, tool calls) arrive as extra frames with a `docsgpt` key — standard clients ignore them.
```python
stream = client.chat.completions.create(
model="docsgpt-agent",
stream=True,
messages=[{"role": "user", "content": "Explain vector search"}],
)
for chunk in stream:
print(chunk.choices[0].delta.content or "", end="", flush=True)
```
## System Prompt Override
System messages are **dropped by default** — the agent's configured prompt is used. To allow callers to override it, enable **Allow prompt override** in the agent's Advanced settings.
<Callout type="warning">
When an override is active, the agent's prompt template is replaced wholesale — template variables like `{summaries}` are not substituted.
</Callout>
## Conversation Persistence
Conversations are **not persisted by default** (stateless, like most OpenAI clients expect). Opt in per request:
```json
{ "docsgpt": { "save_conversation": true } }
```
The response will include `docsgpt.conversation_id`.
## When to Use Native Endpoints Instead
Use [`/api/answer` or `/stream`](/Agents/api) if you need server-side attachments, `passthrough` template variables, explicit `conversation_id` reuse, or persistence by default.

View File

@@ -73,6 +73,7 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
token_limit: undefined,
limited_request_mode: false,
request_limit: undefined,
allow_system_prompt_override: false,
models: [],
default_model_id: '',
});
@@ -241,6 +242,11 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
formData.append('request_limit', '0');
}
formData.append(
'allow_system_prompt_override',
agent.allow_system_prompt_override ? 'True' : 'False',
);
if (imageFile) formData.append('image', imageFile);
if (agent.tools && agent.tools.length > 0)
@@ -361,6 +367,11 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
formData.append('request_limit', '0');
}
formData.append(
'allow_system_prompt_override',
agent.allow_system_prompt_override ? 'True' : 'False',
);
if (agent.models && agent.models.length > 0) {
formData.append('models', JSON.stringify(agent.models));
}
@@ -1266,6 +1277,43 @@ export default function NewAgent({ mode }: { mode: 'new' | 'edit' | 'draft' }) {
}`}
/>
</div>
<div className="mt-6">
<div className="flex items-center justify-between gap-4">
<div className="min-w-0 flex-1">
<h2 className="text-sm font-medium">
{t('agents.form.advanced.systemPromptOverride')}
</h2>
<p className="mt-1 text-xs text-gray-600 dark:text-gray-400">
{t(
'agents.form.advanced.systemPromptOverrideDescription',
)}
</p>
</div>
<button
onClick={() =>
setAgent({
...agent,
allow_system_prompt_override:
!agent.allow_system_prompt_override,
})
}
className={`relative h-6 w-11 shrink-0 rounded-full transition-colors ${
agent.allow_system_prompt_override
? 'bg-primary'
: 'bg-gray-300 dark:bg-gray-600'
}`}
>
<span
className={`absolute top-0.5 h-5 w-5 transform rounded-full bg-white transition-transform ${
agent.allow_system_prompt_override
? ''
: '-translate-x-5'
}`}
/>
</button>
</div>
</div>
</div>
)}
</div>

View File

@@ -36,6 +36,7 @@ export type Agent = {
default_model_id?: string;
folder_id?: string;
workflow?: string;
allow_system_prompt_override?: boolean;
};
export type AgentFolder = {

View File

@@ -18,6 +18,7 @@ import {
X,
} from 'lucide-react';
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import { useSelector } from 'react-redux';
import { useNavigate, useParams, useSearchParams } from 'react-router-dom';
import ReactFlow, {
@@ -301,6 +302,7 @@ function createWorkflowPayload(
}
function WorkflowBuilderInner() {
const { t } = useTranslation();
const navigate = useNavigate();
const token = useSelector(selectToken);
const sourceDocs = useSelector(selectSourceDocs);
@@ -1142,6 +1144,10 @@ function WorkflowBuilderInner() {
workflowDescription || `Workflow agent: ${workflowName}`,
);
agentFormData.append('status', 'published');
agentFormData.append(
'allow_system_prompt_override',
currentAgent.allow_system_prompt_override ? 'True' : 'False',
);
if (imageFile) {
agentFormData.append('image', imageFile);
}
@@ -1203,6 +1209,10 @@ function WorkflowBuilderInner() {
agentFormData.append('agent_type', 'workflow');
agentFormData.append('status', 'published');
agentFormData.append('workflow', savedWorkflowId || '');
agentFormData.append(
'allow_system_prompt_override',
currentAgent.allow_system_prompt_override ? 'True' : 'False',
);
if (imageFile) {
agentFormData.append('image', imageFile);
}
@@ -1454,6 +1464,40 @@ function WorkflowBuilderInner() {
Image updates are included the next time you save.
</p>
</div>
<div className="mb-3">
<div className="flex items-center justify-between">
<div>
<label className="block text-sm font-medium text-gray-700 dark:text-gray-300">
{t('agents.form.advanced.systemPromptOverride')}
</label>
<p className="mt-0.5 text-[11px] text-gray-500 dark:text-gray-400">
{t('agents.form.advanced.systemPromptOverrideDescription')}
</p>
</div>
<button
onClick={() =>
setCurrentAgent((prev) => ({
...prev,
allow_system_prompt_override:
!prev.allow_system_prompt_override,
}))
}
className={`relative h-6 w-11 shrink-0 rounded-full transition-colors ${
currentAgent.allow_system_prompt_override
? 'bg-primary'
: 'bg-gray-300 dark:bg-gray-600'
}`}
>
<span
className={`absolute top-0.5 h-5 w-5 transform rounded-full bg-white transition-transform ${
currentAgent.allow_system_prompt_override
? ''
: '-translate-x-5'
}`}
/>
</button>
</div>
</div>
<button
onClick={handleWorkflowSettingsDone}
disabled={isPublishing}

View File

@@ -619,7 +619,9 @@
"tokenLimiting": "Token-Limitierung",
"tokenLimitingDescription": "Begrenze die täglich von diesem Agenten verwendbaren Tokens",
"requestLimiting": "Anfrage-Limitierung",
"requestLimitingDescription": "Begrenze die täglich an diesen Agenten gestellten Anfragen"
"requestLimitingDescription": "Begrenze die täglich an diesen Agenten gestellten Anfragen",
"systemPromptOverride": "Prompt-Überschreibung erlauben",
"systemPromptOverrideDescription": "Erlaubt v1-API-Aufrufern, den System-Prompt dieses Agenten zu ersetzen"
},
"preview": {
"publishedPreview": "Veröffentlichte Agenten können hier in der Vorschau angezeigt werden"

View File

@@ -653,7 +653,9 @@
"tokenLimiting": "Token limiting",
"tokenLimitingDescription": "Limit daily total tokens that can be used by this agent",
"requestLimiting": "Request limiting",
"requestLimitingDescription": "Limit daily total requests that can be made to this agent"
"requestLimitingDescription": "Limit daily total requests that can be made to this agent",
"systemPromptOverride": "Allow prompt override",
"systemPromptOverrideDescription": "Let v1 API callers replace this agent's system prompt"
},
"preview": {
"publishedPreview": "Published agents can be previewed here"

View File

@@ -641,7 +641,9 @@
"tokenLimiting": "Límite de tokens",
"tokenLimitingDescription": "Limita el total diario de tokens que puede usar este agente",
"requestLimiting": "Límite de solicitudes",
"requestLimitingDescription": "Limita el total diario de solicitudes que se pueden hacer a este agente"
"requestLimitingDescription": "Limita el total diario de solicitudes que se pueden hacer a este agente",
"systemPromptOverride": "Permitir sobrescribir el prompt",
"systemPromptOverrideDescription": "Permitir que los llamadores de la API v1 reemplacen el prompt del sistema de este agente"
},
"preview": {
"publishedPreview": "Los agentes publicados se pueden previsualizar aquí"

View File

@@ -641,7 +641,9 @@
"tokenLimiting": "トークン制限",
"tokenLimitingDescription": "このエージェントが使用できる1日の合計トークン数を制限します",
"requestLimiting": "リクエスト制限",
"requestLimitingDescription": "このエージェントに対して行える1日の合計リクエスト数を制限します"
"requestLimitingDescription": "このエージェントに対して行える1日の合計リクエスト数を制限します",
"systemPromptOverride": "プロンプトの上書きを許可",
"systemPromptOverrideDescription": "v1 API呼び出し元がこのエージェントのシステムプロンプトを置き換えることを許可します"
},
"preview": {
"publishedPreview": "公開されたエージェントはここでプレビューできます"

View File

@@ -641,7 +641,9 @@
"tokenLimiting": "Лимит токенов",
"tokenLimitingDescription": "Ограничить ежедневное общее количество токенов, которые может использовать этот агент",
"requestLimiting": "Лимит запросов",
"requestLimitingDescription": "Ограничить ежедневное общее количество запросов, которые можно сделать к этому агенту"
"requestLimitingDescription": "Ограничить ежедневное общее количество запросов, которые можно сделать к этому агенту",
"systemPromptOverride": "Разрешить замену промпта",
"systemPromptOverrideDescription": "Разрешить вызовам API v1 заменять системный промпт этого агента"
},
"preview": {
"publishedPreview": "Опубликованные агенты можно просмотреть здесь"

View File

@@ -641,7 +641,9 @@
"tokenLimiting": "權杖限制",
"tokenLimitingDescription": "限制此代理每天可使用的總權杖數",
"requestLimiting": "請求限制",
"requestLimitingDescription": "限制每天可向此代理發出的總請求數"
"requestLimitingDescription": "限制每天可向此代理發出的總請求數",
"systemPromptOverride": "允許覆蓋提示詞",
"systemPromptOverrideDescription": "允許 v1 API 呼叫者替換此代理的系統提示詞"
},
"preview": {
"publishedPreview": "已發佈的代理可以在此處預覽"

View File

@@ -641,7 +641,9 @@
"tokenLimiting": "令牌限制",
"tokenLimitingDescription": "限制此代理每天可使用的总令牌数",
"requestLimiting": "请求限制",
"requestLimitingDescription": "限制每天可向此代理发出的总请求数"
"requestLimitingDescription": "限制每天可向此代理发出的总请求数",
"systemPromptOverride": "允许覆盖提示词",
"systemPromptOverrideDescription": "允许 v1 API 调用者替换此代理的系统提示词"
},
"preview": {
"publishedPreview": "已发布的代理可以在此处预览"

View File

@@ -330,6 +330,170 @@ class TestStreamProcessorDocPrefetch:
assert docs_together is not None
assert "Agent doc content" in docs_together
def test_configure_source_treats_default_string_as_no_docs(self, mock_mongo_db):
from application.api.answer.services.stream_processor import StreamProcessor
from application.core.settings import settings
db = mock_mongo_db[settings.MONGO_DB_NAME]
agents_collection = db["agents"]
agent_id = ObjectId()
agents_collection.insert_one(
{
"_id": agent_id,
"key": "agent_default_source_key",
"user": "user_123",
"prompt_id": "default",
"agent_type": "classic",
"source": "default",
}
)
processor = StreamProcessor(
{"question": "Hi", "api_key": "agent_default_source_key"},
None,
)
processor._configure_agent()
processor._configure_source()
assert processor.source == {}
assert processor.all_sources == []
def test_prefetch_skipped_when_no_active_docs(self, mock_mongo_db):
from unittest.mock import MagicMock
from application.api.answer.services.stream_processor import StreamProcessor
processor = StreamProcessor(
{"question": "Hi there"},
{"sub": "user_123"},
)
processor.initialize()
processor.create_retriever = MagicMock()
docs_together, docs = processor.pre_fetch_docs("Hi there")
processor.create_retriever.assert_not_called()
assert docs_together is None
assert docs is None
def test_prefetch_skipped_when_active_docs_is_default(self, mock_mongo_db):
from unittest.mock import MagicMock
from application.api.answer.services.stream_processor import StreamProcessor
processor = StreamProcessor(
{"question": "Hi", "active_docs": "default"},
{"sub": "user_123"},
)
processor.initialize()
processor.create_retriever = MagicMock()
docs_together, docs = processor.pre_fetch_docs("Hi")
processor.create_retriever.assert_not_called()
assert docs_together is None
assert docs is None
def test_agent_retriever_and_chunks_propagate_to_retriever_config(self, mock_mongo_db):
from application.api.answer.services.stream_processor import StreamProcessor
from application.core.settings import settings
db = mock_mongo_db[settings.MONGO_DB_NAME]
agents_collection = db["agents"]
source_id = ObjectId()
db["sources"].insert_one(
{"_id": source_id, "name": "src", "retriever": "hybrid", "chunks": 5}
)
agent_id = ObjectId()
agents_collection.insert_one(
{
"_id": agent_id,
"key": "agent_ret_key",
"user": "user_123",
"prompt_id": "default",
"agent_type": "classic",
"retriever": "hybrid",
"chunks": 5,
"source": DBRef("sources", source_id),
}
)
processor = StreamProcessor(
{"question": "Test", "api_key": "agent_ret_key"},
None,
)
processor.initialize()
assert processor.retriever_config["retriever_name"] == "hybrid"
assert processor.retriever_config["chunks"] == 5
def test_request_retriever_and_chunks_override_agent_config(self, mock_mongo_db):
    """Per-request retriever/chunks values win over the agent's stored ones.

    The agent is saved with ``hybrid``/5, but the incoming request asks
    for ``classic``/7 — the request-level settings must take precedence.
    """
    from application.api.answer.services.stream_processor import StreamProcessor
    from application.core.settings import settings

    database = mock_mongo_db[settings.MONGO_DB_NAME]
    database["agents"].insert_one(
        {
            "_id": ObjectId(),
            "key": "agent_override_key",
            "user": "user_123",
            "prompt_id": "default",
            "agent_type": "classic",
            "retriever": "hybrid",
            "chunks": 5,
        }
    )

    request_payload = {
        "question": "Test",
        "api_key": "agent_override_key",
        "retriever": "classic",
        "chunks": 7,
    }
    processor = StreamProcessor(request_payload, None)
    processor.initialize()

    assert processor.retriever_config["retriever_name"] == "classic"
    assert processor.retriever_config["chunks"] == 7
def test_agent_data_fetched_once_per_request(self, mock_mongo_db):
    """Agent data must be looked up exactly once during initialize().

    Wraps ``_get_data_from_api_key`` with a spy (``wraps`` keeps the real
    behavior) and verifies the cached agent data prevents repeat lookups.
    """
    from unittest.mock import patch

    from application.api.answer.services.stream_processor import StreamProcessor
    from application.core.settings import settings

    database = mock_mongo_db[settings.MONGO_DB_NAME]
    database["agents"].insert_one(
        {
            "_id": ObjectId(),
            "key": "agent_once_key",
            "user": "user_123",
            "prompt_id": "default",
            "agent_type": "classic",
        }
    )

    processor = StreamProcessor({"question": "Test", "api_key": "agent_once_key"}, None)
    with patch.object(
        processor, "_get_data_from_api_key", wraps=processor._get_data_from_api_key
    ) as lookup_spy:
        processor.initialize()

    assert lookup_spy.call_count == 1
@pytest.mark.unit
class TestStreamProcessorAttachments:

View File

@@ -566,14 +566,13 @@ class TestConfigureSource:
decoded_token={"sub": "u"},
)
sp.agent_key = None
agent_data = {
sp._agent_data = {
"sources": [
{"id": "src1", "retriever": "classic"},
{"id": "src2", "retriever": "hybrid"},
],
"source": None,
}
sp._get_data_from_api_key = MagicMock(return_value=agent_data)
sp._configure_source()
assert sp.source == {"active_docs": ["src1", "src2"]}
assert len(sp.all_sources) == 2
@@ -593,12 +592,11 @@ class TestConfigureSource:
decoded_token={"sub": "u"},
)
sp.agent_key = None
agent_data = {
sp._agent_data = {
"sources": [],
"source": "single_src",
"retriever": "classic",
}
sp._get_data_from_api_key = MagicMock(return_value=agent_data)
sp._configure_source()
assert sp.source == {"active_docs": "single_src"}
assert len(sp.all_sources) == 1
@@ -618,8 +616,7 @@ class TestConfigureSource:
decoded_token={"sub": "u"},
)
sp.agent_key = None
agent_data = {"sources": [], "source": None}
sp._get_data_from_api_key = MagicMock(return_value=agent_data)
sp._agent_data = {"sources": [], "source": None}
sp._configure_source()
assert sp.source == {}
assert sp.all_sources == []
@@ -639,11 +636,10 @@ class TestConfigureSource:
decoded_token={"sub": "u"},
)
sp.agent_key = "agent_key_123"
agent_data = {
sp._agent_data = {
"sources": [{"id": "s1", "retriever": "classic"}],
"source": None,
}
sp._get_data_from_api_key = MagicMock(return_value=agent_data)
sp._configure_source()
assert sp.source == {"active_docs": ["s1"]}
@@ -662,11 +658,10 @@ class TestConfigureSource:
decoded_token={"sub": "u"},
)
sp.agent_key = None
agent_data = {
sp._agent_data = {
"sources": [{"id": None}, {"retriever": "classic"}],
"source": None,
}
sp._get_data_from_api_key = MagicMock(return_value=agent_data)
sp._configure_source()
assert sp.source == {}
@@ -1189,6 +1184,8 @@ class TestConfigureAgent:
"chunks": "5",
})
sp._configure_agent()
sp.model_id = "test-model"
sp._configure_retriever()
assert sp.agent_config["workflow"] == "wf_123"
assert sp.agent_config["workflow_owner"] == "user1"
assert sp.retriever_config["retriever_name"] == "hybrid"
@@ -1211,6 +1208,8 @@ class TestConfigureAgent:
"chunks": "not_a_number",
})
sp._configure_agent()
sp.model_id = "test-model"
sp._configure_retriever()
assert sp.retriever_config["chunks"] == 2
@@ -1763,8 +1762,8 @@ class TestConfigureAgentAdditionalPaths:
assert sp.decoded_token == {"sub": "owner_user"}
@pytest.mark.unit
def test_configure_agent_with_source_in_data_key(self):
"""Cover line 463-464: data_key has 'source' set."""
def test_configure_source_picks_up_cached_agent_data(self):
"""After _configure_agent caches _agent_data, _configure_source uses it."""
sp = self._make_sp()
sp._resolve_agent_id = MagicMock(return_value="agent_id_1")
sp._get_agent_key = MagicMock(return_value=("agent_key", False, None))
@@ -1780,6 +1779,7 @@ class TestConfigureAgentAdditionalPaths:
"source": "my_source",
})
sp._configure_agent()
sp._configure_source()
assert sp.source == {"active_docs": "my_source"}
@pytest.mark.unit
@@ -2067,7 +2067,7 @@ class TestPreFetchDocsFullPaths:
"chunks": 2,
"doc_token_limit": 50000,
}
sp.source = {}
sp.source = {"active_docs": ["src1"]}
sp.model_id = "test-model"
sp.agent_id = None
return sp

View File

@@ -20,133 +20,49 @@ def test_epub_init_parser():
assert parser.parser_config_set
def test_epub_parser_ebooklib_import_error(epub_parser):
"""Test that ImportError is raised when ebooklib is not available."""
with patch.dict(sys.modules, {"ebooklib": None}):
with pytest.raises(ValueError, match="`EbookLib` is required to read Epub files"):
def test_epub_parser_fast_ebook_import_error(epub_parser):
"""Test that ImportError is raised when fast-ebook is not available."""
with patch.dict(sys.modules, {"fast_ebook": None}):
with pytest.raises(ValueError, match="`fast-ebook` is required to read Epub files"):
epub_parser.parse_file(Path("test.epub"))
def test_epub_parser_html2text_import_error(epub_parser):
"""Test that ImportError is raised when html2text is not available."""
fake_ebooklib = types.ModuleType("ebooklib")
fake_epub = types.ModuleType("ebooklib.epub")
fake_ebooklib.epub = fake_epub
with patch.dict(sys.modules, {"ebooklib": fake_ebooklib, "ebooklib.epub": fake_epub}):
with patch.dict(sys.modules, {"html2text": None}):
with pytest.raises(ValueError, match="`html2text` is required to parse Epub files"):
epub_parser.parse_file(Path("test.epub"))
def test_epub_parser_successful_parsing(epub_parser):
"""Test successful parsing of an epub file."""
fake_fast_ebook = types.ModuleType("fast_ebook")
fake_epub = types.ModuleType("fast_ebook.epub")
fake_fast_ebook.epub = fake_epub
fake_ebooklib = types.ModuleType("ebooklib")
fake_epub = types.ModuleType("ebooklib.epub")
fake_html2text = types.ModuleType("html2text")
# Mock ebooklib constants
fake_ebooklib.ITEM_DOCUMENT = "document"
fake_ebooklib.epub = fake_epub
mock_item1 = MagicMock()
mock_item1.get_type.return_value = "document"
mock_item1.get_content.return_value = b"<h1>Chapter 1</h1><p>Content 1</p>"
mock_item2 = MagicMock()
mock_item2.get_type.return_value = "document"
mock_item2.get_content.return_value = b"<h1>Chapter 2</h1><p>Content 2</p>"
mock_item3 = MagicMock()
mock_item3.get_type.return_value = "other" # Should be ignored
mock_item3.get_content.return_value = b"<p>Other content</p>"
mock_book = MagicMock()
mock_book.get_items.return_value = [mock_item1, mock_item2, mock_item3]
mock_book.to_markdown.return_value = "# Chapter 1\n\nContent 1\n\n# Chapter 2\n\nContent 2\n"
fake_epub.read_epub = MagicMock(return_value=mock_book)
def mock_html2text_func(html_content):
if "Chapter 1" in html_content:
return "# Chapter 1\n\nContent 1\n"
elif "Chapter 2" in html_content:
return "# Chapter 2\n\nContent 2\n"
return "Other content\n"
fake_html2text.html2text = mock_html2text_func
with patch.dict(sys.modules, {
"ebooklib": fake_ebooklib,
"ebooklib.epub": fake_epub,
"html2text": fake_html2text
"fast_ebook": fake_fast_ebook,
"fast_ebook.epub": fake_epub,
}):
result = epub_parser.parse_file(Path("test.epub"))
expected_result = "# Chapter 1\n\nContent 1\n\n# Chapter 2\n\nContent 2\n"
assert result == expected_result
# Verify epub.read_epub was called with correct parameters
fake_epub.read_epub.assert_called_once_with(Path("test.epub"), options={"ignore_ncx": True})
assert result == "# Chapter 1\n\nContent 1\n\n# Chapter 2\n\nContent 2\n"
fake_epub.read_epub.assert_called_once_with(Path("test.epub"))
def test_epub_parser_empty_book(epub_parser):
"""Test parsing an epub file with no document items."""
# Create mock modules
fake_ebooklib = types.ModuleType("ebooklib")
fake_epub = types.ModuleType("ebooklib.epub")
fake_html2text = types.ModuleType("html2text")
fake_ebooklib.ITEM_DOCUMENT = "document"
fake_ebooklib.epub = fake_epub
# Create mock book with no document items
"""Test parsing an epub file with no content."""
fake_fast_ebook = types.ModuleType("fast_ebook")
fake_epub = types.ModuleType("fast_ebook.epub")
fake_fast_ebook.epub = fake_epub
mock_book = MagicMock()
mock_book.get_items.return_value = []
mock_book.to_markdown.return_value = ""
fake_epub.read_epub = MagicMock(return_value=mock_book)
fake_html2text.html2text = MagicMock()
with patch.dict(sys.modules, {
"ebooklib": fake_ebooklib,
"ebooklib.epub": fake_epub,
"html2text": fake_html2text
"fast_ebook": fake_fast_ebook,
"fast_ebook.epub": fake_epub,
}):
result = epub_parser.parse_file(Path("empty.epub"))
assert result == ""
fake_html2text.html2text.assert_not_called()
def test_epub_parser_non_document_items_ignored(epub_parser):
"""Test that non-document items are ignored during parsing."""
fake_ebooklib = types.ModuleType("ebooklib")
fake_epub = types.ModuleType("ebooklib.epub")
fake_html2text = types.ModuleType("html2text")
fake_ebooklib.ITEM_DOCUMENT = "document"
fake_ebooklib.epub = fake_epub
mock_doc_item = MagicMock()
mock_doc_item.get_type.return_value = "document"
mock_doc_item.get_content.return_value = b"<p>Document content</p>"
mock_other_item = MagicMock()
mock_other_item.get_type.return_value = "image" # Not a document
mock_book = MagicMock()
mock_book.get_items.return_value = [mock_other_item, mock_doc_item]
fake_epub.read_epub = MagicMock(return_value=mock_book)
fake_html2text.html2text = MagicMock(return_value="Document content\n")
with patch.dict(sys.modules, {
"ebooklib": fake_ebooklib,
"ebooklib.epub": fake_epub,
"html2text": fake_html2text
}):
result = epub_parser.parse_file(Path("test.epub"))
assert result == "Document content\n"
fake_html2text.html2text.assert_called_once_with("<p>Document content</p>")

View File

@@ -11,6 +11,7 @@ import pytest
from application.api.v1.translator import (
_get_client_tool_name,
convert_history,
extract_system_prompt,
extract_tool_results,
is_continuation,
translate_request,
@@ -148,6 +149,48 @@ class TestConvertHistory:
assert history == []
# ---------------------------------------------------------------------------
# extract_system_prompt
# ---------------------------------------------------------------------------
@pytest.mark.unit
class TestExtractSystemPrompt:
def test_extracts_first_system_message(self):
messages = [
{"role": "system", "content": "You are a pirate"},
{"role": "user", "content": "Hello"},
]
assert extract_system_prompt(messages) == "You are a pirate"
def test_returns_none_when_no_system_message(self):
messages = [{"role": "user", "content": "Hello"}]
assert extract_system_prompt(messages) is None
def test_returns_first_of_multiple_system_messages(self):
messages = [
{"role": "system", "content": "First"},
{"role": "system", "content": "Second"},
{"role": "user", "content": "Hello"},
]
assert extract_system_prompt(messages) == "First"
def test_empty_content_returns_empty_string(self):
messages = [
{"role": "system", "content": ""},
{"role": "user", "content": "Hello"},
]
assert extract_system_prompt(messages) == ""
def test_missing_content_returns_empty_string(self):
messages = [
{"role": "system"},
{"role": "user", "content": "Hello"},
]
assert extract_system_prompt(messages) == ""
# ---------------------------------------------------------------------------
# translate_request
# ---------------------------------------------------------------------------
@@ -167,11 +210,25 @@ class TestTranslateRequest:
result = translate_request(data, "test-key")
assert result["question"] == "What's 2+2?"
assert result["api_key"] == "test-key"
assert result["save_conversation"] is True
# Conversations are not persisted by default on the v1 endpoint.
assert result["save_conversation"] is False
history = json.loads(result["history"])
assert len(history) == 1
assert history[0]["prompt"] == "Hello"
def test_save_conversation_opt_in_via_docsgpt_extension(self):
data = {
"messages": [{"role": "user", "content": "Hi"}],
"docsgpt": {"save_conversation": True},
}
result = translate_request(data, "key")
assert result["save_conversation"] is True
def test_save_conversation_default_false(self):
data = {"messages": [{"role": "user", "content": "Hi"}]}
result = translate_request(data, "key")
assert result["save_conversation"] is False
def test_continuation_request(self):
data = {
"messages": [
@@ -237,6 +294,23 @@ class TestTranslateRequest:
result = translate_request(data, "key")
assert result["attachments"] == ["att1", "att2"]
def test_system_prompt_override_included_when_present(self):
data = {
"messages": [
{"role": "system", "content": "Custom prompt"},
{"role": "user", "content": "Hello"},
],
}
result = translate_request(data, "key")
assert result["system_prompt_override"] == "Custom prompt"
def test_system_prompt_override_absent_when_no_system_message(self):
data = {
"messages": [{"role": "user", "content": "Hello"}],
}
result = translate_request(data, "key")
assert "system_prompt_override" not in result
# ---------------------------------------------------------------------------
# translate_response