mirror of
https://github.com/arc53/DocsGPT.git
synced 2026-05-07 06:30:03 +00:00
* feat: postgres tests * feat: mongo cutoff * feat: mongo cutoff * feat: adjust docs and compose files * fix: mini code mongo removals * fix: tests and k8s mongo stuff * feat: test fixes * fix: ruff * fix: vale * Potential fix for pull request finding 'CodeQL / Clear-text logging of sensitive information' Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com> * fix: mini suggestions * vale lint fix 2 * fix: codeql columns thing * fix: test mongo * fix: tests coverage * feat: better tests 4 * feat: more tests * feat: decent coverage * fix: ruff fixes * fix: remove mongo mock * feat: enhance workflow engine and API routes; add document retrieval and source handling * feat: e2e tests * fix: mcp, mongo and more * fix: mini codeql warning * fix: agent chunk view * fix: mini issues * fix: more pg fixes * feat: postgres prep on start * feat: qa tests * fix: mini improvements * fix: tests --------- Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com> Co-authored-by: Siddhant Rai <siddhant.rai.5686@gmail.com>
50 lines
1.6 KiB
Python
50 lines
1.6 KiB
Python
import logging
|
|
from application.core.url_validation import SSRFError, validate_url
|
|
from application.parser.remote.base import BaseRemote
|
|
from application.parser.schema.base import Document
|
|
from langchain_community.document_loaders import WebBaseLoader
|
|
|
|
# Browser-like request headers sent with every page fetch so that sites
# which reject obvious bot traffic are more likely to serve real content.
headers = {
    "User-Agent": "Mozilla/5.0",
    "Accept": (
        "text/html,application/xhtml+xml,application/xml;q=0.9,"
        "image/webp,*/*;q=0.8"
    ),
    "Accept-Language": "en-US,en;q=0.5",
    "Referer": "https://www.google.com/",
    "DNT": "1",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
}
|
|
|
|
|
|
class WebLoader(BaseRemote):
    """Remote loader that fetches web pages with LangChain's ``WebBaseLoader``.

    Each URL is SSRF-validated before fetching; URLs that fail validation or
    raise during loading are logged and skipped so one bad URL does not abort
    the whole batch.
    """

    def __init__(self):
        # Keep the loader *class*; a fresh instance is built per URL so each
        # fetch gets its own header template.
        self.loader = WebBaseLoader

    def load_data(self, inputs):
        """Fetch ``inputs`` (one URL string or a list of URLs).

        Returns a list of ``Document`` objects, one per page successfully
        retrieved; failed URLs are skipped with a log message.
        """
        url_list = [inputs] if isinstance(inputs, str) else inputs

        collected = []
        for raw_url in url_list:
            # Reject private/internal addresses before any network access.
            try:
                safe_url = validate_url(raw_url)
            except SSRFError as e:
                logging.warning(
                    f"Skipping URL due to SSRF validation failure: {raw_url} - {e}"
                )
                continue

            # Best-effort fetch: any failure (network, parse, construction)
            # is logged with a traceback and the loop moves on.
            try:
                page_loader = self.loader([safe_url], header_template=headers)
                for page in page_loader.load():
                    collected.append(
                        Document(
                            page.page_content,
                            extra_info=page.metadata,
                        )
                    )
            except Exception as e:
                logging.error(f"Error processing URL {safe_url}: {e}", exc_info=True)
                continue

        return collected
|