From c49b7613e03174a56cb5da76f643bd04dada627e Mon Sep 17 00:00:00 2001
From: Alex
Date: Sat, 31 Aug 2024 12:53:37 +0100
Subject: [PATCH] fix: langchain warning

---
 application/parser/remote/crawler_loader.py | 2 +-
 application/parser/remote/sitemap_loader.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/application/parser/remote/crawler_loader.py b/application/parser/remote/crawler_loader.py
index 2a63f284..76325ae6 100644
--- a/application/parser/remote/crawler_loader.py
+++ b/application/parser/remote/crawler_loader.py
@@ -5,7 +5,7 @@ from application.parser.remote.base import BaseRemote
 
 class CrawlerLoader(BaseRemote):
     def __init__(self, limit=10):
-        from langchain.document_loaders import WebBaseLoader
+        from langchain_community.document_loaders import WebBaseLoader
         self.loader = WebBaseLoader  # Initialize the document loader
         self.limit = limit  # Set the limit for the number of pages to scrape
 
diff --git a/application/parser/remote/sitemap_loader.py b/application/parser/remote/sitemap_loader.py
index 6e9182c4..8066f4f6 100644
--- a/application/parser/remote/sitemap_loader.py
+++ b/application/parser/remote/sitemap_loader.py
@@ -5,7 +5,7 @@ from application.parser.remote.base import BaseRemote
 
 class SitemapLoader(BaseRemote):
     def __init__(self, limit=20):
-        from langchain.document_loaders import WebBaseLoader
+        from langchain_community.document_loaders import WebBaseLoader
         self.loader = WebBaseLoader
         self.limit = limit  # Adding limit to control the number of URLs to process
 