Crawler + sitemap

Pavel
2023-10-12 12:35:26 +04:00
parent 658867cb46
commit c517bdd2e1
2 changed files with 63 additions and 0 deletions

@@ -0,0 +1,36 @@
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup

from application.parser.remote.base import BaseRemote


class CrawlerLoader(BaseRemote):
    def __init__(self):
        from langchain.document_loaders import WebBaseLoader
        self.loader = WebBaseLoader

    def load_data(self, url):
        # Fetch the content of the initial URL
        response = requests.get(url)
        if response.status_code != 200:
            print(f"Failed to fetch initial URL: {url}")
            return None

        # Parse the HTML content
        soup = BeautifulSoup(response.text, 'html.parser')

        # Extract the base URL to ensure we only fetch URLs from the same domain
        base_url = urlparse(url).scheme + "://" + urlparse(url).hostname

        # Extract all links from the HTML content
        all_links = [a['href'] for a in soup.find_all('a', href=True)]

        # Filter out the links that lead to a different domain
        same_domain_links = [urljoin(base_url, link) for link in all_links if base_url in urljoin(base_url, link)]

        # Remove duplicates
        same_domain_links = list(set(same_domain_links))

        # TODO: Optimize this section to parse pages as they are being crawled
        loaded_content = self.loader(same_domain_links).load()
        return loaded_content
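
A minimal usage sketch for the crawler, assuming the class above is importable; the module path and start URL below are illustrative, not taken from this commit. WebBaseLoader returns langchain Document objects, so the result can be iterated directly.

# Hypothetical usage sketch; the import path and URL are assumptions, not part of this commit.
from application.parser.remote.crawler_loader import CrawlerLoader  # illustrative module path

crawler = CrawlerLoader()
docs = crawler.load_data("https://docs.example.com/")

if docs is None:
    print("Initial URL could not be fetched")
else:
    for doc in docs:
        # each langchain Document carries page_content and metadata (including the source URL)
        print(doc.metadata.get("source"), len(doc.page_content))

Note that load_data collects every same-domain link found on the start page and hands the whole list to WebBaseLoader in a single pass, which is what the TODO about parsing pages as they are crawled refers to.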

@@ -0,0 +1,27 @@
import requests
import xml.etree.ElementTree as ET

from application.parser.remote.base import BaseRemote


class SitemapLoader(BaseRemote):
    def __init__(self):
        from langchain.document_loaders import WebBaseLoader
        self.loader = WebBaseLoader

    def load_data(self, sitemap_url):
        # Fetch the sitemap content
        response = requests.get(sitemap_url)
        if response.status_code != 200:
            print(f"Failed to fetch sitemap: {sitemap_url}")
            return None

        # Parse the sitemap XML
        root = ET.fromstring(response.content)

        # Extract URLs from the sitemap; the "loc" tags live under the standard
        # sitemap namespace, so it must be passed to findall()
        ns = {'s': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
        urls = [loc.text for loc in root.findall('s:url/s:loc', ns)]

        # Use the WebBaseLoader to load the content of the extracted URLs
        loader = self.loader(urls)
        return loader.load()
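
A similar sketch for the sitemap loader, again with an illustrative import path and sitemap URL:

# Hypothetical usage sketch; the import path and sitemap URL are assumptions.
from application.parser.remote.sitemap_loader import SitemapLoader  # illustrative module path

sitemap_loader = SitemapLoader()
docs = sitemap_loader.load_data("https://docs.example.com/sitemap.xml")

if docs is None:
    print("Sitemap could not be fetched")
else:
    print(f"Loaded {len(docs)} pages listed in the sitemap")

Worth noting: the loader only reads flat urlset sitemaps; nested sitemap index files (sitemapindex entries pointing to child sitemaps) are not followed.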