No crawler, no sitemap

This commit is contained in:
Pavel
2023-10-12 01:03:40 +04:00
parent 8f2ad38503
commit 658867cb46
7 changed files with 76 additions and 1 deletion

View File

@@ -9,6 +9,7 @@ import requests
from application.core.settings import settings
from application.parser.file.bulk import SimpleDirectoryReader
from application.parser.remote.remote_creator import RemoteCreator
from application.parser.open_ai_func import call_openai_api
from application.parser.schema.base import Document
from application.parser.token_func import group_split
@@ -104,3 +105,49 @@ def ingest_worker(self, directory, formats, name_job, filename, user):
'user': user,
'limited': False
}
def remote_worker(self, urls, name_job, user, directory='temp', loader='url'):
    """Ingest documents from remote URLs: load, split, embed, upload the index.

    Loads raw documents via the configured remote loader, token-splits them,
    sends them through the embedding pipeline, uploads the resulting index to
    the API, and removes the local working directory.

    Args:
        self: Celery task instance (used for ``update_state`` progress reports).
        urls: URL(s) to fetch documents from (passed straight to the loader).
        name_job: Job name; used both in the working path and upload metadata.
        user: User identifier; used both in the working path and upload metadata.
        directory: Root of the temporary working area (default ``'temp'``).
        loader: Remote loader type for ``RemoteCreator`` (default ``'url'``).

    Returns:
        dict with the job's ``urls``, ``name_job``, ``user`` and ``limited`` flag.
    """
    sample = False
    token_check = True
    min_tokens = 150
    max_tokens = 1250
    full_path = directory + '/' + user + '/' + name_job

    # exist_ok avoids the TOCTOU race between an exists() check and makedirs()
    os.makedirs(full_path, exist_ok=True)
    self.update_state(state='PROGRESS', meta={'current': 1})

    # Use RemoteCreator to load data from URL
    remote_loader = RemoteCreator.create_loader(loader, urls)
    raw_docs = remote_loader.load_data()

    raw_docs = group_split(documents=raw_docs, min_tokens=min_tokens,
                           max_tokens=max_tokens, token_check=token_check)
    docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]
    call_openai_api(docs, full_path, self)
    self.update_state(state='PROGRESS', meta={'current': 100})

    if sample:
        # Debug aid: print a few of the split documents.
        for raw_doc in raw_docs[:5]:
            print(raw_doc.text)

    # Proceed with uploading and cleaning as in the original function
    file_data = {'name': name_job, 'user': user}
    if settings.VECTOR_STORE == "faiss":
        # Context managers ensure the index file handles are closed even if
        # the upload request raises (the previous version leaked them).
        with open(full_path + '/index.faiss', 'rb') as faiss_file, \
                open(full_path + '/index.pkl', 'rb') as pkl_file:
            files = {'file_faiss': faiss_file, 'file_pkl': pkl_file}
            requests.post(urljoin(settings.API_URL, "/api/upload_index"),
                          files=files, data=file_data)
        # NOTE(review): full_path is embedded unescaped in the query string;
        # presumably path components never need URL-encoding — verify.
        requests.get(urljoin(settings.API_URL, "/api/delete_old?path=" + full_path))
    else:
        requests.post(urljoin(settings.API_URL, "/api/upload_index"), data=file_data)

    shutil.rmtree(full_path)
    return {
        'urls': urls,
        'name_job': name_job,
        'user': user,
        'limited': False
    }