Mirror of https://github.com/arc53/DocsGPT.git (synced 2026-01-26 17:00:32 +00:00)
Merge branch 'arc53:main' into main
@@ -1,7 +1,7 @@
 import asyncio
 import os
 import sys
-from flask import Blueprint, request, Response
+from flask import Blueprint, request, Response, current_app
 import json
 import datetime
 import logging
@@ -267,6 +267,10 @@ def stream():
         else:
             retriever_name = source["active_docs"]

+    current_app.logger.info(f"/stream - request_data: {data}, source: {source}",
+        extra={"data": json.dumps({"request_data": data, "source": source})}
+    )
+
     prompt = get_prompt(prompt_id)

     retriever = RetrieverCreator.create_retriever(
@@ -301,7 +305,9 @@ def stream():
             mimetype="text/event-stream",
         )
     except Exception as e:
-        print("\033[91merr", str(e), file=sys.stderr)
+        current_app.logger.error(f"/stream - error: {str(e)} - traceback: {traceback.format_exc()}",
+            extra={"error": str(e), "traceback": traceback.format_exc()}
+        )
         message = e.args[0]
         status_code = 400
         # # Custom exceptions with two arguments, index 1 as status code
@@ -345,7 +351,6 @@ def api_answer():
     else:
         token_limit = settings.DEFAULT_MAX_HISTORY

-    # use try and except to check for exception
     try:
         # check if the vectorstore is set
         if "api_key" in data:
@@ -365,6 +370,10 @@ def api_answer():

         prompt = get_prompt(prompt_id)

+        current_app.logger.info(f"/api/answer - request_data: {data}, source: {source}",
+            extra={"data": json.dumps({"request_data": data, "source": source})}
+        )
+
         retriever = RetrieverCreator.create_retriever(
             retriever_name,
             question=question,
@@ -399,9 +408,9 @@ def api_answer():

         return result
     except Exception as e:
-        # print whole traceback
-        traceback.print_exc()
-        print(str(e))
+        current_app.logger.error(f"/api/answer - error: {str(e)} - traceback: {traceback.format_exc()}",
+            extra={"error": str(e), "traceback": traceback.format_exc()}
+        )
         return bad_request(500, str(e))

@@ -433,6 +442,10 @@ def api_search():
         token_limit = data["token_limit"]
     else:
         token_limit = settings.DEFAULT_MAX_HISTORY

+    current_app.logger.info(f"/api/answer - request_data: {data}, source: {source}",
+        extra={"data": json.dumps({"request_data": data, "source": source})}
+    )
+
     retriever = RetrieverCreator.create_retriever(
         retriever_name,
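These hunks share one pattern: ad-hoc print() debugging is replaced by current_app.logger calls that attach a structured `extra` dict to the LogRecord. Note that the default formatter this commit introduces (see logging_config.py below) prints only the message, not the extras; surfacing them would take a structured formatter along these lines — a minimal sketch, where JsonFormatter and the fixed field list are assumptions, not part of this commit:

import json
import logging

class JsonFormatter(logging.Formatter):
    # Emit the record as JSON, including any "data"/"error"/"traceback"
    # attributes that logger calls attached via extra={...}.
    def format(self, record):
        payload = {"level": record.levelname, "message": record.getMessage()}
        for field in ("data", "error", "traceback"):
            if hasattr(record, field):
                payload[field] = getattr(record, field)
        return json.dumps(payload)

handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
root = logging.getLogger()
root.setLevel(logging.INFO)
root.addHandler(handler)
root.info("/stream - request", extra={"data": json.dumps({"q": "hi"})})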
@@ -6,12 +6,14 @@ from application.core.settings import settings
 from application.api.user.routes import user
 from application.api.answer.routes import answer
 from application.api.internal.routes import internal
+from application.core.logging_config import setup_logging

 if platform.system() == "Windows":
     import pathlib
     pathlib.PosixPath = pathlib.WindowsPath

 dotenv.load_dotenv()
+setup_logging()

 app = Flask(__name__)
 app.register_blueprint(user)
@@ -1,9 +1,15 @@
 from celery import Celery
 from application.core.settings import settings
+from celery.signals import setup_logging

 def make_celery(app_name=__name__):
     celery = Celery(app_name, broker=settings.CELERY_BROKER_URL, backend=settings.CELERY_RESULT_BACKEND)
     celery.conf.update(settings)
     return celery

+@setup_logging.connect
+def config_loggers(*args, **kwargs):
+    from application.core.logging_config import setup_logging
+    setup_logging()
+
 celery = make_celery()
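Connecting a receiver to Celery's setup_logging signal also stops Celery from installing its own logger configuration at worker startup, so the dictConfig from application.core.logging_config applies inside workers as well — which is what lets the plain logging.info()/logging.warning() calls added to worker.py further down share the Flask app's console format.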
application/core/logging_config.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+from logging.config import dictConfig
+
+def setup_logging():
+    dictConfig({
+        'version': 1,
+        'formatters': {
+            'default': {
+                'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
+            }
+        },
+        "handlers": {
+            "console": {
+                "class": "logging.StreamHandler",
+                "stream": "ext://sys.stdout",
+                "formatter": "default",
+            }
+        },
+        'root': {
+            'level': 'INFO',
+            'handlers': ['console'],
+        },
+    })
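Once setup_logging() has run, the console handler lives on the root logger, so any module logger that propagates (the default) inherits it. A quick sketch of the intended call order — the messages are illustrative:

import logging
from application.core.logging_config import setup_logging

setup_logging()  # dictConfig installs the console handler on the root logger

log = logging.getLogger(__name__)   # module loggers propagate to root
log.info("worker booted")           # -> [timestamp] INFO in <module>: worker booted
log.debug("suppressed: root level is INFO")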
@@ -29,6 +29,7 @@ class Settings(BaseSettings):
     OPENAI_API_VERSION: Optional[str] = None  # azure openai api version
     AZURE_DEPLOYMENT_NAME: Optional[str] = None  # azure deployment name for answering
     AZURE_EMBEDDINGS_DEPLOYMENT_NAME: Optional[str] = None  # azure deployment name for embeddings
+    OPENAI_BASE_URL: Optional[str] = None  # openai base url for open ai compatable models

     # elasticsearch
     ELASTIC_CLOUD_ID: Optional[str] = None  # cloud id for elasticsearch
@@ -2,25 +2,23 @@ from application.llm.base import BaseLLM
 from application.core.settings import settings



 class OpenAILLM(BaseLLM):

     def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
-        global openai
         from openai import OpenAI

         super().__init__(*args, **kwargs)
-        self.client = OpenAI(
-            api_key=api_key,
-        )
+        if settings.OPENAI_BASE_URL:
+            self.client = OpenAI(
+                api_key=api_key,
+                base_url=settings.OPENAI_BASE_URL
+            )
+        else:
+            self.client = OpenAI(api_key=api_key)
         self.api_key = api_key
         self.user_api_key = user_api_key

-    def _get_openai(self):
-        # Import openai when needed
-        import openai
-
-        return openai
-
     def _raw_gen(
         self,
         baseself,
@@ -29,7 +27,7 @@ class OpenAILLM(BaseLLM):
         stream=False,
         engine=settings.AZURE_DEPLOYMENT_NAME,
         **kwargs
-        ):
+    ):
         response = self.client.chat.completions.create(
             model=model, messages=messages, stream=stream, **kwargs
         )
@@ -44,7 +42,7 @@ class OpenAILLM(BaseLLM):
         stream=True,
         engine=settings.AZURE_DEPLOYMENT_NAME,
         **kwargs
-        ):
+    ):
         response = self.client.chat.completions.create(
             model=model, messages=messages, stream=stream, **kwargs
        )
@@ -73,8 +71,3 @@ class AzureOpenAILLM(OpenAILLM):
             api_base=settings.OPENAI_API_BASE,
             deployment_name=settings.AZURE_DEPLOYMENT_NAME,
         )

-    def _get_openai(self):
-        openai = super()._get_openai()
-
-        return openai
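The if/else added to OpenAILLM.__init__ is the whole base-URL substitution mechanism: the openai client targets OPENAI_BASE_URL when it is set and api.openai.com otherwise. The same pattern in isolation — a minimal sketch, assuming an OpenAI-compatible server at the example URL and an API_KEY environment variable, neither of which this commit ships:

import os
from openai import OpenAI

# e.g. OPENAI_BASE_URL=http://localhost:8000/v1 for a self-hosted server
base_url = os.environ.get("OPENAI_BASE_URL")
if base_url:
    client = OpenAI(api_key=os.environ.get("API_KEY"), base_url=base_url)
else:
    client = OpenAI(api_key=os.environ.get("API_KEY"))

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)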
@@ -5,7 +5,7 @@ from application.parser.remote.base import BaseRemote

 class CrawlerLoader(BaseRemote):
     def __init__(self, limit=10):
-        from langchain.document_loaders import WebBaseLoader
+        from langchain_community.document_loaders import WebBaseLoader
         self.loader = WebBaseLoader  # Initialize the document loader
         self.limit = limit  # Set the limit for the number of pages to scrape
@@ -5,7 +5,7 @@ from application.parser.remote.base import BaseRemote

 class SitemapLoader(BaseRemote):
     def __init__(self, limit=20):
-        from langchain.document_loaders import WebBaseLoader
+        from langchain_community.document_loaders import WebBaseLoader
         self.loader = WebBaseLoader
         self.limit = limit  # Adding limit to control the number of URLs to process
@@ -9,15 +9,15 @@ EbookLib==0.18
 elasticsearch==8.14.0
 escodegen==1.0.11
 esprima==4.0.1
-faiss-cpu==1.7.4
+faiss-cpu==1.8.0
 Flask==3.0.1
 gunicorn==23.0.0
 html2text==2020.1.16
 javalang==0.13.0
 langchain==0.1.4
 langchain-openai==0.0.5
 openapi3_parser==1.1.16
-pandas==2.2.0
+pandas==2.2.2
 pydantic_settings==2.4.0
 pymongo==4.8.0
 PyPDF2==3.0.1
@@ -2,7 +2,7 @@ import json
 from application.retriever.base import BaseRetriever
 from application.core.settings import settings
 from application.llm.llm_creator import LLMCreator
-from application.utils import count_tokens
+from application.utils import num_tokens_from_string
 from langchain_community.tools import BraveSearch


@@ -78,7 +78,7 @@ class BraveRetSearch(BaseRetriever):
         self.chat_history.reverse()
         for i in self.chat_history:
             if "prompt" in i and "response" in i:
-                tokens_batch = count_tokens(i["prompt"]) + count_tokens(
+                tokens_batch = num_tokens_from_string(i["prompt"]) + num_tokens_from_string(
                     i["response"]
                 )
                 if tokens_current_history + tokens_batch < self.token_limit:
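This count_tokens → num_tokens_from_string swap repeats in the next two retrievers; all three share the same budget loop, walking the reversed chat history and keeping prompt/response pairs while they fit under token_limit. The loop extracted as a standalone sketch — the function name and return value are mine, the logic mirrors the hunk above:

from application.utils import num_tokens_from_string

def trim_history(chat_history, token_limit):
    # Most recent turns first; keep pairs while the running total stays under budget.
    kept, used = [], 0
    for turn in reversed(chat_history):
        if "prompt" in turn and "response" in turn:
            cost = num_tokens_from_string(turn["prompt"]) + num_tokens_from_string(
                turn["response"]
            )
            if used + cost < token_limit:
                used += cost
                kept.append(turn)
    return kept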
@@ -4,7 +4,7 @@ from application.core.settings import settings
 from application.vectorstore.vector_creator import VectorCreator
 from application.llm.llm_creator import LLMCreator

-from application.utils import count_tokens
+from application.utils import num_tokens_from_string


 class ClassicRAG(BaseRetriever):
@@ -98,7 +98,7 @@ class ClassicRAG(BaseRetriever):
         self.chat_history.reverse()
         for i in self.chat_history:
             if "prompt" in i and "response" in i:
-                tokens_batch = count_tokens(i["prompt"]) + count_tokens(
+                tokens_batch = num_tokens_from_string(i["prompt"]) + num_tokens_from_string(
                     i["response"]
                 )
                 if tokens_current_history + tokens_batch < self.token_limit:
@@ -1,7 +1,7 @@
 from application.retriever.base import BaseRetriever
 from application.core.settings import settings
 from application.llm.llm_creator import LLMCreator
-from application.utils import count_tokens
+from application.utils import num_tokens_from_string
 from langchain_community.tools import DuckDuckGoSearchResults
 from langchain_community.utilities import DuckDuckGoSearchAPIWrapper

@@ -95,7 +95,7 @@ class DuckDuckSearch(BaseRetriever):
         self.chat_history.reverse()
         for i in self.chat_history:
             if "prompt" in i and "response" in i:
-                tokens_batch = count_tokens(i["prompt"]) + count_tokens(
+                tokens_batch = num_tokens_from_string(i["prompt"]) + num_tokens_from_string(
                     i["response"]
                 )
                 if tokens_current_history + tokens_batch < self.token_limit:
@@ -2,7 +2,7 @@ import sys
 from pymongo import MongoClient
 from datetime import datetime
 from application.core.settings import settings
-from application.utils import count_tokens
+from application.utils import num_tokens_from_string

 mongo = MongoClient(settings.MONGO_URI)
 db = mongo["docsgpt"]
@@ -24,9 +24,9 @@ def update_token_usage(user_api_key, token_usage):
 def gen_token_usage(func):
     def wrapper(self, model, messages, stream, **kwargs):
         for message in messages:
-            self.token_usage["prompt_tokens"] += count_tokens(message["content"])
+            self.token_usage["prompt_tokens"] += num_tokens_from_string(message["content"])
         result = func(self, model, messages, stream, **kwargs)
-        self.token_usage["generated_tokens"] += count_tokens(result)
+        self.token_usage["generated_tokens"] += num_tokens_from_string(result)
         update_token_usage(self.user_api_key, self.token_usage)
         return result
@@ -36,14 +36,14 @@ def gen_token_usage(func):
 def stream_token_usage(func):
     def wrapper(self, model, messages, stream, **kwargs):
         for message in messages:
-            self.token_usage["prompt_tokens"] += count_tokens(message["content"])
+            self.token_usage["prompt_tokens"] += num_tokens_from_string(message["content"])
         batch = []
         result = func(self, model, messages, stream, **kwargs)
         for r in result:
             batch.append(r)
             yield r
         for line in batch:
-            self.token_usage["generated_tokens"] += count_tokens(line)
+            self.token_usage["generated_tokens"] += num_tokens_from_string(line)
         update_token_usage(self.user_api_key, self.token_usage)

     return wrapper
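stream_token_usage has a subtlety worth noting: the wrapped generator's chunks are consumed by the caller, so the wrapper re-yields each chunk while keeping a copy, and only counts once the stream is exhausted. The same buffering shape in isolation (names are mine, not the application's):

def counting_stream(gen, count_fn):
    # Re-yield chunks while buffering them, then count the buffered text
    # after the stream ends -- mirroring stream_token_usage above.
    batch = []
    for chunk in gen:
        batch.append(chunk)
        yield chunk
    print("generated_tokens:", sum(count_fn(c) for c in batch))

# usage sketch:
# for piece in counting_stream(llm.gen_stream(...), num_tokens_from_string):
#     print(piece, end="")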
@@ -1,6 +1,22 @@
-from transformers import GPT2TokenizerFast
+import tiktoken

-tokenizer = GPT2TokenizerFast.from_pretrained('gpt2')
-tokenizer.model_max_length = 100000
-def count_tokens(string):
-    return len(tokenizer(string)['input_ids'])
+_encoding = None
+
+def get_encoding():
+    global _encoding
+    if _encoding is None:
+        _encoding = tiktoken.get_encoding("cl100k_base")
+    return _encoding
+
+def num_tokens_from_string(string: str) -> int:
+    encoding = get_encoding()
+    num_tokens = len(encoding.encode(string))
+    return num_tokens
+
+def count_tokens_docs(docs):
+    docs_content = ""
+    for doc in docs:
+        docs_content += doc.page_content
+
+    tokens = num_tokens_from_string(docs_content)
+    return tokens
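The rewrite drops the GPT-2 tokenizer from transformers in favor of tiktoken's cl100k_base encoding — the encoding used by the OpenAI chat models these counts meter — and lazily caches it in a module-level singleton, since the first get_encoding() call has to load the BPE tables. Callers only change the function name:

from application.utils import num_tokens_from_string, count_tokens_docs

print(num_tokens_from_string("How do I configure DocsGPT?"))

# count_tokens_docs expects objects exposing page_content; a stand-in class
# for illustration:
class Doc:
    def __init__(self, page_content):
        self.page_content = page_content

print(count_tokens_docs([Doc("hello "), Doc("world")]))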
@@ -2,8 +2,8 @@ import os
 import shutil
 import string
 import zipfile
-import tiktoken
 from urllib.parse import urljoin
+import logging

 import requests
@@ -13,6 +13,8 @@ from application.parser.remote.remote_creator import RemoteCreator
 from application.parser.open_ai_func import call_openai_api
 from application.parser.schema.base import Document
 from application.parser.token_func import group_split
+from application.utils import count_tokens_docs


 # Define a function to extract metadata from a given filename.
 def metadata_from_filename(title):
@@ -41,7 +43,7 @@ def extract_zip_recursive(zip_path, extract_to, current_depth=0, max_depth=5):
         max_depth (int): Maximum allowed depth of recursion to prevent infinite loops.
     """
     if current_depth > max_depth:
-        print(f"Reached maximum recursion depth of {max_depth}")
+        logging.warning(f"Reached maximum recursion depth of {max_depth}")
         return

     with zipfile.ZipFile(zip_path, "r") as zip_ref:
@@ -88,16 +90,13 @@ def ingest_worker(self, directory, formats, name_job, filename, user):
     max_tokens = 1250
     recursion_depth = 2
     full_path = os.path.join(directory, user, name_job)
-    import sys
-
-    print(full_path, file=sys.stderr)
+    logging.info(f"Ingest file: {full_path}", extra={"user": user, "job": name_job})
     # check if API_URL env variable is set
     file_data = {"name": name_job, "file": filename, "user": user}
     response = requests.get(
         urljoin(settings.API_URL, "/api/download"), params=file_data
     )
     # check if file is in the response
-    print(response, file=sys.stderr)
     file = response.content

     if not os.path.exists(full_path):
@@ -137,7 +136,7 @@ def ingest_worker(self, directory, formats, name_job, filename, user):

     if sample:
         for i in range(min(5, len(raw_docs))):
-            print(raw_docs[i].text)
+            logging.info(f"Sample document {i}: {raw_docs[i]}")

     # get files from outputs/inputs/index.faiss and outputs/inputs/index.pkl
     # and send them to the server (provide user and name in form)
@@ -180,6 +179,7 @@ def remote_worker(self, source_data, name_job, user, loader, directory="temp"):
     if not os.path.exists(full_path):
         os.makedirs(full_path)
     self.update_state(state="PROGRESS", meta={"current": 1})
+    logging.info(f"Remote job: {full_path}", extra={"user": user, "job": name_job, source_data: source_data})

     remote_loader = RemoteCreator.create_loader(loader)
     raw_docs = remote_loader.load_data(source_data)
@@ -212,26 +212,4 @@ def remote_worker(self, source_data, name_job, user, loader, directory="temp"):

     shutil.rmtree(full_path)

     return {"urls": source_data, "name_job": name_job, "user": user, "limited": False}
-
-
-def count_tokens_docs(docs):
-    # Here we convert the docs list to a string and calculate the number of tokens the string represents.
-    # docs_content = (" ".join(docs))
-    docs_content = ""
-    for doc in docs:
-        docs_content += doc.page_content
-
-    tokens, total_price = num_tokens_from_string(
-        string=docs_content, encoding_name="cl100k_base"
-    )
-    # Here we print the number of tokens and the approx user cost with some visually appealing formatting.
-    return tokens
-
-
-def num_tokens_from_string(string: str, encoding_name: str) -> int:
-    # Function to convert string to tokens and estimate user cost.
-    encoding = tiktoken.get_encoding(encoding_name)
-    num_tokens = len(encoding.encode(string))
-    total_price = (num_tokens / 1000) * 0.0004
-    return num_tokens, total_price
@@ -36,6 +36,14 @@ List of latest supported LLMs are https://github.com/arc53/DocsGPT/blob/main/app
 Visit application/llm and select the file of your chosen LLM; there you will find the specific requirements that must be filled in to use it, e.g. the API key for that LLM.
 </Steps>

+### For OpenAI-Compatible Endpoints:
+DocsGPT supports the use of OpenAI-compatible endpoints through base URL substitution. This feature allows you to use alternative AI models or services that implement the OpenAI API interface.
+
+Set OPENAI_BASE_URL in your environment: either add it with the desired base URL to your .env file, or add the environment variable to the backend container in docker-compose.yml.
+
+> Make sure you have the right API_KEY and correct LLM_NAME.
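For instance, a .env pointing DocsGPT at a self-hosted OpenAI-compatible server might look like this — the URL is a placeholder for whatever your endpoint exposes:

LLM_NAME=openai
API_KEY=sk-xxxx
OPENAI_BASE_URL=http://localhost:8000/v1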
extensions/chrome/package-lock.json (generated, 28 changed lines)
@@ -107,12 +107,12 @@
     }
   },
   "node_modules/braces": {
-    "version": "3.0.2",
-    "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
-    "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+    "version": "3.0.3",
+    "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+    "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
     "dev": true,
     "dependencies": {
-      "fill-range": "^7.0.1"
+      "fill-range": "^7.1.1"
     },
     "engines": {
       "node": ">=8"
@@ -260,9 +260,9 @@
     }
   },
   "node_modules/fill-range": {
-    "version": "7.0.1",
-    "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
-    "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+    "version": "7.1.1",
+    "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+    "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
     "dev": true,
     "dependencies": {
       "to-regex-range": "^5.0.1"
@@ -884,12 +884,12 @@
       "dev": true
     },
     "braces": {
-      "version": "3.0.2",
-      "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
-      "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+      "version": "3.0.3",
+      "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+      "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
       "dev": true,
       "requires": {
-        "fill-range": "^7.0.1"
+        "fill-range": "^7.1.1"
       }
     },
     "camelcase-css": {
@@ -1000,9 +1000,9 @@
       }
     },
     "fill-range": {
-      "version": "7.0.1",
-      "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
-      "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+      "version": "7.1.1",
+      "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+      "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
       "dev": true,
       "requires": {
        "to-regex-range": "^5.0.1"
frontend/package-lock.json (generated, 65 changed lines)
@@ -37,7 +37,7 @@
         "eslint-config-standard-with-typescript": "^34.0.0",
         "eslint-plugin-import": "^2.27.5",
         "eslint-plugin-n": "^15.7.0",
-        "eslint-plugin-prettier": "^4.2.1",
+        "eslint-plugin-prettier": "^5.2.1",
         "eslint-plugin-promise": "^6.6.0",
         "eslint-plugin-react": "^7.35.0",
         "eslint-plugin-unused-imports": "^2.0.0",
@@ -893,6 +893,18 @@
         "node": ">= 8"
       }
     },
+    "node_modules/@pkgr/core": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.1.1.tgz",
+      "integrity": "sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA==",
+      "dev": true,
+      "engines": {
+        "node": "^12.20.0 || ^14.18.0 || >=16.0.0"
+      },
+      "funding": {
+        "url": "https://opencollective.com/unts"
+      }
+    },
     "node_modules/@reduxjs/toolkit": {
       "version": "1.9.2",
       "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-1.9.2.tgz",
@@ -3469,21 +3481,30 @@
       "dev": true
     },
     "node_modules/eslint-plugin-prettier": {
-      "version": "4.2.1",
-      "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-4.2.1.tgz",
-      "integrity": "sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ==",
+      "version": "5.2.1",
+      "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.2.1.tgz",
+      "integrity": "sha512-gH3iR3g4JfF+yYPaJYkN7jEl9QbweL/YfkoRlNnuIEHEz1vHVlCmWOS+eGGiRuzHQXdJFCOTxRgvju9b8VUmrw==",
       "dev": true,
       "dependencies": {
-        "prettier-linter-helpers": "^1.0.0"
+        "prettier-linter-helpers": "^1.0.0",
+        "synckit": "^0.9.1"
       },
       "engines": {
-        "node": ">=12.0.0"
+        "node": "^14.18.0 || >=16.0.0"
       },
       "funding": {
         "url": "https://opencollective.com/eslint-plugin-prettier"
       },
       "peerDependencies": {
-        "eslint": ">=7.28.0",
-        "prettier": ">=2.0.0"
+        "@types/eslint": ">=8.0.0",
+        "eslint": ">=8.0.0",
+        "eslint-config-prettier": "*",
+        "prettier": ">=3.0.0"
+      },
+      "peerDependenciesMeta": {
+        "@types/eslint": {
+          "optional": true
+        },
+        "eslint-config-prettier": {
+          "optional": true
+        }
@@ -6434,9 +6455,9 @@
       ]
     },
     "node_modules/micromatch": {
-      "version": "4.0.7",
-      "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.7.tgz",
-      "integrity": "sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==",
+      "version": "4.0.8",
+      "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+      "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
       "dev": true,
       "dependencies": {
         "braces": "^3.0.3",
@@ -8214,6 +8235,28 @@
       "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==",
       "dev": true
     },
+    "node_modules/synckit": {
+      "version": "0.9.1",
+      "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.9.1.tgz",
+      "integrity": "sha512-7gr8p9TQP6RAHusBOSLs46F4564ZrjV8xFmw5zCmgmhGUcw2hxsShhJ6CEiHQMgPDwAQ1fWHPM0ypc4RMAig4A==",
+      "dev": true,
+      "dependencies": {
+        "@pkgr/core": "^0.1.0",
+        "tslib": "^2.6.2"
+      },
+      "engines": {
+        "node": "^14.18.0 || >=16.0.0"
+      },
+      "funding": {
+        "url": "https://opencollective.com/unts"
+      }
+    },
+    "node_modules/synckit/node_modules/tslib": {
+      "version": "2.7.0",
+      "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.7.0.tgz",
+      "integrity": "sha512-gLXCKdN1/j47AiHiOkJN69hJmcbGTHI0ImLmbYLHykhgeN0jVGola9yVjFgzCUklsZQMW55o+dW7IXv3RCXDzA==",
+      "dev": true
+    },
     "node_modules/tailwindcss": {
       "version": "3.2.4",
       "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.2.4.tgz",
@@ -48,7 +48,7 @@
     "eslint-config-standard-with-typescript": "^34.0.0",
     "eslint-plugin-import": "^2.27.5",
     "eslint-plugin-n": "^15.7.0",
-    "eslint-plugin-prettier": "^4.2.1",
+    "eslint-plugin-prettier": "^5.2.1",
    "eslint-plugin-promise": "^6.6.0",
    "eslint-plugin-react": "^7.35.0",
    "eslint-plugin-unused-imports": "^2.0.0",
@@ -174,16 +174,12 @@ export default function Navigation({ navOpen, setNavOpen }: NavigationProps) {
         console.error(err);
       });
   }
-  useOutsideAlerter(
-    navRef,
-    () => {
-      if (isMobile && navOpen && apiKeyModalState === 'INACTIVE') {
-        setNavOpen(false);
-        setIsDocsListOpen(false);
-      }
-    },
-    [navOpen, isDocsListOpen, apiKeyModalState],
-  );
+  useOutsideAlerter(navRef, () => {
+    if (isMobile && navOpen && apiKeyModalState === 'INACTIVE') {
+      setNavOpen(false);
+      setIsDocsListOpen(false);
+    }
+  }, [navOpen, isDocsListOpen, apiKeyModalState]);

  /*
    Needed to fix bug where if mobile nav was closed and then window was resized to desktop, nav would still be closed but the button to open would be gone, as per #1 on issue #146
@@ -91,14 +91,14 @@ function Dropdown({
           {selectedValue && 'label' in selectedValue
             ? selectedValue.label
             : selectedValue && 'description' in selectedValue
-              ? `${
-                  selectedValue.value < 1e9
-                    ? selectedValue.value + ` (${selectedValue.description})`
-                    : selectedValue.description
-                }`
-              : placeholder
-                ? placeholder
-                : 'From URL'}
+            ? `${
+                selectedValue.value < 1e9
+                  ? selectedValue.value + ` (${selectedValue.description})`
+                  : selectedValue.description
+              }`
+            : placeholder
+              ? placeholder
+              : 'From URL'}
         </span>
       )}
       <img
@@ -128,14 +128,14 @@
           {typeof option === 'string'
             ? option
             : option.name
-              ? option.name
-              : option.label
-                ? option.label
-                : `${
-                    option.value < 1e9
-                      ? option.value + ` (${option.description})`
-                      : option.description
-                  }`}
+            ? option.name
+            : option.label
+              ? option.label
+              : `${
+                  option.value < 1e9
+                    ? option.value + ` (${option.description})`
+                    : option.description
+                }`}
         </span>
         {showEdit && onEdit && (
           <img
@@ -54,9 +54,9 @@ export default function Conversation() {
     }
   }, []);

-  useEffect(()=>{
+  useEffect(() => {
     fetchStream.current && fetchStream.current.abort();
-  },[conversationId]);
+  }, [conversationId]);

   useEffect(() => {
     const observerCallback: IntersectionObserverCallback = (entries) => {
@@ -235,10 +235,11 @@ const ConversationBubble = forwardRef<
               <SyntaxHighlighter
                 {...rest}
                 PreTag="div"
-                children={String(children).replace(/\n$/, '')}
                 language={match[1]}
                 style={vscDarkPlus}
-              />
+              >
+                {String(children).replace(/\n$/, '')}
+              </SyntaxHighlighter>
               <div
                 className={`absolute right-3 top-3 lg:invisible
                 ${type !== 'ERROR' ? 'group-hover:lg:visible' : ''} `}
@@ -396,8 +397,9 @@ const ConversationBubble = forwardRef<
               toggleState={(state: boolean) => {
                 setIsSidebarOpen(state);
               }}
-              children={<AllSources sources={sources} />}
-            />
+            >
+              <AllSources sources={sources} />
+            </Sidebar>
           )}
         </div>
       );
@@ -71,7 +71,7 @@
   "remote": "Remote",
   "name": "Name",
   "choose": "Choose Files",
-  "info": "Please upload .pdf, .txt, .rst, .docx, .md, .zip limited to 25mb",
+  "info": "Please upload .pdf, .txt, .rst, .csv, .docx, .md, .zip limited to 25mb",
   "uploadedFiles": "Uploaded Files",
   "cancel": "Cancel",
   "train": "Train",
@@ -19,15 +19,11 @@ export default function DeleteConvModal({
   const dispatch = useDispatch();
   const { isMobile } = useMediaQuery();
   const { t } = useTranslation();
-  useOutsideAlerter(
-    modalRef,
-    () => {
-      if (isMobile && modalState === 'ACTIVE') {
-        dispatch(setModalState('INACTIVE'));
-      }
-    },
-    [modalState],
-  );
+  useOutsideAlerter(modalRef, () => {
+    if (isMobile && modalState === 'ACTIVE') {
+      dispatch(setModalState('INACTIVE'));
+    }
+  }, [modalState]);

   function handleSubmit() {
     handleDeleteAllConv();
@@ -22,15 +22,11 @@ export default function APIKeyModal({
   const modalRef = useRef(null);
   const { isMobile } = useMediaQuery();

-  useOutsideAlerter(
-    modalRef,
-    () => {
-      if (isMobile && modalState === 'ACTIVE') {
-        setModalState('INACTIVE');
-      }
-    },
-    [modalState],
-  );
+  useOutsideAlerter(modalRef, () => {
+    if (isMobile && modalState === 'ACTIVE') {
+      setModalState('INACTIVE');
+    }
+  }, [modalState]);

   function handleSubmit() {
     if (key.length <= 1) {
@@ -259,6 +259,7 @@ function Upload({
       'application/zip': ['.zip'],
       'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
         ['.docx'],
+      'text/csv': ['.csv'],
     },
   });