Merge branch 'main' into feat/analytics-and-logs

Siddhant Rai, 2024-09-11 17:58:04 +05:30, committed by GitHub
51 changed files with 1116 additions and 1498 deletions

View File

@@ -9,6 +9,7 @@ import traceback
from pymongo import MongoClient
from bson.objectid import ObjectId
from bson.dbref import DBRef
from application.core.settings import settings
from application.llm.llm_creator import LLMCreator
@@ -20,7 +21,7 @@ logger = logging.getLogger(__name__)
mongo = MongoClient(settings.MONGO_URI)
db = mongo["docsgpt"]
conversations_collection = db["conversations"]
vectors_collection = db["vectors"]
sources_collection = db["sources"]
prompts_collection = db["prompts"]
api_key_collection = db["api_keys"]
user_logs_collection = db["user_logs"]
@@ -37,9 +38,7 @@ if settings.MODEL_NAME: # in case a particular model name is configured
gpt_model = settings.MODEL_NAME
# load the prompts
current_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
with open(os.path.join(current_dir, "prompts", "chat_combine_default.txt"), "r") as f:
chat_combine_template = f.read()
@@ -75,35 +74,34 @@ def run_async_chain(chain, question, chat_history):
def get_data_from_api_key(api_key):
data = api_key_collection.find_one({"key": api_key})
# Raise a custom exception if the API key is not found
if data is None:
raise Exception("Invalid API Key, please generate new key", 401)
if "retriever" not in data:
data["retriever"] = None
if "source" in data and isinstance(data["source"], DBRef):
source_doc = db.dereference(data["source"])
data["source"] = str(source_doc["_id"])
if "retriever" in source_doc:
data["retriever"] = source_doc["retriever"]
else:
data["source"] = {}
return data
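For readers skimming the hunk above: the new DBRef branch resolves an API key's "source" reference into a plain source id and inherits the source's retriever. A minimal standalone sketch of that pattern, assuming a local MongoDB and the collection names shown in this diff:

from pymongo import MongoClient
from bson.dbref import DBRef

client = MongoClient("mongodb://localhost:27017")  # assumed connection string
db = client["docsgpt"]

def resolve_api_key_source(data):
    # Default the retriever, then dereference a DBRef "source" into the
    # sources document, copying back its _id and (optionally) its retriever.
    if "retriever" not in data:
        data["retriever"] = None
    if "source" in data and isinstance(data["source"], DBRef):
        source_doc = db.dereference(data["source"])
        data["source"] = str(source_doc["_id"])
        if "retriever" in source_doc:
            data["retriever"] = source_doc["retriever"]
    else:
        data["source"] = {}
    return data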
def get_vectorstore(data):
if "active_docs" in data:
if data["active_docs"].split("/")[0] == "default":
vectorstore = ""
elif data["active_docs"].split("/")[0] == "local":
vectorstore = "indexes/" + data["active_docs"]
else:
vectorstore = "vectors/" + data["active_docs"]
if data["active_docs"] == "default":
vectorstore = ""
else:
vectorstore = ""
vectorstore = os.path.join("application", vectorstore)
return vectorstore
def get_retriever(source_id: str):
doc = sources_collection.find_one({"_id": ObjectId(source_id)})
if doc is None:
raise Exception("Source document does not exist", 404)
retriever_name = None if "retriever" not in doc else doc["retriever"]
return retriever_name
def is_azure_configured():
return (
settings.OPENAI_API_BASE
and settings.OPENAI_API_VERSION
and settings.AZURE_DEPLOYMENT_NAME
)
return settings.OPENAI_API_BASE and settings.OPENAI_API_VERSION and settings.AZURE_DEPLOYMENT_NAME
def save_conversation(conversation_id, question, response, source_log_docs, llm):
@@ -263,33 +261,33 @@ def stream():
else:
token_limit = settings.DEFAULT_MAX_HISTORY
# check if active_docs or api_key is set
## retriever can be "brave_search, duckduck_search or classic"
retriever_name = data["retriever"] if "retriever" in data else "classic"
# check if active_docs or api_key is set
if "api_key" in data:
data_key = get_data_from_api_key(data["api_key"])
chunks = int(data_key["chunks"])
prompt_id = data_key["prompt_id"]
source = {"active_docs": data_key["source"]}
retriever_name = data_key["retriever"] or retriever_name
user_api_key = data["api_key"]
elif "active_docs" in data:
source = {"active_docs": data["active_docs"]}
source = {"active_docs" : data["active_docs"]}
retriever_name = get_retriever(data["active_docs"]) or retriever_name
user_api_key = None
else:
source = {}
user_api_key = None
if source["active_docs"].split("/")[0] in ["default", "local"]:
retriever_name = "classic"
else:
retriever_name = source["active_docs"]
current_app.logger.info(
f"/stream - request_data: {data}, source: {source}",
extra={"data": json.dumps({"request_data": data, "source": source})},
current_app.logger.info(f"/stream - request_data: {data}, source: {source}",
extra={"data": json.dumps({"request_data": data, "source": source})}
)
prompt = get_prompt(prompt_id)
retriever = RetrieverCreator.create_retriever(
retriever_name,
question=question,
@@ -369,6 +367,10 @@ def api_answer():
else:
token_limit = settings.DEFAULT_MAX_HISTORY
## retriever can be brave_search, duckduck_search, or classic
retriever_name = data["retriever"] if "retriever" in data else "classic"
# use try/except to handle errors during retrieval and answering
try:
# check if the vectorstore is set
if "api_key" in data:
@@ -376,15 +378,15 @@ def api_answer():
chunks = int(data_key["chunks"])
prompt_id = data_key["prompt_id"]
source = {"active_docs": data_key["source"]}
retriever_name = data_key["retriever"] or retriever_name
user_api_key = data["api_key"]
else:
source = data
elif "active_docs" in data:
source = {"active_docs":data["active_docs"]}
retriever_name = get_retriever(data["active_docs"]) or retriever_name
user_api_key = None
if source["active_docs"].split("/")[0] in ["default", "local"]:
retriever_name = "classic"
else:
retriever_name = source["active_docs"]
source = {}
user_api_key = None
prompt = get_prompt(prompt_id)
@@ -421,8 +423,8 @@ def api_answer():
)
result = {"answer": response_full, "sources": source_log_docs}
result["conversation_id"] = save_conversation(
conversation_id, question, response_full, source_log_docs, llm
result["conversation_id"] = str(
save_conversation(conversation_id, question, response_full, source_log_docs, llm)
)
retriever_params = retriever.get_params()
user_logs_collection.insert_one(
@@ -459,19 +461,19 @@ def api_search():
if "api_key" in data:
data_key = get_data_from_api_key(data["api_key"])
chunks = int(data_key["chunks"])
source = {"active_docs": data_key["source"]}
user_api_key = data["api_key"]
source = {"active_docs":data_key["source"]}
user_api_key = data_key["api_key"]
elif "active_docs" in data:
source = {"active_docs": data["active_docs"]}
source = {"active_docs":data["active_docs"]}
user_api_key = None
else:
source = {}
user_api_key = None
if source["active_docs"].split("/")[0] in ["default", "local"]:
retriever_name = "classic"
if "retriever" in data:
retriever_name = data["retriever"]
else:
retriever_name = source["active_docs"]
retriever_name = "classic"
if "token_limit" in data:
token_limit = data["token_limit"]
else:

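Across /stream, api_answer, and api_search above, the retriever name is now resolved with the same precedence: an explicit "retriever" in the request body, else the API-key record, else the source document, with "classic" as the fallback. A condensed sketch of that precedence (illustrative helper, not code from the diff):

def pick_retriever(data, data_key=None, source_retriever=None):
    # Request body first, then the API-key record, then the source
    # document; "classic" is the default everywhere.
    name = data["retriever"] if "retriever" in data else "classic"
    if data_key is not None and data_key.get("retriever"):
        name = data_key["retriever"]
    elif source_retriever:
        name = source_retriever
    return name

# e.g. pick_retriever({"question": "hi"}) == "classic"
#      pick_retriever({}, data_key={"retriever": "brave_search"}) == "brave_search"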
View File

@@ -3,13 +3,13 @@ import datetime
from flask import Blueprint, request, send_from_directory
from pymongo import MongoClient
from werkzeug.utils import secure_filename
from bson.objectid import ObjectId
from application.core.settings import settings
mongo = MongoClient(settings.MONGO_URI)
db = mongo["docsgpt"]
conversations_collection = db["conversations"]
vectors_collection = db["vectors"]
sources_collection = db["sources"]
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -35,7 +35,12 @@ def upload_index_files():
return {"status": "no name"}
job_name = secure_filename(request.form["name"])
tokens = secure_filename(request.form["tokens"])
save_dir = os.path.join(current_dir, "indexes", user, job_name)
retriever = secure_filename(request.form["retriever"])
id = secure_filename(request.form["id"])
type = secure_filename(request.form["type"])
remote_data = secure_filename(request.form["remote_data"]) if "remote_data" in request.form else None
save_dir = os.path.join(current_dir, "indexes", str(id))
if settings.VECTOR_STORE == "faiss":
if "file_faiss" not in request.files:
print("No file part")
@@ -55,17 +60,19 @@ def upload_index_files():
os.makedirs(save_dir)
file_faiss.save(os.path.join(save_dir, "index.faiss"))
file_pkl.save(os.path.join(save_dir, "index.pkl"))
# create entry in vectors_collection
vectors_collection.insert_one(
# create entry in sources_collection
sources_collection.insert_one(
{
"_id": ObjectId(id),
"user": user,
"name": job_name,
"language": job_name,
"location": save_dir,
"date": datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
"model": settings.EMBEDDINGS_NAME,
"type": "local",
"tokens": tokens
"type": type,
"tokens": tokens,
"retriever": retriever,
"remote_data": remote_data
}
)
return {"status": "ok"}

View File

@@ -20,7 +20,7 @@ from application.vectorstore.vector_creator import VectorCreator
mongo = MongoClient(settings.MONGO_URI)
db = mongo["docsgpt"]
conversations_collection = db["conversations"]
vectors_collection = db["vectors"]
sources_collection = db["sources"]
prompts_collection = db["prompts"]
feedback_collection = db["feedback"]
api_key_collection = db["api_keys"]
@@ -30,9 +30,7 @@ user_logs_collection = db["user_logs"]
user = Blueprint("user", __name__)
current_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def generate_minute_range(start_date, end_date):
@@ -83,9 +81,7 @@ def get_conversations():
conversations = conversations_collection.find().sort("date", -1).limit(30)
list_conversations = []
for conversation in conversations:
list_conversations.append(
{"id": str(conversation["_id"]), "name": conversation["name"]}
)
list_conversations.append({"id": str(conversation["_id"]), "name": conversation["name"]})
# list_conversations = [{"id": "default", "name": "default"}, {"id": "jeff", "name": "jeff"}]
@@ -116,15 +112,10 @@ def api_feedback():
question = data["question"]
answer = data["answer"]
feedback = data["feedback"]
feedback_collection.insert_one(
{
"question": question,
"answer": answer,
"feedback": feedback,
"timestamp": datetime.datetime.now(datetime.timezone.utc),
}
)
new_doc = {"question": question, "answer": answer, "feedback": feedback, "timestamp": datetime.datetime.now(datetime.timezone.utc)}
if "api_key" in data:
new_doc["api_key"] = data["api_key"]
feedback_collection.insert_one(new_doc)
return {"status": "ok"}
@@ -137,7 +128,7 @@ def delete_by_ids():
return {"status": "error"}
if settings.VECTOR_STORE == "faiss":
result = vectors_collection.delete_index(ids=ids)
result = sources_collection.delete_index(ids=ids)
if result:
return {"status": "ok"}
return {"status": "error"}
@@ -147,28 +138,24 @@ def delete_by_ids():
def delete_old():
"""Delete old indexes."""
import shutil
path = request.args.get("path")
dirs = path.split("/")
dirs_clean = []
for i in range(0, len(dirs)):
dirs_clean.append(secure_filename(dirs[i]))
# check that path starts with indexes or vectors
if dirs_clean[0] not in ["indexes", "vectors"]:
return {"status": "error"}
path_clean = "/".join(dirs_clean)
vectors_collection.delete_one({"name": dirs_clean[-1], "user": dirs_clean[-2]})
source_id = request.args.get("source_id")
doc = sources_collection.find_one({
"_id": ObjectId(source_id),
"user": "local",
})
if doc is None:
return {"status": "not found"}, 404
if settings.VECTOR_STORE == "faiss":
try:
shutil.rmtree(os.path.join(current_dir, path_clean))
shutil.rmtree(os.path.join(current_dir, str(doc["_id"])))
except FileNotFoundError:
pass
else:
vectorstore = VectorCreator.create_vectorstore(
settings.VECTOR_STORE, path=os.path.join(current_dir, path_clean)
)
vectorstore = VectorCreator.create_vectorstore(settings.VECTOR_STORE, source_id=str(doc["_id"]))
vectorstore.delete_index()
sources_collection.delete_one({
"_id": ObjectId(source_id),
})
return {"status": "ok"}
@@ -202,9 +189,7 @@ def upload_file():
file.save(os.path.join(temp_dir, filename))
# Use shutil.make_archive to zip the temp directory
zip_path = shutil.make_archive(
base_name=os.path.join(save_dir, job_name), format="zip", root_dir=temp_dir
)
zip_path = shutil.make_archive(base_name=os.path.join(save_dir, job_name), format="zip", root_dir=temp_dir)
final_filename = os.path.basename(zip_path)
# Clean up the temporary directory after zipping
@@ -246,9 +231,7 @@ def upload_remote():
source_data = request.form["data"]
if source_data:
task = ingest_remote.delay(
source_data=source_data, job_name=job_name, user=user, loader=source
)
task = ingest_remote.delay(source_data=source_data, job_name=job_name, user=user, loader=source)
task_id = task.id
return {"status": "ok", "task_id": task_id}
else:
@@ -275,54 +258,36 @@ def combined_json():
data = [
{
"name": "default",
"language": "default",
"version": "",
"description": "default",
"fullName": "default",
"date": "default",
"docLink": "default",
"model": settings.EMBEDDINGS_NAME,
"location": "remote",
"tokens": "",
"retriever": "classic",
}
]
# structure: name, language, version, description, fullName, date, docLink
# append data from vectors_collection sorted in descending order of date
for index in vectors_collection.find({"user": user}).sort("date", -1):
# append data from sources_collection sorted in descending order of date
for index in sources_collection.find({"user": user}).sort("date", -1):
data.append(
{
"id": str(index["_id"]),
"name": index["name"],
"language": index["language"],
"version": "",
"description": index["name"],
"fullName": index["name"],
"date": index["date"],
"docLink": index["location"],
"model": settings.EMBEDDINGS_NAME,
"location": "local",
"tokens": index["tokens"] if ("tokens" in index.keys()) else "",
"retriever": index["retriever"] if ("retriever" in index.keys()) else "classic",
}
)
if settings.VECTOR_STORE == "faiss":
data_remote = requests.get(
"https://d3dg1063dc54p9.cloudfront.net/combined.json"
).json()
for index in data_remote:
index["location"] = "remote"
data.append(index)
if "duckduck_search" in settings.RETRIEVERS_ENABLED:
data.append(
{
"name": "DuckDuckGo Search",
"language": "en",
"version": "",
"description": "duckduck_search",
"fullName": "DuckDuckGo Search",
"date": "duckduck_search",
"docLink": "duckduck_search",
"model": settings.EMBEDDINGS_NAME,
"location": "custom",
"tokens": "",
"retriever": "duckduck_search",
}
)
if "brave_search" in settings.RETRIEVERS_ENABLED:
@@ -330,14 +295,11 @@ def combined_json():
{
"name": "Brave Search",
"language": "en",
"version": "",
"description": "brave_search",
"fullName": "Brave Search",
"date": "brave_search",
"docLink": "brave_search",
"model": settings.EMBEDDINGS_NAME,
"location": "custom",
"tokens": "",
"retriever": "brave_search",
}
)
@@ -346,39 +308,13 @@ def combined_json():
@user.route("/api/docs_check", methods=["POST"])
def check_docs():
# check if docs exist in a vectorstore folder
data = request.get_json()
# split docs on / and take first part
if data["docs"].split("/")[0] == "local":
return {"status": "exists"}
vectorstore = "vectors/" + secure_filename(data["docs"])
base_path = "https://raw.githubusercontent.com/arc53/DocsHUB/main/"
if os.path.exists(vectorstore) or data["docs"] == "default":
return {"status": "exists"}
else:
file_url = urlparse(base_path + vectorstore + "index.faiss")
if (
file_url.scheme in ["https"]
and file_url.netloc == "raw.githubusercontent.com"
and file_url.path.startswith("/arc53/DocsHUB/main/")
):
r = requests.get(file_url.geturl())
if r.status_code != 200:
return {"status": "null"}
else:
if not os.path.exists(vectorstore):
os.makedirs(vectorstore)
with open(vectorstore + "index.faiss", "wb") as f:
f.write(r.content)
r = requests.get(base_path + vectorstore + "index.pkl")
with open(vectorstore + "index.pkl", "wb") as f:
f.write(r.content)
else:
return {"status": "null"}
return {"status": "loaded"}
return {"status": "not found"}
@user.route("/api/create_prompt", methods=["POST"])
@@ -409,9 +345,7 @@ def get_prompts():
list_prompts.append({"id": "creative", "name": "creative", "type": "public"})
list_prompts.append({"id": "strict", "name": "strict", "type": "public"})
for prompt in prompts:
list_prompts.append(
{"id": str(prompt["_id"]), "name": prompt["name"], "type": "private"}
)
list_prompts.append({"id": str(prompt["_id"]), "name": prompt["name"], "type": "private"})
return jsonify(list_prompts)
@@ -420,21 +354,15 @@ def get_prompts():
def get_single_prompt():
prompt_id = request.args.get("id")
if prompt_id == "default":
with open(
os.path.join(current_dir, "prompts", "chat_combine_default.txt"), "r"
) as f:
with open(os.path.join(current_dir, "prompts", "chat_combine_default.txt"), "r") as f:
chat_combine_template = f.read()
return jsonify({"content": chat_combine_template})
elif prompt_id == "creative":
with open(
os.path.join(current_dir, "prompts", "chat_combine_creative.txt"), "r"
) as f:
with open(os.path.join(current_dir, "prompts", "chat_combine_creative.txt"), "r") as f:
chat_reduce_creative = f.read()
return jsonify({"content": chat_reduce_creative})
elif prompt_id == "strict":
with open(
os.path.join(current_dir, "prompts", "chat_combine_strict.txt"), "r"
) as f:
with open(os.path.join(current_dir, "prompts", "chat_combine_strict.txt"), "r") as f:
chat_reduce_strict = f.read()
return jsonify({"content": chat_reduce_strict})
@@ -463,9 +391,7 @@ def update_prompt_name():
# check if name is null
if name == "":
return {"status": "error"}
prompts_collection.update_one(
{"_id": ObjectId(id)}, {"$set": {"name": name, "content": content}}
)
prompts_collection.update_one({"_id": ObjectId(id)}, {"$set": {"name": name, "content": content}})
return {"status": "ok"}
@@ -475,12 +401,23 @@ def get_api_keys():
keys = api_key_collection.find({"user": user})
list_keys = []
for key in keys:
if "source" in key and isinstance(key["source"],DBRef):
source = db.dereference(key["source"])
if source is None:
continue
else:
source_name = source["name"]
elif "retriever" in key:
source_name = key["retriever"]
else:
continue
list_keys.append(
{
"id": str(key["_id"]),
"name": key["name"],
"key": key["key"][:4] + "..." + key["key"][-4:],
"source": key["source"],
"source": source_name,
"prompt_id": key["prompt_id"],
"chunks": key["chunks"],
}
@@ -492,21 +429,22 @@ def get_api_keys():
def create_api_key():
data = request.get_json()
name = data["name"]
source = data["source"]
prompt_id = data["prompt_id"]
chunks = data["chunks"]
key = str(uuid.uuid4())
user = "local"
resp = api_key_collection.insert_one(
{
"name": name,
"key": key,
"source": source,
"user": user,
"prompt_id": prompt_id,
"chunks": chunks,
}
)
new_api_key = {
"name": name,
"key": key,
"user": user,
"prompt_id": prompt_id,
"chunks": chunks,
}
if "source" in data and ObjectId.is_valid(data["source"]):
new_api_key["source"] = DBRef("sources", ObjectId(data["source"]))
if "retriever" in data:
new_api_key["retriever"] = data["retriever"]
resp = api_key_collection.insert_one(new_api_key)
new_id = str(resp.inserted_id)
return {"id": new_id, "key": key}
@@ -533,36 +471,37 @@ def share_conversation():
conversation_id = data["conversation_id"]
isPromptable = request.args.get("isPromptable").lower() == "true"
conversation = conversations_collection.find_one(
{"_id": ObjectId(conversation_id)}
)
conversation = conversations_collection.find_one({"_id": ObjectId(conversation_id)})
if conversation is None:
raise Exception("Conversation does not exist")
current_n_queries = len(conversation["queries"])
## generate binary representation of uuid
explicit_binary = Binary.from_uuid(uuid.uuid4(), UuidRepresentation.STANDARD)
if isPromptable:
source = "default" if "source" not in data else data["source"]
prompt_id = "default" if "prompt_id" not in data else data["prompt_id"]
chunks = "2" if "chunks" not in data else data["chunks"]
name = conversation["name"] + "(shared)"
pre_existing_api_document = api_key_collection.find_one(
{
new_api_key_data = {
"prompt_id": prompt_id,
"chunks": chunks,
"source": source,
"user": user,
}
if "source" in data and ObjectId.is_valid(data["source"]):
new_api_key_data["source"] = DBRef("sources",ObjectId(data["source"]))
elif "retriever" in data:
new_api_key_data["retriever"] = data["retriever"]
pre_existing_api_document = api_key_collection.find_one(
new_api_key_data
)
api_uuid = str(uuid.uuid4())
if pre_existing_api_document:
api_uuid = pre_existing_api_document["key"]
pre_existing = shared_conversations_collections.find_one(
{
"conversation_id": DBRef(
"conversations", ObjectId(conversation_id)
),
"conversation_id": DBRef("conversations", ObjectId(conversation_id)),
"isPromptable": isPromptable,
"first_n_queries": current_n_queries,
"user": user,
@@ -593,21 +532,18 @@ def share_conversation():
"api_key": api_uuid,
}
)
return jsonify(
{"success": True, "identifier": str(explicit_binary.as_uuid())}
)
return jsonify({"success": True, "identifier": str(explicit_binary.as_uuid())})
else:
api_key_collection.insert_one(
{
"name": name,
"key": api_uuid,
"source": source,
"user": user,
"prompt_id": prompt_id,
"chunks": chunks,
}
)
shared_conversations_collections.insert_one(
api_uuid = str(uuid.uuid4())
new_api_key_data["key"] = api_uuid
new_api_key_data["name"] = name
if "source" in data and ObjectId.is_valid(data["source"]):
new_api_key_data["source"] = DBRef("sources", ObjectId(data["source"]))
if "retriever" in data:
new_api_key_data["retriever"] = data["retriever"]
api_key_collection.insert_one(new_api_key_data)
shared_conversations_collections.insert_one(
{
"uuid": explicit_binary,
"conversation_id": {
@@ -619,12 +555,10 @@ def share_conversation():
"user": user,
"api_key": api_uuid,
}
)
)
## Identifier as route parameter in frontend
return (
jsonify(
{"success": True, "identifier": str(explicit_binary.as_uuid())}
),
jsonify({"success": True, "identifier": str(explicit_binary.as_uuid())}),
201,
)
@@ -639,9 +573,7 @@ def share_conversation():
)
if pre_existing is not None:
return (
jsonify(
{"success": True, "identifier": str(pre_existing["uuid"].as_uuid())}
),
jsonify({"success": True, "identifier": str(pre_existing["uuid"].as_uuid())}),
200,
)
else:
@@ -659,9 +591,7 @@ def share_conversation():
)
## Identifier as route parameter in frontend
return (
jsonify(
{"success": True, "identifier": str(explicit_binary.as_uuid())}
),
jsonify({"success": True, "identifier": str(explicit_binary.as_uuid())}),
201,
)
except Exception as err:
@@ -673,16 +603,10 @@ def share_conversation():
@user.route("/api/shared_conversation/<string:identifier>", methods=["GET"])
def get_publicly_shared_conversations(identifier: str):
try:
query_uuid = Binary.from_uuid(
uuid.UUID(identifier), UuidRepresentation.STANDARD
)
query_uuid = Binary.from_uuid(uuid.UUID(identifier), UuidRepresentation.STANDARD)
shared = shared_conversations_collections.find_one({"uuid": query_uuid})
conversation_queries = []
if (
shared
and "conversation_id" in shared
and isinstance(shared["conversation_id"], DBRef)
):
if shared and "conversation_id" in shared and isinstance(shared["conversation_id"], DBRef):
# Resolve the DBRef
conversation_ref = shared["conversation_id"]
conversation = db.dereference(conversation_ref)
@@ -696,9 +620,7 @@ def get_publicly_shared_conversations(identifier: str):
),
404,
)
conversation_queries = conversation["queries"][
: (shared["first_n_queries"])
]
conversation_queries = conversation["queries"][: (shared["first_n_queries"])]
for query in conversation_queries:
query.pop("sources") ## avoid exposing sources
else: