mirror of
https://github.com/arc53/DocsGPT.git
synced 2025-11-29 16:43:16 +00:00
cleanups
This commit is contained in:
@@ -78,8 +78,6 @@ def api_answer():
     else:
         embeddings_key = os.getenv("EMBEDDINGS_KEY")
-        print(embeddings_key)
-        print(api_key)
     # check if the vectorstore is set
     if "active_docs" in data:
@@ -115,7 +113,7 @@ def api_answer():
     qa_chain = load_qa_chain(llm=llm, chain_type="map_reduce",
                              combine_prompt=c_prompt)
-    chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=2)
+    chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=4)

     # fetch the answer
     result = chain({"query": question})
@@ -50,7 +50,6 @@ def ingest(yes: bool = typer.Option(False, "-y", "--yes", prompt=False,
                           required_exts=formats, num_files_limit=limit,
                           exclude_hidden=exclude).load_data()
     raw_docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]
-    print(raw_docs)
     # Here we split the documents, as needed, into smaller chunks.
     # We do this due to the context limits of the LLMs.
     text_splitter = RecursiveCharacterTextSplitter()
Reference in New Issue
Block a user