This commit is contained in:
Alex
2023-02-15 20:29:09 +00:00
parent b6629ce7aa
commit 8a5e1e8d98
2 changed files with 1 addition and 4 deletions

View File

@@ -78,8 +78,6 @@ def api_answer():
else:
embeddings_key = os.getenv("EMBEDDINGS_KEY")
print(embeddings_key)
print(api_key)
# check if the vectorstore is set
if "active_docs" in data:
@@ -115,7 +113,7 @@ def api_answer():
qa_chain = load_qa_chain(llm=llm, chain_type="map_reduce",
combine_prompt=c_prompt)
chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=2)
chain = VectorDBQA(combine_documents_chain=qa_chain, vectorstore=docsearch, k=4)
# fetch the answer
result = chain({"query": question})

View File

@@ -50,7 +50,6 @@ def ingest(yes: bool = typer.Option(False, "-y", "--yes", prompt=False,
required_exts=formats, num_files_limit=limit,
exclude_hidden=exclude).load_data()
raw_docs = [Document.to_langchain_format(raw_doc) for raw_doc in raw_docs]
print(raw_docs)
# Here we split the documents, as needed, into smaller chunks.
# We do this due to the context limits of the LLMs.
text_splitter = RecursiveCharacterTextSplitter()