Merge branch 'arc53:main' into main
(mirror of https://github.com/arc53/DocsGPT.git, synced 2026-01-20 14:00:55 +00:00)
@@ -28,12 +28,15 @@ vectors_collection = db["vectors"]
 prompts_collection = db["prompts"]
 answer = Blueprint('answer', __name__)

-if settings.LLM_NAME == "gpt4":
-    gpt_model = 'gpt-4'
+gpt_model = ""
+# to have some kind of default behaviour
+if settings.LLM_NAME == "openai":
+    gpt_model = 'gpt-3.5-turbo'
 elif settings.LLM_NAME == "anthropic":
     gpt_model = 'claude-2'
-else:
-    gpt_model = 'gpt-3.5-turbo'
+
+if settings.MODEL_NAME:  # in case there is particular model name configured
+    gpt_model = settings.MODEL_NAME

 # load the prompts
 current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
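Net effect of this hunk: LLM_NAME now only selects a per-provider default, and an explicitly configured MODEL_NAME always wins. A minimal sketch of the resulting precedence (the standalone function is illustrative, not part of the codebase):

import sys
from typing import Optional

def resolve_gpt_model(llm_name: str, model_name: Optional[str]) -> str:
    # Per-provider default, mirroring the diff above.
    gpt_model = ""
    if llm_name == "openai":
        gpt_model = "gpt-3.5-turbo"
    elif llm_name == "anthropic":
        gpt_model = "claude-2"
    # An explicitly configured MODEL_NAME overrides the default.
    if model_name:
        gpt_model = model_name
    return gpt_model

assert resolve_gpt_model("openai", None) == "gpt-3.5-turbo"
assert resolve_gpt_model("openai", "gpt-4-turbo-preview") == "gpt-4-turbo-preview"
assert resolve_gpt_model("docsgpt", None) == ""  # unknown providers keep the empty default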
@@ -95,7 +98,7 @@ def is_azure_configured():
     return settings.OPENAI_API_BASE and settings.OPENAI_API_VERSION and settings.AZURE_DEPLOYMENT_NAME


-def complete_stream(question, docsearch, chat_history, api_key, prompt_id, conversation_id):
+def complete_stream(question, docsearch, chat_history, api_key, prompt_id, conversation_id, chunks=2):
     llm = LLMCreator.create_llm(settings.LLM_NAME, api_key=api_key)

     if prompt_id == 'default':
@@ -106,8 +109,11 @@ def complete_stream(question, docsearch, chat_history, api_key, prompt_id, conve
         prompt = chat_combine_strict
     else:
         prompt = prompts_collection.find_one({"_id": ObjectId(prompt_id)})["content"]

-    docs = docsearch.search(question, k=2)
+    if chunks == 0:
+        docs = []
+    else:
+        docs = docsearch.search(question, k=chunks)
     if settings.LLM_NAME == "llama.cpp":
         docs = [docs[0]]
     # join all page_content together with a newline
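This is the core behavioural change: chunks == 0 skips vector search entirely (the model answers without retrieved context), while any other value becomes the k passed to the vector store. A minimal sketch of the pattern, with a stub standing in for the real docsearch object:

class FakeDocSearch:
    # Stand-in for the vector store wrapper; only the search signature matters here.
    def search(self, question, k):
        return [f"doc-{i}" for i in range(k)]

def retrieve(docsearch, question, chunks=2):
    if chunks == 0:
        return []  # retrieval disabled
    return docsearch.search(question, k=chunks)

print(retrieve(FakeDocSearch(), "hello", chunks=0))  # -> []
print(retrieve(FakeDocSearch(), "hello", chunks=4))  # -> ['doc-0', 'doc-1', 'doc-2', 'doc-3']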
@@ -190,6 +196,10 @@ def stream():
         prompt_id = data["prompt_id"]
     else:
         prompt_id = 'default'
+    if 'chunks' in data:
+        chunks = int(data["chunks"])
+    else:
+        chunks = 2

     # check if active_docs is set

@@ -211,7 +221,8 @@ def stream():
         complete_stream(question, docsearch,
                         chat_history=history, api_key=api_key,
                         prompt_id=prompt_id,
-                        conversation_id=conversation_id), mimetype="text/event-stream"
+                        conversation_id=conversation_id,
+                        chunks=chunks), mimetype="text/event-stream"
     )


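Taken together, /stream now accepts an optional "chunks" field in its JSON body. A hedged client sketch — only the endpoint path and field names come from the diff; the host/port and payload values are illustrative assumptions:

import requests

payload = {
    "question": "What is DocsGPT?",
    "history": "[]",
    "active_docs": "default",
    "prompt_id": "default",
    "conversation_id": None,
    "chunks": 4,  # omit to get the server-side default of 2; 0 disables retrieval
}
# Assumed local dev address; adjust to wherever the Flask app is served.
with requests.post("http://localhost:7091/stream", json=payload, stream=True) as resp:
    for line in resp.iter_lines():
        if line:
            print(line.decode("utf-8"))  # server-sent event lines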
@@ -237,6 +248,10 @@ def api_answer():
         prompt_id = data["prompt_id"]
     else:
         prompt_id = 'default'
+    if 'chunks' in data:
+        chunks = int(data["chunks"])
+    else:
+        chunks = 2

     if prompt_id == 'default':
         prompt = chat_combine_template
@@ -260,7 +275,10 @@ def api_answer():



-    docs = docsearch.search(question, k=2)
+    if chunks == 0:
+        docs = []
+    else:
+        docs = docsearch.search(question, k=chunks)
     # join all page_content together with a newline
     docs_together = "\n".join([doc.page_content for doc in docs])
     p_chat_combine = prompt.replace("{summaries}", docs_together)
@@ -359,9 +377,15 @@ def api_search():
         vectorstore = get_vectorstore({"active_docs": data["active_docs"]})
     else:
         vectorstore = ""
+    if 'chunks' in data:
+        chunks = int(data["chunks"])
+    else:
+        chunks = 2
     docsearch = VectorCreator.create_vectorstore(settings.VECTOR_STORE, vectorstore, embeddings_key)

-    docs = docsearch.search(question, k=2)
+    if chunks == 0:
+        docs = []
+    else:
+        docs = docsearch.search(question, k=chunks)

     source_log_docs = []
     for doc in docs:
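/api/search accepts the same optional field. Another hedged sketch (host/port and values are again illustrative assumptions):

import requests

resp = requests.post(
    "http://localhost:7091/api/search",  # assumed local dev address
    json={
        "question": "How do I install DocsGPT?",
        "active_docs": "default",
        "chunks": 2,  # number of source chunks to return; 0 returns none
    },
)
print(resp.json())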
@@ -9,6 +9,7 @@ current_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__

 class Settings(BaseSettings):
     LLM_NAME: str = "docsgpt"
+    MODEL_NAME: Optional[str] = None  # when LLM_NAME is openai, MODEL_NAME can be e.g. gpt-4-turbo-preview or gpt-3.5-turbo
     EMBEDDINGS_NAME: str = "huggingface_sentence-transformers/all-mpnet-base-v2"
     CELERY_BROKER_URL: str = "redis://localhost:6379/0"
     CELERY_RESULT_BACKEND: str = "redis://localhost:6379/1"
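Because Settings extends BaseSettings, the new field can be supplied through the environment. A minimal sketch, assuming the pydantic v1-style BaseSettings import that matches this class (field names map to environment variables of the same name):

import os
from typing import Optional
from pydantic import BaseSettings  # pydantic v1-style; an assumption about the project's pydantic version

class DemoSettings(BaseSettings):
    LLM_NAME: str = "docsgpt"
    MODEL_NAME: Optional[str] = None

os.environ["LLM_NAME"] = "openai"
os.environ["MODEL_NAME"] = "gpt-4-turbo-preview"  # optional override
print(DemoSettings().MODEL_NAME)  # -> gpt-4-turbo-preview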
docs/package-lock.json (generated, 27 lines changed)
@@ -7,7 +7,7 @@
       "license": "MIT",
       "dependencies": {
         "@vercel/analytics": "^1.1.1",
-        "docsgpt": "^0.3.0",
+        "docsgpt": "^0.3.7",
         "next": "^14.0.4",
         "nextra": "^2.13.2",
         "nextra-theme-docs": "^2.13.2",
@@ -422,6 +422,11 @@
         "node": ">=6.9.0"
       }
     },
+    "node_modules/@bpmn-io/snarkdown": {
+      "version": "2.2.0",
+      "resolved": "https://registry.npmjs.org/@bpmn-io/snarkdown/-/snarkdown-2.2.0.tgz",
+      "integrity": "sha512-bVD7FIoaBDZeCJkMRgnBPDeptPlto87wt2qaCjf5t8iLaevDmTPaREd6FpBEGsHlUdHFFZWRk4qAoEC5So2M0Q=="
+    },
     "node_modules/@braintree/sanitize-url": {
       "version": "6.0.4",
       "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz",
@@ -4958,11 +4963,12 @@
       }
     },
     "node_modules/docsgpt": {
-      "version": "0.3.0",
-      "resolved": "https://registry.npmjs.org/docsgpt/-/docsgpt-0.3.0.tgz",
-      "integrity": "sha512-0yT2m+HAlJ+289p278c3Zi07bu2wr6zULOT/bYXtJ/nb59V2Vpfdj2xFB49+lYLSeVe8H+Ij5fFSNZ6RkVRfMQ==",
+      "version": "0.3.7",
+      "resolved": "https://registry.npmjs.org/docsgpt/-/docsgpt-0.3.7.tgz",
+      "integrity": "sha512-VHrXXOEFtjNTcpA8Blf3IzpLlJxOMhm/S5CM4FDjQEkdK9WWhI8yXd/0Rs/FS8oz7YbFrNxO758mlP7OtQtBBw==",
       "dependencies": {
         "@babel/plugin-transform-flow-strip-types": "^7.23.3",
+        "@bpmn-io/snarkdown": "^2.2.0",
         "@parcel/resolver-glob": "^2.12.0",
         "@parcel/transformer-svg-react": "^2.12.0",
         "@parcel/transformer-typescript-tsc": "^2.12.0",
@@ -4972,6 +4978,7 @@
         "@types/react-dom": "^18.2.19",
         "class-variance-authority": "^0.7.0",
         "clsx": "^2.1.0",
+        "dompurify": "^3.0.9",
         "flow-bin": "^0.229.2",
         "i": "^0.3.7",
         "install": "^0.13.0",
@@ -5029,9 +5036,9 @@
       }
     },
     "node_modules/dompurify": {
-      "version": "3.0.7",
-      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.7.tgz",
-      "integrity": "sha512-BViYTZoqP3ak/ULKOc101y+CtHDUvBsVgSxIF1ku0HmK6BRf+C03MC+tArMvOPtVtZp83DDh5puywKDu4sbVjQ=="
+      "version": "3.0.11",
+      "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.11.tgz",
+      "integrity": "sha512-Fan4uMuyB26gFV3ovPoEoQbxRRPfTu3CvImyZnhGq5fsIEO+gEFLp45ISFt+kQBWsK5ulDdT0oV28jS1UrwQLg=="
     },
     "node_modules/domutils": {
       "version": "2.8.0",
@@ -6206,9 +6213,9 @@
       "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w=="
     },
     "node_modules/katex": {
-      "version": "0.16.9",
-      "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.9.tgz",
-      "integrity": "sha512-fsSYjWS0EEOwvy81j3vRA8TEAhQhKiqO+FQaKWp0m39qwOzHVBgAUBIXWj1pB+O2W3fIpNa6Y9KSKCVbfPhyAQ==",
+      "version": "0.16.10",
+      "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.10.tgz",
+      "integrity": "sha512-ZiqaC04tp2O5utMsl2TEZTXxa6WSC4yo0fv5ML++D3QZv/vx2Mct0mTlRx3O+uUkjfuAgOkzsCmq5MiUEsDDdA==",
       "funding": [
         "https://opencollective.com/katex",
         "https://github.com/sponsors/katex"
@@ -8,6 +8,8 @@ import {
   setPrompt,
   selectSourceDocs,
   setSourceDocs,
+  setChunks,
+  selectChunks,
 } from './preferences/preferenceSlice';
 import { Doc } from './preferences/preferenceApi';
 import { useDarkTheme } from './hooks';
@@ -193,10 +195,13 @@ const Setting: React.FC = () => {
 const General: React.FC = () => {
   const themes = ['Light', 'Dark'];
   const languages = ['English'];
+  const chunks = ['0', '2', '4', '6', '8', '10'];
+  const selectedChunks = useSelector(selectChunks);
   const [isDarkTheme, toggleTheme] = useDarkTheme();
   const [selectedTheme, setSelectedTheme] = useState(
     isDarkTheme ? 'Dark' : 'Light',
   );
+  const dispatch = useDispatch();
   const [selectedLanguage, setSelectedLanguage] = useState(languages[0]);
   return (
     <div className="mt-[59px]">
@@ -211,7 +216,7 @@ const General: React.FC = () => {
           }}
         />
       </div>
-      <div>
+      <div className="mb-4">
         <p className="font-bold text-jet dark:text-bright-gray">
           Select Language
         </p>
@@ -221,6 +226,16 @@ const General: React.FC = () => {
           onSelect={setSelectedLanguage}
         />
       </div>
+      <div>
+        <p className="font-bold text-jet dark:text-bright-gray">
+          Chunks processed per query
+        </p>
+        <Dropdown
+          options={chunks}
+          selectedValue={selectedChunks}
+          onSelect={(value: string) => dispatch(setChunks(value))}
+        />
+      </div>
     </div>
   );
 };
@@ -160,7 +160,10 @@ const ConversationBubble = forwardRef<
         >
           {message}
         </ReactMarkdown>
-        {DisableSourceFE || type === 'ERROR' ? null : (
+        {DisableSourceFE ||
+        type === 'ERROR' ||
+        !sources ||
+        sources.length === 0 ? null : (
           <>
             <span className="mt-3 h-px w-full bg-[#DEDEDE]"></span>
             <div className="mt-3 flex w-full flex-row flex-wrap items-center justify-start gap-2">
@@ -11,6 +11,7 @@ export function fetchAnswerApi(
   history: Array<any> = [],
   conversationId: string | null,
   promptId: string | null,
+  chunks: string,
 ): Promise<
   | {
       result: any;
@@ -65,6 +66,7 @@ export function fetchAnswerApi(
       active_docs: docPath,
       conversation_id: conversationId,
       prompt_id: promptId,
+      chunks: chunks,
     }),
     signal,
   })
@@ -95,6 +97,7 @@ export function fetchAnswerSteaming(
   history: Array<any> = [],
   conversationId: string | null,
   promptId: string | null,
+  chunks: string,
   onEvent: (event: MessageEvent) => void,
 ): Promise<Answer> {
   let namePath = selectedDocs.name;
@@ -130,6 +133,7 @@ export function fetchAnswerSteaming(
     history: JSON.stringify(history),
     conversation_id: conversationId,
     prompt_id: promptId,
+    chunks: chunks,
   };
   fetch(apiHost + '/stream', {
     method: 'POST',
@@ -192,6 +196,7 @@ export function searchEndpoint(
   selectedDocs: Doc,
   conversation_id: string | null,
   history: Array<any> = [],
+  chunks: string,
 ) {
   /*
   "active_docs": "default",
@@ -223,6 +228,7 @@ export function searchEndpoint(
     active_docs: docPath,
     conversation_id,
     history,
+    chunks: chunks,
   };
   return fetch(`${apiHost}/api/search`, {
     method: 'POST',
@@ -28,6 +28,7 @@ export const fetchAnswer = createAsyncThunk<Answer, { question: string }>(
       state.conversation.queries,
       state.conversation.conversationId,
       state.preference.prompt.id,
+      state.preference.chunks,

       (event) => {
         const data = JSON.parse(event.data);
@@ -51,6 +52,7 @@ export const fetchAnswer = createAsyncThunk<Answer, { question: string }>(
         state.preference.selectedDocs!,
         state.conversation.conversationId,
         state.conversation.queries,
+        state.preference.chunks,
       ).then((sources) => {
         //dispatch streaming sources
         dispatch(
@@ -86,6 +88,7 @@ export const fetchAnswer = createAsyncThunk<Answer, { question: string }>(
       state.conversation.queries,
       state.conversation.conversationId,
       state.preference.prompt.id,
+      state.preference.chunks,
     );
     if (answer) {
       let sourcesPrepped = [];
@@ -10,6 +10,7 @@ interface Preference {
   apiKey: string;
   prompt: { name: string; id: string; type: string };
   selectedDocs: Doc | null;
+  chunks: string;
   sourceDocs: Doc[] | null;
   conversations: { name: string; id: string }[] | null;
 }
@@ -17,6 +18,7 @@ interface Preference {
 const initialState: Preference = {
   apiKey: 'xxx',
   prompt: { name: 'default', id: 'default', type: 'public' },
+  chunks: '2',
   selectedDocs: {
     name: 'default',
     language: 'default',
@@ -51,6 +53,9 @@ export const prefSlice = createSlice({
     setPrompt: (state, action) => {
       state.prompt = action.payload;
     },
+    setChunks: (state, action) => {
+      state.chunks = action.payload;
+    },
   },
 });

@@ -60,6 +65,7 @@ export const {
   setSourceDocs,
   setConversations,
   setPrompt,
+  setChunks,
 } = prefSlice.actions;
 export default prefSlice.reducer;

@@ -91,6 +97,16 @@ prefListenerMiddleware.startListening({
   },
 });

+prefListenerMiddleware.startListening({
+  matcher: isAnyOf(setChunks),
+  effect: (action, listenerApi) => {
+    localStorage.setItem(
+      'DocsGPTChunks',
+      JSON.stringify((listenerApi.getState() as RootState).preference.chunks),
+    );
+  },
+});
+
 export const selectApiKey = (state: RootState) => state.preference.apiKey;
 export const selectApiKeyStatus = (state: RootState) =>
   !!state.preference.apiKey;
@@ -105,3 +121,4 @@ export const selectConversations = (state: RootState) =>
 export const selectConversationId = (state: RootState) =>
   state.conversation.conversationId;
 export const selectPrompt = (state: RootState) => state.preference.prompt;
+export const selectChunks = (state: RootState) => state.preference.chunks;
@@ -8,11 +8,13 @@ import {
 const key = localStorage.getItem('DocsGPTApiKey');
 const prompt = localStorage.getItem('DocsGPTPrompt');
 const doc = localStorage.getItem('DocsGPTRecentDocs');
+const chunks = localStorage.getItem('DocsGPTChunks');

 const store = configureStore({
   preloadedState: {
     preference: {
       apiKey: key ?? '',
+      chunks: JSON.parse(chunks ?? '2').toString(),
       selectedDocs: doc !== null ? JSON.parse(doc) : null,
       prompt:
         prompt !== null