mirror of
https://github.com/arc53/DocsGPT.git
synced 2025-11-29 08:33:20 +00:00
Fixed request length bug, changed to a less-used port
This commit is contained in:
@@ -3,7 +3,7 @@ EMBEDDINGS_KEY=your_api_key
|
||||
CELERY_BROKER_URL=redis://localhost:6379/0
|
||||
CELERY_RESULT_BACKEND=redis://localhost:6379/1
|
||||
MONGO_URI=mongodb://localhost:27017/docsgpt
|
||||
API_URL=http://localhost:5001
|
||||
API_URL=http://localhost:7091
|
||||
|
||||
#For OPENAI on Azure
|
||||
OPENAI_API_BASE=
|
||||
|
||||
@@ -18,6 +18,6 @@ COPY . /app
|
||||
ENV FLASK_APP=app.py
|
||||
ENV FLASK_DEBUG=true
|
||||
|
||||
EXPOSE 5001
|
||||
EXPOSE 7091
|
||||
|
||||
CMD ["gunicorn", "-w", "2", "--timeout", "120", "--bind", "0.0.0.0:5001", "wsgi:app"]
|
||||
CMD ["gunicorn", "-w", "2", "--timeout", "120", "--bind", "0.0.0.0:7091", "wsgi:app"]
|
||||
|
||||
@@ -43,6 +43,7 @@ from worker import ingest_worker
|
||||
# os.environ["LANGCHAIN_HANDLER"] = "langchain"
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
gpt_model = 'gpt-3.5-turbo' # gpt-4
|
||||
|
||||
if settings.LLM_NAME == "manifest":
|
||||
from manifest import Manifest
|
||||
@@ -195,7 +196,7 @@ def complete_stream(question, docsearch, chat_history, api_key):
|
||||
messages_combine.append({"role": "user", "content": i["prompt"]})
|
||||
messages_combine.append({"role": "system", "content": i["response"]})
|
||||
messages_combine.append({"role": "user", "content": question})
|
||||
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", engine=settings.AZURE_DEPLOYMENT_NAME,
|
||||
completion = openai.ChatCompletion.create(model=gpt_model, engine=settings.AZURE_DEPLOYMENT_NAME,
|
||||
messages=messages_combine, stream=True, max_tokens=500, temperature=0)
|
||||
|
||||
for line in completion:
|
||||
@@ -208,26 +209,27 @@ def complete_stream(question, docsearch, chat_history, api_key):
|
||||
yield f"data: {data}\n\n"
|
||||
|
||||
|
||||
@app.route("/stream", methods=["POST", "GET"])
|
||||
@app.route("/stream", methods=["POST"])
|
||||
def stream():
|
||||
data = request.get_json()
|
||||
# get parameter from url question
|
||||
question = request.args.get("question")
|
||||
history = request.args.get("history")
|
||||
question = data["question"]
|
||||
history = data["history"]
|
||||
# history to json object from string
|
||||
history = json.loads(history)
|
||||
|
||||
# check if active_docs is set
|
||||
|
||||
if not api_key_set:
|
||||
api_key = request.args.get("api_key")
|
||||
api_key = data["api_key"]
|
||||
else:
|
||||
api_key = settings.API_KEY
|
||||
if not embeddings_key_set:
|
||||
embeddings_key = request.args.get("embeddings_key")
|
||||
embeddings_key = data["embeddings_key"]
|
||||
else:
|
||||
embeddings_key = settings.EMBEDDINGS_KEY
|
||||
if "active_docs" in request.args:
|
||||
vectorstore = get_vectorstore({"active_docs": request.args.get("active_docs")})
|
||||
if "active_docs" in data:
|
||||
vectorstore = get_vectorstore({"active_docs": data["active_docs"]})
|
||||
else:
|
||||
vectorstore = ""
|
||||
docsearch = get_docsearch(vectorstore, embeddings_key)
|
||||
@@ -279,7 +281,7 @@ def api_answer():
|
||||
)
|
||||
else:
|
||||
logger.debug("plain OpenAI")
|
||||
llm = ChatOpenAI(openai_api_key=api_key) # optional parameter: model_name="gpt-4"
|
||||
llm = ChatOpenAI(openai_api_key=api_key, model_name=gpt_model) # optional parameter: model_name="gpt-4"
|
||||
messages_combine = [SystemMessagePromptTemplate.from_template(chat_combine_template)]
|
||||
if history:
|
||||
tokens_current_history = 0
|
||||
@@ -597,4 +599,4 @@ def after_request(response):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(debug=True, port=5001)
|
||||
app.run(debug=True, port=7091)
|
||||
|
||||
@@ -12,7 +12,7 @@ class Settings(BaseSettings):
|
||||
MODEL_PATH: str = "./models/gpt4all-model.bin"
|
||||
TOKENS_MAX_HISTORY: int = 150
|
||||
|
||||
API_URL: str = "http://localhost:5001" # backend url for celery worker
|
||||
API_URL: str = "http://localhost:7091" # backend url for celery worker
|
||||
|
||||
API_KEY: str = None # LLM api key
|
||||
EMBEDDINGS_KEY: str = None # api key for embeddings (if using openai, just copy API_KEY
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from app import app
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(debug=True, port=5001)
|
||||
app.run(debug=True, port=7091)
|
||||
|
||||
@@ -4,7 +4,7 @@ services:
|
||||
frontend:
|
||||
build: ./frontend
|
||||
environment:
|
||||
- VITE_API_HOST=http://localhost:5001
|
||||
- VITE_API_HOST=http://localhost:7091
|
||||
- VITE_API_STREAMING=$VITE_API_STREAMING
|
||||
ports:
|
||||
- "5173:5173"
|
||||
@@ -25,7 +25,7 @@ services:
|
||||
- AZURE_DEPLOYMENT_NAME=$AZURE_DEPLOYMENT_NAME
|
||||
- AZURE_EMBEDDINGS_DEPLOYMENT_NAME=$AZURE_EMBEDDINGS_DEPLOYMENT_NAME
|
||||
ports:
|
||||
- "5001:5001"
|
||||
- "7091:7091"
|
||||
volumes:
|
||||
- ./application/indexes:/app/indexes
|
||||
- ./application/inputs:/app/inputs
|
||||
@@ -43,7 +43,7 @@ services:
|
||||
- CELERY_BROKER_URL=redis://redis:6379/0
|
||||
- CELERY_RESULT_BACKEND=redis://redis:6379/1
|
||||
- MONGO_URI=mongodb://mongo:27017/docsgpt
|
||||
- API_URL=http://backend:5001
|
||||
- API_URL=http://backend:7091
|
||||
- OPENAI_API_KEY=$OPENAI_API_KEY
|
||||
- OPENAI_API_BASE=$OPENAI_API_BASE
|
||||
- OPENAI_API_VERSION=$OPENAI_API_VERSION
|
||||
|
||||
@@ -4,7 +4,7 @@ services:
|
||||
frontend:
|
||||
build: ./frontend
|
||||
environment:
|
||||
- VITE_API_HOST=http://localhost:5001
|
||||
- VITE_API_HOST=http://localhost:7091
|
||||
- VITE_API_STREAMING=$VITE_API_STREAMING
|
||||
ports:
|
||||
- "5173:5173"
|
||||
@@ -20,14 +20,14 @@ services:
|
||||
- CELERY_RESULT_BACKEND=redis://redis:6379/1
|
||||
- MONGO_URI=mongodb://mongo:27017/docsgpt
|
||||
ports:
|
||||
- "5001:5001"
|
||||
- "7091:7091"
|
||||
volumes:
|
||||
- ./application/indexes:/app/indexes
|
||||
- ./application/inputs:/app/inputs
|
||||
- ./application/vectors:/app/vectors
|
||||
depends_on:
|
||||
- redis
|
||||
- mongo
|
||||
- redis
|
||||
- mongo
|
||||
|
||||
worker:
|
||||
build: ./application
|
||||
@@ -38,10 +38,10 @@ services:
|
||||
- CELERY_BROKER_URL=redis://redis:6379/0
|
||||
- CELERY_RESULT_BACKEND=redis://redis:6379/1
|
||||
- MONGO_URI=mongodb://mongo:27017/docsgpt
|
||||
- API_URL=http://backend:5001
|
||||
- API_URL=http://backend:7091
|
||||
depends_on:
|
||||
- redis
|
||||
- mongo
|
||||
- redis
|
||||
- mongo
|
||||
|
||||
redis:
|
||||
image: redis:6-alpine
|
||||
@@ -55,7 +55,5 @@ services:
|
||||
volumes:
|
||||
- mongodb_data_container:/data/db
|
||||
|
||||
|
||||
|
||||
volumes:
|
||||
mongodb_data_container:
|
||||
mongodb_data_container:
|
||||
|
||||
@@ -21,7 +21,7 @@ document.getElementById("message-form").addEventListener("submit", function(even
|
||||
}
|
||||
|
||||
// send post request to server http://127.0.0.1:5000/ with message in json body
|
||||
fetch('http://127.0.0.1:5001/api/answer', {
|
||||
fetch('http://127.0.0.1:7091/api/answer', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
|
||||
@@ -11,7 +11,7 @@ dotenv.load_dotenv()
|
||||
# Replace 'YOUR_BOT_TOKEN' with your bot's token
|
||||
TOKEN = os.getenv("DISCORD_TOKEN")
|
||||
PREFIX = '@DocsGPT'
|
||||
BASE_API_URL = 'http://localhost:5001'
|
||||
BASE_API_URL = 'http://localhost:7091'
|
||||
|
||||
intents = discord.Intents.default()
|
||||
intents.message_content = True
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
const API_ENDPOINT = "http://localhost:5001/api/answer"; // Replace with your API endpoint
|
||||
const API_ENDPOINT = "http://localhost:7091/api/answer"; // Replace with your API endpoint
|
||||
|
||||
const widgetInitMessage = document.getElementById("docsgpt-init-message");
|
||||
const widgetAnswerMessage = document.getElementById("docsgpt-answer");
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
# Please put appropriate value
|
||||
VITE_API_HOST=http://localhost:5001
|
||||
VITE_API_HOST=http://localhost:7091
|
||||
@@ -91,22 +91,64 @@ export function fetchAnswerSteaming(
|
||||
});
|
||||
|
||||
return new Promise<Answer>((resolve, reject) => {
|
||||
const url = new URL(apiHost + '/stream');
|
||||
url.searchParams.append('question', question);
|
||||
url.searchParams.append('api_key', apiKey);
|
||||
url.searchParams.append('embeddings_key', apiKey);
|
||||
url.searchParams.append('active_docs', docPath);
|
||||
url.searchParams.append('history', JSON.stringify(history));
|
||||
|
||||
const eventSource = new EventSource(url.href);
|
||||
|
||||
eventSource.onmessage = onEvent;
|
||||
|
||||
eventSource.onerror = (error) => {
|
||||
console.log('Connection failed.');
|
||||
eventSource.close();
|
||||
const body = {
|
||||
question: question,
|
||||
api_key: apiKey,
|
||||
embeddings_key: apiKey,
|
||||
active_docs: docPath,
|
||||
history: JSON.stringify(history),
|
||||
};
|
||||
});
|
||||
|
||||
fetch(apiHost + '/stream', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify(body),
|
||||
})
|
||||
.then((response) => {
|
||||
if (!response.body) throw Error("No response body");
|
||||
|
||||
const reader = response.body.getReader();
|
||||
const decoder = new TextDecoder('utf-8');
|
||||
var counterrr = 0
|
||||
const processStream = ({ done, value }: ReadableStreamReadResult<Uint8Array>) => {
|
||||
if (done) {
|
||||
console.log(counterrr);
|
||||
return;
|
||||
}
|
||||
|
||||
counterrr += 1;
|
||||
|
||||
const chunk = decoder.decode(value);
|
||||
|
||||
const lines = chunk.split("\n");
|
||||
|
||||
for (let line of lines) {
|
||||
if (line.trim() == "") {
|
||||
continue;
|
||||
}
|
||||
if (line.startsWith('data:')) {
|
||||
line = line.substring(5);
|
||||
}
|
||||
|
||||
const messageEvent: MessageEvent = new MessageEvent("message", {
|
||||
data: line,
|
||||
});
|
||||
|
||||
onEvent(messageEvent); // handle each message
|
||||
}
|
||||
|
||||
reader.read().then(processStream).catch(reject);
|
||||
}
|
||||
|
||||
reader.read().then(processStream).catch(reject);
|
||||
})
|
||||
.catch((error) => {
|
||||
console.error('Connection failed:', error);
|
||||
reject(error);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
export function sendFeedback(
|
||||
|
||||
6
setup.sh
6
setup.sh
@@ -15,7 +15,7 @@ docker run -d --name redis -p 6379:6379 redis:6-alpine
|
||||
docker run -d --name mongo -p 27017:27017 -v mongodb_data_container:/data/db mongo:6
|
||||
|
||||
# Run backend and worker services
|
||||
docker run -d --name backend -p 5001:5001 \
|
||||
docker run -d --name backend -p 7091:7091 \
|
||||
--link redis:redis --link mongo:mongo \
|
||||
-v $(pwd)/application/indexes:/app/indexes \
|
||||
-v $(pwd)/application/inputs:/app/inputs \
|
||||
@@ -34,12 +34,12 @@ docker run -d --name worker \
|
||||
-e CELERY_BROKER_URL=redis://redis:6379/0 \
|
||||
-e CELERY_RESULT_BACKEND=redis://redis:6379/1 \
|
||||
-e MONGO_URI=mongodb://mongo:27017/docsgpt \
|
||||
-e API_URL=http://backend:5001 \
|
||||
-e API_URL=http://backend:7091 \
|
||||
backend_image \
|
||||
celery -A app.celery worker -l INFO
|
||||
|
||||
# Run frontend service
|
||||
docker run -d --name frontend -p 5173:5173 \
|
||||
-e VITE_API_HOST=http://localhost:5001 \
|
||||
-e VITE_API_HOST=http://localhost:7091 \
|
||||
frontend_image
|
||||
|
||||
|
||||
Reference in New Issue
Block a user