From 9526ed02584b9946e4c3b9e33dc29e4268a68232 Mon Sep 17 00:00:00 2001
From: Alex
Date: Mon, 2 Sep 2024 19:46:25 +0100
Subject: [PATCH] feat: added easy way to proxy

---
 application/core/settings.py |  1 +
 application/llm/openai.py    | 28 +++++++++++-----------------
 2 files changed, 12 insertions(+), 17 deletions(-)

diff --git a/application/core/settings.py b/application/core/settings.py
index 6ae5475c..bbd62fe4 100644
--- a/application/core/settings.py
+++ b/application/core/settings.py
@@ -29,6 +29,7 @@ class Settings(BaseSettings):
     OPENAI_API_VERSION: Optional[str] = None  # azure openai api version
     AZURE_DEPLOYMENT_NAME: Optional[str] = None  # azure deployment name for answering
     AZURE_EMBEDDINGS_DEPLOYMENT_NAME: Optional[str] = None  # azure deployment name for embeddings
+    OPENAI_BASE_URL: Optional[str] = None  # openai base url for openai-compatible models
 
     # elasticsearch
     ELASTIC_CLOUD_ID: Optional[str] = None  # cloud id for elasticsearch
diff --git a/application/llm/openai.py b/application/llm/openai.py
index b1574dd1..73a0c3d1 100644
--- a/application/llm/openai.py
+++ b/application/llm/openai.py
@@ -1,26 +1,25 @@
 from application.llm.base import BaseLLM
 from application.core.settings import settings
+import logging
+
 
 
 class OpenAILLM(BaseLLM):
 
     def __init__(self, api_key=None, user_api_key=None, *args, **kwargs):
-        global openai
         from openai import OpenAI
 
         super().__init__(*args, **kwargs)
-        self.client = OpenAI(
-            api_key=api_key,
-        )
+        if settings.OPENAI_BASE_URL:
+            self.client = OpenAI(
+                api_key=api_key,
+                base_url=settings.OPENAI_BASE_URL
+            )
+        else:
+            self.client = OpenAI(api_key=api_key)
         self.api_key = api_key
         self.user_api_key = user_api_key
 
-    def _get_openai(self):
-        # Import openai when needed
-        import openai
-
-        return openai
-
     def _raw_gen(
         self,
         baseself,
@@ -29,7 +28,7 @@
         stream=False,
         engine=settings.AZURE_DEPLOYMENT_NAME,
         **kwargs
-    ):
+    ):
         response = self.client.chat.completions.create(
             model=model, messages=messages, stream=stream, **kwargs
         )
@@ -44,7 +43,7 @@
         stream=True,
         engine=settings.AZURE_DEPLOYMENT_NAME,
         **kwargs
-    ):
+    ):
         response = self.client.chat.completions.create(
             model=model, messages=messages, stream=stream, **kwargs
         )
@@ -73,8 +72,3 @@ class AzureOpenAILLM(OpenAILLM):
             api_base=settings.OPENAI_API_BASE,
             deployment_name=settings.AZURE_DEPLOYMENT_NAME,
         )
-
-    def _get_openai(self):
-        openai = super()._get_openai()
-
-        return openai
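
Usage note (not part of the patch): a minimal sketch of exercising the new setting. It assumes Settings reads OPENAI_BASE_URL from the environment when application.core.settings is first imported (the usual pydantic BaseSettings behaviour), and that the target is any OpenAI-compatible server; the local URL and API key below are placeholders.

    # Sketch only: route OpenAILLM through an OpenAI-compatible proxy.
    # Placeholder URL/key; the env var must be set before the settings
    # module is imported, since BaseSettings reads it at instantiation.
    import os

    os.environ["OPENAI_BASE_URL"] = "http://localhost:8000/v1"

    from application.llm.openai import OpenAILLM

    llm = OpenAILLM(api_key="sk-placeholder")
    print(llm.client.base_url)  # -> http://localhost:8000/v1

Because base_url is only passed when OPENAI_BASE_URL is set, deployments that leave the variable unset keep the default api.openai.com endpoint.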