diff --git a/application/llm/openai.py b/application/llm/openai.py
index c918768d..e363130b 100644
--- a/application/llm/openai.py
+++ b/application/llm/openai.py
@@ -13,10 +13,11 @@ class OpenAILLM(BaseLLM):
         from openai import OpenAI

         super().__init__(*args, **kwargs)
-        if settings.OPENAI_BASE_URL:
+        if isinstance(settings.OPENAI_BASE_URL, str) and settings.OPENAI_BASE_URL.strip():
             self.client = OpenAI(api_key=api_key, base_url=settings.OPENAI_BASE_URL)
         else:
-            self.client = OpenAI(api_key=api_key)
+            DEFAULT_OPENAI_API_BASE = "https://api.openai.com/v1"
+            self.client = OpenAI(api_key=api_key, base_url=DEFAULT_OPENAI_API_BASE)
         self.api_key = api_key
         self.user_api_key = user_api_key
         self.storage = StorageCreator.get_storage()
diff --git a/setup.sh b/setup.sh
index 479def9b..5cf013fc 100755
--- a/setup.sh
+++ b/setup.sh
@@ -240,7 +240,7 @@ serve_local_ollama() {
    echo "LLM_NAME=openai" >> .env
    echo "MODEL_NAME=$model_name" >> .env
    echo "VITE_API_STREAMING=true" >> .env
-    echo "OPENAI_BASE_URL=http://host.docker.internal:11434/v1" >> .env
+    echo "OPENAI_BASE_URL=http://ollama:11434/v1" >> .env
    echo "EMBEDDINGS_NAME=huggingface_sentence-transformers/all-mpnet-base-v2" >> .env
    echo -e "${GREEN}.env file configured for Ollama ($(echo "$docker_compose_file_suffix" | tr '[:lower:]' '[:upper:]')${NC}${GREEN}).${NC}"
    echo -e "${YELLOW}Note: MODEL_NAME is set to '${BOLD}$model_name${NC}${YELLOW}'. You can change it later in the .env file.${NC}"