Adding OpenWebUI to Local AI Starter Kit

Cole Medin
2024-10-22 13:10:26 -05:00
parent 0fa98dbdee
commit 90dad49ff9
6 changed files with 1036 additions and 770 deletions

View File

@@ -8,12 +8,12 @@
 "type": "@n8n/n8n-nodes-langchain.memoryPostgresChat",
 "typeVersion": 1.1,
 "position": [
-900,
+1040,
 560
 ],
 "credentials": {
 "postgres": {
-"id": "BAhZ6uu3zrQ3JmQm",
+"id": "iN7fO2CgatVwq73z",
 "name": "Postgres account"
 }
 }
@@ -28,12 +28,12 @@
 "type": "@n8n/n8n-nodes-langchain.lmChatOllama",
 "typeVersion": 1,
 "position": [
-760,
+920,
 560
 ],
 "credentials": {
 "ollamaApi": {
-"id": "tEBwcGKXQiQrbKjH",
+"id": "eOwAotC7AUgJlvHM",
 "name": "Ollama account"
 }
 }
@@ -48,12 +48,12 @@
 "type": "@n8n/n8n-nodes-langchain.lmOllama",
 "typeVersion": 1,
 "position": [
-1660,
+1960,
 500
 ],
 "credentials": {
 "ollamaApi": {
-"id": "tEBwcGKXQiQrbKjH",
+"id": "eOwAotC7AUgJlvHM",
 "name": "Ollama account"
 }
 }
@@ -68,7 +68,7 @@
 "type": "@n8n/n8n-nodes-langchain.toolVectorStore",
 "typeVersion": 1,
 "position": [
-1440,
+1740,
 340
 ]
 },
@@ -81,12 +81,12 @@
 "type": "@n8n/n8n-nodes-langchain.embeddingsOllama",
 "typeVersion": 1,
 "position": [
-1540,
+1840,
 600
 ],
 "credentials": {
 "ollamaApi": {
-"id": "tEBwcGKXQiQrbKjH",
+"id": "eOwAotC7AUgJlvHM",
 "name": "Ollama account"
 }
 }
@@ -121,7 +121,7 @@
 ],
 "credentials": {
 "googleDriveOAuth2Api": {
-"id": "hKn2avoGUt7FoAyY",
+"id": "vzcL2pD7uQzqDpdK",
 "name": "Google Drive account"
 }
 }
@@ -156,7 +156,7 @@
 ],
 "credentials": {
 "googleDriveOAuth2Api": {
-"id": "hKn2avoGUt7FoAyY",
+"id": "vzcL2pD7uQzqDpdK",
 "name": "Google Drive account"
 }
 }
@@ -217,7 +217,7 @@
 "executeOnce": true,
 "credentials": {
 "googleDriveOAuth2Api": {
-"id": "hKn2avoGUt7FoAyY",
+"id": "vzcL2pD7uQzqDpdK",
 "name": "Google Drive account"
 }
 }
@@ -291,7 +291,7 @@
 ],
 "credentials": {
 "ollamaApi": {
-"id": "tEBwcGKXQiQrbKjH",
+"id": "eOwAotC7AUgJlvHM",
 "name": "Ollama account"
 }
 }
@@ -300,7 +300,7 @@
 "parameters": {
 "content": "## Local RAG AI Agent with Chat Interface",
 "height": 527.3027193303974,
-"width": 670.6821106717343
+"width": 969.0343804425795
 },
 "id": "a18773ae-1eb3-46b8-91cf-4184c66cf14f",
 "name": "Sticky Note2",
@@ -323,7 +323,7 @@
 "type": "n8n-nodes-base.stickyNote",
 "typeVersion": 1,
 "position": [
-1240,
+1540,
 220
 ]
 },
@@ -343,19 +343,6 @@
 760
 ]
 },
-{
-"parameters": {
-"options": {}
-},
-"id": "c9dfe906-178b-4375-8bda-f9290f35f222",
-"name": "AI Agent",
-"type": "@n8n/n8n-nodes-langchain.agent",
-"typeVersion": 1.6,
-"position": [
-840,
-340
-]
-},
 {
 "parameters": {
 "options": {}
@@ -385,12 +372,12 @@
 "type": "@n8n/n8n-nodes-langchain.vectorStoreQdrant",
 "typeVersion": 1,
 "position": [
-1260,
+1560,
 480
 ],
 "credentials": {
 "qdrantApi": {
-"id": "4sxNvYWRIrm2rKl9",
+"id": "VOnegFP8eijBkbNO",
 "name": "QdrantApi account"
 }
 }
@@ -399,7 +386,7 @@
 "parameters": {
 "code": {
 "execute": {
-"code": "const { QdrantVectorStore } = require(\"@langchain/qdrant\");\nconst { OllamaEmbeddings } = require(\"@langchain/community/embeddings/ollama\");\n\nconst embeddings = new OllamaEmbeddings({\n model: \"nomic-embed-text\",\n baseUrl: \"http://host.docker.internal:11434\"\n});\n\nconst vectorStore = await QdrantVectorStore.fromExistingCollection(\n embeddings,\n {\n url: \"http://host.docker.internal:6333\",\n collectionName: \"documents\",\n }\n);\n\nconst fileIdToDelete = this.getInputData()[0].json.file_id;\n\nconst filter = {\n must: [\n {\n key: \"metadata.file_id\",\n match: {\n value: fileIdToDelete,\n },\n },\n ],\n }\n\n// const results = await vectorStore.similaritySearch(\"this\", 10, filter);\n// const idsToDelete = results.map((doc) => doc.id);\n\n// NOT IMPLEMENTED!\n// await vectorStore.delete({ ids: idsToDelete });\n\nvectorStore.client.delete(\"documents\", {\n filter\n});\n\nreturn [ {json: { file_id: fileIdToDelete } } ];\n"
+"code": "const { QdrantVectorStore } = require(\"@langchain/qdrant\");\nconst { OllamaEmbeddings } = require(\"@langchain/community/embeddings/ollama\");\n\nconst embeddings = new OllamaEmbeddings({\n model: \"nomic-embed-text\",\n baseUrl: \"http://ollama:11434\"\n});\n\nconst vectorStore = await QdrantVectorStore.fromExistingCollection(\n embeddings,\n {\n url: \"http://qdrant:6333\",\n collectionName: \"documents\",\n }\n);\n\nconst fileIdToDelete = this.getInputData()[0].json.file_id;\n\nconst filter = {\n must: [\n {\n key: \"metadata.file_id\",\n match: {\n value: fileIdToDelete,\n },\n },\n ],\n }\n\n// const results = await vectorStore.similaritySearch(\"this\", 10, filter);\n// const idsToDelete = results.map((doc) => doc.id);\n\n// NOT IMPLEMENTED!\n// await vectorStore.delete({ ids: idsToDelete });\n\nvectorStore.client.delete(\"documents\", {\n filter\n});\n\nreturn [ {json: { file_id: fileIdToDelete } } ];\n"
 }
 },
 "inputs": {
@@ -449,10 +436,82 @@
 ],
 "credentials": {
 "qdrantApi": {
-"id": "4sxNvYWRIrm2rKl9",
+"id": "VOnegFP8eijBkbNO",
 "name": "QdrantApi account"
 }
 }
 },
+{
+"parameters": {
+"options": {}
+},
+"id": "e537544a-37d5-4b00-b5ff-bc71f041f4bb",
+"name": "Respond to Webhook",
+"type": "n8n-nodes-base.respondToWebhook",
+"typeVersion": 1.1,
+"position": [
+1340,
+340
+]
+},
+{
+"parameters": {
+"httpMethod": "POST",
+"path": "invoke_n8n_agent",
+"responseMode": "responseNode",
+"options": {}
+},
+"id": "2b8cd01f-30a8-4aab-b0dd-56d2b658f059",
+"name": "Webhook",
+"type": "n8n-nodes-base.webhook",
+"typeVersion": 2,
+"position": [
+620,
+520
+],
+"webhookId": "4a839da9-b8a2-45f8-bcaf-c484f9a5912d"
+},
+{
+"parameters": {
+"options": {}
+},
+"id": "c9dfe906-178b-4375-8bda-f9290f35f222",
+"name": "AI Agent",
+"type": "@n8n/n8n-nodes-langchain.agent",
+"typeVersion": 1.6,
+"position": [
+1000,
+340
+]
+},
+{
+"parameters": {
+"assignments": {
+"assignments": [
+{
+"id": "75ebfdef-c8e2-4c3e-b716-1479d0cc2a73",
+"name": "chatInput",
+"value": "={{ $json?.chatInput || $json.body.chatInput }}",
+"type": "string"
+},
+{
+"id": "59b7a20f-0626-4861-93e2-015d430c266e",
+"name": "sessionId",
+"value": "={{ $json?.sessionId || $json.body.sessionId}}",
+"type": "string"
+}
+]
+},
+"options": {}
+},
+"id": "8f974a15-aa2f-4525-8278-ad58ad296076",
+"name": "Edit Fields",
+"type": "n8n-nodes-base.set",
+"typeVersion": 3.4,
+"position": [
+820,
+340
+]
+}
 ],
 "pinData": {},
@@ -490,17 +549,6 @@
 ]
 ]
 },
-"Vector Store Tool": {
-"ai_tool": [
-[
-{
-"node": "AI Agent",
-"type": "ai_tool",
-"index": 0
-}
-]
-]
-},
 "Embeddings Ollama": {
 "ai_embedding": [
 [
@@ -604,7 +652,7 @@
 "main": [
 [
 {
-"node": "AI Agent",
+"node": "Edit Fields",
 "type": "main",
 "index": 0
 }
@@ -632,16 +680,60 @@
 }
 ]
 ]
 },
+"Webhook": {
+"main": [
+[
+{
+"node": "Edit Fields",
+"type": "main",
+"index": 0
+}
+]
+]
+},
+"AI Agent": {
+"main": [
+[
+{
+"node": "Respond to Webhook",
+"type": "main",
+"index": 0
+}
+]
+]
+},
+"Edit Fields": {
+"main": [
+[
+{
+"node": "AI Agent",
+"type": "main",
+"index": 0
+}
+]
+]
+},
+"Vector Store Tool": {
+"ai_tool": [
+[
+{
+"node": "AI Agent",
+"type": "ai_tool",
+"index": 0
+}
+]
+]
+}
 },
-"active": false,
+"active": true,
 "settings": {
 "executionOrder": "v1"
 },
-"versionId": "24e894e6-ec51-4ed0-a3cb-2de36c5d887a",
+"versionId": "19f9691c-4682-4704-81f2-33fdec9d0be2",
 "meta": {
 "templateCredsSetupCompleted": true,
-"instanceId": "eb51798708ee5e41b5338814f84985fff5b79dd7e443bf0d1f51648e6d6e003b"
+"instanceId": "f722e3e1e81e942a38faa434ad0aee8699371bbff9f883b9d5c59a7c726605af"
 },
 "id": "vTN9y2dLXqTiDfPT",
 "tags": []

View File: README.md

@@ -2,15 +2,17 @@
 **Self-hosted AI Starter Kit** is an open, docker compose template that
 quickly bootstraps a fully featured Local AI and Low Code development
-environment.
+environment including Open WebUI for an interface to chat with your N8N agents.

-This is Cole's version with a couple of improvements! Also,
-the local RAG AI Agent workflow from the video will be automatically in your
+This is Cole's version with a couple of improvements and the addition of Open WebUI!
+Also, the local RAG AI Agent workflow from the video will be automatically in your
 n8n instance if you use this setup instead of the base one provided by n8n!

+Download my N8N + OpenWebUI integration [directly on the Open WebUI site.](https://openwebui.com/f/coleam/n8n_pipe/) (more instructions below)

 ![n8n.io - Screenshot](https://raw.githubusercontent.com/n8n-io/self-hosted-ai-starter-kit/main/assets/n8n-demo.gif)

-Curated by <https://github.com/n8n-io>, it combines the self-hosted n8n
+Curated by <https://github.com/n8n-io> and <https://github.com/coleam00>, it combines the self-hosted n8n
 platform with a curated list of compatible AI products and components to
 quickly get started with building self-hosted AI workflows.
@@ -25,6 +27,9 @@ integrations and advanced AI components
 ✅ [**Ollama**](https://ollama.com/) - Cross-platform LLM platform to install
 and run the latest local LLMs

+✅ [**Open WebUI**](https://openwebui.com/) - ChatGPT-like interface to
+privately interact with your local models and N8N agents

 ✅ [**Qdrant**](https://qdrant.tech/) - Open-source, high performance vector
 store with a comprehensive API
@@ -46,8 +51,8 @@ Engineering world, handles large amounts of data safely.
 ### For Nvidia GPU users

 ```
-git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
-cd self-hosted-ai-starter-kit
+git clone https://github.com/coleam00/ai-agents-masterclass.git
+cd ai-agents-masterclass/local-ai-packaged
 docker compose --profile gpu-nvidia up
 ```
@@ -70,8 +75,8 @@ If you want to run Ollama on your mac, check the
 for installation instructions, and run the starter kit as follows:

 ```
-git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
-cd self-hosted-ai-starter-kit
+git clone https://github.com/coleam00/ai-agents-masterclass.git
+cd ai-agents-masterclass/local-ai-packaged
 docker compose up
 ```
@@ -81,8 +86,8 @@ by using `http://host.docker.internal:11434/` as the host.
 ### For everyone else

 ```
-git clone https://github.com/n8n-io/self-hosted-ai-starter-kit.git
-cd self-hosted-ai-starter-kit
+git clone https://github.com/coleam00/ai-agents-masterclass.git
+cd ai-agents-masterclass/local-ai-packaged
 docker compose --profile cpu up
 ```
@@ -94,15 +99,40 @@ install. After completing the installation steps above, follow the steps below
 to get started.

 1. Open <http://localhost:5678/> in your browser to set up n8n. You'll only
-have to do this once.
+have to do this once. You are NOT creating an account with n8n in the setup here,
+it is only a local account for your instance!
 2. Open the included workflow:
-<http://localhost:5678/workflow/srOnR8PAY3u4RSwb>
-3. Select **Test workflow** to start running the workflow.
-4. If this is the first time you're running the workflow, you may need to wait
+<http://localhost:5678/workflow/vTN9y2dLXqTiDfPT>
+3. Create credentials for every service:
+    Ollama URL: http://ollama:11434
+    Postgres: use the DB, username, and password from .env. The host is postgres.
+    Qdrant URL: http://qdrant:6333 (the API key can be any value since this is running locally)
+    Google Drive: follow [this guide from n8n](https://docs.n8n.io/integrations/builtin/credentials/google/).
+    Don't use localhost for the redirect URI; use another domain you own, it will still work!
+    Alternatively, you can set up [local file triggers](https://docs.n8n.io/integrations/builtin/core-nodes/n8n-nodes-base.localfiletrigger/).
+4. Select **Test workflow** to start running the workflow.
+5. If this is the first time you're running the workflow, you may need to wait
 until Ollama finishes downloading Llama3.1. You can inspect the docker
 console logs to check on the progress.
+6. Make sure to toggle the workflow as active and copy the "Production" webhook URL!
+7. Open <http://localhost:3000/> in your browser to set up Open WebUI.
+You'll only have to do this once. You are NOT creating an account with Open WebUI in the
+setup here, it is only a local account for your instance!
+8. Go to Workspace -> Functions -> Add Function -> give it a name + description, then paste in
+the code from `n8n_pipe.py`.
+The function is also [published here on Open WebUI's site](https://openwebui.com/f/coleam/n8n_pipe/).
+9. Click on the gear icon and set n8n_url to the production URL for the webhook
+you copied in a previous step.
+10. Toggle the function on and it will now be available in your model dropdown in the top left!

 To open n8n at any time, visit <http://localhost:5678/> in your browser.
+To open Open WebUI at any time, visit <http://localhost:3000/>.

 With your n8n instance, you'll have access to over 400 integrations and a
 suite of basic and advanced AI nodes such as
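
A note on the credential URLs in step 3: the container-name addresses (http://ollama:11434, http://qdrant:6333) only resolve inside the compose network; from your host machine the same services sit on localhost. Below is a quick pre-flight check you could run before creating credentials — a sketch, assuming the kit's default host port mappings (n8n 5678, Ollama 11434, Qdrant 6333, Open WebUI 3000).

```python
# Pre-flight check from the host machine (assumed default port mappings).
import requests

services = {
    "n8n": "http://localhost:5678/",
    "Ollama": "http://localhost:11434/api/tags",    # lists pulled models
    "Qdrant": "http://localhost:6333/collections",  # lists collections
    "Open WebUI": "http://localhost:3000/",
}

for name, url in services.items():
    try:
        r = requests.get(url, timeout=5)
        print(f"{name}: HTTP {r.status_code}")
    except requests.RequestException as exc:
        print(f"{name}: unreachable ({exc})")
```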

View File: docker-compose.yml

@@ -3,6 +3,7 @@ volumes:
   postgres_storage:
   ollama_storage:
   qdrant_storage:
+  open-webui:

 networks:
   demo:
@@ -44,6 +45,18 @@ x-init-ollama: &init-ollama
     - "sleep 3; OLLAMA_HOST=ollama:11434 ollama pull llama3.1; OLLAMA_HOST=ollama:11434 ollama pull nomic-embed-text"

 services:
+  open-webui:
+    image: ghcr.io/open-webui/open-webui:main
+    networks: ['demo']
+    restart: unless-stopped
+    container_name: open-webui
+    ports:
+      - "3000:8080"
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    volumes:
+      - open-webui:/app/backend/data
+
   postgres:
     image: postgres:16-alpine
     networks: ['demo']
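
Two details of the new service are worth noting: Open WebUI listens on 8080 inside the container but is published on host port 3000, and the `extra_hosts` entry maps `host.docker.internal` to the Docker host so the container can reach services running outside the compose network (for example, a Mac-local Ollama). A tiny sketch to confirm the published port is live after `docker compose up`:

```python
# Sketch: confirm the 3000:8080 port mapping is live on the host.
import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.settimeout(3)
    reachable = s.connect_ex(("localhost", 3000)) == 0
print("Open WebUI on localhost:3000:", "reachable" if reachable else "not reachable")
```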

View File

@@ -1,87 +0,0 @@
{
"createdAt": "2024-02-23T16:58:31.616Z",
"updatedAt": "2024-02-23T16:58:31.616Z",
"id": "srOnR8PAY3u4RSwb",
"name": "Demo workflow",
"active": false,
"nodes": [
{
"parameters": {},
"id": "74003dcd-2ac7-4caa-a1cd-adecc5143c07",
"name": "Chat Trigger",
"type": "@n8n/n8n-nodes-langchain.chatTrigger",
"typeVersion": 1,
"position": [
660,
340
],
"webhookId": "cdb5c076-d458-4b9d-8398-f43bd25059b1"
},
{
"parameters": {},
"id": "ce8c3da4-899c-4cc4-af73-8096c64eec64",
"name": "Basic LLM Chain",
"type": "@n8n/n8n-nodes-langchain.chainLlm",
"typeVersion": 1.3,
"position": [
880,
340
]
},
{
"parameters": {
"model": "llama3.1:latest",
"options": {}
},
"id": "3dee878b-d748-4829-ac0a-cfd6705d31e5",
"name": "Ollama Chat Model",
"type": "@n8n/n8n-nodes-langchain.lmChatOllama",
"typeVersion": 1,
"position": [
900,
560
],
"credentials": {
"ollamaApi": {
"id": "xHuYe0MDGOs9IpBW",
"name": "Local Ollama service"
}
}
}
],
"connections": {
"Chat Trigger": {
"main": [
[
{
"node": "Basic LLM Chain",
"type": "main",
"index": 0
}
]
]
},
"Ollama Chat Model": {
"ai_languageModel": [
[
{
"node": "Basic LLM Chain",
"type": "ai_languageModel",
"index": 0
}
]
]
}
},
"settings": {
"executionOrder": "v1"
},
"staticData": null,
"meta": {
"templateCredsSetupCompleted": true
},
"pinData": {},
"versionId": "4e2affe6-bb1c-4ddc-92f9-dde0b7656796",
"triggerCount": 0,
"tags": []
}

View File: n8n_pipe.py

@@ -0,0 +1,126 @@
"""
title: n8n Pipe Function
author: Cole Medin
author_url: https://www.youtube.com/@ColeMedin
version: 0.1.0
This module defines a Pipe class that utilizes N8N for an Agent
"""
from typing import Optional, Callable, Awaitable
from pydantic import BaseModel, Field
import os
import time
import requests
class Pipe:
class Valves(BaseModel):
n8n_url: str = Field(
default="https://n8n.[your domain].com/webhook/[your webhook URL]"
)
n8n_bearer_token: str = Field(default="...")
input_field: str = Field(default="chatInput")
response_field: str = Field(default="output")
emit_interval: float = Field(
default=2.0, description="Interval in seconds between status emissions"
)
enable_status_indicator: bool = Field(
default=True, description="Enable or disable status indicator emissions"
)
def __init__(self):
self.type = "pipe"
self.id = "n8n_pipe"
self.name = "N8N Pipe"
self.valves = self.Valves()
self.last_emit_time = 0
pass
async def emit_status(
self,
__event_emitter__: Callable[[dict], Awaitable[None]],
level: str,
message: str,
done: bool,
):
current_time = time.time()
if (
__event_emitter__
and self.valves.enable_status_indicator
and (
current_time - self.last_emit_time >= self.valves.emit_interval or done
)
):
await __event_emitter__(
{
"type": "status",
"data": {
"status": "complete" if done else "in_progress",
"level": level,
"description": message,
"done": done,
},
}
)
self.last_emit_time = current_time
async def pipe(
self,
body: dict,
__user__: Optional[dict] = None,
__event_emitter__: Callable[[dict], Awaitable[None]] = None,
__event_call__: Callable[[dict], Awaitable[dict]] = None,
) -> Optional[dict]:
await self.emit_status(
__event_emitter__, "info", "/Calling N8N Workflow...", False
)
messages = body.get("messages", [])
# Verify a message is available
if messages:
question = messages[-1]["content"]
try:
# Invoke N8N workflow
headers = {
"Authorization": f"Bearer {self.valves.n8n_bearer_token}",
"Content-Type": "application/json",
}
payload = {"sessionId": f"{__user__['id']} - {messages[0]['content'].split("Prompt: ")[-1][:100]}"}
payload[self.valves.input_field] = question
response = requests.post(
self.valves.n8n_url, json=payload, headers=headers
)
if response.status_code == 200:
n8n_response = response.json()[self.valves.response_field]
else:
raise Exception(f"Error: {response.status_code} - {response.text}")
# Set assitant message with chain reply
body["messages"].append({"role": "assistant", "content": n8n_response})
except Exception as e:
await self.emit_status(
__event_emitter__,
"error",
f"Error during sequence execution: {str(e)}",
True,
)
return {"error": str(e)}
# If no message is available alert user
else:
await self.emit_status(
__event_emitter__,
"error",
"No messages found in the request body",
True,
)
body["messages"].append(
{
"role": "assistant",
"content": "No messages found in the request body",
}
)
await self.emit_status(__event_emitter__, "info", "Complete", True)
return n8n_response
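
For a quick sanity check outside Open WebUI (where the function runner normally supplies `body`, `__user__`, and `__event_emitter__` for you), the pipe can be driven by hand. A sketch, assuming the workflow is active at the placeholder URL below; note the webhook in this commit doesn't validate the bearer token, so any value works locally:

```python
# Standalone smoke test for the Pipe class above (not how Open WebUI runs it).
import asyncio

async def main():
    pipe = Pipe()
    # Placeholder: the production webhook URL copied from n8n in step 6.
    pipe.valves.n8n_url = "http://localhost:5678/webhook/invoke_n8n_agent"
    pipe.valves.n8n_bearer_token = "any-value-works-locally"

    async def emitter(event: dict):
        # Prints the status updates the pipe would stream into the chat UI.
        print("status:", event["data"]["description"])

    body = {"messages": [{"role": "user", "content": "Prompt: Hello, agent!"}]}
    reply = await pipe.pipe(
        body, __user__={"id": "local-user"}, __event_emitter__=emitter
    )
    print("agent reply:", reply)

asyncio.run(main())
```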