n8n + Python + LangChain AI Agent

This commit is contained in:
Cole Medin
2024-09-22 18:38:12 -05:00
parent 9a3111d578
commit 0408d5b9f7
9 changed files with 684 additions and 0 deletions

View File

@@ -0,0 +1,37 @@
# Rename this file to .env once you have filled in the below environment variables!
# The bearer token value that you set for the Header credentials in n8n
# -> Click into the webhook node in n8n
# -> select the "Credential for Header Auth" dropdown
# -> Click "- Create New Credentials -"
# -> For the Name field, enter "Authorization" (not including quotes)
# -> For the Value field enter "Bearer [N8N_BEARER_TOKEN]", but
# replace N8N_BEARER_TOKEN with your webhook "password"
N8N_BEARER_TOKEN=
# Production URL for your n8n workflow that summarizes Slack conversations
# Make sure your n8n workflow is switched to active so this works!
SUMMARIZE_SLACK_CONVERSATION_WEBHOOK=
# Production URL for your n8n workflow that sends a Slack message
SEND_SLACK_MESSAGE_WEBHOOK=
# Production URL for your n8n workflow that creates a Google Doc in your Drive
UPLOAD_GOOGLE_DOC_WEBHOOK=
# See all OpenAI models you can use here -
# https://platform.openai.com/docs/models
# And all Anthropic models you can use here -
# https://docs.anthropic.com/en/docs/about-claude/models
# A good default to go with here is gpt-4o or claude-3-5-sonnet-20240620
LLM_MODEL=gpt-4o
# Get your OpenAI API Key by following these instructions -
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# You only need this environment variable set if you set LLM_MODEL to a GPT model
OPENAI_API_KEY=
# Get your Anthropic API Key in your account settings -
# https://console.anthropic.com/settings/keys
# You only need this environment variable set if you set LLM_MODEL to a Claude model
ANTHROPIC_API_KEY=

View File

@@ -0,0 +1,96 @@
from datetime import datetime
import streamlit as st
import asyncio
import json
import uuid
import os
from langchain_core.messages import SystemMessage, AIMessage, HumanMessage
from runnable import get_runnable
@st.cache_resource
def create_chatbot_instance():
    """Build the LangGraph runnable once; st.cache_resource shares it across sessions."""
    return get_runnable()


chatbot = create_chatbot_instance()
@st.cache_resource
def get_thread_id():
    """Return one stable random thread id, reused for the app's lifetime."""
    fresh_id = uuid.uuid4()
    return str(fresh_id)


thread_id = get_thread_id()
# System prompt seeded into every new chat session. Today's date is
# interpolated once at import time so the model can reason about "current" info.
system_message = f"""
You are a personal assistant who helps with research, managing Google Drive, and managing Slack.
You never give IDs to the user since those are just for you to keep track of.
The link to any Google Doc is: https://docs.google.com/document/d/[document ID]
The current date is: {datetime.now().date()}
"""
async def prompt_ai(messages):
    """Stream the agent's response to *messages*, yielding raw content chunks."""
    # The thread_id ties this run to the checkpointed conversation state.
    run_config = {"configurable": {"thread_id": thread_id}}

    event_stream = chatbot.astream_events({"messages": messages}, run_config, version="v2")
    async for event in event_stream:
        # Only forward token-stream events; ignore tool/chain lifecycle events.
        if event["event"] == "on_chat_model_stream":
            yield event["data"]["chunk"].content
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~ Main Function with UI Creation ~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
async def main():
    """Render the Streamlit chat UI and stream agent responses into it."""
    st.title("n8n LangChain Agent")

    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = [
            SystemMessage(content=system_message)
        ]

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        message_json = json.loads(message.json())
        message_type = message_json["type"]
        if message_type in ["human", "ai", "system"]:
            with st.chat_message(message_type):
                st.markdown(message_json["content"])

    # React to user input
    if prompt := st.chat_input("What would you like to do today?"):
        # Display user message in chat message container
        st.chat_message("user").markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append(HumanMessage(content=prompt))

        # Display assistant response in chat message container
        response_content = ""
        with st.chat_message("assistant"):
            message_placeholder = st.empty()  # Placeholder for updating the message
            # Run the async generator to fetch responses
            async for chunk in prompt_ai(st.session_state.messages):
                # Chunks arrive either as plain text or as a list of content
                # parts; pull the text pieces out of the latter.
                if isinstance(chunk, str):
                    response_content += chunk
                elif isinstance(chunk, list):
                    for chunk_text in chunk:
                        if "text" in chunk_text:
                            response_content += chunk_text["text"]
                else:
                    raise Exception("Chunk is not a string or list.")
                # Update the placeholder with the current response content
                message_placeholder.markdown(response_content)

        st.session_state.messages.append(AIMessage(content=response_content))


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,10 @@
python-dotenv==0.13.0
langchain==0.2.12
langchain-anthropic==0.1.22
langchain-community==0.2.11
langchain-core==0.2.28
langchain-openai==0.1.20
streamlit==1.36.0
langgraph==0.1.19
aiosqlite==0.20.0
requests==2.32.3

View File

@@ -0,0 +1,129 @@
import json
import os
from typing import Annotated, Literal, Dict

from dotenv import load_dotenv
from langchain_anthropic import ChatAnthropic
from langchain_core.messages import ToolMessage, AIMessage
from langchain_core.runnables import RunnableConfig
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.aiosqlite import AsyncSqliteSaver
from langgraph.graph import END, StateGraph
from langgraph.graph.message import AnyMessage, add_messages
from typing_extensions import TypedDict

from tools import available_functions
load_dotenv()

# LLM_MODEL picks the provider: any name containing "gpt" routes to OpenAI,
# everything else is treated as an Anthropic model.
model = os.getenv('LLM_MODEL', 'gpt-4o')

# All registered n8n tools get bound to the chat model so the agent can call them.
tools = list(available_functions.values())

chatbot = ChatOpenAI(model=model, streaming=True) if "gpt" in model.lower() else ChatAnthropic(model=model, streaming=True)
chatbot_with_tools = chatbot.bind_tools(tools)
### State
class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        messages: List of chat messages.
    """
    # add_messages merges returned messages into the existing list rather than
    # replacing it, so each node only needs to return the new messages.
    messages: Annotated[list[AnyMessage], add_messages]
async def call_model(state: GraphState, config: RunnableConfig) -> Dict[str, AnyMessage]:
    """
    Function that calls the model to generate a response.

    Args:
        state (GraphState): The current graph state
        config (RunnableConfig): Run config (carries the thread_id for checkpointing)

    Returns:
        dict: The updated state with a new AI message
    """
    print("---CALL MODEL---")
    # Keep every non-AI message; keep AI messages only when response_metadata
    # is present and non-empty (precedence reads as: not A or (B and C)).
    # NOTE(review): presumably this filters out partial/streamed AI chunks
    # before re-prompting -- confirm against how messages are accumulated.
    messages = list(filter(
        lambda m: not isinstance(m, AIMessage) or hasattr(m, "response_metadata") and m.response_metadata,
        state["messages"]
    ))

    # Invoke the chatbot with the binded tools
    response = await chatbot_with_tools.ainvoke(messages, config)
    print("Response from model:", response)

    # We return an object because this will get added to the existing list
    return {"messages": response}
def tool_node(state: GraphState) -> Dict[str, AnyMessage]:
    """
    Execute every tool call requested by the last AI message.

    Args:
        state (GraphState): The current graph state

    Returns:
        dict: State update with one ToolMessage per executed tool call

    Raises:
        Exception: If the model requested a tool name that is not registered.
    """
    print("---TOOL NODE---")
    messages = state["messages"]
    last_message = messages[-1] if messages else None

    outputs = []
    if last_message and last_message.tool_calls:
        for call in last_message.tool_calls:
            tool = available_functions.get(call['name'])
            if tool is None:
                raise Exception(f"Tool '{call['name']}' not found.")

            print(f"\n\nInvoking tool: {call['name']} with args {call['args']}")
            output = tool.invoke(call['args'])
            print(f"Result of invoking tool: {output}\n\n")

            # ToolMessage content must be a string; serialize anything else.
            # (Requires the module-level `import json`.)
            outputs.append(ToolMessage(
                output if isinstance(output, str) else json.dumps(output),
                tool_call_id=call['id']
            ))

    return {'messages': outputs}
def should_continue(state: GraphState) -> Literal["__end__", "tools"]:
    """
    Route to the "tools" node when the last message requests tool calls,
    otherwise end the workflow.

    Args:
        state (GraphState): The current graph state

    Returns:
        str: The next node to execute, or END
    """
    print("---SHOULD CONTINUE---")
    messages = state["messages"]
    if not messages:
        return END

    # Tool calls on the newest message mean the agent still has work to do.
    return "tools" if messages[-1].tool_calls else END
def get_runnable():
    """Wire up the agent/tool graph and compile it with in-memory checkpoints."""
    workflow = StateGraph(GraphState)

    # "agent" decides what to do; "tools" executes whatever it requested.
    workflow.add_node("agent", call_model)
    workflow.add_node("tools", tool_node)
    workflow.set_entry_point("agent")

    # After each model turn either run the requested tools or finish,
    # and always hand tool results back to the agent.
    workflow.add_conditional_edges("agent", should_continue)
    workflow.add_edge("tools", "agent")

    # Conversation state is checkpointed to an in-memory SQLite database.
    memory = AsyncSqliteSaver.from_conn_string(":memory:")
    return workflow.compile(checkpointer=memory)

View File

@@ -0,0 +1,124 @@
from dotenv import load_dotenv
import requests
import json
import os
from langchain_core.tools import tool
load_dotenv()

# Required configuration: os.environ[...] raises KeyError at import time if a
# variable is missing, so misconfiguration fails fast.
N8N_BEARER_TOKEN = os.environ["N8N_BEARER_TOKEN"]
# Production URLs of the n8n workflows backing each agent tool.
SUMMARIZE_SLACK_CONVERSATION_WEBHOOK = os.environ["SUMMARIZE_SLACK_CONVERSATION_WEBHOOK"]
SEND_SLACK_MESSAGE_WEBHOOK = os.environ["SEND_SLACK_MESSAGE_WEBHOOK"]
UPLOAD_GOOGLE_DOC_WEBHOOK = os.environ["UPLOAD_GOOGLE_DOC_WEBHOOK"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~ Helper Function for Invoking n8n Webhooks ~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def invoke_n8n_webhook(method, url, function_name, payload=None):
    """
    Helper function to make a GET or POST request to an n8n webhook.

    Args:
        method (str): HTTP method ('GET' or 'POST')
        url (str): The API endpoint
        function_name (str): The name of the tool the AI agent invoked
        payload (dict, optional): The payload for POST requests

    Returns:
        str: The API response in JSON format or an error message
    """
    headers = {
        "Authorization": f"Bearer {N8N_BEARER_TOKEN}",
        "Content-Type": "application/json"
    }
    try:
        # A timeout keeps the agent from hanging forever when n8n is
        # unreachable; the except below converts a timeout into an error string.
        if method == "GET":
            response = requests.get(url, headers=headers, timeout=30)
        elif method == "POST":
            response = requests.post(url, headers=headers, json=payload, timeout=30)
        else:
            return f"Unsupported method: {method}"

        response.raise_for_status()
        return json.dumps(response.json(), indent=2)
    except Exception as e:
        # Tools must return strings; surface the failure to the LLM as text.
        return f"Exception when calling {function_name}: {e}"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~ n8n AI Agent Tool Functions ~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE: for @tool functions the docstring is sent to the LLM as the tool
# description, so it is part of runtime behavior -- edit with care.
@tool
def summarize_slack_conversation():
    """
    Gets the latest messages in a Slack channel and summarizes the conversation

    Example call:

    summarize_slack_conversation()

    Args:
        None

    Returns:
        str: The API response with the Slack conversation summary or an error if there was an issue
    """
    return invoke_n8n_webhook(
        "GET",
        SUMMARIZE_SLACK_CONVERSATION_WEBHOOK,
        "summarize_slack_conversation"
    )
# NOTE: for @tool functions the docstring is sent to the LLM as the tool
# description, so it is part of runtime behavior -- edit with care.
@tool
def send_slack_message(message):
    """
    Sends a message in a Slack channel

    Example call:

    send_slack_message("Greetings!")

    Args:
        message (str): The message to send in the Slack channel

    Returns:
        str: The API response with the result of sending the Slack message or an error if there was an issue
    """
    return invoke_n8n_webhook(
        "POST",
        SEND_SLACK_MESSAGE_WEBHOOK,
        "send_slack_message",
        {"message": message}
    )
# NOTE: for @tool functions the docstring is sent to the LLM as the tool
# description, so it is part of runtime behavior -- edit with care.
@tool
def create_google_doc(document_title, document_text):
    """
    Creates a Google Doc in Google Drive with the text specified.

    Example call:

    create_google_doc("9/20 Meeting Notes", "Meeting notes for 9/20...")

    Args:
        document_title (str): The name of the Google Doc
        document_text (str): The text to put in the new Google Doc

    Returns:
        str: The API response with the result of creating the Google Doc or an error if there was an issue
    """
    return invoke_n8n_webhook(
        "POST",
        UPLOAD_GOOGLE_DOC_WEBHOOK,
        "create_google_doc",
        {"document_title": document_title, "document_text": document_text}
    )
# Maps the function names to the actual function object in the script
# This mapping will also be used to create the list of tools to bind to the agent
# NOTE: keys must match the tool names the LLM emits in its tool calls.
available_functions = {
    "summarize_slack_conversation": summarize_slack_conversation,
    "send_slack_message": send_slack_message,
    "create_google_doc": create_google_doc
}

View File

@@ -0,0 +1,37 @@
# Rename this file to .env once you have filled in the below environment variables!
# The bearer token value that you set for the Header credentials in n8n
# -> Click into the webhook node in n8n
# -> select the "Credential for Header Auth" dropdown
# -> Click "- Create New Credentials -"
# -> For the Name field, enter "Authorization" (not including quotes)
# -> For the Value field enter "Bearer [N8N_BEARER_TOKEN]", but
# replace N8N_BEARER_TOKEN with your webhook "password"
N8N_BEARER_TOKEN=
# Production URL for your n8n workflow that summarizes Slack conversations
# Make sure your n8n workflow is switched to active so this works!
SUMMARIZE_SLACK_CONVERSATION_WEBHOOK=
# Production URL for your n8n workflow that sends a Slack message
SEND_SLACK_MESSAGE_WEBHOOK=
# Production URL for your n8n workflow that creates a Google Doc in your Drive
UPLOAD_GOOGLE_DOC_WEBHOOK=
# See all OpenAI models you can use here -
# https://platform.openai.com/docs/models
# And all Anthropic models you can use here -
# https://docs.anthropic.com/en/docs/about-claude/models
# A good default to go with here is gpt-4o or claude-3-5-sonnet-20240620
LLM_MODEL=gpt-4o
# Get your OpenAI API Key by following these instructions -
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# You only need this environment variable set if you set LLM_MODEL to a GPT model
OPENAI_API_KEY=
# Get your Anthropic API Key in your account settings -
# https://console.anthropic.com/settings/keys
# You only need this environment variable set if you set LLM_MODEL to a Claude model
ANTHROPIC_API_KEY=

View File

@@ -0,0 +1,119 @@
from langchain_core.messages import SystemMessage, AIMessage, ToolMessage, HumanMessage
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
from dotenv import load_dotenv
from datetime import datetime
import streamlit as st
import asyncio
import json
import os
from tools import available_functions
load_dotenv()

# Which LLM to use; per prompt_ai below, any name containing "gpt" routes to
# OpenAI, everything else is treated as an Anthropic model.
model = os.getenv('LLM_MODEL', 'gpt-4o')

# System prompt seeded into every new chat session. Today's date is
# interpolated once at import time so the model can reason about "current" info.
system_message = f"""
You are a personal assistant who helps with research, managing Google Drive, and managing Slack.
You never give IDs to the user since those are just for you to keep track of.
The link to any Google Doc is: https://docs.google.com/document/d/[document ID]
The current date is: {datetime.now().date()}
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~ AI Prompting Function ~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_chunk_text(chunk):
    """
    Extract the text content from a streamed message chunk.

    Args:
        chunk: A streamed message chunk whose ``content`` attribute is either
            a plain string or a list of content-part dicts.

    Returns:
        str: The concatenated text, or "" if the chunk carries no text.
    """
    content = chunk.content
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        # Some providers stream content as a list of typed parts; keep only
        # the parts that carry text. str.join avoids quadratic += building.
        return "".join(part["text"] for part in content if "text" in part)
    # Unknown content type -- contribute no text.
    return ""
def prompt_ai(messages):
    """
    Stream the AI's response to *messages*, invoking n8n tools as requested.

    Yields text chunks. Mutates *messages* in place: the AI's tool-call
    message and each resulting ToolMessage are appended before recursing so
    the follow-up prompt sees the tool results.
    """
    # First, prompt the AI with the latest user message
    tools = [tool for _, tool in available_functions.items()]
    n8n_chatbot = ChatOpenAI(model=model) if "gpt" in model.lower() else ChatAnthropic(model=model)
    n8n_chatbot_with_tools = n8n_chatbot.bind_tools(tools)

    stream = n8n_chatbot_with_tools.stream(messages)

    # Accumulate streamed chunks into one combined message ("gathered") so the
    # complete tool calls are available once the stream ends.
    # NOTE(review): assumes the stream yields at least one chunk; otherwise
    # 'gathered' is referenced before assignment below -- confirm.
    first = True
    for chunk in stream:
        if first:
            gathered = chunk
            first = False
        else:
            gathered = gathered + chunk
        yield get_chunk_text(chunk)

    has_tool_calls = len(gathered.tool_calls) > 0

    # Second, see if the AI decided it needs to invoke a tool
    if has_tool_calls:
        # Add the tool request to the list of messages so the AI knows later it invoked the tool
        messages.append(gathered)

        # If the AI decided to invoke a tool, invoke it
        # For each tool the AI wanted to call, call it and add the tool result to the list of messages
        for tool_call in gathered.tool_calls:
            tool_name = tool_call["name"].lower()
            selected_tool = available_functions[tool_name]
            print(f"\nInvoking tool: {tool_call['name']} with args {tool_call['args']}")
            tool_output = selected_tool.invoke(tool_call["args"])
            print(f"Result of invoking tool: {tool_output}\n")
            messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"]))

        # Call the AI again so it can produce a response with the result of calling the tool(s)
        additional_stream = prompt_ai(messages)
        for additional_chunk in additional_stream:
            yield additional_chunk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~ Main Function with UI Creation ~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
async def main():
    """Render the Streamlit chat UI and stream agent responses into it."""
    st.title("n8n LangChain Agent")

    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = [
            SystemMessage(content=system_message)
        ]

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        message_json = json.loads(message.json())
        message_type = message_json["type"]
        if message_type in ["human", "ai", "system"]:
            with st.chat_message(message_type):
                st.markdown(message_json["content"])

    # React to user input
    if prompt := st.chat_input("What would you like to do today?"):
        # Display user message in chat message container
        st.chat_message("user").markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append(HumanMessage(content=prompt))

        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            # st.write_stream consumes the generator, renders tokens live,
            # and returns the full concatenated response text.
            stream = prompt_ai(st.session_state.messages)
            response = st.write_stream(stream)

        st.session_state.messages.append(AIMessage(content=response))


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,8 @@
python-dotenv==0.13.0
langchain==0.2.12
langchain-anthropic==0.1.22
langchain-community==0.2.11
langchain-core==0.2.28
langchain-openai==0.1.20
streamlit==1.36.0
requests==2.32.3

View File

@@ -0,0 +1,124 @@
from dotenv import load_dotenv
import requests
import json
import os
from langchain_core.tools import tool
load_dotenv()

# Required configuration: os.environ[...] raises KeyError at import time if a
# variable is missing, so misconfiguration fails fast.
N8N_BEARER_TOKEN = os.environ["N8N_BEARER_TOKEN"]
# Production URLs of the n8n workflows backing each agent tool.
SUMMARIZE_SLACK_CONVERSATION_WEBHOOK = os.environ["SUMMARIZE_SLACK_CONVERSATION_WEBHOOK"]
SEND_SLACK_MESSAGE_WEBHOOK = os.environ["SEND_SLACK_MESSAGE_WEBHOOK"]
UPLOAD_GOOGLE_DOC_WEBHOOK = os.environ["UPLOAD_GOOGLE_DOC_WEBHOOK"]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~ Helper Function for Invoking n8n Webhooks ~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def invoke_n8n_webhook(method, url, function_name, payload=None):
    """
    Helper function to make a GET or POST request to an n8n webhook.

    Args:
        method (str): HTTP method ('GET' or 'POST')
        url (str): The API endpoint
        function_name (str): The name of the tool the AI agent invoked
        payload (dict, optional): The payload for POST requests

    Returns:
        str: The API response in JSON format or an error message
    """
    headers = {
        "Authorization": f"Bearer {N8N_BEARER_TOKEN}",
        "Content-Type": "application/json"
    }
    try:
        # A timeout keeps the agent from hanging forever when n8n is
        # unreachable; the except below converts a timeout into an error string.
        if method == "GET":
            response = requests.get(url, headers=headers, timeout=30)
        elif method == "POST":
            response = requests.post(url, headers=headers, json=payload, timeout=30)
        else:
            return f"Unsupported method: {method}"

        response.raise_for_status()
        return json.dumps(response.json(), indent=2)
    except Exception as e:
        # Tools must return strings; surface the failure to the LLM as text.
        return f"Exception when calling {function_name}: {e}"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~ n8n AI Agent Tool Functions ~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE: for @tool functions the docstring is sent to the LLM as the tool
# description, so it is part of runtime behavior -- edit with care.
@tool
def summarize_slack_conversation():
    """
    Gets the latest messages in a Slack channel and summarizes the conversation

    Example call:

    summarize_slack_conversation()

    Args:
        None

    Returns:
        str: The API response with the Slack conversation summary or an error if there was an issue
    """
    return invoke_n8n_webhook(
        "GET",
        SUMMARIZE_SLACK_CONVERSATION_WEBHOOK,
        "summarize_slack_conversation"
    )
# NOTE: for @tool functions the docstring is sent to the LLM as the tool
# description, so it is part of runtime behavior -- edit with care.
@tool
def send_slack_message(message):
    """
    Sends a message in a Slack channel

    Example call:

    send_slack_message("Greetings!")

    Args:
        message (str): The message to send in the Slack channel

    Returns:
        str: The API response with the result of sending the Slack message or an error if there was an issue
    """
    return invoke_n8n_webhook(
        "POST",
        SEND_SLACK_MESSAGE_WEBHOOK,
        "send_slack_message",
        {"message": message}
    )
# NOTE: for @tool functions the docstring is sent to the LLM as the tool
# description, so it is part of runtime behavior -- edit with care.
@tool
def create_google_doc(document_title, document_text):
    """
    Creates a Google Doc in Google Drive with the text specified.

    Example call:

    create_google_doc("9/20 Meeting Notes", "Meeting notes for 9/20...")

    Args:
        document_title (str): The name of the Google Doc
        document_text (str): The text to put in the new Google Doc

    Returns:
        str: The API response with the result of creating the Google Doc or an error if there was an issue
    """
    return invoke_n8n_webhook(
        "POST",
        UPLOAD_GOOGLE_DOC_WEBHOOK,
        "create_google_doc",
        {"document_title": document_title, "document_text": document_text}
    )
# Maps the function names to the actual function object in the script
# This mapping will also be used to create the list of tools to bind to the agent
# NOTE: keys must match the tool names the LLM emits in its tool calls.
available_functions = {
    "summarize_slack_conversation": summarize_slack_conversation,
    "send_slack_message": send_slack_message,
    "create_google_doc": create_google_doc
}