Updating LLM eval for Llama 3.2 testing

This commit is contained in:
Cole Medin
2024-09-28 21:15:54 -05:00
parent 0408d5b9f7
commit 07cd054a1b

View File

@@ -26,9 +26,10 @@ model_mapping = {
     "gpt": ChatOpenAI,
     "claude": ChatAnthropic,
     "groq": ChatGroq,
-    "llama": ChatHuggingFace # ChatHuggingFace doesn't work for tool calling yet with HuggingFaceEndpoint but will in the future
+    "llama": ChatGroq
 }

+# Support for HuggingFace with local models coming soon! This function isn't used yet.
 @st.cache_resource
 def get_local_model():
     return HuggingFaceEndpoint(
@@ -54,7 +55,7 @@ tools = [tool for _, tool in available_functions.items()]
 for key, chatbot_class in model_mapping.items():
     if key in model.lower():
-        chatbot = chatbot_class(model=model) if key != "llama" else chatbot_class(llm=get_local_model())
+        chatbot = chatbot_class(model=model) if key != "huggingface" else chatbot_class(llm=get_local_model())
         break

 chatbot_with_tools = chatbot.bind_tools(tools)