How to pass run time values to tools
This guide assumes familiarity with the following concepts: chat models, LangChain tools, and how to use chat models to call tools.
This how-to guide uses models with native tool-calling capability; a list of all models that support tool calling is available in the LangChain documentation.
If you're using LangGraph, please refer to the LangGraph how-to guide that shows how to create an agent that keeps track of a given user's favorite pets.
You may need to bind values to a tool that are only known at runtime. For example, the tool logic may require using the ID of the user who made the request.
Most of the time, such values should not be controlled by the LLM. In fact, allowing the LLM to control the user ID may lead to a security risk.
Instead, the LLM should only control the parameters of the tool that are meant to be controlled by the LLM, while other parameters (such as user ID) should be fixed by the application logic.
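For contrast, consider a hypothetical anti-pattern (the tool below is illustrative, not part of this guide) in which the user ID is exposed as a model-controlled parameter. Nothing would stop the model, whether through hallucination or prompt injection, from writing to another user's data:
from typing import List
from langchain_core.tools import tool

user_to_pets = {}

@tool
def update_favorite_pets_unsafe(user_id: str, pets: List[str]) -> None:
    """Set a user's favorite pets."""
    # Risky: user_id is part of the tool schema, so the LLM chooses it.
    user_to_pets[user_id] = pets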
This how-to guide shows a simple design pattern that creates the tools dynamically at run time and binds the appropriate values to them.
First, select and instantiate the chat model that will call the tools:
- OpenAI
- Anthropic
- Azure
- Google
- Cohere
- FireworksAI
- Groq
- MistralAI
- TogetherAI
pip install -qU langchain-openai
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
pip install -qU langchain-anthropic
import getpass
import os
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(model="claude-3-sonnet-20240229")
pip install -qU langchain-openai
import getpass
import os
os.environ["AZURE_OPENAI_API_KEY"] = getpass.getpass()
from langchain_openai import AzureChatOpenAI
llm = AzureChatOpenAI(
azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],
openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
)
pip install -qU langchain-google-vertexai
import getpass
import os
os.environ["GOOGLE_API_KEY"] = getpass.getpass()
from langchain_google_vertexai import ChatVertexAI
llm = ChatVertexAI(model="gemini-pro")
pip install -qU langchain-cohere
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass()
from langchain_cohere import ChatCohere
llm = ChatCohere(model="command-r")
pip install -qU langchain-fireworks
import getpass
import os
os.environ["FIREWORKS_API_KEY"] = getpass.getpass()
from langchain_fireworks import ChatFireworks
llm = ChatFireworks(model="accounts/fireworks/models/firefunction-v1", temperature=0)
pip install -qU langchain-groq
import getpass
import os
os.environ["GROQ_API_KEY"] = getpass.getpass()
from langchain_groq import ChatGroq
llm = ChatGroq(model="llama3-8b-8192")
pip install -qU langchain-mistralai
import getpass
import os
os.environ["MISTRAL_API_KEY"] = getpass.getpass()
from langchain_mistralai import ChatMistralAI
llm = ChatMistralAI(model="mistral-large-latest")
pip install -qU langchain-openai
import getpass
import os
os.environ["TOGETHER_API_KEY"] = getpass.getpass()
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(
base_url="https://api.together.xyz/v1",
api_key=os.environ["TOGETHER_API_KEY"],
model="mistralai/Mixtral-8x7B-Instruct-v0.1",
)
Passing request-time information
The idea is to create the tools dynamically at request time and bind the appropriate information to them. For example, this information may be the user ID as resolved from the request itself.
from typing import List

from langchain_core.tools import BaseTool, tool

user_to_pets = {}


def generate_tools_for_user(user_id: str) -> List[BaseTool]:
    """Generate a set of tools that have a user id associated with them."""

    @tool
    def update_favorite_pets(pets: List[str]) -> None:
        """Add the list of favorite pets."""
        user_to_pets[user_id] = pets

    @tool
    def delete_favorite_pets() -> None:
        """Delete the list of favorite pets."""
        if user_id in user_to_pets:
            del user_to_pets[user_id]

    @tool
    def list_favorite_pets() -> List[str]:
        """List favorite pets if any."""
        return user_to_pets.get(user_id, [])

    return [update_favorite_pets, delete_favorite_pets, list_favorite_pets]
Verify that the tools work correctly
update_pets, delete_pets, list_pets = generate_tools_for_user("eugene")
update_pets.invoke({"pets": ["cat", "dog"]})
print(user_to_pets)
print(list_pets.invoke({}))
{'eugene': ['cat', 'dog']}
['cat', 'dog']
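For completeness, the delete tool can be exercised the same way; this is a quick continuation of the state above:
delete_pets.invoke({})
print(user_to_pets)
{}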
from langchain_core.prompts import ChatPromptTemplate
def handle_run_time_request(user_id: str, query: str):
    """Handle run time request."""
    tools = generate_tools_for_user(user_id)
    llm_with_tools = llm.bind_tools(tools)
    prompt = ChatPromptTemplate.from_messages(
        [("system", "You are a helpful assistant."), ("human", "{query}")]
    )
    chain = prompt | llm_with_tools
    return chain.invoke({"query": query})
This code allows the LLM to invoke the tools, but the LLM is unaware that a user ID even exists!
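One way to confirm this is to inspect a tool's input schema: the user ID is captured by the closure, so it never appears as a parameter. A quick sketch, reusing the tools generated earlier:
update_pets, delete_pets, list_pets = generate_tools_for_user("eugene")
print(update_pets.args)
# Only 'pets' appears in the schema; there is no user_id field.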
ai_message = handle_run_time_request(
"eugene", "my favorite animals are cats and parrots."
)
ai_message.tool_calls
[{'name': 'update_favorite_pets',
'args': {'pets': ['cats', 'parrots']},
'id': 'call_jJvjPXsNbFO5MMgW0q84iqCN'}]
Chat models only output requests to invoke tools; they don't actually invoke the underlying tools.
To see how to invoke the tools, please refer to how to use a model to call tools.
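That said, a minimal sketch of invoking them manually might look like this, assuming we regenerate the tools for the same user and dispatch on each tool call's name:
tools = generate_tools_for_user("eugene")
tool_map = {t.name: t for t in tools}
for tool_call in ai_message.tool_calls:
    selected_tool = tool_map[tool_call["name"]]
    print(selected_tool.invoke(tool_call["args"]))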