Source code for langchain.agents.react.agent
from __future__ import annotations
from typing import Optional, Sequence
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents import AgentOutputParser
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain.tools.render import ToolsRenderer, render_text_description
def create_react_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: BasePromptTemplate,
output_parser: Optional[AgentOutputParser] = None,
tools_renderer: ToolsRenderer = render_text_description,
) -> Runnable:
"""Create an agent that uses ReAct prompting.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more.
output_parser: AgentOutputParser for parsing the LLM output.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
Returns:
A Runnable sequence representing an agent. It takes the same input
variables as the prompt passed in and returns either an AgentAction or
an AgentFinish.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.llms import OpenAI
from langchain.agents import AgentExecutor, create_react_agent
prompt = hub.pull("hwchase17/react")
model = OpenAI()
tools = ...
agent = create_react_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Use with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
# Notice that chat_history is a string
# since this prompt is aimed at LLMs, not chat models
"chat_history": "Human: My name is Bob\nAI: Hello Bob!",
}
)
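The agent returned here is itself a Runnable, so it can also be invoked
directly, without an AgentExecutor. As a minimal sketch (assuming the
`hwchase17/react` prompt pulled above), pass the prompt's input variables
plus an `intermediate_steps` list of (AgentAction, observation) tuples;
the result is a single AgentAction or AgentFinish:
.. code-block:: python
# Hypothetical direct call; AgentExecutor normally drives this loop.
next_step = agent.invoke({"input": "hi", "intermediate_steps": []})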
Prompt:
The prompt must have input keys:
* `tools`: contains descriptions and arguments for each tool.
* `tool_names`: contains all tool names.
* `agent_scratchpad`: contains previous agent actions and tool outputs as a string.
Here's an example:
.. code-block:: python
from langchain_core.prompts import PromptTemplate
template = '''Answer the following questions as best you can. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin!
Question: {input}
Thought:{agent_scratchpad}'''
prompt = PromptTemplate.from_template(template)
""" # noqa: E501
missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
prompt.input_variables
)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
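# Pre-fill the tool descriptions and tool names so callers only need to
# supply the remaining prompt variables (e.g. "input").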
prompt = prompt.partial(
tools=tools_renderer(list(tools)),
tool_names=", ".join([t.name for t in tools]),
)
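# Stop generation before the model writes its own "Observation:" line;
# the real observation comes from executing the chosen tool.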
llm_with_stop = llm.bind(stop=["\nObservation"])
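# Fall back to the single-input ReAct parser when no parser is given.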
output_parser = output_parser or ReActSingleInputOutputParser()
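# Compose the agent: format intermediate steps into the scratchpad string,
# fill the prompt, call the model with the stop sequence, and parse the
# ReAct-style completion into an AgentAction or AgentFinish.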
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_log_to_str(x["intermediate_steps"]),
)
| prompt
| llm_with_stop
| output_parser
)
return agent