forked from Archives/langchain

agent improvements

commit 93db5470bb
parent 711a2436bb
@@ -26,6 +26,7 @@ class Agent(Chain, BaseModel, ABC):
     prompt: ClassVar[BasePromptTemplate]
     llm_chain: LLMChain
     tools: List[Tool]
+    retry_on_failed_tool: bool = False
     input_key: str = "input" #: :meta private:
     output_key: str = "output" #: :meta private:
 
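For orientation, a minimal runnable sketch of what the new field amounts to on its own. AgentConfig is a hypothetical stand-in for the real Agent class (which also carries the prompt, llm_chain, and tools fields above); only the retry_on_failed_tool field comes from this commit:

from pydantic import BaseModel

class AgentConfig(BaseModel):
    # Stand-in for the new Agent field: defaults to False, so existing
    # agents keep raising on unknown tool names unless callers opt in.
    retry_on_failed_tool: bool = False

assert AgentConfig().retry_on_failed_tool is False
assert AgentConfig(retry_on_failed_tool=True).retry_on_failed_tool is True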
@@ -141,13 +142,21 @@ class Agent(Chain, BaseModel, ABC):
             # If the tool chosen is the finishing tool, then we end and return.
             if output.tool == self.finish_tool_name:
                 return {self.output_key: output.tool_input}
-            # Otherwise we lookup the tool
-            chain = name_to_tool_map[output.tool]
-            # We then call the tool on the tool input to get an observation
-            observation = chain(output.tool_input)
+            if output.tool not in name_to_tool_map:
+                if self.retry_on_failed_tool:
+                    observation = f"Tool {output.tool} not found."
+                    color = None
+                else:
+                    raise KeyError(f"Tool {output.tool} not found.")
+            else:
+                # Otherwise we lookup the tool
+                chain = name_to_tool_map[output.tool]
+                # We then call the tool on the tool input to get an observation
+                observation = chain(output.tool_input)
+                color = color_mapping[output.tool]
             # We then log the observation
             chained_input.add(f"\n{self.observation_prefix}")
-            chained_input.add(observation, color=color_mapping[output.tool])
+            chained_input.add(observation, color=color)
             # We then add the LLM prefix into the prompt to get the LLM to start
             # thinking, and start the loop all over.
             chained_input.add(f"\n{self.llm_prefix}")
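The control flow above is easiest to see in isolation. A self-contained sketch, assuming simplified stand-ins for the tool chains and color mapping (run_tool and its signature are illustrative, not part of the commit):

from typing import Callable, Dict, Optional, Tuple

def run_tool(
    tool_name: str,
    tool_input: str,
    name_to_tool_map: Dict[str, Callable[[str], str]],
    color_mapping: Dict[str, str],
    retry_on_failed_tool: bool = False,
) -> Tuple[str, Optional[str]]:
    # Unknown tool: either feed the error back to the LLM as an
    # observation (retry mode) or fail loudly, as in the diff above.
    if tool_name not in name_to_tool_map:
        if retry_on_failed_tool:
            return f"Tool {tool_name} not found.", None
        raise KeyError(f"Tool {tool_name} not found.")
    # Known tool: call it on the tool input to get an observation.
    return name_to_tool_map[tool_name](tool_input), color_mapping[tool_name]

tools = {"search": lambda q: f"results for {q!r}"}
colors = {"search": "green"}
print(run_tool("search", "weather", tools, colors))
print(run_tool("calculator", "2+2", tools, colors, retry_on_failed_tool=True))

With the flag on, the "not found" message flows into chained_input like any other observation, so the model sees its mistake and can pick a different tool on the next loop iteration; color is None because there is no per-tool color to use.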
@@ -14,6 +14,9 @@ Observation: the result of the action
 Thought: I now know the final answer
 Final Answer: the final answer to the original input question
 
+Do NOT take the same action with the same action input.
+Only take actions with tools that exist.
+
 Begin!
 
 Question: {{input}}"""
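One note on the doubled braces in the last line: they suggest this template string goes through one str.format pass (e.g. to splice in tool descriptions) before the agent fills in the question, so {{input}} survives as the literal placeholder {input}. A small sketch under that assumption (SUFFIX is a hypothetical name):

SUFFIX = """Begin!

Question: {{input}}"""

# One .format() pass turns the doubled braces into the single-brace
# placeholder that the prompt template fills in at run time.
template = SUFFIX.format()
assert template.endswith("Question: {input}")
print(template.format(input="What is 2 + 2?"))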