fix: agent json parser fails with text in suffix (#1734)

While testing out `VectorDBQA` as a `Tool` in one of my conversations, I got a response from the LLM (OpenAI) like this:

<code>
Could not parse LLM output: Here's a response using the Product Search
tool:

```json
{
    "action": "Product Search",
    "action_input": "pots for plants"
}
```

This will allow you to search for pots for your plants and find a
variety of options that are available for purchase. You can use this
information to choose the pots that best fit your needs and preferences.
</code>

i.e. the response had text before and *after* the expected JSON block, which led to a
`JSONDecodeError`. This is now fixed by also stripping everything after the closing '```'
fence.
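
For clarity, here is a minimal, self-contained sketch of the cleanup this patch performs. The standalone `parse_agent_output` helper is just for illustration; the actual change lives in `AgentOutputParser.parse` (see the diff below).

```python
import json


def parse_agent_output(text: str) -> dict:
    """Illustrative stand-in for the patched AgentOutputParser.parse()."""
    cleaned_output = text.strip()
    # Drop any prose *before* the opening ```json fence (already handled before this patch).
    if "```json" in cleaned_output:
        _, cleaned_output = cleaned_output.split("```json")
    # New in this patch: drop any prose *after* the closing ``` fence.
    # Without this, json.loads() still sees the trailing sentence and raises
    # JSONDecodeError("Extra data", ...).
    if "```" in cleaned_output:
        cleaned_output, _ = cleaned_output.split("```")
    response = json.loads(cleaned_output.strip())
    return {"action": response["action"], "action_input": response["action_input"]}
```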

I encountered the error in this Jupyter notebook -
[link](https://github.com/anselm94/chatbot-llm-ecommerce/blob/main/chatcommerce.ipynb)

<details>
    <summary>Error encountered</summary>
    <code>
    

---------------------------------------------------------------------------
JSONDecodeError                           Traceback (most recent call last)
File ~/Git/chatbot-llm-ecommerce/.venv/lib/python3.11/site-packages/langchain/agents/conversational_chat/base.py:104, in ConversationalChatAgent._extract_tool_and_input(self, llm_output)
    103 try:
--> 104     response = self.output_parser.parse(llm_output)
    105     return response["action"], response["action_input"]

File ~/Git/chatbot-llm-ecommerce/.venv/lib/python3.11/site-packages/langchain/agents/conversational_chat/base.py:49, in AgentOutputParser.parse(self, text)
     48 cleaned_output = cleaned_output.strip()
---> 49 response = json.loads(cleaned_output)
     50 return {"action": response["action"], "action_input": response["action_input"]}

File /opt/homebrew/Cellar/python@3.11/3.11.2_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/json/__init__.py:346, in loads(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)
    343 if (cls is None and object_hook is None and
    344         parse_int is None and parse_float is None and
    345         parse_constant is None and object_pairs_hook is None and not kw):
--> 346     return _default_decoder.decode(s)
    347 if cls is None:

File /opt/homebrew/Cellar/python@3.11/3.11.2_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/json/decoder.py:340, in JSONDecoder.decode(self, s, _w)
    339 if end != len(s):
--> 340     raise JSONDecodeError("Extra data", s, end)
    341 return obj

JSONDecodeError: Extra data: line 5 column 1 (char 74)

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
Cell In[22], line 1
----> 1 ask_ai.run("Yes. I need pots for my plants")

File ~/Git/chatbot-llm-ecommerce/.venv/lib/python3.11/site-packages/langchain/chains/base.py:213, in Chain.run(self, *args, **kwargs)
    211     if len(args) != 1:
    212         raise ValueError("`run` supports only one positional argument.")
--> 213     return self(args[0])[self.output_keys[0]]
    215 if kwargs and not args:
    216     return self(kwargs)[self.output_keys[0]]

File ~/Git/chatbot-llm-ecommerce/.venv/lib/python3.11/site-packages/langchain/chains/base.py:116, in Chain.__call__(self, inputs, return_only_outputs)
    114 except (KeyboardInterrupt, Exception) as e:
    115     self.callback_manager.on_chain_error(e, verbose=self.verbose)
--> 116     raise e
    117 self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
    118 return self.prep_outputs(inputs, outputs, return_only_outputs)

File ~/Git/chatbot-llm-ecommerce/.venv/lib/python3.11/site-packages/langchain/chains/base.py:113, in Chain.__call__(self, inputs, return_only_outputs)
    107 self.callback_manager.on_chain_start(
    108     {"name": self.__class__.__name__},
    109     inputs,
    110     verbose=self.verbose,
    111 )
    112 try:
--> 113     outputs = self._call(inputs)
    114 except (KeyboardInterrupt, Exception) as e:
    115     self.callback_manager.on_chain_error(e, verbose=self.verbose)

File ~/Git/chatbot-llm-ecommerce/.venv/lib/python3.11/site-packages/langchain/agents/agent.py:499, in AgentExecutor._call(self, inputs)
    497 # We now enter the agent loop (until it returns something).
    498 while self._should_continue(iterations):
--> 499     next_step_output = self._take_next_step(
    500         name_to_tool_map, color_mapping, inputs, intermediate_steps
    501     )
    502     if isinstance(next_step_output, AgentFinish):
    503         return self._return(next_step_output, intermediate_steps)

File ~/Git/chatbot-llm-ecommerce/.venv/lib/python3.11/site-packages/langchain/agents/agent.py:409, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps)
    404 """Take a single step in the thought-action-observation loop.
    405
    406 Override this to take control of how the agent makes and acts on choices.
    407 """
    408 # Call the LLM to see what to do.
--> 409 output = self.agent.plan(intermediate_steps, **inputs)
    410 # If the tool chosen is the finishing tool, then we end and return.
    411 if isinstance(output, AgentFinish):

File ~/Git/chatbot-llm-ecommerce/.venv/lib/python3.11/site-packages/langchain/agents/agent.py:105, in Agent.plan(self, intermediate_steps, **kwargs)
     94 """Given input, decided what to do.
     95
     96 Args:
    (...)
    102     Action specifying what tool to use.
    103 """
    104 full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
--> 105 action = self._get_next_action(full_inputs)
    106 if action.tool == self.finish_tool_name:
    107     return AgentFinish({"output": action.tool_input}, action.log)

File ~/Git/chatbot-llm-ecommerce/.venv/lib/python3.11/site-packages/langchain/agents/agent.py:67, in Agent._get_next_action(self, full_inputs)
     65 def _get_next_action(self, full_inputs: Dict[str, str]) -> AgentAction:
     66     full_output = self.llm_chain.predict(**full_inputs)
---> 67     parsed_output = self._extract_tool_and_input(full_output)
     68     while parsed_output is None:
     69         full_output = self._fix_text(full_output)

File ~/Git/chatbot-llm-ecommerce/.venv/lib/python3.11/site-packages/langchain/agents/conversational_chat/base.py:107, in ConversationalChatAgent._extract_tool_and_input(self, llm_output)
    105     return response["action"], response["action_input"]
    106 except Exception:
--> 107     raise ValueError(f"Could not parse LLM output: {llm_output}")

ValueError: Could not parse LLM output: Here's a response using the Product Search tool:

```json
{
    "action": "Product Search",
    "action_input": "pots for plants"
}
```

This will allow you to search for pots for your plants and find a variety of options that are available for purchase. You can use this information to choose the pots that best fit your needs and preferences.

    </code>
</details>
Branch: tool-patch
Commit: fbc0c85b90 (parent 276940fd9b)
Author: Merbin J Anselm, committed by GitHub

@@ -39,6 +39,8 @@ class AgentOutputParser(BaseOutputParser):
         cleaned_output = text.strip()
         if "```json" in cleaned_output:
             _, cleaned_output = cleaned_output.split("```json")
+        if "```" in cleaned_output:
+            cleaned_output, _ = cleaned_output.split("```")
         if cleaned_output.startswith("```json"):
             cleaned_output = cleaned_output[len("```json") :]
         if cleaned_output.startswith("```"):
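
As a rough sanity check against the patched parser (class and module path taken from the traceback above; this assumes `AgentOutputParser` can be instantiated directly), the problematic response should now parse cleanly:

```python
from langchain.agents.conversational_chat.base import AgentOutputParser

# The LLM response from the notebook: prose before *and* after the JSON block.
llm_output = (
    "Here's a response using the Product Search tool:\n\n"
    "```json\n"
    '{\n    "action": "Product Search",\n    "action_input": "pots for plants"\n}\n'
    "```\n\n"
    "This will allow you to search for pots for your plants."
)

print(AgentOutputParser().parse(llm_output))
# Expected with this patch: {'action': 'Product Search', 'action_input': 'pots for plants'}
```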
