{
"cells": [
{
"cell_type": "markdown",
"id": "4d6c0c86",
"metadata": {},
"source": [
"# Retry parser\n",
"\n",
"While in some cases it is possible to fix any parsing mistakes by only looking at the output, in other cases it can't. An example of this is when the output is not just in the incorrect format, but is partially complete. Consider the below example."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "f28526bd",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import (\n",
" PromptTemplate,\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
")\n",
"from langchain.llms import OpenAI\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.output_parsers import (\n",
" PydanticOutputParser,\n",
" OutputFixingParser,\n",
" RetryOutputParser,\n",
")\n",
"from pydantic import BaseModel, Field, validator\n",
"from typing import List"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "67c5e1ac",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Based on the user question, provide an Action and Action Input for what step should be taken.\n",
"{format_instructions}\n",
"Question: {query}\n",
"Response:\"\"\"\n",
"\n",
"\n",
"class Action(BaseModel):\n",
" action: str = Field(description=\"action to take\")\n",
" action_input: str = Field(description=\"input to the action\")\n",
"\n",
"\n",
"parser = PydanticOutputParser(pydantic_object=Action)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "007aa87f",
"metadata": {},
"outputs": [],
"source": [
"prompt = PromptTemplate(\n",
" template=\"Answer the user query.\\n{format_instructions}\\n{query}\\n\",\n",
" input_variables=[\"query\"],\n",
" partial_variables={\"format_instructions\": parser.get_format_instructions()},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "10d207ff",
"metadata": {},
"outputs": [],
"source": [
"prompt_value = prompt.format_prompt(query=\"who is leo di caprios gf?\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "68622837",
"metadata": {},
"outputs": [],
"source": [
"bad_response = '{\"action\": \"search\"}'"
]
},
{
"cell_type": "markdown",
"id": "25631465",
"metadata": {},
"source": [
"If we try to parse this response as is, we will get an error"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "894967c1",
"metadata": {},
"outputs": [
{
"ename": "OutputParserException",
"evalue": "Failed to parse Action from completion {\"action\": \"search\"}. Got: 1 validation error for Action\naction_input\n field required (type=value_error.missing)",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
"File \u001b[0;32m~/workplace/langchain/langchain/output_parsers/pydantic.py:24\u001b[0m, in \u001b[0;36mPydanticOutputParser.parse\u001b[0;34m(self, text)\u001b[0m\n\u001b[1;32m 23\u001b[0m json_object \u001b[38;5;241m=\u001b[39m json\u001b[38;5;241m.\u001b[39mloads(json_str)\n\u001b[0;32m---> 24\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpydantic_object\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mjson_object\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 26\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m (json\u001b[38;5;241m.\u001b[39mJSONDecodeError, ValidationError) \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/pydantic/main.py:527\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.parse_obj\u001b[0;34m()\u001b[0m\n",
"File \u001b[0;32m~/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/pydantic/main.py:342\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n",
"\u001b[0;31mValidationError\u001b[0m: 1 validation error for Action\naction_input\n field required (type=value_error.missing)",
"\nDuring handling of the above exception, another exception occurred:\n",
"\u001b[0;31mOutputParserException\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mparser\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbad_response\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/workplace/langchain/langchain/output_parsers/pydantic.py:29\u001b[0m, in \u001b[0;36mPydanticOutputParser.parse\u001b[0;34m(self, text)\u001b[0m\n\u001b[1;32m 27\u001b[0m name \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpydantic_object\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\n\u001b[1;32m 28\u001b[0m msg \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mFailed to parse \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m from completion \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtext\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m. Got: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m---> 29\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m OutputParserException(msg)\n",
"\u001b[0;31mOutputParserException\u001b[0m: Failed to parse Action from completion {\"action\": \"search\"}. Got: 1 validation error for Action\naction_input\n field required (type=value_error.missing)"
]
}
],
"source": [
"parser.parse(bad_response)"
]
},
{
"cell_type": "markdown",
"id": "f6b64696",
"metadata": {},
"source": [
"If we try to use the `OutputFixingParser` to fix this error, it will be confused - namely, it doesn't know what to actually put for action input."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "78b2b40d",
"metadata": {},
"outputs": [],
"source": [
"fix_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI())"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "4fe1301d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Action(action='search', action_input='')"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"fix_parser.parse(bad_response)"
]
},
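{
"cell_type": "markdown",
"id": "3b2a1c9e",
"metadata": {},
"source": [
"That said, the `OutputFixingParser` is the right tool when the response contains all of the required information but is merely malformed. As a minimal sketch (the single-quoted response below is an illustrative assumption, not actual model output): the string is invalid JSON, so `parser` alone would raise an `OutputParserException`, but the fixing LLM only needs to repair the formatting, since nothing is missing."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8c7d6e5f",
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical, illustrative response: invalid JSON (single quotes), but both\n",
"# fields are present, so the fixing LLM only has to repair the formatting.\n",
"malformed_response = \"{'action': 'search', 'action_input': 'leo di caprio girlfriend'}\"\n",
"fix_parser.parse(malformed_response)"
]
},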
{
"cell_type": "markdown",
"id": "9bd9ea7d",
"metadata": {},
"source": [
"Instead, we can use the RetryOutputParser, which passes in the prompt (as well as the original output) to try again to get a better response."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "7e8a8a28",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers import RetryWithErrorOutputParser"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "5c86e141",
"metadata": {},
"outputs": [],
"source": [
"retry_parser = RetryWithErrorOutputParser.from_llm(\n",
" parser=parser, llm=OpenAI(temperature=0)\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "9c04f731",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Action(action='search', action_input='who is leo di caprios gf?')"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"retry_parser.parse_with_prompt(bad_response, prompt_value)"
]
}
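,
{
"cell_type": "markdown",
"id": "5e4d3c2b",
"metadata": {},
"source": [
"For comparison, the plain `RetryOutputParser` is constructed the same way; it re-prompts with the original prompt and completion but, unlike `RetryWithErrorOutputParser`, does not include the parsing error. A minimal sketch (output omitted, since running it requires an API call):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7f6e5d4c",
"metadata": {},
"outputs": [],
"source": [
"# Plain retry: the LLM sees the prompt and the bad completion, but not the\n",
"# parsing error itself.\n",
"plain_retry_parser = RetryOutputParser.from_llm(\n",
"    parser=parser, llm=OpenAI(temperature=0)\n",
")\n",
"plain_retry_parser.parse_with_prompt(bad_response, prompt_value)"
]
}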
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}