Harrison/output error (#3094)

Co-authored-by: yummydum <sumita@nowcast.co.jp>
fix_agent_callbacks
Authored by Harrison Chase 1 year ago; committed by GitHub
parent 1c1b77bbfe
commit aad0a498ac

@@ -15,7 +15,7 @@
    "id": "a389367b",
    "metadata": {},
    "source": [
-    "# 1st example: hierarchical planning agent\n",
+    "## 1st example: hierarchical planning agent\n",
     "\n",
     "In this example, we'll consider an approach called hierarchical planning, common in robotics and appearing in recent works for LLMs X robotics. We'll see it's a viable approach to start working with a massive API spec AND to assist with user queries that require multiple steps against the API.\n",
     "\n",
@@ -31,7 +31,7 @@
    "id": "4b6ecf6e",
    "metadata": {},
    "source": [
-    "## To start, let's collect some OpenAPI specs."
+    "### To start, let's collect some OpenAPI specs."
    ]
   },
   {
@@ -169,7 +169,7 @@
    "id": "76349780",
    "metadata": {},
    "source": [
-    "## How big is this spec?"
+    "### How big is this spec?"
    ]
   },
   {
@@ -229,7 +229,7 @@
    "id": "cbc4964e",
    "metadata": {},
    "source": [
-    "## Let's see some examples!\n",
+    "### Let's see some examples!\n",
     "\n",
     "Starting with GPT-4. (Some robustness iterations under way for GPT-3 family.)"
    ]
@@ -759,7 +759,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.0"
+   "version": "3.9.1"
   }
  },
 "nbformat": 4,

@@ -1,18 +1,20 @@
 {
  "cells": [
   {
+   "attachments": {},
    "cell_type": "markdown",
    "metadata": {},
    "source": [
     "# Confluence\n",
     "\n",
-    "A loader for Confluence pages. Port of https://llamahub.ai/l/confluence\n",
-    "\n",
+    "A loader for Confluence pages.\n",
     "\n",
     "This currently supports both username/api_key and Oauth2 login.\n",
     "\n",
+    "\n",
     "Specify a list page_ids and/or space_key to load in the corresponding pages into Document objects, if both are specified the union of both sets will be returned.\n",
     "\n",
+    "\n",
     "You can also specify a boolean `include_attachments` to include attachments, this is set to False by default, if set to True all attachments will be downloaded and ConfluenceReader will extract the text from the attachments and add it to the Document object. Currently supported attachment types are: PDF, PNG, JPEG/JPG, SVG, Word and Excel.\n",
     "\n",
     "Hint: space_key and page_id can both be found in the URL of a page in Confluence - https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>\n"
@@ -37,15 +39,22 @@
   ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
   "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
    "name": "python",
-   "version": "3.9.7 (default, Mar 5 2023, 20:59:52) \n[Clang 12.0.0 (clang-1200.0.32.2)]"
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.1"
   },
-  "orig_nbformat": 4,
   "vscode": {
    "interpreter": {
     "hash": "cc99336516f23363341912c6723b01ace86f02e26b4290be1efc0677e2e2ec24"

@@ -59,7 +59,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.9"
+   "version": "3.9.1"
   }
  },
 "nbformat": 4,

@@ -62,8 +62,8 @@
   {
    "cell_type": "code",
    "execution_count": 4,
-   "id": "05fe33b9",
    "metadata": {
+    "collapsed": false,
     "pycharm": {
      "name": "#%%\n"
     }
@@ -106,7 +106,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.2"
+   "version": "3.9.1"
   }
  },
 "nbformat": 4,

@@ -4,10 +4,10 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Semantic Search over a group chat messages\n",
-    "In this tutorial, we are going to use Langchain + Deep Lake with GPT4 to semantically search a group chat.\n",
+    "# Question answering over a group chat messages\n",
+    "In this tutorial, we are going to use Langchain + Deep Lake with GPT4 to semantically search and ask questions over a group chat.\n",
     "\n",
-    "View a working demo here: https://twitter.com/thisissukh_/status/1647223328363679745"
+    "View a working demo [here](https://twitter.com/thisissukh_/status/1647223328363679745)"
    ]
   },
   {
@@ -83,7 +83,7 @@
     "Generate a group chat conversation with three friends talking about their day, referencing real places and fictional names. Make it funny and as detailed as possible.\n",
     "```\n",
     "\n",
-    "I've already generated such a chat in `../../messages.txt`. We can keep it simple and use this for our example.\n",
+    "I've already generated such a chat in `messages.txt`. We can keep it simple and use this for our example.\n",
     "\n",
     "## 3. Ingest chat embeddings\n",
     "\n",
@@ -144,8 +144,10 @@
    ]
   },
   {
-   "cell_type": "markdown",
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {},
+   "outputs": [],
    "source": []
   }
  ],
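The retitled notebook's question-answering step can then be sketched on top of the ingestion sketch above; `db` refers to the Deep Lake store built there, and the model name and question are placeholders rather than the notebook's exact cells:

```python
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI

# Build a retrieval QA chain over the Deep Lake store from the previous sketch.
qa = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model_name="gpt-4"),
    chain_type="stuff",
    retriever=db.as_retriever(),
)
print(qa.run("What did the friends plan to do over the weekend?"))
```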

@@ -2,12 +2,16 @@ import json
 from typing import Union
 
 from langchain.agents.agent import AgentOutputParser
-from langchain.schema import AgentAction, AgentFinish
+from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
+from langchain.schema import AgentAction, AgentFinish, OutputParserException
 
 FINAL_ANSWER_ACTION = "Final Answer:"
 
 
 class ChatOutputParser(AgentOutputParser):
+    def get_format_instructions(self) -> str:
+        return FORMAT_INSTRUCTIONS
+
     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
         if FINAL_ANSWER_ACTION in text:
             return AgentFinish(
@@ -19,4 +23,4 @@ class ChatOutputParser(AgentOutputParser):
             return AgentAction(response["action"], response["action_input"], text)
         except Exception:
-            raise ValueError(f"Could not parse LLM output: {text}")
+            raise OutputParserException(f"Could not parse LLM output: {text}")

@@ -2,12 +2,16 @@ import re
 from typing import Union
 
 from langchain.agents.agent import AgentOutputParser
-from langchain.schema import AgentAction, AgentFinish
+from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
+from langchain.schema import AgentAction, AgentFinish, OutputParserException
 
 
 class ConvoOutputParser(AgentOutputParser):
     ai_prefix: str = "AI"
 
+    def get_format_instructions(self) -> str:
+        return FORMAT_INSTRUCTIONS
+
     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
         if f"{self.ai_prefix}:" in text:
             return AgentFinish(
@@ -16,7 +20,7 @@ class ConvoOutputParser(AgentOutputParser):
         regex = r"Action: (.*?)[\n]*Action Input: (.*)"
         match = re.search(regex, text)
         if not match:
-            raise ValueError(f"Could not parse LLM output: `{text}`")
+            raise OutputParserException(f"Could not parse LLM output: `{text}`")
         action = match.group(1)
         action_input = match.group(2)
         return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)

@@ -2,12 +2,16 @@ import re
 from typing import Union
 
 from langchain.agents.agent import AgentOutputParser
-from langchain.schema import AgentAction, AgentFinish
+from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
+from langchain.schema import AgentAction, AgentFinish, OutputParserException
 
 FINAL_ANSWER_ACTION = "Final Answer:"
 
 
 class MRKLOutputParser(AgentOutputParser):
+    def get_format_instructions(self) -> str:
+        return FORMAT_INSTRUCTIONS
+
     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
         if FINAL_ANSWER_ACTION in text:
             return AgentFinish(
@@ -17,7 +21,7 @@ class MRKLOutputParser(AgentOutputParser):
         regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
         match = re.search(regex, text, re.DOTALL)
         if not match:
-            raise ValueError(f"Could not parse LLM output: `{text}`")
+            raise OutputParserException(f"Could not parse LLM output: `{text}`")
         action = match.group(1).strip()
         action_input = match.group(2)
         return AgentAction(action, action_input.strip(" ").strip('"'), text)

@@ -2,21 +2,23 @@ import re
 from typing import Union
 
 from langchain.agents.agent import AgentOutputParser
-from langchain.schema import AgentAction, AgentFinish
+from langchain.schema import AgentAction, AgentFinish, OutputParserException
 
 
 class ReActOutputParser(AgentOutputParser):
     def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
         action_prefix = "Action: "
         if not text.strip().split("\n")[-1].startswith(action_prefix):
-            raise ValueError(f"Could not parse LLM Output: {text}")
+            raise OutputParserException(f"Could not parse LLM Output: {text}")
         action_block = text.strip().split("\n")[-1]
 
         action_str = action_block[len(action_prefix) :]
         # Parse out the action and the directive.
         re_matches = re.search(r"(.*?)\[(.*?)\]", action_str)
         if re_matches is None:
-            raise ValueError(f"Could not parse action directive: {action_str}")
+            raise OutputParserException(
+                f"Could not parse action directive: {action_str}"
+            )
         action, action_input = re_matches.group(1), re_matches.group(2)
         if action == "Finish":
             return AgentFinish({"output": action_input}, text)

@@ -1,7 +1,7 @@
 from typing import Union
 
 from langchain.agents.agent import AgentOutputParser
-from langchain.schema import AgentAction, AgentFinish
+from langchain.schema import AgentAction, AgentFinish, OutputParserException
 
 
 class SelfAskOutputParser(AgentOutputParser):
@@ -12,7 +12,7 @@ class SelfAskOutputParser(AgentOutputParser):
         if followup not in last_line:
             finish_string = "So the final answer is: "
             if finish_string not in last_line:
-                raise ValueError(f"Could not parse output: {text}")
+                raise OutputParserException(f"Could not parse output: {text}")
             return AgentFinish({"output": last_line[len(finish_string) :]}, text)
 
         after_colon = text.split(":")[-1]
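Taken together, the parser diffs above replace bare ValueError with the dedicated OutputParserException and expose get_format_instructions on several parsers. A minimal sketch of what that buys a caller, assuming a langchain version that includes this change (the example strings are made up):

```python
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.schema import OutputParserException

parser = MRKLOutputParser()

# Well-formed ReAct-style output parses into an AgentAction.
good = "I should look this up.\nAction: Search\nAction Input: NBA standings"
print(parser.parse(good))

# Malformed output now raises OutputParserException instead of a generic ValueError,
# so callers (e.g. an agent executor or a retry wrapper) can catch it specifically.
bad = "I should look this up, but I forgot the Action line."
try:
    parser.parse(bad)
except OutputParserException as err:
    print(f"Could not parse model output: {err}")
```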

@@ -9,7 +9,7 @@ from langchain.agents.mrkl.output_parser import MRKLOutputParser
 from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
 from langchain.agents.tools import Tool
 from langchain.prompts import PromptTemplate
-from langchain.schema import AgentAction
+from langchain.schema import AgentAction, OutputParserException
 from tests.unit_tests.llms.fake_llm import FakeLLM
 
 
@@ -98,7 +98,7 @@ def test_get_final_answer_multiline() -> None:
 def test_bad_action_input_line() -> None:
     """Test handling when no action input found."""
     llm_output = "Thought: I need to search for NBA\n" "Action: Search\n" "Thought: NBA"
-    with pytest.raises(ValueError):
+    with pytest.raises(OutputParserException):
         get_action_and_input(llm_output)
 
 
@@ -107,7 +107,7 @@ def test_bad_action_line() -> None:
     llm_output = (
         "Thought: I need to search for NBA\n" "Thought: Search\n" "Action Input: NBA"
     )
-    with pytest.raises(ValueError):
+    with pytest.raises(OutputParserException):
         get_action_and_input(llm_output)
