Bagatur 2023-07-19 10:23:10 -07:00 committed by GitHub
parent 7bb843477f
commit f97535b33e
2 changed files with 26 additions and 30 deletions

View File

@@ -80,12 +80,13 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for extracting information in structured formats.\n",
"Human: Use the given format to extract information from the following input:\n",
"Human: Sally is 13\n",
"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
" {'function_call': {'name': '_OutputFormatter', 'arguments': '{\\n \"output\": {\\n \"name\": \"Sally\",\\n \"age\": 13,\\n \"fav_food\": \"Unknown\"\\n }\\n}'}}\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -93,7 +94,7 @@
{
"data": {
"text/plain": [
"{'name': 'Sally', 'age': 13}"
"Person(name='Sally', age=13, fav_food='Unknown')"
]
},
"execution_count": 3,
@@ -103,7 +104,7 @@
],
"source": [
"# If we pass in a model explicitly, we need to make sure it supports the OpenAI function-calling API.\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0613\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
"\n",
"prompt_msgs = [\n",
" SystemMessage(\n",
@@ -141,12 +142,13 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for extracting information in structured formats.\n",
"Human: Use the given format to extract information from the following input:\n",
"Human: Sally is 13, Joey just turned 12 and loves spinach. Caroline is 10 years older than Sally, so she's 23.\n",
"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
" {'function_call': {'name': '_OutputFormatter', 'arguments': '{\\n \"output\": {\\n \"people\": [\\n {\\n \"name\": \"Sally\",\\n \"age\": 13,\\n \"fav_food\": \"\"\\n },\\n {\\n \"name\": \"Joey\",\\n \"age\": 12,\\n \"fav_food\": \"spinach\"\\n },\\n {\\n \"name\": \"Caroline\",\\n \"age\": 23,\\n \"fav_food\": \"\"\\n }\\n ]\\n }\\n}'}}\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -154,9 +156,7 @@
{
"data": {
"text/plain": [
"{'people': [{'name': 'Sally', 'age': 13, 'fav_food': ''},\n",
" {'name': 'Joey', 'age': 12, 'fav_food': 'spinach'},\n",
" {'name': 'Caroline', 'age': 23, 'fav_food': ''}]}"
"People(people=[Person(name='Sally', age=13, fav_food=''), Person(name='Joey', age=12, fav_food='spinach'), Person(name='Caroline', age=23, fav_food='')])"
]
},
"execution_count": 4,
@@ -192,7 +192,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 6,
"id": "3484415e",
"metadata": {},
"outputs": [],
@@ -216,7 +216,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 7,
"id": "be9b76b3",
"metadata": {},
"outputs": [
@@ -226,12 +226,13 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for extracting information in structured formats.\n",
"Human: Use the given format to extract information from the following input:\n",
"Human: Sally is 13\n",
"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
" {'function_call': {'name': 'output_formatter', 'arguments': '{\\n \"name\": \"Sally\",\\n \"age\": 13\\n}'}}\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -242,7 +243,7 @@
"{'name': 'Sally', 'age': 13}"
]
},
"execution_count": 6,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
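The output_formatter call and the plain-dict result above are what you would expect when a JSON Schema dict, rather than a Pydantic class, is passed to the chain helper; a sketch under that assumption:

    json_schema = {
        "title": "Person",
        "description": "Identifying information about a person.",
        "type": "object",
        "properties": {
            "name": {"type": "string", "description": "The person's name"},
            "age": {"type": "integer", "description": "The person's age"},
            "fav_food": {"type": "string", "description": "The person's favorite food"},
        },
        "required": ["name", "age"],
    }

    # With a dict schema the chain returns a plain dict instead of a Pydantic object.
    chain = create_structured_output_chain(json_schema, llm, prompt, verbose=True)
    chain.run("Sally is 13")  # -> {'name': 'Sally', 'age': 13}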
@@ -278,7 +279,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 8,
"id": "17f52508",
"metadata": {},
"outputs": [],
@@ -301,7 +302,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "a4658ad8",
"metadata": {},
"outputs": [
@@ -311,12 +312,13 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for recording entities\n",
"Human: Make calls to the relevant function to record the entities in the following input:\n",
"Human: Harry was a chubby brown beagle who loved chicken\n",
"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
" {'function_call': {'name': 'RecordDog', 'arguments': '{\\n \"name\": \"Harry\",\\n \"color\": \"brown\",\\n \"fav_food\": \"chicken\"\\n}'}}\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -327,7 +329,7 @@
"RecordDog(name='Harry', color='brown', fav_food='chicken')"
]
},
"execution_count": 8,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
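The RecordDog(...) result above points at the function-calling variant of the helper, most likely create_openai_fn_chain with a single Pydantic class; a hedged sketch, with the prompt text taken from the verbose output and everything else reconstructed:

    from typing import Optional

    from langchain.chains.openai_functions import create_openai_fn_chain
    from pydantic import BaseModel, Field


    class RecordDog(BaseModel):
        """Record some identifying information about a dog."""

        name: str = Field(..., description="The dog's name")
        color: str = Field(..., description="The dog's color")
        fav_food: Optional[str] = Field(None, description="The dog's favorite food")


    # Prompt text as shown in the verbose output above.
    prompt_msgs = [
        SystemMessage(content="You are a world class algorithm for recording entities"),
        HumanMessage(
            content="Make calls to the relevant function to record the entities in the following input:"
        ),
        HumanMessagePromptTemplate.from_template("{input}"),
        HumanMessage(content="Tips: Make sure to answer in the correct format"),
    ]
    prompt = ChatPromptTemplate(messages=prompt_msgs)

    # With a single schema, the chain returns an instance of that schema.
    chain = create_openai_fn_chain([RecordDog], llm, prompt, verbose=True)
    chain.run("Harry was a chubby brown beagle who loved chicken")
    # -> RecordDog(name='Harry', color='brown', fav_food='chicken')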
@@ -360,7 +362,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 10,
"id": "95ac5825",
"metadata": {},
"outputs": [
@@ -370,12 +372,13 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for recording entities\n",
"Human: Make calls to the relevant function to record the entities in the following input:\n",
"Human: The most important thing to remember about Tommy, my 12 year old, is that he'll do anything for apple pie.\n",
"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
" {'function_call': {'name': 'record_person', 'arguments': '{\\n \"name\": \"Tommy\",\\n \"age\": 12,\\n \"fav_food\": {\\n \"food\": \"apple pie\"\\n }\\n}'}}\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -386,7 +389,7 @@
"{'name': 'Tommy', 'age': 12, 'fav_food': {'food': 'apple pie'}}"
]
},
"execution_count": 9,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
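The record_person call and the nested {'food': 'apple pie'} arguments above are consistent with passing a plain Python function (type hints plus docstring) to the same helper; a sketch, where the FavFood model and the exact signature are guesses:

    class FavFood(BaseModel):  # name is a guess; the nested {'food': ...} shape comes from the output above
        """Either the name of a food or null."""

        food: Optional[str] = Field(None, description="The name of the food, or null if unknown")


    def record_person(name: str, age: int, fav_food: Optional[FavFood] = None) -> str:
        """Record some basic identifying information about a person.

        Args:
            name: The person's name.
            age: The person's age in years.
            fav_food: The person's favorite food, if known.
        """
        return f"Recording person {name} of age {age} with favorite food {fav_food}!"


    # A plain Python function works too; with a single function the chain
    # returns just the parsed arguments.
    chain = create_openai_fn_chain([record_person], llm, prompt, verbose=True)
    chain.run(
        "The most important thing to remember about Tommy, my 12 year old, is that "
        "he'll do anything for apple pie."
    )
    # -> {'name': 'Tommy', 'age': 12, 'fav_food': {'food': 'apple pie'}}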
@@ -431,7 +434,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 11,
"id": "8b0d11de",
"metadata": {},
"outputs": [
@@ -441,12 +444,13 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mSystem: You are a world class algorithm for recording entities\n",
"Human: Make calls to the relevant function to record the entities in the following input:\n",
"Human: I can't find my dog Henry anywhere, he's a small brown beagle. Could you send a message about him?\n",
"Human: Tips: Make sure to answer in the correct format\u001b[0m\n",
" {'function_call': {'name': 'record_dog', 'arguments': '{\\n \"name\": \"Henry\",\\n \"color\": \"brown\",\\n \"fav_food\": {\\n \"food\": null\\n }\\n}'}}\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@@ -458,7 +462,7 @@
" 'arguments': {'name': 'Henry', 'color': 'brown', 'fav_food': {'food': None}}}"
]
},
"execution_count": 10,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
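The final result above, a dict naming the chosen function together with its arguments, is what the helper appears to return when several functions are supplied; a sketch of a plausible companion function, reusing the definitions from the previous sketches:

    def record_dog(name: str, color: str, fav_food: Optional[FavFood] = None) -> str:
        """Record some basic identifying information about a dog.

        Args:
            name: The dog's name.
            color: The dog's color.
            fav_food: The dog's favorite food, if known.
        """
        return f"Recording dog {name}!"


    # With several functions, the chain reports which one was chosen and with
    # which arguments, rather than returning a single parsed object.
    chain = create_openai_fn_chain([record_person, record_dog], llm, prompt, verbose=True)
    chain.run(
        "I can't find my dog Henry anywhere, he's a small brown beagle. "
        "Could you send a message about him?"
    )
    # -> {'name': 'record_dog',
    #     'arguments': {'name': 'Henry', 'color': 'brown', 'fav_food': {'food': None}}}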
@@ -494,14 +498,6 @@
"- [OpenAPI](/docs/modules/chains/additional/openapi_openai): take an OpenAPI spec and create + execute valid requests against the API, using OpenAI functions under the hood.\n",
"- [QA with citations](/docs/modules/chains/additional/qa_citations): use OpenAI functions ability to extract citations from text."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "93425c66",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {

View File

@@ -102,7 +102,7 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
elif role == "assistant":
# Fix for azure
# Also OpenAI returns None for tool invocations
content = _dict.get("content", "")
content = _dict.get("content", "") or ""
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
else:
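A quick illustration of why the added 'or ""' matters. The message dict below is hypothetical, but the behavior matches the comments above: for function/tool invocations OpenAI (and Azure OpenAI) return an explicit null content, so the key exists and dict.get never falls back to its default:

    # Hypothetical assistant message for a function call: content is explicitly None.
    _dict = {
        "role": "assistant",
        "content": None,
        "function_call": {"name": "record_dog", "arguments": "{}"},
    }

    _dict.get("content", "")        # -> None (default ignored because the key is present)
    _dict.get("content", "") or ""  # -> "" (the `or ""` coerces None to an empty string)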