forked from Archives/langchain
parent
df6865cd52
commit
7bec461782
@ -0,0 +1,192 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9a9350a6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Memory\n",
|
||||
"This notebook goes over how to use Memory with chat models. The main difference between this and Memory for LLMs is that rather than trying to condense all previous messages into a string, we can keep them as their own unique memory object."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "110935ae",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.prompts import (\n",
|
||||
" ChatPromptTemplate, \n",
|
||||
" MessagesPlaceholder, \n",
|
||||
" SystemMessagePromptTemplate, \n",
|
||||
" HumanMessagePromptTemplate\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "161b6629",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = ChatPromptTemplate.from_messages([\n",
|
||||
" SystemMessagePromptTemplate.from_template(\"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\"),\n",
|
||||
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||
" HumanMessagePromptTemplate.from_template(\"{input}\")\n",
|
||||
"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4976fbda",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"from langchain.chat_models import ChatOpenAI\n",
|
||||
"from langchain.memory import ConversationBufferMemory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "12a0bea6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = ChatOpenAI(temperature=0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "f6edcd6a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now initialize the memory. Note that we set `return_messages=True` to denote that this should return a list of messages when appropriate."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "f55bea38",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationBufferMemory(return_messages=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "737e8c78",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can now use this in the rest of the chain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "80152db7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"conversation = ConversationChain(memory=memory, prompt=prompt, llm=llm)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "ac68e766",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'Hello! How can I assist you today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"Hi there!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "babb33d0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"That sounds like fun! I'm happy to chat with you. Is there anything specific you'd like to talk about?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"I'm doing well! Just having a conversation with an AI.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "36f8a1dc",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Sure! I am an AI language model created by OpenAI. I was trained on a large dataset of text from the internet, which allows me to understand and generate human-like language. I can answer questions, provide information, and even have conversations like this one. Is there anything else you'd like to know about me?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"Tell me about yourself.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "79fb460b",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -0,0 +1,284 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "46196aa3",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ConversationBufferMemory\n",
|
||||
"\n",
|
||||
"This notebook shows how to use `ConversationBufferMemory`. This memory allows for storing of messages and then extracts the messages in a variable.\n",
|
||||
"\n",
|
||||
"We can first extract it as a string."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "3bac84f3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ConversationBufferMemory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "cef35e7f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationBufferMemory()\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "2c9b39af",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': 'Human: hi\\nAI: whats up'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "567f7c16",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also get the history as a list of messages (this is useful if you are using this with a chat model)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "a481a415",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationBufferMemory(return_messages=True)\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "86a56348",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': [HumanMessage(content='hi', additional_kwargs={}),\n",
|
||||
" AIMessage(content='whats up', additional_kwargs={})]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d051c1da",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Finally, let's take a look at using this in a chain (setting `verbose=True` so we can see the prompt)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "54301321",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"conversation = ConversationChain(\n",
|
||||
" llm=llm, \n",
|
||||
" verbose=True, \n",
|
||||
" memory=ConversationBufferMemory()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "ae046bff",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Hi there! It's nice to meet you. How can I help you today?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"Hi there!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "d8e2a6ff",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI: Hi there! It's nice to meet you. How can I help you today?\n",
|
||||
"Human: I'm doing well! Just having a conversation with an AI.\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" That's great! It's always nice to have a conversation with someone new. What would you like to talk about?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"I'm doing well! Just having a conversation with an AI.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "15eda316",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI: Hi there! It's nice to meet you. How can I help you today?\n",
|
||||
"Human: I'm doing well! Just having a conversation with an AI.\n",
|
||||
"AI: That's great! It's always nice to have a conversation with someone new. What would you like to talk about?\n",
|
||||
"Human: Tell me about yourself.\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Sure! I'm an AI created to help people with their everyday tasks. I'm programmed to understand natural language and provide helpful information. I'm also constantly learning and updating my knowledge base so I can provide more accurate and helpful answers.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"Tell me about yourself.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "bd0146c2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"And that's it for the getting started! There are plenty of different types of memory; check out our examples to see them all."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "447c138d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -0,0 +1,310 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "716c8cab",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ConversationBufferWindowMemory\n",
|
||||
"\n",
|
||||
"`ConversationBufferWindowMemory` keeps a list of the interactions of the conversation over time. It only uses the last K interactions. This can be useful for keeping a sliding window of the most recent interactions, so the buffer does not get too large.\n",
|
||||
"\n",
|
||||
"Let's first explore the basic functionality of this type of memory."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "dc9d10b0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ConversationBufferWindowMemory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "2dac7769",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationBufferWindowMemory( k=1)\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n",
|
||||
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "40664f10",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': 'Human: not much you\\nAI: not much'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b1932b49",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also get the history as a list of messages (this is useful if you are using this with a chat model)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "5fd077d5",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationBufferWindowMemory( k=1, return_messages=True)\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n",
|
||||
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "b94b750f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': [HumanMessage(content='not much you', additional_kwargs={}),\n",
|
||||
" AIMessage(content='not much', additional_kwargs={})]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "ac59a682",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "0b9da4cd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"conversation_with_summary = ConversationChain(\n",
|
||||
" llm=OpenAI(temperature=0), \n",
|
||||
" # We set a low k=2, to only keep the last 2 interactions in memory\n",
|
||||
" memory=ConversationBufferWindowMemory(k=2), \n",
|
||||
" verbose=True\n",
|
||||
")\n",
|
||||
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "90f73431",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI: Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?\n",
|
||||
"Human: What's their issues?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" The customer is having trouble connecting to their Wi-Fi network. I'm helping them troubleshoot the issue and get them connected.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"What's their issues?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "cbb499e7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI: Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?\n",
|
||||
"Human: What's their issues?\n",
|
||||
"AI: The customer is having trouble connecting to their Wi-Fi network. I'm helping them troubleshoot the issue and get them connected.\n",
|
||||
"Human: Is it going well?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Yes, it's going well so far. We've already identified the problem and are now working on a solution.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Is it going well?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "0d209cfe",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"Human: What's their issues?\n",
|
||||
"AI: The customer is having trouble connecting to their Wi-Fi network. I'm helping them troubleshoot the issue and get them connected.\n",
|
||||
"Human: Is it going well?\n",
|
||||
"AI: Yes, it's going well so far. We've already identified the problem and are now working on a solution.\n",
|
||||
"Human: What's the solution?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" The solution is to reset the router and reconfigure the settings. We're currently in the process of doing that.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Notice here that the first interaction does not appear.\n",
|
||||
"conversation_with_summary.predict(input=\"What's the solution?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8c09a239",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -0,0 +1,300 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "44c9933a",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Conversation Knowledge Graph Memory\n",
|
||||
"\n",
|
||||
"This type of memory uses a knowledge graph to recreate memory.\n",
|
||||
"\n",
|
||||
"Let's first walk through how to use the utilities."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "f71f40ba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ConversationKGMemory\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "1d9a5b18",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"memory = ConversationKGMemory(llm=llm)\n",
|
||||
"memory.save_context({\"input\": \"say hi to sam\"}, {\"ouput\": \"who is sam\"})\n",
|
||||
"memory.save_context({\"input\": \"sam is a friend\"}, {\"ouput\": \"okay\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "3f5da288",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': 'On Sam: Sam is friend.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({\"input\": 'who is sam'})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2ab23035",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also get the history as a list of messages (this is useful if you are using this with a chat model)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "43dbbc43",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationKGMemory(llm=llm, return_messages=True)\n",
|
||||
"memory.save_context({\"input\": \"say hi to sam\"}, {\"ouput\": \"who is sam\"})\n",
|
||||
"memory.save_context({\"input\": \"sam is a friend\"}, {\"ouput\": \"okay\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "ef6a4f60",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': [SystemMessage(content='On Sam: Sam is friend.', additional_kwargs={})]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({\"input\": 'who is sam'})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5ba0dde9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's now use this in a chain!"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "b462baf1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"\n",
|
||||
"template = \"\"\"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. \n",
|
||||
"If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the \"Relevant Information\" section and does not hallucinate.\n",
|
||||
"\n",
|
||||
"Relevant Information:\n",
|
||||
"\n",
|
||||
"{history}\n",
|
||||
"\n",
|
||||
"Conversation:\n",
|
||||
"Human: {input}\n",
|
||||
"AI:\"\"\"\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"history\", \"input\"], template=template\n",
|
||||
")\n",
|
||||
"conversation_with_kg = ConversationChain(\n",
|
||||
" llm=llm, \n",
|
||||
" verbose=True, \n",
|
||||
" prompt=prompt,\n",
|
||||
" memory=ConversationKGMemory(llm=llm)\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "97efaf38",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. \n",
|
||||
"If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the \"Relevant Information\" section and does not hallucinate.\n",
|
||||
"\n",
|
||||
"Relevant Information:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Conversation:\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Hi there! I'm doing great. I'm currently in the process of learning about the world around me. I'm learning about different cultures, languages, and customs. It's really fascinating! How about you?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_kg.predict(input=\"Hi, what's up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "55b5bcad",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. \n",
|
||||
"If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the \"Relevant Information\" section and does not hallucinate.\n",
|
||||
"\n",
|
||||
"Relevant Information:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Conversation:\n",
|
||||
"Human: My name is James and I'm helping Will. He's an engineer.\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Hi James, it's nice to meet you. I'm an AI and I understand you're helping Will, the engineer. What kind of engineering does he do?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_kg.predict(input=\"My name is James and I'm helping Will. He's an engineer.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "9981e219",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. \n",
|
||||
"If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the \"Relevant Information\" section and does not hallucinate.\n",
|
||||
"\n",
|
||||
"Relevant Information:\n",
|
||||
"\n",
|
||||
"On Will: Will is an engineer.\n",
|
||||
"\n",
|
||||
"Conversation:\n",
|
||||
"Human: What do you know about Will?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Will is an engineer.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_kg.predict(input=\"What do you know about Will?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8c09a239",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -0,0 +1,262 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5d3f2f0f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ConversationSummaryMemory\n",
|
||||
"Now let's take a look at using a slightly more complex type of memory - `ConversationSummaryMemory`. This type of memory creates a summary of the conversation over time. This can be useful for condensing information from the conversation over time.\n",
|
||||
"\n",
|
||||
"Let's first explore the basic functionality of this type of memory."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "a36c34d6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ConversationSummaryMemory\n",
|
||||
"from langchain.llms import OpenAI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "89646c34",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationSummaryMemory(llm=OpenAI(temperature=0))\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "dea210aa",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': '\\nThe human greets the AI, to which the AI responds.'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3838fe93",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also get the history as a list of messages (this is useful if you are using this with a chat model)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "114dbef6",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationSummaryMemory(llm=OpenAI(temperature=0), return_messages=True)\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "39c8c106",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': [SystemMessage(content='\\nThe human greets the AI and the AI responds with a casual greeting.', additional_kwargs={})]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4fad9448",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's walk through an example of using this in a chain, again setting `verbose=True` so we can see the prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "b7274f2c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"conversation_with_summary = ConversationChain(\n",
|
||||
" llm=llm, \n",
|
||||
" memory=ConversationSummaryMemory(llm=OpenAI()),\n",
|
||||
" verbose=True\n",
|
||||
")\n",
|
||||
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "a6b6b88f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"The human greeted the AI and asked how it was doing. The AI replied that it was doing great and was currently helping a customer with a technical issue.\n",
|
||||
"Human: Tell me more about it!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Sure! The customer is having trouble with their computer not connecting to the internet. I'm helping them troubleshoot the issue and figure out what the problem is. So far, we've tried resetting the router and checking the network settings, but the issue still persists. We're currently looking into other possible solutions.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Tell me more about it!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "dad869fe",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"The human greeted the AI and asked how it was doing. The AI replied that it was doing great and was currently helping a customer with a technical issue where their computer was not connecting to the internet. The AI was troubleshooting the issue and had already tried resetting the router and checking the network settings, but the issue still persisted and they were looking into other possible solutions.\n",
|
||||
"Human: Very cool -- what is the scope of the project?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" The scope of the project is to troubleshoot the customer's computer issue and find a solution that will allow them to connect to the internet. We are currently exploring different possibilities and have already tried resetting the router and checking the network settings, but the issue still persists.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Very cool -- what is the scope of the project?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8c09a239",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -0,0 +1,290 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "2e90817b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## ConversationSummaryBufferMemory\n",
|
||||
"\n",
|
||||
"`ConversationSummaryBufferMemory` combines the last two ideas. It keeps a buffer of recent interactions in memory, but rather than just completely flushing old interactions it compiles them into a summary and uses both. Unlike the previous implementation though, it uses token length rather than number of interactions to determine when to flush interactions.\n",
|
||||
"\n",
|
||||
"Let's first walk through how to use the utilities"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "b8aec547",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.memory import ConversationSummaryBufferMemory\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"llm = OpenAI()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "2594c8f1",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=10)\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
|
||||
"memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "a25087e0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'history': 'System: \\nThe human greets the AI, to which the AI responds inquiring what is up.\\nHuman: not much you\\nAI: not much'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"memory.load_memory_variables({})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3a6e7905",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also get the history as a list of messages (this is useful if you are using this with a chat model)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "e439451f",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=10, return_messages=True)\n",
|
||||
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
|
||||
"memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a6d2569f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "ebd68c10",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" Hi there! I'm doing great. I'm spending some time learning about the latest developments in AI technology. How about you?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import ConversationChain\n",
|
||||
"conversation_with_summary = ConversationChain(\n",
|
||||
" llm=llm, \n",
|
||||
" # We set a very low max_token_limit for the purposes of testing.\n",
|
||||
" memory=ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=40),\n",
|
||||
" verbose=True\n",
|
||||
")\n",
|
||||
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "86207a61",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI: Hi there! I'm doing great. I'm spending some time learning about the latest developments in AI technology. How about you?\n",
|
||||
"Human: Just working on writing some documentation!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' That sounds like a great use of your time. Do you have experience with writing documentation?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Just working on writing some documentation!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "76a0ab39",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"System: \n",
|
||||
"The human asked the AI what it was up to and the AI responded that it was learning about the latest developments in AI technology.\n",
|
||||
"Human: Just working on writing some documentation!\n",
|
||||
"AI: That sounds like a great use of your time. Do you have experience with writing documentation?\n",
|
||||
"Human: For LangChain! Have you heard of it?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" No, I haven't heard of LangChain. Can you tell me more about it?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We can see here that there is a summary of the conversation and then some previous interactions\n",
|
||||
"conversation_with_summary.predict(input=\"For LangChain! Have you heard of it?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "8c669db1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"System: \n",
|
||||
"The human asked the AI what it was up to and the AI responded that it was learning about the latest developments in AI technology. The human then mentioned they were writing documentation, to which the AI responded that it sounded like a great use of their time and asked if they had experience with writing documentation.\n",
|
||||
"Human: For LangChain! Have you heard of it?\n",
|
||||
"AI: No, I haven't heard of LangChain. Can you tell me more about it?\n",
|
||||
"Human: Haha nope, although a lot of people confuse it for that\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Oh, okay. What is LangChain?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We can see here that the summary and the buffer are updated\n",
|
||||
"conversation_with_summary.predict(input=\"Haha nope, although a lot of people confuse it for that\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8c09a239",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -1,493 +1,21 @@
|
||||
"""Memory modules for conversation prompts."""
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from pydantic import BaseModel, Field, root_validator
|
||||
|
||||
from langchain.chains.base import Memory
|
||||
from langchain.chains.conversation.prompt import (
|
||||
ENTITY_EXTRACTION_PROMPT,
|
||||
ENTITY_SUMMARIZATION_PROMPT,
|
||||
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
|
||||
SUMMARY_PROMPT,
|
||||
)
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.graphs.networkx_graph import (
|
||||
NetworkxEntityGraph,
|
||||
get_entities,
|
||||
parse_triples,
|
||||
)
|
||||
from langchain.llms.base import BaseLLM
|
||||
from langchain.prompts.base import BasePromptTemplate
|
||||
|
||||
|
||||
def _get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -> str:
|
||||
# "stop" is a special key that can be passed as input but is not used to
|
||||
# format the prompt.
|
||||
prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
|
||||
if len(prompt_input_keys) != 1:
|
||||
raise ValueError(f"One input key expected got {prompt_input_keys}")
|
||||
return prompt_input_keys[0]
|
||||
|
||||
|
||||
class CombinedMemory(Memory, BaseModel):
    """Memory that fans reads and writes out to several sub-memories.

    Loading merges the variables of every linked memory into one dict;
    saving and clearing are forwarded to each sub-memory in turn.
    """

    # All memory objects whose variables are exposed through this instance.
    memories: List[Memory]

    @property
    def memory_variables(self) -> List[str]:
        """All memory variables provided by the linked sub-memories."""
        variables: List[str] = []
        for sub_memory in self.memories:
            variables.extend(sub_memory.memory_variables)
        return variables

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Load and merge variables from every sub-memory.

        If two sub-memories expose the same key, the later one wins
        (same semantics as dict-unpacking merge).
        """
        memory_data: Dict[str, Any] = {}
        for sub_memory in self.memories:
            memory_data.update(sub_memory.load_memory_variables(inputs))
        return memory_data

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save this turn's context into every sub-memory."""
        for sub_memory in self.memories:
            sub_memory.save_context(inputs, outputs)

    def clear(self) -> None:
        """Clear the contents of every sub-memory."""
        for sub_memory in self.memories:
            sub_memory.clear()
|
||||
|
||||
|
||||
class ConversationBufferMemory(Memory, BaseModel):
    """Buffer for storing conversation memory.

    Keeps the full transcript as a single string; each saved turn is
    appended as a "Human: ..." / "AI: ..." pair.
    """

    human_prefix: str = "Human"  # Prefix for human turns in the transcript.
    ai_prefix: str = "AI"  # Prefix to use for AI generated responses.
    buffer: str = ""  # Transcript accumulated so far.
    output_key: Optional[str] = None  # Explicit output key; inferred if None.
    input_key: Optional[str] = None  # Explicit input key; inferred if None.
    memory_key: str = "history"  #: :meta private:

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return the transcript buffer under the memory key."""
        return {self.memory_key: self.buffer}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Append this turn (human + AI lines) to the transcript buffer."""
        if self.input_key is not None:
            prompt_input_key = self.input_key
        else:
            prompt_input_key = _get_prompt_input_key(inputs, self.memory_variables)
        if self.output_key is not None:
            output_key = self.output_key
        else:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            output_key = next(iter(outputs))
        human = f"{self.human_prefix}: " + inputs[prompt_input_key]
        ai = f"{self.ai_prefix}: " + outputs[output_key]
        # Each turn is preceded by a newline (the buffer therefore starts
        # with "\n" after the first save — established upstream behavior).
        self.buffer += "\n" + "\n".join([human, ai])

    def clear(self) -> None:
        """Clear memory contents."""
        self.buffer = ""
|
||||
|
||||
|
||||
class ConversationBufferWindowMemory(Memory, BaseModel):
    """Buffer for storing conversation memory.

    Stores one string per interaction and exposes only the last ``k``
    interactions when the memory is loaded.
    """

    human_prefix: str = "Human"  # Prefix for human turns.
    ai_prefix: str = "AI"  # Prefix to use for AI generated responses.
    buffer: List[str] = Field(default_factory=list)  # One entry per interaction.
    memory_key: str = "history"  #: :meta private:
    output_key: Optional[str] = None  # Explicit output key; inferred if None.
    input_key: Optional[str] = None  # Explicit input key; inferred if None.
    k: int = 5  # Number of most recent interactions exposed on load.

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return the last ``k`` interactions joined as a single string.

        Bug fix: with ``k == 0``, ``buffer[-self.k:]`` is ``buffer[-0:]``
        which slices the WHOLE buffer instead of none of it; guard
        explicitly so a zero-sized window yields an empty history.
        """
        window = self.buffer[-self.k :] if self.k > 0 else []
        return {self.memory_key: "\n".join(window)}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        if self.input_key is None:
            prompt_input_key = _get_prompt_input_key(inputs, self.memory_variables)
        else:
            prompt_input_key = self.input_key
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            output_key = list(outputs.keys())[0]
        else:
            output_key = self.output_key
        human = f"{self.human_prefix}: " + inputs[prompt_input_key]
        ai = f"{self.ai_prefix}: " + outputs[output_key]
        self.buffer.append("\n".join([human, ai]))

    def clear(self) -> None:
        """Clear memory contents."""
        self.buffer = []
|
||||
|
||||
|
||||
# Deprecated alias kept for backwards compatibility: this class was
# previously published as ``ConversationalBufferWindowMemory`` (extra "al").
ConversationalBufferWindowMemory = ConversationBufferWindowMemory
|
||||
|
||||
|
||||
class ConversationSummaryMemory(Memory, BaseModel):
    """Conversation summarizer to memory.

    Instead of keeping the raw transcript, every saved turn is folded into
    a running natural-language summary produced by ``llm``.
    """

    buffer: str = ""  # Current running summary of the conversation.
    human_prefix: str = "Human"
    ai_prefix: str = "AI"  # Prefix to use for AI generated responses.
    llm: BaseLLM  # Model used to update the summary.
    prompt: BasePromptTemplate = SUMMARY_PROMPT  # Needs summary/new_lines vars.
    memory_key: str = "history"  #: :meta private:
    output_key: Optional[str] = None  # Explicit output key; inferred if None.
    input_key: Optional[str] = None  # Explicit input key; inferred if None.

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return the running summary under the memory key."""
        return {self.memory_key: self.buffer}

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        prompt_variables = values["prompt"].input_variables
        expected_keys = {"summary", "new_lines"}
        if expected_keys != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but it should have {expected_keys}."
            )
        return values

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Fold this turn into the running summary via the summarization LLM."""
        if self.input_key is not None:
            prompt_input_key = self.input_key
        else:
            prompt_input_key = _get_prompt_input_key(inputs, self.memory_variables)
        if self.output_key is not None:
            output_key = self.output_key
        else:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            output_key = next(iter(outputs))
        new_lines = "\n".join(
            [
                f"{self.human_prefix}: {inputs[prompt_input_key]}",
                f"{self.ai_prefix}: {outputs[output_key]}",
            ]
        )
        chain = LLMChain(llm=self.llm, prompt=self.prompt)
        self.buffer = chain.predict(summary=self.buffer, new_lines=new_lines)

    def clear(self) -> None:
        """Clear memory contents."""
        self.buffer = ""
|
||||
|
||||
|
||||
class ConversationEntityMemory(Memory, BaseModel):
    """Entity extractor & summarizer to memory.

    Tracks named entities mentioned in the conversation: on load, entities
    are extracted from the incoming input (and cached in ``entity_cache``);
    on save, a per-entity summary in ``store`` is refreshed using the
    recent conversation window.
    """

    # Field(default_factory=...) used for mutable defaults, consistent with
    # the other memory classes in this module.
    buffer: List[str] = Field(default_factory=list)  # Recent raw interactions.
    human_prefix: str = "Human"
    ai_prefix: str = "AI"  # Prefix to use for AI generated responses.
    llm: BaseLLM  # Model used for entity extraction and summarization.
    entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
    entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
    output_key: Optional[str] = None  # Explicit output key; inferred if None.
    input_key: Optional[str] = None  # Explicit input key; inferred if None.
    store: Dict[str, Optional[str]] = Field(default_factory=dict)  # entity -> summary
    entity_cache: List[str] = Field(default_factory=list)  # Entities from last load.
    k: int = 3  # Number of recent interactions used as context.
    chat_history_key: str = "history"

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return ["entities", self.chat_history_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return recent history plus summaries of entities in the input.

        Side effect: caches the extracted entity names in ``entity_cache``
        so ``save_context`` can update their summaries afterwards.
        """
        chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
        if self.input_key is None:
            prompt_input_key = _get_prompt_input_key(inputs, self.memory_variables)
        else:
            prompt_input_key = self.input_key
        output = chain.predict(
            history="\n".join(self.buffer[-self.k :]),
            input=inputs[prompt_input_key],
        )
        # The extraction prompt answers the literal string "NONE" when no
        # entities are present; otherwise a comma-separated list.
        if output.strip() == "NONE":
            entities = []
        else:
            entities = [w.strip() for w in output.split(",")]
        entity_summaries = {}
        for entity in entities:
            entity_summaries[entity] = self.store.get(entity, "")
        self.entity_cache = entities
        return {
            self.chat_history_key: "\n".join(self.buffer[-self.k :]),
            "entities": entity_summaries,
        }

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer.

        Also refreshes the stored summary of every entity found during the
        most recent ``load_memory_variables`` call. (The original placed
        this description as a bare string mid-function — a no-op statement,
        not a docstring; it is now a real docstring.)
        """
        if inputs is None:
            raise ValueError(
                "Inputs must be provided to save context from "
                "ConversationEntityMemory."
            )
        if self.input_key is None:
            prompt_input_key = _get_prompt_input_key(inputs, self.memory_variables)
        else:
            prompt_input_key = self.input_key
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            output_key = list(outputs.keys())[0]
        else:
            output_key = self.output_key
        human = f"{self.human_prefix}: " + inputs[prompt_input_key]
        ai = f"{self.ai_prefix}: " + outputs[output_key]
        # The summarization chain is loop-invariant; build it once instead of
        # once per entity.
        chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
        for entity in self.entity_cache:
            # Key-value store holds the entity's current summary ("" if new).
            existing_summary = self.store.get(entity, "")
            output = chain.predict(
                summary=existing_summary,
                history="\n".join(self.buffer[-self.k :]),
                input=inputs[prompt_input_key],
                entity=entity,
            )
            self.store[entity] = output.strip()
        new_lines = "\n".join([human, ai])
        self.buffer.append(new_lines)

    def clear(self) -> None:
        """Clear memory contents."""
        self.buffer = []
        self.store = {}
|
||||
|
||||
|
||||
class ConversationSummaryBufferMemory(Memory, BaseModel):
    """Buffer with summarizer for storing conversation memory.

    Recent interactions are kept verbatim in ``buffer``; once the buffer
    exceeds ``max_token_limit`` tokens, the oldest interactions are pruned
    and folded into ``moving_summary_buffer`` by the summarization LLM.
    """

    buffer: List[str] = Field(default_factory=list)  # Verbatim recent turns.
    max_token_limit: int = 2000  # Token budget for the verbatim buffer.
    moving_summary_buffer: str = ""  # Summary of pruned (older) turns.
    llm: BaseLLM  # Model used to summarize pruned turns (and count tokens).
    prompt: BasePromptTemplate = SUMMARY_PROMPT  # Needs summary/new_lines vars.
    memory_key: str = "history"
    human_prefix: str = "Human"
    ai_prefix: str = "AI"  # Prefix to use for AI generated responses.
    output_key: Optional[str] = None  # Explicit output key; inferred if None.
    input_key: Optional[str] = None  # Explicit input key; inferred if None.

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return the summary (if any) followed by the verbatim buffer."""
        if self.moving_summary_buffer == "":
            return {self.memory_key: "\n".join(self.buffer)}
        memory_val = self.moving_summary_buffer + "\n" + "\n".join(self.buffer)
        return {self.memory_key: memory_val}

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        prompt_variables = values["prompt"].input_variables
        expected_keys = {"summary", "new_lines"}
        if expected_keys != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but it should have {expected_keys}."
            )
        return values

    def get_num_tokens_list(self, arr: List[str]) -> List[int]:
        """Get list of number of tokens in each string in the input array."""
        return [self.llm.get_num_tokens(x) for x in arr]

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context to the buffer, pruning oldest turns into the summary.

        Performance fix: the pruning loop previously re-tokenized the whole
        remaining buffer on every iteration (quadratic in LLM tokenizer
        calls). Token counts are now computed once and decremented as turns
        are popped; the pruning decisions are identical.
        """
        if self.input_key is None:
            prompt_input_key = _get_prompt_input_key(inputs, self.memory_variables)
        else:
            prompt_input_key = self.input_key
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            output_key = list(outputs.keys())[0]
        else:
            output_key = self.output_key
        human = f"{self.human_prefix}: {inputs[prompt_input_key]}"
        ai = f"{self.ai_prefix}: {outputs[output_key]}"
        new_lines = "\n".join([human, ai])
        self.buffer.append(new_lines)
        # Prune buffer if it exceeds max token limit.
        token_counts = self.get_num_tokens_list(self.buffer)
        curr_buffer_length = sum(token_counts)
        if curr_buffer_length > self.max_token_limit:
            pruned_memory = []
            while curr_buffer_length > self.max_token_limit:
                pruned_memory.append(self.buffer.pop(0))
                curr_buffer_length -= token_counts.pop(0)
            chain = LLMChain(llm=self.llm, prompt=self.prompt)
            self.moving_summary_buffer = chain.predict(
                summary=self.moving_summary_buffer, new_lines=("\n".join(pruned_memory))
            )

    def clear(self) -> None:
        """Clear memory contents."""
        self.buffer = []
        self.moving_summary_buffer = ""
|
||||
|
||||
|
||||
class ConversationKGMemory(Memory, BaseModel):
    """Knowledge graph memory for storing conversation memory.

    Integrates with external knowledge graph to store and retrieve
    information about knowledge triples in the conversation.
    """

    k: int = 2
    """Number of previous utterances to include in the context."""
    # (docstring above moved next to ``k``: it previously sat after ``llm``,
    # which it does not describe.)
    buffer: List[str] = Field(default_factory=list)  # verbatim "Human/AI" turn pairs
    kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
    knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
    entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
    llm: BaseLLM  # model used for entity and triple extraction
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    """Prefix to use for AI generated responses."""
    output_key: Optional[str] = None  # explicit output key; None -> require exactly one
    input_key: Optional[str] = None  # explicit input key; None -> infer from inputs
    memory_key: str = "history"  #: :meta private:

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return per-entity knowledge summaries for entities in the latest input."""
        entities = self._get_current_entities(inputs)
        summaries = {}
        for entity in entities:
            knowledge = self.kg.get_entity_knowledge(entity)
            if knowledge:
                summaries[entity] = ". ".join(knowledge) + "."
        if summaries:
            summary_strings = [
                f"On {entity}: {summary}" for entity, summary in summaries.items()
            ]
            context_str = "\n".join(summary_strings)
        else:
            context_str = ""
        return {self.memory_key: context_str}

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
        """Get the input key for the prompt."""
        if self.input_key is None:
            return _get_prompt_input_key(inputs, self.memory_variables)
        return self.input_key

    def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
        """Get the output key for the prompt."""
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            return list(outputs.keys())[0]
        return self.output_key

    def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
        """Get the current entities in the conversation."""
        prompt_input_key = self._get_prompt_input_key(inputs)
        chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
        output = chain.predict(
            history="\n".join(self.buffer[-self.k :]),
            input=inputs[prompt_input_key],
        )
        return get_entities(output)

    def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
        """Get and update knowledge graph from the conversation history."""
        chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
        prompt_input_key = self._get_prompt_input_key(inputs)
        output = chain.predict(
            history="\n".join(self.buffer[-self.k :]),
            input=inputs[prompt_input_key],
            # NOTE(review): ``verbose`` is forwarded as a *prompt input*, not a
            # chain config flag, and the extraction prompt does not declare it —
            # looks like a debugging leftover; confirm before removing.
            verbose=True,
        )
        knowledge = parse_triples(output)
        for triple in knowledge:
            self.kg.add_triple(triple)

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        # Mine the new input for triples first, then record the exchange.
        self._get_and_update_kg(inputs)
        prompt_input_key = self._get_prompt_input_key(inputs)
        output_key = self._get_prompt_output_key(outputs)
        human = f"{self.human_prefix}: {inputs[prompt_input_key]}"
        ai = f"{self.ai_prefix}: {outputs[output_key]}"
        new_lines = "\n".join([human.strip(), ai.strip()])
        self.buffer.append(new_lines)

    def clear(self) -> None:
        """Clear memory contents.

        Fix: previously only the graph was cleared, leaving stale utterances
        in ``buffer`` that would keep feeding the extraction prompts.
        """
        self.buffer = []
        self.kg.clear()
|
||||
from langchain.memory.buffer import ConversationBufferMemory
|
||||
from langchain.memory.buffer_window import ConversationBufferWindowMemory
|
||||
from langchain.memory.combined import CombinedMemory
|
||||
from langchain.memory.entity import ConversationEntityMemory
|
||||
from langchain.memory.kg import ConversationKGMemory
|
||||
from langchain.memory.summary import ConversationSummaryMemory
|
||||
from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
|
||||
|
||||
# This is only for backwards compatibility.
|
||||
|
||||
__all__ = [
|
||||
"ConversationSummaryBufferMemory",
|
||||
"ConversationSummaryMemory",
|
||||
"ConversationKGMemory",
|
||||
"ConversationBufferWindowMemory",
|
||||
"ConversationEntityMemory",
|
||||
"ConversationBufferMemory",
|
||||
"CombinedMemory",
|
||||
]
|
||||
|
@ -0,0 +1,21 @@
|
||||
from langchain.memory.buffer import ConversationBufferMemory
|
||||
from langchain.memory.buffer_window import ConversationBufferWindowMemory
|
||||
from langchain.memory.chat_memory import ChatMessageHistory
|
||||
from langchain.memory.combined import CombinedMemory
|
||||
from langchain.memory.entity import ConversationEntityMemory
|
||||
from langchain.memory.kg import ConversationKGMemory
|
||||
from langchain.memory.simple import SimpleMemory
|
||||
from langchain.memory.summary import ConversationSummaryMemory
|
||||
from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
|
||||
|
||||
__all__ = [
|
||||
"CombinedMemory",
|
||||
"ConversationBufferWindowMemory",
|
||||
"ConversationBufferMemory",
|
||||
"SimpleMemory",
|
||||
"ConversationSummaryBufferMemory",
|
||||
"ConversationKGMemory",
|
||||
"ConversationEntityMemory",
|
||||
"ConversationSummaryMemory",
|
||||
"ChatMessageHistory",
|
||||
]
|
@ -0,0 +1,38 @@
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain.memory.chat_memory import BaseChatMemory
|
||||
from langchain.memory.utils import get_buffer_string
|
||||
|
||||
|
||||
class ConversationBufferMemory(BaseChatMemory, BaseModel):
    """Buffer for storing conversation memory."""

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    memory_key: str = "history"  #: :meta private:

    @property
    def buffer(self) -> Any:
        """Memory contents: raw messages, or a prefixed transcript string."""
        messages = self.chat_memory.messages
        if not self.return_messages:
            return get_buffer_string(
                messages,
                human_prefix=self.human_prefix,
                ai_prefix=self.ai_prefix,
            )
        return messages

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Expose the buffer under ``memory_key``."""
        return {self.memory_key: self.buffer}
|
@ -0,0 +1,42 @@
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain.memory.chat_memory import BaseChatMemory
|
||||
from langchain.memory.utils import get_buffer_string
|
||||
from langchain.schema import BaseMessage
|
||||
|
||||
|
||||
class ConversationBufferWindowMemory(BaseChatMemory, BaseModel):
    """Buffer for storing conversation memory."""

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    memory_key: str = "history"  #: :meta private:
    k: int = 5

    @property
    def buffer(self) -> List[BaseMessage]:
        """String buffer of memory."""
        return self.chat_memory.messages

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return history buffer, restricted to the last ``k`` exchanges."""
        # Two messages (human + AI) per exchange, hence the factor of 2.
        recent = self.buffer[-self.k * 2 :]
        window: Any = recent
        if not self.return_messages:
            window = get_buffer_string(
                recent,
                human_prefix=self.human_prefix,
                ai_prefix=self.ai_prefix,
            )
        return {self.memory_key: window}
|
@ -0,0 +1,46 @@
|
||||
from abc import ABC
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from langchain.memory.utils import get_prompt_input_key
|
||||
from langchain.schema import AIMessage, BaseMemory, BaseMessage, HumanMessage
|
||||
|
||||
|
||||
class ChatMessageHistory(BaseModel):
    """Ordered, in-memory record of the messages in a conversation."""

    messages: List[BaseMessage] = Field(default_factory=list)  # oldest first

    def add_user_message(self, message: str) -> None:
        """Append ``message`` as a human turn."""
        self.messages.append(HumanMessage(content=message))

    def add_ai_message(self, message: str) -> None:
        """Append ``message`` as an AI turn."""
        self.messages.append(AIMessage(content=message))

    def clear(self) -> None:
        """Drop all recorded messages."""
        self.messages = []
|
||||
|
||||
|
||||
class BaseChatMemory(BaseMemory, ABC):
    """Shared plumbing for memories backed by a ``ChatMessageHistory``."""

    chat_memory: ChatMessageHistory = Field(default_factory=ChatMessageHistory)
    output_key: Optional[str] = None
    input_key: Optional[str] = None
    return_messages: bool = False

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        # Resolve the input key: explicit setting wins, otherwise infer it.
        prompt_input_key = (
            self.input_key
            if self.input_key is not None
            else get_prompt_input_key(inputs, self.memory_variables)
        )
        # Resolve the output key: without an explicit key, exactly one is required.
        output_key = self.output_key
        if output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            output_key = next(iter(outputs))
        self.chat_memory.add_user_message(inputs[prompt_input_key])
        self.chat_memory.add_ai_message(outputs[output_key])

    def clear(self) -> None:
        """Clear memory contents."""
        self.chat_memory.clear()
|
@ -0,0 +1,49 @@
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain.schema import BaseMemory
|
||||
|
||||
|
||||
class CombinedMemory(BaseMemory, BaseModel):
    """Class for combining multiple memories' data together."""

    memories: List[BaseMemory]
    """For tracking all the memories that should be accessed."""

    @property
    def memory_variables(self) -> List[str]:
        """All the memory variables that this instance provides.

        Collected from all of the linked memories.
        """
        # Fix: the second sentence above was previously a separate no-op string
        # expression (with a typo, "from the all the") instead of docstring text.
        memory_variables = []
        for memory in self.memories:
            memory_variables.extend(memory.memory_variables)
        return memory_variables

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Load all vars from sub-memories."""
        memory_data: Dict[str, Any] = {}
        # Collect vars from all sub-memories; later memories win on key clashes,
        # matching the previous dict-merge behavior (but without rebuilding the
        # accumulated dict on every iteration).
        for memory in self.memories:
            memory_data.update(memory.load_memory_variables(inputs))
        return memory_data

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this session for every memory."""
        # Save context for all sub-memories
        for memory in self.memories:
            memory.save_context(inputs, outputs)

    def clear(self) -> None:
        """Clear context from this session for every memory."""
        for memory in self.memories:
            memory.clear()
|
@ -0,0 +1,103 @@
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.llms.base import BaseLLM
|
||||
from langchain.memory.chat_memory import BaseChatMemory
|
||||
from langchain.memory.prompt import (
|
||||
ENTITY_EXTRACTION_PROMPT,
|
||||
ENTITY_SUMMARIZATION_PROMPT,
|
||||
)
|
||||
from langchain.memory.utils import get_buffer_string, get_prompt_input_key
|
||||
from langchain.prompts.base import BasePromptTemplate
|
||||
from langchain.schema import BaseMessage
|
||||
|
||||
|
||||
class ConversationEntityMemory(BaseChatMemory, BaseModel):
    """Entity extractor & summarizer to memory.

    On load, extracts entity names from the latest input (using recent
    history as coreference context) and returns their stored summaries; on
    save, refreshes the stored summary of each cached entity via the LLM.
    """

    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    llm: BaseLLM  # model used for entity extraction and summarization
    entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
    entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
    store: Dict[str, Optional[str]] = {}  # entity name -> latest summary
    entity_cache: List[str] = []  # entities found by the most recent load
    k: int = 3  # number of prior exchanges to include as context
    chat_history_key: str = "history"

    @property
    def buffer(self) -> List[BaseMessage]:
        """Raw chat messages backing this memory."""
        return self.chat_memory.messages

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return ["entities", self.chat_history_key]

    def _resolve_input_key(self, inputs: Dict[str, Any]) -> str:
        """Return the explicit input key, or infer it from ``inputs``."""
        if self.input_key is None:
            return get_prompt_input_key(inputs, self.memory_variables)
        return self.input_key

    def _recent_buffer_string(self) -> str:
        """Render the last ``k`` exchanges (2 messages each) as a transcript."""
        return get_buffer_string(
            self.buffer[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer plus summaries of entities in the latest input."""
        chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
        prompt_input_key = self._resolve_input_key(inputs)
        buffer_string = self._recent_buffer_string()
        output = chain.predict(
            history=buffer_string,
            input=inputs[prompt_input_key],
        )
        # The extraction prompt answers "NONE" when nothing of note is present;
        # otherwise it returns a comma-separated list of entity names.
        if output.strip() == "NONE":
            entities = []
        else:
            entities = [w.strip() for w in output.split(",")]
        entity_summaries = {}
        for entity in entities:
            entity_summaries[entity] = self.store.get(entity, "")
        # Remember which entities save_context() should re-summarize.
        self.entity_cache = entities
        if self.return_messages:
            buffer: Any = self.buffer[-self.k * 2 :]
        else:
            buffer = buffer_string
        return {
            self.chat_history_key: buffer,
            "entities": entity_summaries,
        }

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        super().save_context(inputs, outputs)
        prompt_input_key = self._resolve_input_key(inputs)
        # Hoisted out of the loop: neither the chain nor the transcript depends
        # on the entity being summarized (only ``store`` changes per iteration).
        chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
        buffer_string = self._recent_buffer_string()
        for entity in self.entity_cache:
            # key value store for entity
            existing_summary = self.store.get(entity, "")
            output = chain.predict(
                summary=existing_summary,
                history=buffer_string,
                input=inputs[prompt_input_key],
                entity=entity,
            )
            self.store[entity] = output.strip()

    def clear(self) -> None:
        """Clear memory contents."""
        self.chat_memory.clear()
        self.store = {}
        # Fix: also drop the entity cache so a cleared memory does not
        # re-summarize entities left over from the previous conversation.
        self.entity_cache = []
|
@ -0,0 +1,122 @@
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.graphs import NetworkxEntityGraph
|
||||
from langchain.graphs.networkx_graph import get_entities, parse_triples
|
||||
from langchain.llms.base import BaseLLM
|
||||
from langchain.memory.chat_memory import BaseChatMemory
|
||||
from langchain.memory.prompt import (
|
||||
ENTITY_EXTRACTION_PROMPT,
|
||||
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
|
||||
)
|
||||
from langchain.memory.utils import get_buffer_string, get_prompt_input_key
|
||||
from langchain.prompts.base import BasePromptTemplate
|
||||
from langchain.schema import SystemMessage
|
||||
|
||||
|
||||
class ConversationKGMemory(BaseChatMemory, BaseModel):
    """Knowledge graph memory for storing conversation memory.

    Integrates with external knowledge graph to store and retrieve
    information about knowledge triples in the conversation.
    """

    k: int = 2
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
    knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
    entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
    llm: BaseLLM
    # NOTE(review): the stray docstring below appears to describe ``k`` above,
    # not ``llm`` — consider relocating it next to ``k``.
    """Number of previous utterances to include in the context."""
    memory_key: str = "history"  #: :meta private:

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return per-entity knowledge summaries for entities in the latest input."""
        entities = self._get_current_entities(inputs)
        summaries = {}
        for entity in entities:
            knowledge = self.kg.get_entity_knowledge(entity)
            if knowledge:
                summaries[entity] = ". ".join(knowledge) + "."
        if summaries:
            summary_strings = [
                f"On {entity}: {summary}" for entity, summary in summaries.items()
            ]
            # As system messages when return_messages is set, else one string.
            if self.return_messages:
                context: Any = [SystemMessage(content=text) for text in summary_strings]
            else:
                context = "\n".join(summary_strings)
        else:
            if self.return_messages:
                context = []
            else:
                context = ""
        return {self.memory_key: context}

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
        """Get the input key for the prompt."""
        if self.input_key is None:
            return get_prompt_input_key(inputs, self.memory_variables)
        return self.input_key

    def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
        """Get the output key for the prompt."""
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            return list(outputs.keys())[0]
        return self.output_key

    def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
        """Get the current entities in the conversation."""
        prompt_input_key = self._get_prompt_input_key(inputs)
        chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
        # Render the last ``k`` exchanges (2 messages per exchange) as context.
        buffer_string = get_buffer_string(
            self.chat_memory.messages[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        output = chain.predict(
            history=buffer_string,
            input=inputs[prompt_input_key],
        )
        return get_entities(output)

    def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
        """Get and update knowledge graph from the conversation history."""
        chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
        prompt_input_key = self._get_prompt_input_key(inputs)
        buffer_string = get_buffer_string(
            self.chat_memory.messages[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        output = chain.predict(
            history=buffer_string,
            input=inputs[prompt_input_key],
            # NOTE(review): ``verbose`` is forwarded as a *prompt input*, not a
            # chain config flag, and the extraction prompt does not declare it —
            # looks like a debugging leftover; confirm before removing.
            verbose=True,
        )
        knowledge = parse_triples(output)
        for triple in knowledge:
            self.kg.add_triple(triple)

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        # Record the exchange in chat history first, then mine it for triples.
        super().save_context(inputs, outputs)
        self._get_and_update_kg(inputs)

    def clear(self) -> None:
        """Clear memory contents."""
        super().clear()
        self.kg.clear()
|
@ -0,0 +1,165 @@
|
||||
# flake8: noqa
|
||||
from langchain.prompts.prompt import PromptTemplate
|
||||
|
||||
_DEFAULT_ENTITY_MEMORY_CONVERSATION_TEMPLATE = """You are an assistant to a human, powered by a large language model trained by OpenAI.
|
||||
|
||||
You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
|
||||
|
||||
You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics.
|
||||
|
||||
Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist.
|
||||
|
||||
Context:
|
||||
{entities}
|
||||
|
||||
Current conversation:
|
||||
{history}
|
||||
Last line:
|
||||
Human: {input}
|
||||
You:"""
|
||||
|
||||
ENTITY_MEMORY_CONVERSATION_TEMPLATE = PromptTemplate(
|
||||
input_variables=["entities", "history", "input"],
|
||||
template=_DEFAULT_ENTITY_MEMORY_CONVERSATION_TEMPLATE,
|
||||
)
|
||||
|
||||
_DEFAULT_SUMMARIZER_TEMPLATE = """Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary.
|
||||
|
||||
EXAMPLE
|
||||
Current summary:
|
||||
The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good.
|
||||
|
||||
New lines of conversation:
|
||||
Human: Why do you think artificial intelligence is a force for good?
|
||||
AI: Because artificial intelligence will help humans reach their full potential.
|
||||
|
||||
New summary:
|
||||
The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.
|
||||
END OF EXAMPLE
|
||||
|
||||
Current summary:
|
||||
{summary}
|
||||
|
||||
New lines of conversation:
|
||||
{new_lines}
|
||||
|
||||
New summary:"""
|
||||
SUMMARY_PROMPT = PromptTemplate(
|
||||
input_variables=["summary", "new_lines"], template=_DEFAULT_SUMMARIZER_TEMPLATE
|
||||
)
|
||||
|
||||
_DEFAULT_ENTITY_EXTRACTION_TEMPLATE = """You are an AI assistant reading the transcript of a conversation between an AI and a human. Extract all of the proper nouns from the last line of conversation. As a guideline, a proper noun is generally capitalized. You should definitely extract all names and places.
|
||||
|
||||
The conversation history is provided just in case of a coreference (e.g. "What do you know about him" where "him" is defined in a previous line) -- ignore items mentioned there that are not in the last line.
|
||||
|
||||
Return the output as a single comma-separated list, or NONE if there is nothing of note to return (e.g. the user is just issuing a greeting or having a simple conversation).
|
||||
|
||||
EXAMPLE
|
||||
Conversation history:
|
||||
Person #1: how's it going today?
|
||||
AI: "It's going great! How about you?"
|
||||
Person #1: good! busy working on Langchain. lots to do.
|
||||
AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"
|
||||
Last line:
|
||||
Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff.
|
||||
Output: Langchain
|
||||
END OF EXAMPLE
|
||||
|
||||
EXAMPLE
|
||||
Conversation history:
|
||||
Person #1: how's it going today?
|
||||
AI: "It's going great! How about you?"
|
||||
Person #1: good! busy working on Langchain. lots to do.
|
||||
AI: "That sounds like a lot of work! What kind of things are you doing to make Langchain better?"
|
||||
Last line:
|
||||
Person #1: i'm trying to improve Langchain's interfaces, the UX, its integrations with various products the user might want ... a lot of stuff. I'm working with Person #2.
|
||||
Output: Langchain, Person #2
|
||||
END OF EXAMPLE
|
||||
|
||||
Conversation history (for reference only):
|
||||
{history}
|
||||
Last line of conversation (for extraction):
|
||||
Human: {input}
|
||||
|
||||
Output:"""
|
||||
ENTITY_EXTRACTION_PROMPT = PromptTemplate(
|
||||
input_variables=["history", "input"], template=_DEFAULT_ENTITY_EXTRACTION_TEMPLATE
|
||||
)
|
||||
|
||||
_DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE = """You are an AI assistant helping a human keep track of facts about relevant people, places, and concepts in their life. Update the summary of the provided entity in the "Entity" section based on the last line of your conversation with the human. If you are writing the summary for the first time, return a single sentence.
|
||||
The update should only include facts that are relayed in the last line of conversation about the provided entity, and should only contain facts about the provided entity.
|
||||
|
||||
If there is no new information about the provided entity or the information is not worth noting (not an important or relevant fact to remember long-term), return the existing summary unchanged.
|
||||
|
||||
Full conversation history (for context):
|
||||
{history}
|
||||
|
||||
Entity to summarize:
|
||||
{entity}
|
||||
|
||||
Existing summary of {entity}:
|
||||
{summary}
|
||||
|
||||
Last line of conversation:
|
||||
Human: {input}
|
||||
Updated summary:"""
|
||||
|
||||
ENTITY_SUMMARIZATION_PROMPT = PromptTemplate(
|
||||
input_variables=["entity", "summary", "history", "input"],
|
||||
template=_DEFAULT_ENTITY_SUMMARIZATION_TEMPLATE,
|
||||
)
|
||||
|
||||
|
||||
KG_TRIPLE_DELIMITER = "<|>"

# Few-shot prompt for extracting (subject, predicate, object) triples from the
# last line of a conversation. Each example shows the history, the last line,
# and the expected delimiter-joined output.
_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = (
    "You are a networked intelligence helping a human track knowledge triples"
    " about all relevant people, things, concepts, etc. and integrating"
    " them with your knowledge stored within your weights"
    " as well as that stored in a knowledge graph."
    " Extract all of the knowledge triples from the last line of conversation."
    " A knowledge triple is a clause that contains a subject, a predicate,"
    " and an object. The subject is the entity being described,"
    " the predicate is the property of the subject that is being"
    " described, and the object is the value of the property.\n\n"
    "EXAMPLE\n"
    "Conversation history:\n"
    "Person #1: Did you hear aliens landed in Area 51?\n"
    "AI: No, I didn't hear that. What do you know about Area 51?\n"
    "Person #1: It's a secret military base in Nevada.\n"
    "AI: What do you know about Nevada?\n"
    "Last line of conversation:\n"
    "Person #1: It's a state in the US. It's also the number 1 producer of gold in the US.\n\n"
    f"Output: (Nevada, is a, state){KG_TRIPLE_DELIMITER}(Nevada, is in, US)"
    f"{KG_TRIPLE_DELIMITER}(Nevada, is the number 1 producer of, gold)\n"
    "END OF EXAMPLE\n\n"
    "EXAMPLE\n"
    "Conversation history:\n"
    "Person #1: Hello.\n"
    "AI: Hi! How are you?\n"
    "Person #1: I'm good. How are you?\n"
    "AI: I'm good too.\n"
    "Last line of conversation:\n"
    "Person #1: I'm going to the store.\n\n"
    "Output: NONE\n"
    "END OF EXAMPLE\n\n"
    "EXAMPLE\n"
    "Conversation history:\n"
    "Person #1: What do you know about Descartes?\n"
    "AI: Descartes was a French philosopher, mathematician, and scientist who lived in the 17th century.\n"
    "Person #1: The Descartes I'm referring to is a standup comedian and interior designer from Montreal.\n"
    "AI: Oh yes, He is a comedian and an interior designer. He has been in the industry for 30 years. His favorite food is baked bean pie.\n"
    "Last line of conversation:\n"
    # Fix: this utterance previously appeared only in the history, leaving the
    # "Last line of conversation:" section empty even though the expected
    # Output extracts its triples from this very line (cf. examples 1 and 2).
    "Person #1: Oh huh. I know Descartes likes to drive antique scooters and play the mandolin.\n\n"
    f"Output: (Descartes, likes to drive, antique scooters){KG_TRIPLE_DELIMITER}(Descartes, plays, mandolin)\n"
    "END OF EXAMPLE\n\n"
    "Conversation history (for reference only):\n"
    "{history}"
    "\nLast line of conversation (for extraction):\n"
    "Human: {input}\n\n"
    "Output:"
)
|
||||
|
||||
# Prompt object consumed by ConversationKGMemory for knowledge-triple extraction.
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT = PromptTemplate(
    input_variables=["history", "input"],
    template=_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE,
)
|
@ -0,0 +1,28 @@
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain.schema import BaseMemory
|
||||
|
||||
|
||||
class SimpleMemory(BaseMemory, BaseModel):
    """Simple memory for storing context or other bits of information that shouldn't
    ever change between prompts.
    """

    memories: Dict[str, Any] = dict()  # static key/value context, returned verbatim

    @property
    def memory_variables(self) -> List[str]:
        """Keys this memory contributes to the prompt."""
        return list(self.memories.keys())

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        """Return the static memories, regardless of ``inputs``."""
        return self.memories

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Nothing should be saved or changed, my memory is set in stone."""
        pass

    def clear(self) -> None:
        """Nothing to clear, got a memory like a vault."""
        pass
|
@ -0,0 +1,67 @@
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from pydantic import BaseModel, root_validator
|
||||
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.llms.base import BaseLLM
|
||||
from langchain.memory.chat_memory import BaseChatMemory
|
||||
from langchain.memory.prompt import SUMMARY_PROMPT
|
||||
from langchain.memory.utils import get_buffer_string
|
||||
from langchain.prompts.base import BasePromptTemplate
|
||||
from langchain.schema import SystemMessage
|
||||
|
||||
|
||||
class ConversationSummaryMemory(BaseChatMemory, BaseModel):
    """Memory that maintains a running, LLM-generated summary of the conversation."""

    buffer: str = ""
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    llm: BaseLLM
    prompt: BasePromptTemplate = SUMMARY_PROMPT
    memory_key: str = "history"  #: :meta private:

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return the current summary under ``memory_key``.

        Returned as a single ``SystemMessage`` when ``return_messages`` is
        set, otherwise as a plain string.
        """
        if self.return_messages:
            return {self.memory_key: [SystemMessage(content=self.buffer)]}
        return {self.memory_key: self.buffer}

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        expected_keys = {"summary", "new_lines"}
        prompt_variables = values["prompt"].input_variables
        if set(prompt_variables) != expected_keys:
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but it should have {expected_keys}."
            )
        return values

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Record the latest turn, then fold it into the running summary."""
        super().save_context(inputs, outputs)
        # The two most recent messages are the turn just saved above.
        latest_turn = get_buffer_string(
            self.chat_memory.messages[-2:],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        summarizer = LLMChain(llm=self.llm, prompt=self.prompt)
        self.buffer = summarizer.predict(summary=self.buffer, new_lines=latest_turn)

    def clear(self) -> None:
        """Clear memory contents."""
        super().clear()
        self.buffer = ""
|
@ -0,0 +1,95 @@
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from pydantic import BaseModel, root_validator
|
||||
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.llms.base import BaseLLM
|
||||
from langchain.memory.chat_memory import BaseChatMemory
|
||||
from langchain.memory.prompt import SUMMARY_PROMPT
|
||||
from langchain.memory.utils import get_buffer_string
|
||||
from langchain.prompts.base import BasePromptTemplate
|
||||
from langchain.schema import BaseMessage, SystemMessage
|
||||
|
||||
|
||||
class ConversationSummaryBufferMemory(BaseChatMemory, BaseModel):
    """Buffer with summarizer for storing conversation memory.

    Recent messages are kept verbatim in the chat buffer; once the buffer
    exceeds ``max_token_limit`` tokens, the oldest messages are pruned and
    folded into ``moving_summary_buffer`` via an LLM summarization call.
    """

    max_token_limit: int = 2000
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    moving_summary_buffer: str = ""
    llm: BaseLLM
    prompt: BasePromptTemplate = SUMMARY_PROMPT
    memory_key: str = "history"  #: :meta private:

    @property
    def buffer(self) -> List[BaseMessage]:
        """The raw (unsummarized) messages currently held in chat memory."""
        return self.chat_memory.messages

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer, prefixed with the moving summary when present."""
        buffer = self.buffer
        if self.moving_summary_buffer != "":
            # Summary of pruned history is injected as a leading system message.
            first_messages: List[BaseMessage] = [
                SystemMessage(content=self.moving_summary_buffer)
            ]
            buffer = first_messages + buffer
        if self.return_messages:
            final_buffer: Any = buffer
        else:
            final_buffer = get_buffer_string(
                buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix
            )
        return {self.memory_key: final_buffer}

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        prompt_variables = values["prompt"].input_variables
        expected_keys = {"summary", "new_lines"}
        if expected_keys != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but it should have {expected_keys}."
            )
        return values

    def get_num_tokens_list(self, arr: List[BaseMessage]) -> List[int]:
        """Get list of number of tokens in each string in the input array."""
        return [self.llm.get_num_tokens(get_buffer_string([x])) for x in arr]

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer.

        If the buffer then exceeds ``max_token_limit`` tokens, prune the
        oldest messages and summarize them into ``moving_summary_buffer``.
        """
        super().save_context(inputs, outputs)
        buffer = self.chat_memory.messages
        # Tokenize each message once and subtract counts as messages are
        # pruned, instead of re-tokenizing the whole remaining buffer on every
        # loop iteration (previously O(n^2) LLM tokenizer calls).
        token_counts = self.get_num_tokens_list(buffer)
        curr_buffer_length = sum(token_counts)
        if curr_buffer_length > self.max_token_limit:
            pruned_memory = []
            while curr_buffer_length > self.max_token_limit and buffer:
                pruned_memory.append(buffer.pop(0))
                curr_buffer_length -= token_counts.pop(0)
            chain = LLMChain(llm=self.llm, prompt=self.prompt)
            self.moving_summary_buffer = chain.predict(
                summary=self.moving_summary_buffer,
                new_lines=(
                    get_buffer_string(
                        pruned_memory,
                        human_prefix=self.human_prefix,
                        ai_prefix=self.ai_prefix,
                    )
                ),
            )

    def clear(self) -> None:
        """Clear memory contents."""
        super().clear()
        self.moving_summary_buffer = ""
|
@ -0,0 +1,38 @@
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from langchain.schema import (
|
||||
AIMessage,
|
||||
BaseMessage,
|
||||
ChatMessage,
|
||||
HumanMessage,
|
||||
SystemMessage,
|
||||
)
|
||||
|
||||
|
||||
def get_buffer_string(
    messages: List[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
) -> str:
    """Render messages as newline-separated ``role: content`` lines.

    Human/AI roles use the given prefixes; system messages render as
    "System"; chat messages use their own ``role`` attribute.
    """
    lines = []
    for m in messages:
        if isinstance(m, HumanMessage):
            prefix = human_prefix
        elif isinstance(m, AIMessage):
            prefix = ai_prefix
        elif isinstance(m, SystemMessage):
            prefix = "System"
        elif isinstance(m, ChatMessage):
            prefix = m.role
        else:
            raise ValueError(f"Got unsupported message type: {m}")
        lines.append(f"{prefix}: {m.content}")
    return "\n".join(lines)
|
||||
|
||||
|
||||
def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -> str:
    """Return the single prompt-input key present in ``inputs``.

    Memory variables and the special "stop" key (passed as input but not used
    to format the prompt) are excluded; exactly one key must remain.
    """
    reserved = set(memory_variables) | {"stop"}
    prompt_input_keys = list(set(inputs) - reserved)
    if len(prompt_input_keys) != 1:
        raise ValueError(f"One input key expected got {prompt_input_keys}")
    return prompt_input_keys[0]
|
Loading…
Reference in New Issue