Harrison/motorhead (#2599)

Co-authored-by: James O'Dwyer <100361543+softboyjimbo@users.noreply.github.com>
fix-readthedocs
Harrison Chase 1 year ago committed by GitHub
parent 79a44c8225
commit b9e5b27a99

@@ -0,0 +1,196 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Motörhead Memory\n",
"[Motörhead](https://github.com/getmetal/motorhead) is a memory server implemented in Rust. It automatically handles incremental summarization in the background and allows for stateless applications.\n",
"\n",
"## Setup\n",
"\n",
"See instructions at [Motörhead](https://github.com/getmetal/motorhead) for running the server locally.\n",
"\n"
]
},
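{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick check that the server is running, you can hit the same endpoint `MotorheadMemory` itself reads from (a sketch; it assumes the default local setup on port 8080 and reuses the `testing-1` session id from the cells below):\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"\n",
"# MotorheadMemory.init() reads this same endpoint; a 2xx response means the server is up.\n",
"res = requests.get(\"http://localhost:8080/sessions/testing-1/memory\", timeout=3)\n",
"print(res.status_code)"
]
},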
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory.motorhead_memory import MotorheadMemory\n",
"from langchain import OpenAI, LLMChain, PromptTemplate\n",
"\n",
"template = \"\"\"You are a chatbot having a conversation with a human.\n",
"\n",
"{chat_history}\n",
"Human: {human_input}\n",
"AI:\"\"\"\n",
"\n",
"prompt = PromptTemplate(\n",
" input_variables=[\"chat_history\", \"human_input\"], \n",
" template=template\n",
")\n",
"memory = MotorheadMemory(\n",
" session_id=\"testing-1\",\n",
" url=\"http://localhost:8080\",\n",
" memory_key=\"chat_history\"\n",
")\n",
"\n",
"await memory.init(); # loads previous state from Motörhead 🤘\n",
"\n",
"llm_chain = LLMChain(\n",
" llm=OpenAI(), \n",
" prompt=prompt, \n",
" verbose=True, \n",
" memory=memory,\n",
")\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mYou are a chatbot having a conversation with a human.\n",
"\n",
"\n",
"Human: hi im bob\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' Hi Bob, nice to meet you! How are you doing today?'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_chain.run(\"hi im bob\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mYou are a chatbot having a conversation with a human.\n",
"\n",
"Human: hi im bob\n",
"AI: Hi Bob, nice to meet you! How are you doing today?\n",
"Human: whats my name?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' You said your name is Bob. Is that correct?'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_chain.run(\"whats my name?\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mYou are a chatbot having a conversation with a human.\n",
"\n",
"Human: hi im bob\n",
"AI: Hi Bob, nice to meet you! How are you doing today?\n",
"Human: whats my name?\n",
"AI: You said your name is Bob. Is that correct?\n",
"Human: whats for dinner?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" I'm sorry, I'm not sure what you're asking. Could you please rephrase your question?\""
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_chain.run(\"whats for dinner?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@@ -1,5 +1,5 @@
 from abc import ABC
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, Tuple
 
 from pydantic import Field
 
@@ -14,8 +14,9 @@ class BaseChatMemory(BaseMemory, ABC):
     input_key: Optional[str] = None
     return_messages: bool = False
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
-        """Save context from this conversation to buffer."""
+    def _get_input_output(
+        self, inputs: Dict[str, Any], outputs: Dict[str, str]
+    ) -> Tuple[str, str]:
         if self.input_key is None:
             prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
         else:
@@ -26,8 +27,13 @@ class BaseChatMemory(BaseMemory, ABC):
             output_key = list(outputs.keys())[0]
         else:
             output_key = self.output_key
-        self.chat_memory.add_user_message(inputs[prompt_input_key])
-        self.chat_memory.add_ai_message(outputs[output_key])
+        return inputs[prompt_input_key], outputs[output_key]
+
+    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+        """Save context from this conversation to buffer."""
+        input_str, output_str = self._get_input_output(inputs, outputs)
+        self.chat_memory.add_user_message(input_str)
+        self.chat_memory.add_ai_message(output_str)
 
     def clear(self) -> None:
         """Clear memory contents."""

@@ -0,0 +1,58 @@
from typing import Any, Dict, List, Optional

import requests

from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import get_buffer_string


class MotorheadMemory(BaseChatMemory):
    url: str = "http://localhost:8080"
    timeout = 3000
    memory_key = "history"
    session_id: str
    context: Optional[str] = None

    async def init(self) -> None:
        res = requests.get(
            f"{self.url}/sessions/{self.session_id}/memory",
            timeout=self.timeout,
            headers={"Content-Type": "application/json"},
        )
        res_data = res.json()
        messages = res_data.get("messages", [])
        context = res_data.get("context", "NONE")

        for message in messages:
            if message["role"] == "AI":
                self.chat_memory.add_ai_message(message["content"])
            else:
                self.chat_memory.add_user_message(message["content"])

        if context and context != "NONE":
            self.context = context

    def load_memory_variables(self, values: Dict[str, Any]) -> Dict[str, Any]:
        if self.return_messages:
            return {self.memory_key: self.chat_memory.messages}
        else:
            return {self.memory_key: get_buffer_string(self.chat_memory.messages)}

    @property
    def memory_variables(self) -> List[str]:
        return [self.memory_key]

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        input_str, output_str = self._get_input_output(inputs, outputs)
        requests.post(
            f"{self.url}/sessions/{self.session_id}/memory",
            timeout=self.timeout,
            json={
                "messages": [
                    {"role": "Human", "content": f"{input_str}"},
                    {"role": "AI", "content": f"{output_str}"},
                ]
            },
            headers={"Content-Type": "application/json"},
        )
        super().save_context(inputs, outputs)
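
One design note on the class above: load_memory_variables hands back either raw message objects or one formatted string depending on the inherited return_messages flag, so the same memory can back both chat-style and completion-style prompts. A minimal usage sketch (reusing the notebook's placeholder session id and URL):

from langchain.memory.motorhead_memory import MotorheadMemory

memory = MotorheadMemory(
    session_id="testing-1",
    url="http://localhost:8080",
    memory_key="chat_history",
    return_messages=True,  # hand back message objects instead of one string
)
# return_messages=True  -> {"chat_history": [<message objects>]}
# return_messages=False -> {"chat_history": <get_buffer_string() rendering>}
print(memory.load_memory_variables({}))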