Add Managed Motorhead (#5507)

# Add Managed Motorhead
This change enables `MotorheadMemory` to use Metal's managed version
of Motorhead. We can enable it simply by passing in an `api_key` and a
`client_id` in order to hit the managed URL and access the memory API on
Metal.
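
A minimal sketch of the managed setup (the credentials are placeholders, and the `session_id` value here is just an example):

```python
from langchain.memory.motorhead_memory import MotorheadMemory

# url now defaults to the managed endpoint; api_key and client_id are required there
memory = MotorheadMemory(
    api_key="YOUR_API_KEY",
    client_id="YOUR_CLIENT_ID",
    session_id="my-session",
    memory_key="chat_history",
)
await memory.init()  # pulls any existing session state from Metal
```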

Twitter: [@softboyjimbo](https://twitter.com/softboyjimbo)

## Who can review?

Community members can review the PR once tests pass. Tag
maintainers/contributors who might be interested:

 @dev2049 @hwchase17

---------

Co-authored-by: Dev 2049 <dev.dev2049@gmail.com>

@@ -0,0 +1,198 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# Motörhead Memory (Managed)\n",
"[Motörhead](https://github.com/getmetal/motorhead) is a memory server implemented in Rust. It automatically handles incremental summarization in the background and allows for stateless applications.\n",
"\n",
"## Setup\n",
"\n",
"See instructions at [Motörhead](https://docs.getmetal.io/motorhead/introduction) for running the managed version of Motorhead. You can retrieve your `api_key` and `client_id` by creating an account on [Metal](https://getmetal.io).\n",
"\n"
]
},
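{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Rather than hardcoding credentials, you may prefer to read them from the environment. This is an illustrative sketch only: the `METAL_API_KEY` and `METAL_CLIENT_ID` variable names are assumptions, not anything Metal requires. The values can then be passed to `MotorheadMemory` below in place of the placeholder strings.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Hypothetical env var names -- any secure credential source works\n",
"api_key = os.environ.get(\"METAL_API_KEY\", \"YOUR_API_KEY\")\n",
"client_id = os.environ.get(\"METAL_CLIENT_ID\", \"YOUR_CLIENT_ID\")"
]
},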
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory.motorhead_memory import MotorheadMemory\n",
"from langchain import OpenAI, LLMChain, PromptTemplate\n",
"\n",
"template = \"\"\"You are a chatbot having a conversation with a human.\n",
"\n",
"{chat_history}\n",
"Human: {human_input}\n",
"AI:\"\"\"\n",
"\n",
"prompt = PromptTemplate(\n",
" input_variables=[\"chat_history\", \"human_input\"], \n",
" template=template\n",
")\n",
"memory = MotorheadMemory(\n",
" api_key=\"YOUR_API_KEY\",\n",
" client_id=\"YOUR_CLIENT_ID\"\n",
" session_id=\"testing-1\",\n",
" memory_key=\"chat_history\"\n",
")\n",
"\n",
"await memory.init(); # loads previous state from Motörhead 🤘\n",
"\n",
"llm_chain = LLMChain(\n",
" llm=OpenAI(), \n",
" prompt=prompt, \n",
" verbose=True, \n",
" memory=memory,\n",
")\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mYou are a chatbot having a conversation with a human.\n",
"\n",
"\n",
"Human: hi im bob\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' Hi Bob, nice to meet you! How are you doing today?'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_chain.run(\"hi im bob\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mYou are a chatbot having a conversation with a human.\n",
"\n",
"Human: hi im bob\n",
"AI: Hi Bob, nice to meet you! How are you doing today?\n",
"Human: whats my name?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' You said your name is Bob. Is that correct?'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_chain.run(\"whats my name?\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mYou are a chatbot having a conversation with a human.\n",
"\n",
"Human: hi im bob\n",
"AI: Hi Bob, nice to meet you! How are you doing today?\n",
"Human: whats my name?\n",
"AI: You said your name is Bob. Is that correct?\n",
"Human: whats for dinner?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" I'm sorry, I'm not sure what you're asking. Could you please rephrase your question?\""
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_chain.run(\"whats for dinner?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@@ -5,19 +5,47 @@ import requests
 from langchain.memory.chat_memory import BaseChatMemory
 from langchain.schema import get_buffer_string
 
+MANAGED_URL = "https://api.getmetal.io/v1/motorhead"
+# LOCAL_URL = "http://localhost:8080"
+
+
 class MotorheadMemory(BaseChatMemory):
-    url: str = "http://localhost:8080"
+    url: str = MANAGED_URL
     timeout = 3000
     memory_key = "history"
     session_id: str
     context: Optional[str] = None
 
+    # Managed Params
+    api_key: Optional[str] = None
+    client_id: Optional[str] = None
+
+    def __get_headers(self) -> Dict[str, str]:
+        is_managed = self.url == MANAGED_URL
+
+        headers = {
+            "Content-Type": "application/json",
+        }
+
+        if is_managed and not (self.api_key and self.client_id):
+            raise ValueError(
+                """
+                You must provide an API key and a client ID to use the managed
+                version of Motorhead. Visit https://getmetal.io for more information.
+                """
+            )
+
+        if is_managed and self.api_key and self.client_id:
+            headers["x-metal-api-key"] = self.api_key
+            headers["x-metal-client-id"] = self.client_id
+
+        return headers
+
     async def init(self) -> None:
         res = requests.get(
             f"{self.url}/sessions/{self.session_id}/memory",
             timeout=self.timeout,
-            headers={"Content-Type": "application/json"},
+            headers=self.__get_headers(),
         )
         res_data = res.json()
         messages = res_data.get("messages", [])
@@ -53,6 +81,6 @@ class MotorheadMemory(BaseChatMemory):
                     {"role": "AI", "content": f"{output_str}"},
                 ]
             },
-            headers={"Content-Type": "application/json"},
+            headers=self.__get_headers(),
         )
         super().save_context(inputs, outputs)
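
For reviewers, a standalone sketch of the header-selection behaviour the new `__get_headers` introduces. The logic mirrors the diff above; the free function `get_headers` is for illustration only, not part of the patch:

```python
from typing import Dict, Optional

MANAGED_URL = "https://api.getmetal.io/v1/motorhead"


def get_headers(url: str, api_key: Optional[str], client_id: Optional[str]) -> Dict[str, str]:
    """Mirrors MotorheadMemory.__get_headers from the patch above."""
    headers = {"Content-Type": "application/json"}
    if url == MANAGED_URL:
        # Managed endpoint requires both credentials
        if not (api_key and client_id):
            raise ValueError("Managed Motorhead needs both api_key and client_id.")
        headers["x-metal-api-key"] = api_key
        headers["x-metal-client-id"] = client_id
    return headers


# Self-hosted server: plain JSON headers, no credentials required
print(get_headers("http://localhost:8080", None, None))
# Managed endpoint: Metal auth headers are attached
print(get_headers(MANAGED_URL, "key-123", "client-456"))
```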
