{
"cells": [
{
"cell_type": "markdown",
"id": "f36d938c",
"metadata": {},
"source": [
"# LLM Caching\n",
"This notebook covers how to cache results of individual LLM calls."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "10ad9224",
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI"
]
},
{
"cell_type": "markdown",
"id": "b50f0598",
"metadata": {},
"source": [
"### In Memory Cache"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "426ff912",
"metadata": {},
"outputs": [],
"source": [
"import langchain\n",
"from langchain.cache import InMemoryCache\n",
"langchain.llm_cache = InMemoryCache()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "f69f6283",
"metadata": {},
"outputs": [],
"source": [
"# To make the caching really obvious, lets use a slower model.\n",
|
|
"llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 4,
|
|
"id": "64005d1f",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"CPU times: user 30.6 ms, sys: 9.95 ms, total: 40.5 ms\n",
|
|
"Wall time: 730 ms\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!'"
|
|
]
|
|
},
|
|
"execution_count": 4,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"%%time\n",
|
|
"# The first time, it is not yet in cache, so it should take longer\n",
|
|
"llm(\"Tell me a joke\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 5,
|
|
"id": "c8a1cb2b",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"CPU times: user 71 µs, sys: 3 µs, total: 74 µs\n",
|
|
"Wall time: 78.9 µs\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!'"
|
|
]
|
|
},
|
|
"execution_count": 5,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"%%time\n",
|
|
"# The second time it is, so it goes faster\n",
|
|
"llm(\"Tell me a joke\")"
|
|
]
|
|
},
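{
"cell_type": "markdown",
"id": "aa11cafe",
"metadata": {},
"source": [
"To build some intuition for what the cache is doing, here is a minimal, hypothetical sketch (not LangChain's actual implementation): a plain Python dictionary keyed on the prompt together with the model settings, so the same prompt sent with different settings does not collide. `fake_llm_call` and `cached_call` are made-up names used only for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bb22cafe",
"metadata": {},
"outputs": [],
"source": [
"# Conceptual sketch only -- not LangChain's code.\n",
"# The key combines the prompt with the model settings, so changing either misses the cache.\n",
"cache = {}\n",
"\n",
"def fake_llm_call(prompt: str) -> str:\n",
"    # Stand-in for an expensive API call.\n",
"    return f\"response to: {prompt}\"\n",
"\n",
"def cached_call(prompt: str, model_name: str = \"text-davinci-002\") -> str:\n",
"    key = (prompt, model_name)\n",
"    if key not in cache:  # first call: compute and store\n",
"        cache[key] = fake_llm_call(prompt)\n",
"    return cache[key]  # repeated calls: served from the dict\n",
"\n",
"cached_call(\"Tell me a joke\")  # computes\n",
"cached_call(\"Tell me a joke\")  # cache hit"
]
},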
{
"cell_type": "markdown",
"id": "4bf59c12",
"metadata": {},
"source": [
"### SQLite Cache"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "5f036236",
"metadata": {},
"outputs": [],
"source": [
"# We can do the same thing with a SQLite cache\n",
"from langchain.cache import SQLiteCache\n",
"langchain.llm_cache = SQLiteCache(database_path=\".langchain.db\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "fa18e3af",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 5.27 ms, sys: 2.36 ms, total: 7.63 ms\n",
"Wall time: 6.68 ms\n"
]
},
{
"data": {
"text/plain": [
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"# The first time, it is not yet in cache, so it should take longer\n",
"llm(\"Tell me a joke\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "5bf2f6fd",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 3.05 ms, sys: 1.1 ms, total: 4.16 ms\n",
"Wall time: 5.58 ms\n"
]
},
{
"data": {
"text/plain": [
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"# The second time it is, so it goes faster\n",
"llm(\"Tell me a joke\")"
]
},
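{
"cell_type": "markdown",
"id": "cc33cafe",
"metadata": {},
"source": [
"As an optional sanity check (assuming the cells above created a `.langchain.db` file in the current working directory), the standard-library `sqlite3` module can list the tables the cache created."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dd44cafe",
"metadata": {},
"outputs": [],
"source": [
"import sqlite3\n",
"\n",
"# Peek at the SQLite file the cache writes to; assumes it is in the working directory.\n",
"con = sqlite3.connect(\".langchain.db\")\n",
"print(con.execute(\"SELECT name FROM sqlite_master WHERE type='table'\").fetchall())\n",
"con.close()"
]
},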
{
"cell_type": "markdown",
"id": "278ad7ae",
"metadata": {},
"source": [
"### Redis Cache"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "39f6eb0b",
"metadata": {},
"outputs": [],
"source": [
"# We can do the same thing with a Redis cache\n",
"# (make sure your local Redis instance is running first before running this example)\n",
"from redis import Redis\n",
"from langchain.cache import RedisCache\n",
"langchain.llm_cache = RedisCache(redis_=Redis())"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "28920749",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 6.75 ms, sys: 3.14 ms, total: 9.89 ms\n",
"Wall time: 716 ms\n"
]
},
{
"data": {
"text/plain": [
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!'"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"# The first time, it is not yet in cache, so it should take longer\n",
"llm(\"Tell me a joke\")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "94bf9415",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 1.66 ms, sys: 1.92 ms, total: 3.57 ms\n",
"Wall time: 7.56 ms\n"
]
},
{
"data": {
"text/plain": [
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!'"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"# The second time it is, so it goes faster\n",
"llm(\"Tell me a joke\")"
]
},
{
"cell_type": "markdown",
"id": "934943dc",
"metadata": {},
"source": [
"### SQLAlchemy Cache"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "acccff40",
"metadata": {},
"outputs": [],
"source": [
"# You can use SQLAlchemyCache to cache with any SQL database supported by SQLAlchemy.\n",
"\n",
"# from langchain.cache import SQLAlchemyCache\n",
"# from sqlalchemy import create_engine\n",
"\n",
"# engine = create_engine(\"postgresql://postgres:postgres@localhost:5432/postgres\")\n",
"# langchain.llm_cache = SQLAlchemyCache(engine)"
]
},
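{
"cell_type": "markdown",
"id": "ee55cafe",
"metadata": {},
"source": [
"For instance, because SQLAlchemy can also talk to SQLite, a variant that needs no external database server might look like the sketch below (kept commented out, like the cell above, so it does not switch the active cache; the `sqlalchemy_cache.db` filename is just an example)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ff66cafe",
"metadata": {},
"outputs": [],
"source": [
"# Same SQLAlchemyCache, but backed by a local SQLite file via SQLAlchemy,\n",
"# so no Postgres server is required. The filename is arbitrary.\n",
"\n",
"# from langchain.cache import SQLAlchemyCache\n",
"# from sqlalchemy import create_engine\n",
"\n",
"# engine = create_engine(\"sqlite:///./sqlalchemy_cache.db\")\n",
"# langchain.llm_cache = SQLAlchemyCache(engine)"
]
},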
{
"cell_type": "markdown",
"id": "0c69d84d",
"metadata": {},
"source": [
"### Optional Caching\n",
"You can also turn off caching for specific LLMs should you choose. In the example below, even though global caching is enabled, we turn it off for a specific LLM"
|
|
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "6af46e2b",
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2, cache=False)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "26c4fd8f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 5.59 ms, sys: 2.35 ms, total: 7.95 ms\n",
"Wall time: 1.46 s\n"
]
},
{
"data": {
"text/plain": [
"'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!'"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"llm(\"Tell me a joke\")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "46846b20",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 5.76 ms, sys: 3.15 ms, total: 8.9 ms\n",
"Wall time: 660 ms\n"
]
},
{
"data": {
"text/plain": [
"\"\\n\\nWhy couldn't the bicycle stand up by itself? Because it was...two tired!\""
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"llm(\"Tell me a joke\")"
]
},
{
"cell_type": "markdown",
"id": "5da41b77",
"metadata": {},
"source": [
"### Optional Caching in Chains\n",
"You can also turn off caching for particular nodes in chains. Note that because of certain interfaces, its often easier to construct the chain first, and then edit the LLM afterwards.\n",
|
|
"\n",
|
|
"As an example, we will load a summarizer map-reduce chain. We will cache results for the map-step, but then not freeze it for the combine step."
|
|
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "9afa3f7a",
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAI(model_name=\"text-davinci-002\")\n",
"no_cache_llm = OpenAI(model_name=\"text-davinci-002\", cache=False)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "98a78e8e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.chains.mapreduce import MapReduceChain\n",
"\n",
"text_splitter = CharacterTextSplitter()"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "2bfb099b",
"metadata": {},
"outputs": [],
"source": [
"with open('../state_of_the_union.txt') as f:\n",
"    state_of_the_union = f.read()\n",
"texts = text_splitter.split_text(state_of_the_union)"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "f78b7f51",
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"docs = [Document(page_content=t) for t in texts[:3]]\n",
"from langchain.chains.summarize import load_summarize_chain"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "a2a30822",
"metadata": {},
"outputs": [],
"source": [
"chain = load_summarize_chain(llm, chain_type=\"map_reduce\", reduce_llm=no_cache_llm)"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "a545b743",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 471 ms, sys: 130 ms, total: 601 ms\n",
"Wall time: 5.8 s\n"
]
},
{
"data": {
"text/plain": [
"\"\\n\\nIn response to Vladimir Putin's aggression in Ukraine, the United States has joined with European allies to impose economic sanctions and cut off Russia's access to technology. The Department of Justice is also assembling a task force to go after the crimes of Russian oligarchs.\\n\\nThe sanctions and task force are aimed at punishing Putin and Russian oligarchs for their aggression in Ukraine and deterring future aggression. The long-term goal is to make Russia pay a high price for its aggression, so that it will be less likely to engage in similar behavior in the future.\""
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"chain.run(docs)"
]
},
{
"cell_type": "markdown",
"id": "3ed85e9d",
"metadata": {},
"source": [
"When we run it again, we see that it runs substantially faster but the final answer is different. This is due to caching at the map steps, but not at the reduce step."
|
|
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "39cbb282",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 10.6 ms, sys: 4.25 ms, total: 14.8 ms\n",
"Wall time: 2.19 s\n"
]
},
{
"data": {
"text/plain": [
"\"\\n\\nIn response to Vladimir Putin's aggression in Ukraine, the United States has joined with European allies to impose economic sanctions and cut off Russia's access to technology. The Department of Justice is also assembling a task force to go after the crimes of Russian oligarchs. The goal is to put pressure on Putin and make him pay a high price for his aggression. These initiatives will also help improve infrastructure and provide clean water and high-speed internet access for all Americans.\""
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"chain.run(docs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9df0dab8",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}