|
|
@@ -415,7 +415,7 @@
|
|
|
|
"\n",
|
|
|
|
"\n",
|
|
|
|
"# Avoid multiple caches using the same file, causing different llm model caches to affect each other\n",
|
|
|
|
"# Avoid multiple caches using the same file, causing different llm model caches to affect each other\n",
|
|
|
|
"\n",
|
|
|
|
"\n",
|
|
|
|
"def init_gptcache(cache_obj: Cache, llm str):\n",
|
|
|
|
"def init_gptcache(cache_obj: Cache, llm: str):\n",
|
|
|
|
" cache_obj.init(\n",
|
|
|
|
" cache_obj.init(\n",
|
|
|
|
" pre_embedding_func=get_prompt,\n",
|
|
|
|
" pre_embedding_func=get_prompt,\n",
|
|
|
|
" data_manager=manager_factory(manager=\"map\", data_dir=f\"map_cache_{llm}\"),\n",
|
|
|
|
" data_manager=manager_factory(manager=\"map\", data_dir=f\"map_cache_{llm}\"),\n",
|
|
|
|