Router chains (#4019)
Unpolished router examples to help flesh out abstractions and use cases.

![Screenshot 2023-05-02 at 7 02 58 PM](https://user-images.githubusercontent.com/130488702/235820394-389e5584-db0b-415e-a260-2824b5555167.png)

Co-authored-by: Shreya Rajpal <shreya.rajpal@gmail.com>
parent bbbca10704
commit 7f8727bbcd
docs/modules/chains/examples/multi_prompt_router.ipynb (new file)
@@ -0,0 +1,165 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "a5cf6c49",
   "metadata": {},
   "source": [
    "# Router Chains: Selecting from multiple prompts with MultiPromptChain\n",
    "\n",
    "This notebook demonstrates how to use the `RouterChain` paradigm to create a chain that dynamically selects the prompt to use for a given input. Specifically, we show how to use the `MultiPromptChain` to create a question-answering chain that selects the prompt which is most relevant for a given question, and then answers the question using that prompt."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e8d624d4",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.chains.router import MultiPromptChain\n",
    "from langchain.llms import OpenAI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "8d11fa5c",
   "metadata": {},
   "outputs": [],
   "source": [
    "physics_template = \"\"\"You are a very smart physics professor. \\\n",
    "You are great at answering questions about physics in a concise and easy to understand manner. \\\n",
    "When you don't know the answer to a question you admit that you don't know.\n",
    "\n",
    "Here is a question:\n",
    "{input}\"\"\"\n",
    "\n",
    "\n",
    "math_template = \"\"\"You are a very good mathematician. You are great at answering math questions. \\\n",
    "You are so good because you are able to break down hard problems into their component parts, \\\n",
    "answer the component parts, and then put them together to answer the broader question.\n",
    "\n",
    "Here is a question:\n",
    "{input}\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b89de9f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompt_infos = [\n",
    "    (\"physics\", \"Good for answering questions about physics\", physics_template),\n",
    "    (\"math\", \"Good for answering math questions\", math_template)\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "db679975",
   "metadata": {},
   "outputs": [],
   "source": [
    "chain = MultiPromptChain.from_prompts(OpenAI(), *zip(*prompt_infos), verbose=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "90fd594c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new MultiPromptChain chain...\u001b[0m\n",
      "physics: {'input': 'What is black body radiation?'}\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "\n",
      "\n",
      "Black body radiation is the emission of electromagnetic radiation from a body that is in thermal equilibrium with its environment. It is emitted by all objects regardless of their temperature, but the intensity and spectral distribution of the radiation depends on the temperature of the body. As the temperature increases, the intensity of the radiation also increases and the peak wavelength shifts to shorter wavelengths.\n"
     ]
    }
   ],
   "source": [
    "print(chain.run(\"What is black body radiation?\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "b8c83765",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new MultiPromptChain chain...\u001b[0m\n",
      "math: {'input': 'What is the first prime number greater than 40 such that one plus the prime number is divisible by 3'}\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "?\n",
      "\n",
      "The first prime number greater than 40 such that one plus the prime number is divisible by 3 is 43. To solve this, we first need to identify all of the prime numbers between 40 and 50. These are 41, 43, 47, and 49. We then need to check which of these, when added to 1, will be divisible by 3. The prime number that fits this criteria is 43. Therefore, the answer is 43.\n"
     ]
    }
   ],
   "source": [
    "print(chain.run(\"What is the first prime number greater than 40 such that one plus the prime number is divisible by 3\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "74c6bba7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new MultiPromptChain chain...\u001b[0m\n",
      "None: {'input': 'What is the name of the type of cloud that rains?'}\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "The name of the type of cloud that usually brings rain is called a cumulonimbus cloud. These clouds are typically tall and dark with a flat base and anvil-shaped top. They form when warm, moist air rises rapidly and condenses into water droplets, which eventually become heavy enough to fall as rain.\n"
     ]
    }
   ],
   "source": [
    "print(chain.run(\"What is the name of the type of cloud that rins\"))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv",
   "language": "python",
   "name": "venv"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
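An editorial aside, not part of the diff: the *zip(*prompt_infos) idiom in cell 4 above is plain-Python transposition. It turns the list of (name, description, template) tuples into the three parallel sequences that from_prompts takes as positional arguments. A minimal sketch, with placeholder template strings:

# Stand-alone illustration; "<physics template>" etc. are placeholders.
prompt_infos = [
    ("physics", "Good for answering questions about physics", "<physics template>"),
    ("math", "Good for answering math questions", "<math template>"),
]
names, descriptions, templates = zip(*prompt_infos)  # transpose rows into columns
assert names == ("physics", "math")
# from_prompts(OpenAI(), *zip(*prompt_infos)) is thus the same call as
# from_prompts(OpenAI(), names, descriptions, templates).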
docs/modules/chains/examples/multi_retrieval_qa_router.ipynb (new file)
@@ -0,0 +1,188 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "782ffcf1",
   "metadata": {},
   "source": [
    "# Router Chains: Selecting from multiple retrieval QA chains with MultiRetrievalQAChain\n",
    "\n",
    "This notebook demonstrates how to use the `RouterChain` paradigm to create a chain that dynamically selects which retrieval system to use. Specifically, we show how to use the `MultiRetrievalQAChain` to create a question-answering chain that selects the retrieval QA chain which is most relevant for a given question, and then answers the question using it."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b6aeec07",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.chains.router import MultiRetrievalQAChain\n",
    "from langchain.llms import OpenAI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "3c42f051",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.embeddings import OpenAIEmbeddings\n",
    "from langchain.document_loaders import TextLoader\n",
    "from langchain.vectorstores import FAISS\n",
    "\n",
    "sou_docs = TextLoader('../../state_of_the_union.txt').load_and_split()\n",
    "sou_retriever = FAISS.from_documents(sou_docs, OpenAIEmbeddings()).as_retriever()\n",
    "\n",
    "pg_docs = TextLoader('../../paul_graham_essay.txt').load_and_split()\n",
    "pg_retriever = FAISS.from_documents(pg_docs, OpenAIEmbeddings()).as_retriever()\n",
    "\n",
    "personal_texts = [\n",
    "    \"I love apple pie\",\n",
    "    \"My favorite color is fuchsia\",\n",
    "    \"My dream is to become a professional dancer\",\n",
    "    \"I broke my arm when I was 12\",\n",
    "    \"My parents are from Peru\",\n",
    "]\n",
    "personal_retriever = FAISS.from_texts(personal_texts, OpenAIEmbeddings()).as_retriever()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "5b671ac5",
   "metadata": {},
   "outputs": [],
   "source": [
    "retriever_infos = [\n",
    "    (\"state of the union\", \"Good for answering questions about the 2023 State of the Union address\", sou_retriever),\n",
    "    (\"pg essay\", \"Good for answering questions about Paul Graham's essay on his career\", pg_retriever),\n",
    "    (\"personal\", \"Good for answering questions about me\", personal_retriever)\n",
    "]\n",
    "chain = MultiRetrievalQAChain.from_retrievers(OpenAI(), *zip(*retriever_infos), verbose=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "7db5814f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new MultiRetrievalQAChain chain...\u001b[0m\n",
      "state of the union: {'query': 'What did the president say about the economy in the 2023 State of the Union Address?'}\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      " The president said that the economy had created over 6.5 million jobs in the previous year, the strongest growth in nearly 40 years, and that his plan to fight inflation would lower costs and the deficit. He also announced the Bipartisan Infrastructure Law and said that investing in workers and building the economy from the bottom up and the middle out would build a better America.\n"
     ]
    }
   ],
   "source": [
    "print(chain.run(\"What did the president say about the economy?\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "bbcdbe82",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new MultiRetrievalQAChain chain...\u001b[0m\n",
      "pg essay: {'query': 'What is something Paul Graham regrets about his work?'}\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      " Paul Graham regrets that he was so consumed by running Y Combinator that it ended up eating away at his other projects, like writing essays and working on Arc.\n"
     ]
    }
   ],
   "source": [
    "print(chain.run(\"What is something Paul Graham regrets about his work?\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "37c88a27",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new MultiRetrievalQAChain chain...\u001b[0m\n",
      "personal: {'query': 'What is my background?'}\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      " Your background is Peruvian.\n"
     ]
    }
   ],
   "source": [
    "print(chain.run(\"What is my background?\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "de8519b2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new MultiRetrievalQAChain chain...\u001b[0m\n",
      "None: {'query': 'What year was the Internet created in?'}\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "The Internet was created in the late 1960s by the United States Department of Defense's Advanced Research Projects Agency (ARPA). It was originally called the ARPANET and was used to connect computers at different universities and research institutions. Over time, it evolved into the global network that we know today. So, to answer your question, the Internet was technically created in the late 1960s.\n"
     ]
    }
   ],
   "source": [
    "print(chain.run(\"What year was the Internet created in?\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e50a0227",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv",
   "language": "python",
   "name": "venv"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
@@ -363,7 +363,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.1"
+   "version": "3.11.3"
   }
  },
  "nbformat": 4,
@@ -171,7 +171,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.1"
+   "version": "3.11.3"
   }
  },
  "nbformat": 4,
@@ -76,18 +76,16 @@ class Chain(BaseModel, ABC):
     def output_keys(self) -> List[str]:
         """Output keys this chain expects."""

-    def _validate_inputs(self, inputs: Dict[str, str]) -> None:
+    def _validate_inputs(self, inputs: Dict[str, Any]) -> None:
         """Check that all inputs are present."""
         missing_keys = set(self.input_keys).difference(inputs)
         if missing_keys:
             raise ValueError(f"Missing some input keys: {missing_keys}")

-    def _validate_outputs(self, outputs: Dict[str, str]) -> None:
-        if set(outputs) != set(self.output_keys):
-            raise ValueError(
-                f"Did not get output keys that were expected. "
-                f"Got: {set(outputs)}. Expected: {set(self.output_keys)}."
-            )
+    def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
+        missing_keys = set(self.output_keys).difference(outputs)
+        if missing_keys:
+            raise ValueError(f"Missing some output keys: {missing_keys}")

     @abstractmethod
     def _call(
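A behavioral note on this hunk (editor's sketch, not in the diff): the new _validate_outputs only raises on missing output keys, whereas the old set-equality check also raised on unexpected extras. That looseness is what lets MultiRouteChain below return a destination chain's full output dict even though its own output_keys is empty.

# The new check, reduced to plain Python with hypothetical values:
outputs = {"text": "hello", "input": "hi"}  # one expected key plus an extra
output_keys = ["text"]
missing_keys = set(output_keys).difference(outputs)
assert not missing_keys  # passes; the old equality check would have raised here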
@@ -8,15 +8,13 @@ from langchain.memory.prompt import (
 )
 from langchain.prompts.prompt import PromptTemplate

-_DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
+DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

 Current conversation:
 {history}
 Human: {input}
 AI:"""
-PROMPT = PromptTemplate(
-    input_variables=["history", "input"], template=_DEFAULT_TEMPLATE
-)
+PROMPT = PromptTemplate(input_variables=["history", "input"], template=DEFAULT_TEMPLATE)

 # Only for backwards compatibility
@@ -231,7 +231,7 @@ class LLMChain(Chain):

     def predict_and_parse(
         self, callbacks: Callbacks = None, **kwargs: Any
-    ) -> Union[str, List[str], Dict[str, str]]:
+    ) -> Union[str, List[str], Dict[str, Any]]:
         """Call predict and then parse the results."""
         result = self.predict(callbacks=callbacks, **kwargs)
         if self.prompt.output_parser is not None:
langchain/chains/router/__init__.py (new file)
@@ -0,0 +1,12 @@
from langchain.chains.router.base import MultiRouteChain, RouterChain
from langchain.chains.router.llm_router import LLMRouterChain
from langchain.chains.router.multi_prompt import MultiPromptChain
from langchain.chains.router.multi_retrieval_qa import MultiRetrievalQAChain

__all__ = [
    "RouterChain",
    "MultiRouteChain",
    "MultiPromptChain",
    "MultiRetrievalQAChain",
    "LLMRouterChain",
]
langchain/chains/router/base.py (new file)
@@ -0,0 +1,88 @@
"""Base classes for chain routing."""
from __future__ import annotations

from abc import ABC
from typing import Any, Dict, List, Mapping, NamedTuple, Optional

from pydantic import Extra

from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.chains.base import Chain


class Route(NamedTuple):
    destination: Optional[str]
    next_inputs: Dict[str, Any]


class RouterChain(Chain, ABC):
    """Chain that outputs the name of a destination chain and the inputs to it."""

    @property
    def output_keys(self) -> List[str]:
        return ["destination", "next_inputs"]

    def route(self, inputs: Dict[str, Any], callbacks: Callbacks = None) -> Route:
        result = self(inputs, callbacks=callbacks)
        return Route(result["destination"], result["next_inputs"])


class MultiRouteChain(Chain):
    """Use a single chain to route an input to one of multiple candidate chains."""

    router_chain: RouterChain
    """Chain that routes inputs to destination chains."""
    destination_chains: Mapping[str, Chain]
    """Chains that return final answer to inputs."""
    default_chain: Chain
    """Default chain to use when none of the destination chains are suitable."""
    silent_errors: bool = False
    """If True, use default_chain when an invalid destination name is provided.

    Defaults to False."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the router chain prompt expects.

        :meta private:
        """
        return self.router_chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        """Will always return text key.

        :meta private:
        """
        return []

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        route = self.router_chain.route(inputs, callbacks=callbacks)

        _run_manager.on_text(
            str(route.destination) + ": " + str(route.next_inputs), verbose=self.verbose
        )
        if not route.destination:
            return self.default_chain(route.next_inputs, callbacks=callbacks)
        elif route.destination in self.destination_chains:
            return self.destination_chains[route.destination](
                route.next_inputs, callbacks=callbacks
            )
        elif self.silent_errors:
            return self.default_chain(route.next_inputs, callbacks=callbacks)
        else:
            raise ValueError(
                f"Received invalid destination chain name '{route.destination}'"
            )
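To make the contract above concrete, here is an editor's sketch, not part of the PR: a hand-rolled RouterChain that routes on a keyword, wired into a MultiRouteChain with toy destination chains. KeywordRouterChain and EchoChain are hypothetical names, and the sketch assumes only the standard Chain interface from langchain.chains.base plus the classes defined in this file.

# Editor's sketch, not part of the PR; class names are invented for illustration.
from typing import Any, Dict, List, Optional

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.router.base import MultiRouteChain, RouterChain


class KeywordRouterChain(RouterChain):
    """Routes to the first destination whose name appears in the input text."""

    destinations: List[str]

    @property
    def input_keys(self) -> List[str]:
        return ["input"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        text = inputs["input"].lower()
        destination = next((d for d in self.destinations if d in text), None)
        # A destination of None makes MultiRouteChain fall back to default_chain.
        return {"destination": destination, "next_inputs": {"input": inputs["input"]}}


class EchoChain(Chain):
    """Toy destination chain that just tags its input."""

    tag: str

    @property
    def input_keys(self) -> List[str]:
        return ["input"]

    @property
    def output_keys(self) -> List[str]:
        return ["text"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        return {"text": f"[{self.tag}] {inputs['input']}"}


chain = MultiRouteChain(
    router_chain=KeywordRouterChain(destinations=["physics", "math"]),
    destination_chains={
        "physics": EchoChain(tag="physics"),
        "math": EchoChain(tag="math"),
    },
    default_chain=EchoChain(tag="default"),
)
print(chain({"input": "a physics question"})["text"])  # [physics] a physics question
print(chain({"input": "something else"})["text"])      # [default] something else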
langchain/chains/router/llm_router.py (new file)
@@ -0,0 +1,99 @@
"""Base classes for LLM-powered router chains."""
from __future__ import annotations

from typing import Any, Dict, List, Optional, Type, cast

from pydantic import root_validator

from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains import LLMChain
from langchain.chains.router.base import RouterChain
from langchain.output_parsers.structured import parse_json_markdown
from langchain.prompts import BasePromptTemplate
from langchain.schema import BaseOutputParser, OutputParserException


class LLMRouterChain(RouterChain):
    """A router chain that uses an LLM chain to perform routing."""

    llm_chain: LLMChain
    """LLM chain used to perform routing"""

    @root_validator()
    def validate_prompt(cls, values: dict) -> dict:
        prompt = values["llm_chain"].prompt
        if prompt.output_parser is None:
            raise ValueError(
                "LLMRouterChain requires base llm_chain prompt to have an output"
                " parser that converts LLM text output to a dictionary with keys"
                " 'destination' and 'next_inputs'. Received a prompt with no output"
                " parser."
            )
        return values

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the LLM chain prompt expects.

        :meta private:
        """
        return self.llm_chain.input_keys

    def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
        super()._validate_outputs(outputs)
        if not isinstance(outputs["next_inputs"], dict):
            raise ValueError

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        callbacks = _run_manager.get_child()
        output = cast(
            Dict[str, Any],
            self.llm_chain.predict_and_parse(callbacks=callbacks, **inputs),
        )
        return output

    @classmethod
    def from_llm(
        cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, **kwargs: Any
    ) -> LLMRouterChain:
        """Convenience constructor."""
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(llm_chain=llm_chain, **kwargs)


class RouterOutputParser(BaseOutputParser[Dict[str, str]]):
    """Parser for the output of the router chain in the multi-prompt chain."""

    default_destination: str = "DEFAULT"
    next_inputs_type: Type = str
    next_inputs_inner_key: str = "input"

    def parse(self, text: str) -> Dict[str, Any]:
        try:
            expected_keys = ["destination", "next_inputs"]
            parsed = parse_json_markdown(text, expected_keys)
            if not isinstance(parsed["destination"], str):
                raise ValueError("Expected 'destination' to be a string.")
            if not isinstance(parsed["next_inputs"], self.next_inputs_type):
                raise ValueError(
                    f"Expected 'next_inputs' to be {self.next_inputs_type}."
                )
            parsed["next_inputs"] = {self.next_inputs_inner_key: parsed["next_inputs"]}
            if (
                parsed["destination"].strip().lower()
                == self.default_destination.lower()
            ):
                parsed["destination"] = None
            else:
                parsed["destination"] = parsed["destination"].strip()
            return parsed
        except Exception as e:
            raise OutputParserException(
                f"Parsing text\n{text}\n raised following error:\n{e}"
            )
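For orientation (editor's illustration, not in the PR), this is roughly the markdown-JSON shape RouterOutputParser consumes and the routing dict it produces; the exact fenced format is an assumption based on parse_json_markdown and the router prompts below.

from langchain.chains.router.llm_router import RouterOutputParser

parser = RouterOutputParser()
text = '''```json
{
    "destination": "physics",
    "next_inputs": "What is black body radiation?"
}
```'''
print(parser.parse(text))
# expected: {'destination': 'physics', 'next_inputs': {'input': 'What is black body radiation?'}}
print(parser.parse(text.replace("physics", "DEFAULT")))
# expected: {'destination': None, 'next_inputs': {'input': 'What is black body radiation?'}}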
langchain/chains/router/multi_prompt.py (new file)
@@ -0,0 +1,70 @@
"""Use a single chain to route an input to one of multiple llm chains."""
from __future__ import annotations

from typing import Any, List, Mapping, Optional

from langchain.base_language import BaseLanguageModel
from langchain.chains import ConversationChain
from langchain.chains.llm import LLMChain
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate


class MultiPromptChain(MultiRouteChain):
    """A multi-route chain that uses an LLM router chain to choose amongst prompts."""

    router_chain: LLMRouterChain
    """Chain for deciding a destination chain and the input to it."""
    destination_chains: Mapping[str, LLMChain]
    """Map of name to candidate chains that inputs can be routed to."""
    default_chain: LLMChain
    """Default chain to use when router doesn't map input to one of the destinations."""

    @property
    def output_keys(self) -> List[str]:
        return ["text"]

    @classmethod
    def from_prompts(
        cls,
        llm: BaseLanguageModel,
        prompt_names: List[str],
        prompt_descriptions: List[str],
        prompt_templates: List[str],
        default_chain: Optional[LLMChain] = None,
        **kwargs: Any,
    ) -> MultiPromptChain:
        """Convenience constructor for instantiating from destination prompts."""
        destinations = [
            f"{name}: {description}"
            for name, description in zip(prompt_names, prompt_descriptions)
        ]
        destinations_str = "\n".join(destinations)
        router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
            destinations=destinations_str
        )
        router_prompt = PromptTemplate(
            template=router_template,
            input_variables=["input"],
            output_parser=RouterOutputParser(),
        )
        router_chain = LLMRouterChain.from_llm(llm, router_prompt)
        destination_chains = {
            name: LLMChain(
                llm=llm,
                prompt=PromptTemplate(template=prompt, input_variables=["input"]),
            )
            for name, prompt in zip(prompt_names, prompt_templates)
        }
        _default_chain = default_chain or ConversationChain(
            llm=ChatOpenAI(), output_key="text"
        )
        return cls(
            router_chain=router_chain,
            destination_chains=destination_chains,
            default_chain=_default_chain,
            **kwargs,
        )
langchain/chains/router/multi_prompt_prompt.py (new file)
@@ -0,0 +1,31 @@
"""Prompt for the router chain in the multi-prompt chain."""

MULTI_PROMPT_ROUTER_TEMPLATE = """\
Given a raw text input to a language model select the model prompt best suited for \
the input. You will be given the names of the available prompts and a description of \
what the prompt is best suited for. You may also revise the original input if you \
think that revising it will ultimately lead to a better response from the language \
model.

<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
```json
{{{{
    "destination": string \\ name of the prompt to use or "DEFAULT"
    "next_inputs": string \\ a potentially modified version of the original input
}}}}
```

REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR \
it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input if you don't think any \
modifications are needed.

<< CANDIDATE PROMPTS >>
{destinations}

<< INPUT >>
{{input}}

<< OUTPUT >>
"""
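A note on the quadruple braces (editor's aside, not in the diff): the template is formatted twice. str.format in from_prompts fills {destinations}, collapses {{{{ to {{, and turns {{input}} into {input}; the result is itself a valid PromptTemplate in which the doubled braces render as literal braces around the JSON example and input is the lone variable.

# Quick check of the two-stage escaping:
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE

router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
    destinations="physics: Good for answering questions about physics"
)
assert "{input}" in router_template  # now a PromptTemplate variable
assert "{{" in router_template       # still-escaped literal braces for the JSON snippet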
langchain/chains/router/multi_retrieval_prompt.py (new file)
@@ -0,0 +1,30 @@
"""Prompt for the router chain in the multi-retrieval qa chain."""

MULTI_RETRIEVAL_ROUTER_TEMPLATE = """\
Given a query to a question answering system select the system best suited \
for the input. You will be given the names of the available systems and a description \
of what questions the system is best suited for. You may also revise the original \
input if you think that revising it will ultimately lead to a better response.

<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
```json
{{{{
    "destination": string \\ name of the question answering system to use or "DEFAULT"
    "next_inputs": string \\ a potentially modified version of the original input
}}}}
```

REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR \
it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input if you don't think any \
modifications are needed.

<< CANDIDATE PROMPTS >>
{destinations}

<< INPUT >>
{{input}}

<< OUTPUT >>
"""
langchain/chains/router/multi_retrieval_qa.py (new file)
@@ -0,0 +1,93 @@
"""Use a single chain to route an input to one of multiple retrieval qa chains."""
from __future__ import annotations

from typing import Any, List, Mapping, Optional

from langchain.base_language import BaseLanguageModel
from langchain.chains import ConversationChain
from langchain.chains.base import Chain
from langchain.chains.conversation.prompt import DEFAULT_TEMPLATE
from langchain.chains.retrieval_qa.base import BaseRetrievalQA, RetrievalQA
from langchain.chains.router.base import MultiRouteChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_retrieval_prompt import (
    MULTI_RETRIEVAL_ROUTER_TEMPLATE,
)
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import BaseRetriever


class MultiRetrievalQAChain(MultiRouteChain):
    """A multi-route chain that uses an LLM router chain to choose amongst retrieval
    qa chains."""

    router_chain: LLMRouterChain
    """Chain for deciding a destination chain and the input to it."""
    destination_chains: Mapping[str, BaseRetrievalQA]
    """Map of name to candidate chains that inputs can be routed to."""
    default_chain: Chain
    """Default chain to use when router doesn't map input to one of the destinations."""

    @property
    def output_keys(self) -> List[str]:
        return ["result"]

    @classmethod
    def from_retrievers(
        cls,
        llm: BaseLanguageModel,
        retriever_names: List[str],
        retriever_descriptions: List[str],
        retrievers: List[BaseRetriever],
        retriever_prompts: Optional[List[PromptTemplate]] = None,
        default_retriever: Optional[BaseRetriever] = None,
        default_prompt: Optional[PromptTemplate] = None,
        default_chain: Optional[Chain] = None,
        **kwargs: Any,
    ) -> MultiRetrievalQAChain:
        if default_prompt and not default_retriever:
            raise ValueError(
                "`default_retriever` must be specified if `default_prompt` is "
                "provided. Received only `default_prompt`."
            )
        destinations = [
            f"{name}: {description}"
            for name, description in zip(retriever_names, retriever_descriptions)
        ]
        destinations_str = "\n".join(destinations)
        router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(
            destinations=destinations_str
        )
        router_prompt = PromptTemplate(
            template=router_template,
            input_variables=["input"],
            output_parser=RouterOutputParser(next_inputs_inner_key="query"),
        )
        router_chain = LLMRouterChain.from_llm(llm, router_prompt)
        destination_chains = {}
        for i, retriever in enumerate(retrievers):
            name = retriever_names[i]
            prompt = retriever_prompts[i] if retriever_prompts else None
            chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever)
            destination_chains[name] = chain
        if default_chain:
            _default_chain = default_chain
        elif default_retriever:
            _default_chain = RetrievalQA.from_llm(
                llm, prompt=default_prompt, retriever=default_retriever
            )
        else:
            prompt_template = DEFAULT_TEMPLATE.replace("input", "query")
            prompt = PromptTemplate(
                template=prompt_template, input_variables=["history", "query"]
            )
            _default_chain = ConversationChain(
                llm=ChatOpenAI(), prompt=prompt, input_key="query", output_key="result"
            )
        return cls(
            router_chain=router_chain,
            destination_chains=destination_chains,
            default_chain=_default_chain,
            **kwargs,
        )
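One fragile spot worth flagging (editor's note, not in the diff): the default branch above renames the prompt variable with a bare str.replace. That works only because the substring "input" occurs in DEFAULT_TEMPLATE solely inside the {input} placeholder; any other occurrence of the word would be rewritten too.

# Sketch of the rename, using an abbreviated stand-in for DEFAULT_TEMPLATE:
template = "Current conversation:\n{history}\nHuman: {input}\nAI:"
renamed = template.replace("input", "query")
assert "{query}" in renamed and "{input}" not in renamed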