Simplify router chain constructor signatures (#4146)
commit 6cd51ef3d0 · parent 43a7a89e93
```diff
@@ -47,13 +47,21 @@
   {
    "cell_type": "code",
    "execution_count": 3,
-   "id": "b89de9f3",
+   "id": "d0b8856e",
    "metadata": {},
    "outputs": [],
    "source": [
     "prompt_infos = [\n",
-    "    (\"physics\", \"Good for answering questions about physics\", physics_template),\n",
-    "    (\"math\", \"Good for answering math questions\", math_template)\n",
+    "    {\n",
+    "        \"name\": \"physics\", \n",
+    "        \"description\": \"Good for answering questions about physics\", \n",
+    "        \"prompt_template\": physics_template\n",
+    "    },\n",
+    "    {\n",
+    "        \"name\": \"math\", \n",
+    "        \"description\": \"Good for answering math questions\", \n",
+    "        \"prompt_template\": math_template\n",
+    "    }\n",
     "]"
    ]
   },
@@ -64,7 +72,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "chain = MultiPromptChain.from_prompts(OpenAI(), *zip(*prompt_infos), verbose=True)"
+    "chain = MultiPromptChain.from_prompts(OpenAI(), prompt_infos, verbose=True)"
    ]
   },
   {
```
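The two hunks above (from what appears to be the MultiPromptChain router notebook) replace the `(name, description, prompt_template)` tuples and the `*zip(*prompt_infos)` unpacking with a list of dicts passed straight to `from_prompts`. A minimal sketch of the new call shape; the template strings are illustrative stand-ins for the notebook's fuller `physics_template` and `math_template`, and the import paths are assumed for the langchain version this commit targets:

```python
from langchain.chains.router import MultiPromptChain
from langchain.llms import OpenAI

# Illustrative templates; each must expose an {input} variable because
# from_prompts builds PromptTemplate(..., input_variables=["input"]).
physics_template = """You are a physics professor. Answer this question:
{input}"""

math_template = """You are a skilled mathematician. Answer this question:
{input}"""

prompt_infos = [
    {
        "name": "physics",
        "description": "Good for answering questions about physics",
        "prompt_template": physics_template,
    },
    {
        "name": "math",
        "description": "Good for answering math questions",
        "prompt_template": math_template,
    },
]

# New signature: one list of dicts instead of three zipped parallel lists.
chain = MultiPromptChain.from_prompts(OpenAI(), prompt_infos, verbose=True)
print(chain.run("What is black body radiation?"))
```

Keeping each destination's name, description, and template together in one dict means callers no longer maintain three parallel lists that must stay in sync.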
```diff
@@ -84,7 +92,7 @@
     "\u001b[1m> Finished chain.\u001b[0m\n",
     "\n",
     "\n",
-    "Black body radiation is the emission of electromagnetic radiation from a body that is in thermal equilibrium with its environment. It is emitted by all objects regardless of their temperature, but the intensity and spectral distribution of the radiation depends on the temperature of the body. As the temperature increases, the intensity of the radiation also increases and the peak wavelength shifts to shorter wavelengths.\n"
+    "Black body radiation is the emission of electromagnetic radiation from a body due to its temperature. It is a type of thermal radiation that is emitted from the surface of all objects that are at a temperature above absolute zero. It is a spectrum of radiation that is influenced by the temperature of the body and is independent of the composition of the emitting material.\n"
    ]
   }
  ],
@@ -109,7 +117,13 @@
     "\u001b[1m> Finished chain.\u001b[0m\n",
     "?\n",
     "\n",
-    "The first prime number greater than 40 such that one plus the prime number is divisible by 3 is 43. To solve this, we first need to identify all of the prime numbers between 40 and 50. These are 41, 43, 47, and 49. We then need to check which of these, when added to 1, will be divisible by 3. The prime number that fits this criteria is 43. Therefore, the answer is 43.\n"
+    "The first prime number greater than 40 such that one plus the prime number is divisible by 3 is 43. To solve this problem, we can break down the question into two parts: finding the first prime number greater than 40, and then finding a number that is divisible by 3. \n",
+    "\n",
+    "The first step is to find the first prime number greater than 40. A prime number is a number that is only divisible by 1 and itself. The next prime number after 40 is 41.\n",
+    "\n",
+    "The second step is to find a number that is divisible by 3. To do this, we can add 1 to 41, which gives us 42. Now, we can check if 42 is divisible by 3. 42 divided by 3 is 14, so 42 is divisible by 3.\n",
+    "\n",
+    "Therefore, the answer to the question is 43.\n"
    ]
   }
  ],
@@ -132,7 +146,7 @@
     "\u001b[1m> Entering new MultiPromptChain chain...\u001b[0m\n",
     "None: {'input': 'What is the name of the type of cloud that rains?'}\n",
     "\u001b[1m> Finished chain.\u001b[0m\n",
-    "The name of the type of cloud that usually brings rain is called a cumulonimbus cloud. These clouds are typically tall and dark with a flat base and anvil-shaped top. They form when warm, moist air rises rapidly and condenses into water droplets, which eventually become heavy enough to fall as rain.\n"
+    "The type of cloud that typically produces rain is called a cumulonimbus cloud. This type of cloud is characterized by its large vertical extent and can produce thunderstorms and heavy precipitation. Is there anything else you'd like to know?\n"
    ]
   }
  ],
```
```diff
@@ -51,21 +51,42 @@
   {
    "cell_type": "code",
    "execution_count": 3,
-   "id": "5b671ac5",
+   "id": "783d6bcd",
    "metadata": {},
    "outputs": [],
    "source": [
     "retriever_infos = [\n",
-    "    (\"state of the union\", \"Good for answering questions about the 2023 State of the Union address\", sou_retriever),\n",
-    "    (\"pg essay\", \"Good for answer quesitons about Paul Graham's essay on his career\", pg_retriever),\n",
-    "    (\"personal\", \"Good for answering questions about me\", personal_retriever)\n",
-    "]\n",
-    "chain = MultiRetrievalQAChain.from_retrievers(OpenAI(), *zip(*retriever_infos), verbose=True)"
+    "    {\n",
+    "        \"name\": \"state of the union\", \n",
+    "        \"description\": \"Good for answering questions about the 2023 State of the Union address\", \n",
+    "        \"retriever\": sou_retriever\n",
+    "    },\n",
+    "    {\n",
+    "        \"name\": \"pg essay\", \n",
+    "        \"description\": \"Good for answer quesitons about Paul Graham's essay on his career\", \n",
+    "        \"retriever\": pg_retriever\n",
+    "    },\n",
+    "    {\n",
+    "        \"name\": \"personal\", \n",
+    "        \"description\": \"Good for answering questions about me\", \n",
+    "        \"retriever\": personal_retriever\n",
+    "    }\n",
+    "]"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 4,
+   "id": "5b671ac5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "chain = MultiRetrievalQAChain.from_retrievers(OpenAI(), retriever_infos, verbose=True)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
    "id": "7db5814f",
    "metadata": {},
    "outputs": [
```
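The retrieval notebook gets the same migration: `(name, description, retriever)` tuples become info dicts, and the combined definition/construction cell is split in two. A runnable sketch of the new shape; the retrievers below are stand-ins built from FAISS (any `BaseRetriever` should work), since the notebook's `sou_retriever`, `pg_retriever`, and `personal_retriever` are built from documents not shown in this diff:

```python
from langchain.chains.router import MultiRetrievalQAChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

# Stand-in retrievers; the notebook loads the actual State of the Union text,
# a Paul Graham essay, and personal documents instead.
embeddings = OpenAIEmbeddings()
sou_retriever = FAISS.from_texts(["the 2023 State of the Union text"], embeddings).as_retriever()
pg_retriever = FAISS.from_texts(["Paul Graham's essay text"], embeddings).as_retriever()
personal_retriever = FAISS.from_texts(["some personal notes"], embeddings).as_retriever()

retriever_infos = [
    {
        "name": "state of the union",
        "description": "Good for answering questions about the 2023 State of the Union address",
        "retriever": sou_retriever,
    },
    {
        "name": "pg essay",
        "description": "Good for answering questions about Paul Graham's essay on his career",
        "retriever": pg_retriever,
    },
    {
        "name": "personal",
        "description": "Good for answering questions about me",
        "retriever": personal_retriever,
    },
]

chain = MultiRetrievalQAChain.from_retrievers(OpenAI(), retriever_infos, verbose=True)
print(chain.run("What did the president say about the economy?"))
```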
```diff
@@ -76,9 +97,9 @@
     "\n",
     "\n",
     "\u001b[1m> Entering new MultiRetrievalQAChain chain...\u001b[0m\n",
-    "state of the union: {'query': 'What did the president say about the economy in the 2023 State of the Union Address?'}\n",
+    "state of the union: {'query': 'What did the president say about the economy in the 2023 State of the Union address?'}\n",
     "\u001b[1m> Finished chain.\u001b[0m\n",
-    " The president said that the economy had created over 6.5 million jobs in the previous year, the strongest growth in nearly 40 years, and that his plan to fight inflation would lower costs and the deficit. He also announced the Bipartisan Infrastructure Law and said that investing in workers and building the economy from the bottom up and the middle out would build a better America.\n"
+    " The president said that the economy was stronger than it had been a year prior, and that the American Rescue Plan helped create record job growth and fuel economic relief for millions of Americans. He also proposed a plan to fight inflation and lower costs for families, including cutting the cost of prescription drugs and energy, providing investments and tax credits for energy efficiency, and increasing access to child care and Pre-K.\n"
    ]
   }
  ],
@@ -88,7 +109,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 5,
+  "execution_count": 6,
   "id": "bbcdbe82",
   "metadata": {},
   "outputs": [
@@ -101,7 +122,7 @@
     "\u001b[1m> Entering new MultiRetrievalQAChain chain...\u001b[0m\n",
     "pg essay: {'query': 'What is something Paul Graham regrets about his work?'}\n",
     "\u001b[1m> Finished chain.\u001b[0m\n",
-    " Paul Graham regrets that he was so consumed by running Y Combinator that it ended up eating away at his other projects, like writing essays and working on Arc.\n"
+    " Paul Graham regrets that he did not take a vacation after selling his company, instead of immediately starting to paint.\n"
    ]
   }
  ],
@@ -111,7 +132,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 6,
+  "execution_count": 7,
   "id": "37c88a27",
   "metadata": {},
   "outputs": [
@@ -134,7 +155,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 7,
+  "execution_count": 8,
   "id": "de8519b2",
   "metadata": {},
   "outputs": [
@@ -147,7 +168,7 @@
     "\u001b[1m> Entering new MultiRetrievalQAChain chain...\u001b[0m\n",
     "None: {'query': 'What year was the Internet created in?'}\n",
     "\u001b[1m> Finished chain.\u001b[0m\n",
-    "The Internet was created in the late 1960s by the United States Department of Defense's Advanced Research Projects Agency (ARPA). It was originally called the ARPANET and was used to connect computers at different universities and research institutions. Over time, it evolved into the global network that we know today. So, to answer your question, the Internet was technically created in the late 1960s.\n"
+    "The Internet was created in 1969 through a project called ARPANET, which was funded by the United States Department of Defense. However, the World Wide Web, which is often confused with the Internet, was created in 1989 by British computer scientist Tim Berners-Lee.\n"
    ]
   }
  ],
```
```diff
@@ -1,7 +1,7 @@
 """Use a single chain to route an input to one of multiple llm chains."""
 from __future__ import annotations
 
-from typing import Any, List, Mapping, Optional
+from typing import Any, Dict, List, Mapping, Optional
 
 from langchain.base_language import BaseLanguageModel
 from langchain.chains import ConversationChain
@@ -31,17 +31,12 @@ class MultiPromptChain(MultiRouteChain):
     def from_prompts(
         cls,
         llm: BaseLanguageModel,
-        prompt_names: List[str],
-        prompt_descriptions: List[str],
-        prompt_templates: List[str],
+        prompt_infos: List[Dict[str, str]],
         default_chain: Optional[LLMChain] = None,
         **kwargs: Any,
     ) -> MultiPromptChain:
         """Convenience constructor for instantiating from destination prompts."""
-        destinations = [
-            f"{name}: {description}"
-            for name, description in zip(prompt_names, prompt_descriptions)
-        ]
+        destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos]
         destinations_str = "\n".join(destinations)
         router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
             destinations=destinations_str
@@ -52,13 +47,13 @@ class MultiPromptChain(MultiRouteChain):
             output_parser=RouterOutputParser(),
         )
         router_chain = LLMRouterChain.from_llm(llm, router_prompt)
-        destination_chains = {
-            name: LLMChain(
-                llm=llm,
-                prompt=PromptTemplate(template=prompt, input_variables=["input"]),
-            )
-            for name, prompt in zip(prompt_names, prompt_templates)
-        }
+        destination_chains = {}
+        for p_info in prompt_infos:
+            name = p_info["name"]
+            prompt_template = p_info["prompt_template"]
+            prompt = PromptTemplate(template=prompt_template, input_variables=["input"])
+            chain = LLMChain(llm=llm, prompt=prompt)
+            destination_chains[name] = chain
         _default_chain = default_chain or ConversationChain(
             llm=ChatOpenAI(), output_key="text"
         )
```
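On the library side, `from_prompts` now takes a single `prompt_infos: List[Dict[str, str]]` argument and derives both the router destinations and the destination chains from it. A standalone sketch of the equivalent logic, assuming it mirrors the diff exactly; the helper names `format_destinations` and `build_destination_chains` are hypothetical, not part of the module:

```python
from typing import Dict, List

from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate


def format_destinations(prompt_infos: List[Dict[str, str]]) -> str:
    # Mirrors the new one-liner: the router prompt lists each destination
    # as "name: description", one per line.
    return "\n".join(f"{p['name']}: {p['description']}" for p in prompt_infos)


def build_destination_chains(
    llm: BaseLanguageModel, prompt_infos: List[Dict[str, str]]
) -> Dict[str, LLMChain]:
    # Mirrors the new loop: one LLMChain per info dict, keyed by its name.
    destination_chains = {}
    for p_info in prompt_infos:
        prompt = PromptTemplate(
            template=p_info["prompt_template"], input_variables=["input"]
        )
        destination_chains[p_info["name"]] = LLMChain(llm=llm, prompt=prompt)
    return destination_chains
```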
```diff
@@ -1,7 +1,7 @@
 """Use a single chain to route an input to one of multiple retrieval qa chains."""
 from __future__ import annotations
 
-from typing import Any, List, Mapping, Optional
+from typing import Any, Dict, List, Mapping, Optional
 
 from langchain.base_language import BaseLanguageModel
 from langchain.chains import ConversationChain
@@ -37,10 +37,7 @@ class MultiRetrievalQAChain(MultiRouteChain):
     def from_retrievers(
         cls,
         llm: BaseLanguageModel,
-        retriever_names: List[str],
-        retriever_descriptions: List[str],
-        retrievers: List[BaseRetriever],
-        retriever_prompts: Optional[List[PromptTemplate]] = None,
+        retriever_infos: List[Dict[str, Any]],
         default_retriever: Optional[BaseRetriever] = None,
         default_prompt: Optional[PromptTemplate] = None,
         default_chain: Optional[Chain] = None,
@@ -51,10 +48,7 @@ class MultiRetrievalQAChain(MultiRouteChain):
                 "`default_retriever` must be specified if `default_prompt` is "
                 "provided. Received only `default_prompt`."
             )
-        destinations = [
-            f"{name}: {description}"
-            for name, description in zip(retriever_names, retriever_descriptions)
-        ]
+        destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos]
         destinations_str = "\n".join(destinations)
         router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(
             destinations=destinations_str
@@ -66,10 +60,11 @@ class MultiRetrievalQAChain(MultiRouteChain):
         )
         router_chain = LLMRouterChain.from_llm(llm, router_prompt)
         destination_chains = {}
-        for i, retriever in enumerate(retrievers):
-            name = retriever_names[i]
-            prompt = retriever_prompts[i] if retriever_prompts else None
+        for r_info in retriever_infos:
+            prompt = r_info.get("prompt")
+            retriever = r_info["retriever"]
             chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever)
+            name = r_info["name"]
             destination_chains[name] = chain
         if default_chain:
             _default_chain = default_chain
```
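The final hunk also shows where the old `retriever_prompts` parallel list went: each info dict may now carry an optional `"prompt"` key, read with `r_info.get("prompt")` and passed through to `RetrievalQA.from_llm`. A hedged sketch of attaching a custom prompt to just one destination; the prompt text and retrievers are illustrative, with FAISS and OpenAI embeddings assumed only to keep the example runnable:

```python
from langchain.chains.router import MultiRetrievalQAChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS

# A custom per-destination prompt; RetrievalQA prompts use {context} and {question}.
sou_prompt = PromptTemplate(
    template="Use the speech excerpts to answer.\n{context}\nQuestion: {question}",
    input_variables=["context", "question"],
)

embeddings = OpenAIEmbeddings()
retriever_infos = [
    {
        "name": "state of the union",
        "description": "Good for answering questions about the 2023 State of the Union address",
        "retriever": FAISS.from_texts(["speech text"], embeddings).as_retriever(),
        "prompt": sou_prompt,  # optional key, read via r_info.get("prompt")
    },
    {
        "name": "personal",
        "description": "Good for answering questions about me",
        "retriever": FAISS.from_texts(["personal notes"], embeddings).as_retriever(),
        # no "prompt" key: this destination falls back to RetrievalQA's default
    },
]

chain = MultiRetrievalQAChain.from_retrievers(OpenAI(), retriever_infos, verbose=True)
```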