harrison/reorg_smart_chains
Harrison Chase 2 years ago
parent 45ce74d0bc
commit 2a84d3d5ca

@@ -68,7 +68,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "b5a265c6",
+"id": "2d6f4b1d",
 "metadata": {},
 "outputs": [],
 "source": []

@ -20,23 +20,28 @@
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"What is the hometown of the reigning men's U.S. Open champion?\n",
"Are follow up questions needed here:\u001b[32;1m\u001b[1;3m Yes.\n",
"Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
"Intermediate answer: \u001b[33;1m\u001b[1;3mCarlos Alcaraz.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Follow up: Where is Carlos Alcaraz from?\u001b[0m\n",
"Intermediate answer: \u001b[33;1m\u001b[1;3mEl Palmar, Spain.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"So the final answer is: El Palmar, Spain\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3mCarlos Alcaraz\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mFollow up: Where is Carlos Alcaraz from?\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3mEl Palmar, Spain\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mSo the final answer is: El Palmar, Spain\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'\\nSo the final answer is: El Palmar, Spain'"
"'El Palmar, Spain'"
]
},
"execution_count": 1,

@@ -10,6 +10,7 @@ from langchain.chains.router import LLMRouterChain
 from langchain.input import ChainedInput, get_color_mapping
 from langchain.llms.base import LLM
 from langchain.prompts import BasePromptTemplate, PromptTemplate
+from langchain.chains.router_expert import RouterExpertChain, ExpertConfig

 FINAL_ANSWER_ACTION = "Final Answer: "
@@ -31,9 +32,9 @@ class ChainConfig(NamedTuple):
 def get_action_and_input(llm_output: str) -> Tuple[str, str]:
     """Parse out the action and input from the LLM output."""
     ps = [p for p in llm_output.split("\n") if p]
-    if ps[-1].startswith(FINAL_ANSWER_ACTION):
+    if ps[-1].startswith("Final Answer"):
         directive = ps[-1][len(FINAL_ANSWER_ACTION) :]
-        return FINAL_ANSWER_ACTION, directive
+        return "Final Answer", directive
     if not ps[-1].startswith("Action Input: "):
         raise ValueError(
             "The last line does not have an action input, "
@@ -52,6 +53,16 @@ def get_action_and_input(llm_output: str) -> Tuple[str, str]:
 class MRKLRouterChain(LLMRouterChain):
     """Router for the MRKL chain."""

+    @property
+    def observation_prefix(self) -> str:
+        """Prefix to append the observation with."""
+        return "Observation: "
+
+    @property
+    def router_prefix(self) -> str:
+        """Prefix to append the router call with."""
+        return "Thought:"
+
     def __init__(self, llm: LLM, chain_configs: List[ChainConfig], **kwargs: Any):
         """Initialize with an LLM and the chain configs it has access to."""
         tools = "\n".join(
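These two properties are what the generic loop in router_expert.py uses to rebuild MRKL's prompt format. A minimal sketch of how they interleave with router output and expert observations; the question, thought, and observation strings are made up for illustration:

```python
observation_prefix = "Observation: "  # MRKLRouterChain.observation_prefix
router_prefix = "Thought:"            # MRKLRouterChain.router_prefix

# One router/expert round trip, built up the way ChainedInput is in the loop.
prompt = "What is 2 + 2?\n" + router_prefix
router_log = " I need a calculator.\nAction: Calculator\nAction Input: 2 + 2"
observation = "4"

prompt += router_log                                # router's thought + action
prompt += "\n" + observation_prefix + observation   # expert's result
prompt += "\n" + router_prefix                      # cue the next router call
print(prompt)
```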
@@ -166,21 +177,15 @@ class MRKLChain(Chain, BaseModel):

     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
         router_chain = MRKLRouterChain(self.llm, self.chain_configs)
-        chained_input = ChainedInput(
-            f"{inputs[self.input_key]}\nThought:", verbose=self.verbose
-        )
-        color_mapping = get_color_mapping(
-            list(self.action_to_chain_map.keys()), excluded_colors=["green"]
+        question = inputs[self.input_key]
+        expert_configs = [
+            ExpertConfig(expert_name=c.action_name, expert=c.action)
+            for c in self.chain_configs
+        ]
+        chain = RouterExpertChain(
+            router_chain=router_chain,
+            expert_configs=expert_configs,
+            verbose=self.verbose
         )
-        while True:
-            action, action_input, thought = router_chain.get_action_and_input(
-                chained_input.input
-            )
-            chained_input.add(thought, color="green")
-            if action == FINAL_ANSWER_ACTION:
-                return {self.output_key: action_input}
-            chain = self.action_to_chain_map[action]
-            ca = chain(action_input)
-            chained_input.add("\nObservation: ")
-            chained_input.add(ca, color=color_mapping[action])
-            chained_input.add("\nThought:")
+        output = chain.run(question)
+        return {self.output_key: output}
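The rewritten `_call` just adapts each `ChainConfig` into an `ExpertConfig` and hands the action loop to `RouterExpertChain`. A self-contained sketch of the name-to-callable dispatch this sets up, with local stand-ins rather than the real langchain classes:

```python
from typing import Callable, NamedTuple

class ExpertConfig(NamedTuple):  # mirrors the NamedTuple in router_expert.py
    expert_name: str
    expert: Callable[[str], str]

expert_configs = [
    ExpertConfig("Search", lambda q: f"results for {q!r}"),  # toy experts
    ExpertConfig("Calculator", lambda e: str(eval(e))),
]

# RouterExpertChain builds the same kind of map from its expert_configs.
action_to_chain_map = {e.expert_name: e.expert for e in expert_configs}
print(action_to_chain_map["Calculator"]("2 + 2"))  # -> 4
```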

@@ -12,6 +12,7 @@ from langchain.docstore.base import Docstore
 from langchain.docstore.document import Document
 from langchain.input import ChainedInput
 from langchain.llms.base import LLM
+from langchain.chains.router_expert import RouterExpertChain, ExpertConfig

 class ReActRouterChain(LLMRouterChain, BaseModel):
@@ -43,6 +44,42 @@ class ReActRouterChain(LLMRouterChain, BaseModel):
             raise ValueError(f"Could not parse action directive: {action_str}")
         return re_matches.group(1), re_matches.group(2)

+    @property
+    def finish_action_name(self) -> str:
+        """The action name of when to finish the chain."""
+        return "Finish"
+
+    @property
+    def observation_prefix(self) -> str:
+        """Prefix to append the observation with."""
+        return f"Observation {self.i - 1}: "
+
+    @property
+    def router_prefix(self) -> str:
+        """Prefix to append the router call with."""
+        return f"Thought {self.i}:"
+
+
+class DocstoreExplorer:
+    def __init__(self, docstore: Docstore):
+        self.docstore = docstore
+        self.document = None
+
+    def search(self, term: str):
+        result = self.docstore.search(term)
+        if isinstance(result, Document):
+            self.document = result
+            return self.document.summary
+        else:
+            self.document = None
+            return result
+
+    def lookup(self, term: str):
+        if self.document is None:
+            raise ValueError("Cannot lookup without a successful search first")
+        return self.document.lookup(term)
+
+
 class ReActChain(Chain, BaseModel):
     """Chain that implements the ReAct paper.
@@ -86,29 +123,15 @@ class ReActChain(Chain, BaseModel):
     def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
         question = inputs[self.input_key]
         router_chain = ReActRouterChain(self.llm)
-        chained_input = ChainedInput(f"{question}\nThought 1:", verbose=self.verbose)
-        document = None
-        while True:
-            action, directive, ret_text = router_chain.get_action_and_input(
-                chained_input.input
-            )
-            chained_input.add(ret_text, color="green")
-            if action == "Search":
-                result = self.docstore.search(directive)
-                if isinstance(result, Document):
-                    document = result
-                    observation = document.summary
-                else:
-                    document = None
-                    observation = result
-            elif action == "Lookup":
-                if document is None:
-                    raise ValueError("Cannot lookup without a successful search first")
-                observation = document.lookup(directive)
-            elif action == "Finish":
-                return {self.output_key: directive}
-            else:
-                raise ValueError(f"Got unknown action directive: {action}")
-            chained_input.add(f"\nObservation {router_chain.i - 1}: ")
-            chained_input.add(observation, color="yellow")
-            chained_input.add(f"\nThought {router_chain.i}:")
+        docstore_explorer = DocstoreExplorer(self.docstore)
+        expert_configs = [
+            ExpertConfig(expert_name="Search", expert=docstore_explorer.search),
+            ExpertConfig(expert_name="Lookup", expert=docstore_explorer.lookup)
+        ]
+        chain = RouterExpertChain(
+            router_chain=router_chain,
+            expert_configs=expert_configs,
+            verbose=self.verbose
+        )
+        output = chain.run(question)
+        return {self.output_key: output}
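The rewritten ReAct `_call` registers two bound methods of one explorer instance, so state cached by "Search" is visible to "Lookup" on a later turn. A minimal stand-in showing why that works: the loop only ever calls `expert(action_input)`:

```python
memory = {"doc": None}  # shared state, like DocstoreExplorer.document

def search(term: str) -> str:
    memory["doc"] = f"article about {term}"
    return f"summary of {term}"

def lookup(term: str) -> str:
    if memory["doc"] is None:
        raise ValueError("Cannot lookup without a successful search first")
    return f"passage of {memory['doc']} mentioning {term!r}"

action_to_expert = {"Search": search, "Lookup": lookup}
print(action_to_expert["Search"]("ReAct"))        # caches the document
print(action_to_expert["Lookup"]("observation"))  # reads the cached document
```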

@@ -36,6 +36,21 @@ class RouterChain(Chain, BaseModel, ABC):
     def get_action_and_input(self, text: str) -> Tuple[str, str, str]:
         """Return action, action input, and log (in that order)."""

+    @property
+    @abstractmethod
+    def observation_prefix(self) -> str:
+        """Prefix to append the observation with."""
+
+    @property
+    @abstractmethod
+    def router_prefix(self) -> str:
+        """Prefix to append the router call with."""
+
+    @property
+    def finish_action_name(self) -> str:
+        """The action name of when to finish the chain."""
+        return "Final Answer"
+
     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
         action, action_input, log = self.get_action_and_input(inputs[self.input_key])
         return {
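Taken together, the base class now spells out the full contract a router owes the generic loop: a parser plus two prompt prefixes, with `finish_action_name` overridable (ReAct returns "Finish"). A hypothetical minimal subclass against a stand-in ABC, to show how little a new router has to provide:

```python
from abc import ABC, abstractmethod
from typing import Tuple

class RouterBase(ABC):  # stand-in for RouterChain (Chain/BaseModel omitted)
    @abstractmethod
    def get_action_and_input(self, text: str) -> Tuple[str, str, str]:
        """Return action, action input, and log (in that order)."""
    @property
    @abstractmethod
    def observation_prefix(self) -> str: ...
    @property
    @abstractmethod
    def router_prefix(self) -> str: ...
    @property
    def finish_action_name(self) -> str:
        return "Final Answer"

class ColonRouter(RouterBase):  # hypothetical, illustrative parser only
    @property
    def observation_prefix(self) -> str:
        return "Result: "
    @property
    def router_prefix(self) -> str:
        return "Next:"
    def get_action_and_input(self, text: str) -> Tuple[str, str, str]:
        last = [line for line in text.split("\n") if line][-1]
        action, _, action_input = last.partition(": ")
        return action, action_input, last

router = ColonRouter()
print(router.get_action_and_input("Lookup: recursion"))  # ('Lookup', 'recursion', ...)
```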

@@ -12,7 +12,7 @@ from langchain.llms.base import LLM
 from langchain.prompts import BasePromptTemplate, PromptTemplate
 from langchain.chains.router import RouterChain

-FINAL_ANSWER_ACTION = "Final Answer: "

 class ExpertConfig(NamedTuple):
@@ -28,6 +28,8 @@ class RouterExpertChain(Chain, BaseModel):
     """Router chain."""
     expert_configs: List[ExpertConfig]
     """Expert configs this chain has access to."""
+    starter_string: str = "\n"
+    """String to put after user input but before first router."""
     input_key: str = "question"  #: :meta private:
     output_key: str = "answer"  #: :meta private:
@@ -54,22 +56,22 @@ class RouterExpertChain(Chain, BaseModel):
         return [self.output_key]

     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
-        router_chain = MRKLRouterChain(self.llm, self.chain_configs)
+        action_to_chain_map = {e.expert_name: e.expert for e in self.expert_configs}
         chained_input = ChainedInput(
-            f"{inputs[self.input_key]}", verbose=self.verbose
+            f"{inputs[self.input_key]}{self.starter_string}{self.router_chain.router_prefix}", verbose=self.verbose
         )
         color_mapping = get_color_mapping(
-            [c.], excluded_colors=["green"]
+            [c.expert_name for c in self.expert_configs], excluded_colors=["green"]
         )
         while True:
-            action, action_input, thought = router_chain.get_action_and_input(
+            action, action_input, log = self.router_chain.get_action_and_input(
                 chained_input.input
             )
-            chained_input.add(thought, color="green")
-            if action == FINAL_ANSWER_ACTION:
+            chained_input.add(log, color="green")
+            if action == self.router_chain.finish_action_name:
                 return {self.output_key: action_input}
-            chain = self.action_to_chain_map[action]
+            chain = action_to_chain_map[action]
             ca = chain(action_input)
-            chained_input.add("\nObservation: ")
+            chained_input.add(f"\n{self.router_chain.observation_prefix}")
             chained_input.add(ca, color=color_mapping[action])
-            chained_input.add("\nThought:")
+            chained_input.add(f"\n{self.router_chain.router_prefix}")

@@ -10,6 +10,7 @@ from langchain.chains.self_ask_with_search.prompt import PROMPT
 from langchain.chains.serpapi import SerpAPIChain
 from langchain.input import ChainedInput
 from langchain.llms.base import LLM
+from langchain.chains.router_expert import RouterExpertChain, ExpertConfig

 class SelfAskWithSearchRouter(LLMRouterChain):
@@ -28,7 +29,10 @@ class SelfAskWithSearchRouter(LLMRouterChain):
         last_line = text.split("\n")[-1]
         if followup not in last_line:
-            return "Final Answer", text
+            finish_string = "So the final answer is: "
+            if finish_string not in last_line:
+                raise ValueError("We should probably never get here")
+            return "Final Answer", text[len(finish_string):]

         if ":" not in last_line:
             after_colon = last_line
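The parser no longer returns the raw tail text; it strips the finish marker first, which is why the notebook output above is now a clean 'El Palmar, Spain'. A tiny sketch of the new branch, assuming (as in the trace above) the model's final output is a single finish line:

```python
finish_string = "So the final answer is: "
text = "So the final answer is: El Palmar, Spain"  # made-up final LLM output
last_line = text.split("\n")[-1]
if finish_string in last_line:
    print(text[len(finish_string):])  # -> El Palmar, Spain
```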
@@ -42,6 +46,16 @@ class SelfAskWithSearchRouter(LLMRouterChain):
         return "Intermediate Answer", after_colon

+    @property
+    def observation_prefix(self) -> str:
+        """Prefix to append the observation with."""
+        return "Intermediate answer: "
+
+    @property
+    def router_prefix(self) -> str:
+        """Prefix to append the router call with."""
+        return ""
+

 class SelfAskWithSearchChain(Chain, BaseModel):
     """Chain that does self ask with search.
@@ -84,16 +98,9 @@ class SelfAskWithSearchChain(Chain, BaseModel):
         return [self.output_key]

     def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
-        chained_input = ChainedInput(inputs[self.input_key], verbose=self.verbose)
-        chained_input.add("\nAre follow up questions needed here:")
         intermediate = "\nIntermediate answer:"
         router = SelfAskWithSearchRouter(self.llm, stops=[intermediate])
-        action, action_input, log = router.get_action_and_input(chained_input.input)
-        chained_input.add(log, color="green")
-        while action != "Final Answer":
-            external_answer = self.search_chain.run(action_input)
-            chained_input.add(intermediate + " ")
-            chained_input.add(external_answer + ".", color="yellow")
-            action, action_input, log = router.get_action_and_input(chained_input.input)
-            chained_input.add(log, color="green")
-        return {self.output_key: action_input}
+        expert_configs = [ExpertConfig(expert_name="Intermediate Answer", expert=self.search_chain.run)]
+        chain = RouterExpertChain(router_chain=router, expert_configs=expert_configs, verbose=self.verbose, starter_string="\nAre follow up questions needed here:")
+        output = chain.run(inputs[self.input_key])
+        return {self.output_key: output}
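End-to-end, the self-ask chain now nests a `RouterExpertChain` run inside its own, which is why the notebook trace above shows two "Entering new chain..." banners. A hedged usage sketch in the README style of this era of langchain (imports and constructor defaults assumed; requires OpenAI and SerpAPI keys):

```python
from langchain import OpenAI, SerpAPIChain, SelfAskWithSearchChain

llm = OpenAI(temperature=0)  # assumed constructor, as in early examples
search = SerpAPIChain()
chain = SelfAskWithSearchChain(llm=llm, search_chain=search, verbose=True)
chain.run("What is the hometown of the reigning men's U.S. Open champion?")
# -> 'El Palmar, Spain' (per the notebook output above)
```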
