forked from Archives/langchain
docs for restructure
parent
8c8eb47765
commit
71a0940435
@ -0,0 +1,7 @@
|
||||
:mod:`langchain.routing_chains`
===============================

.. automodule:: langchain.routing_chains
    :members:
    :undoc-members:
|
@ -0,0 +1,14 @@
|
||||
"""Routing chains."""
|
||||
from langchain.routing_chains.mrkl.base import MRKLChain
|
||||
from langchain.routing_chains.react.base import ReActChain
|
||||
from langchain.routing_chains.router import LLMRouter
|
||||
from langchain.routing_chains.routing_chain import RoutingChain
|
||||
from langchain.routing_chains.self_ask_with_search.base import SelfAskWithSearchChain
|
||||
|
||||
__all__ = [
|
||||
"MRKLChain",
|
||||
"SelfAskWithSearchChain",
|
||||
"ReActChain",
|
||||
"LLMRouter",
|
||||
"RoutingChain",
|
||||
]
|
@ -0,0 +1,86 @@
|
||||
"""Chain that takes in an input and produces an action and action input."""
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import NamedTuple, Optional, Tuple
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain.chains.llm import LLMChain
|
||||
|
||||
|
||||
class RouterOutput(NamedTuple):
    """Output of a router."""

    # Name of the tool the router chose; compared against
    # ``Router.finish_tool_name`` to detect completion.
    tool: str
    # Input string to pass to the chosen tool.
    tool_input: str
    # Full LLM text that produced this decision (kept for logging/tracing).
    log: str
|
||||
|
||||
|
||||
class Router(ABC):
    """Chain responsible for deciding the action to take."""

    @abstractmethod
    def route(self, text: str) -> RouterOutput:
        """Decide how the given input should be handled.

        Args:
            text: input string

        Returns:
            RouterOutput specifying what tool to use.
        """

    @property
    @abstractmethod
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""

    @property
    @abstractmethod
    def router_prefix(self) -> str:
        """Prefix to append the router call with."""

    @property
    def finish_tool_name(self) -> str:
        """Tool name that signals the chain should stop and return."""
        return "Final Answer"

    @property
    def starter_string(self) -> str:
        """Separator placed after user input, before the first router call."""
        return "\n"
|
||||
|
||||
|
||||
class LLMRouter(Router, BaseModel, ABC):
    """Router that uses an LLM."""

    # LLM chain used to produce the raw routing text.
    llm_chain: LLMChain

    @abstractmethod
    def _extract_tool_and_input(self, text: str) -> Optional[Tuple[str, str]]:
        """Extract tool and tool input from llm output."""

    def _fix_text(self, text: str) -> str:
        """Fix the text."""
        raise ValueError("fix_text not implemented for this router.")

    def route(self, text: str) -> RouterOutput:
        """Decide how the given input should be handled.

        Args:
            text: input string

        Returns:
            RouterOutput specifying what tool to use.
        """
        key = self.llm_chain.input_keys[0]
        response = self.llm_chain.predict(
            **{key: text, "stop": [self.observation_prefix]}
        )
        parsed = self._extract_tool_and_input(response)
        while parsed is None:
            # Output was unparseable: patch it, then let the LLM continue
            # from the combined prompt until a tool/input pair appears.
            response = self._fix_text(response)
            response += self.llm_chain.predict(
                **{key: text + response, "stop": [self.observation_prefix]}
            )
            parsed = self._extract_tool_and_input(response)
        return RouterOutput(parsed[0], parsed[1], response)
|
@ -0,0 +1,70 @@
|
||||
"""Router-Expert framework."""
|
||||
from typing import Callable, Dict, List, NamedTuple
|
||||
|
||||
from pydantic import BaseModel, Extra
|
||||
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.input import ChainedInput, get_color_mapping
|
||||
from langchain.routing_chains.router import Router
|
||||
|
||||
|
||||
class ToolConfig(NamedTuple):
    """Configuration for tools."""

    # Name the router uses to refer to this tool.
    tool_name: str
    # Callable invoked with the tool input string; returns an observation string.
    tool: Callable[[str], str]
|
||||
|
||||
|
||||
class RoutingChain(Chain, BaseModel):
    """Chain that uses a router to use tools."""

    router: Router
    """Router to use."""
    tool_configs: List[ToolConfig]
    """Tool configs this chain has access to."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Single input key holding the user question.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Single output key holding the final answer.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        tools_by_name = {tc.tool_name: tc.tool for tc in self.tool_configs}
        # Seed the running prompt: question, separator, then router prefix.
        prompt = (
            inputs[self.input_key]
            + self.router.starter_string
            + self.router.router_prefix
        )
        chained_input = ChainedInput(prompt, verbose=self.verbose)
        # Green is reserved for router logs, so keep it out of the tool palette.
        colors = get_color_mapping(
            [c.tool_name for c in self.tool_configs], excluded_colors=["green"]
        )
        while True:
            decision = self.router.route(chained_input.input)
            chained_input.add(decision.log, color="green")
            if decision.tool == self.router.finish_tool_name:
                # Router signalled completion; its tool input is the answer.
                return {self.output_key: decision.tool_input}
            observation = tools_by_name[decision.tool](decision.tool_input)
            chained_input.add(f"\n{self.router.observation_prefix}")
            chained_input.add(observation, color=colors[decision.tool])
            chained_input.add(f"\n{self.router.router_prefix}")
|
@ -1,6 +0,0 @@
|
||||
"""Smart chains."""
|
||||
from langchain.smart_chains.mrkl.base import MRKLChain
|
||||
from langchain.smart_chains.react.base import ReActChain
|
||||
from langchain.smart_chains.self_ask_with_search.base import SelfAskWithSearchChain
|
||||
|
||||
__all__ = ["MRKLChain", "SelfAskWithSearchChain", "ReActChain"]
|
@ -1,90 +0,0 @@
|
||||
"""Chain that takes in an input and produces an action and action input."""
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.llm import LLMChain
|
||||
|
||||
|
||||
class RouterChain(Chain, BaseModel, ABC):
    """Chain responsible for deciding the action to take."""

    input_key: str = "input_text"  #: :meta private:
    action_key: str = "action"  #: :meta private:
    action_input_key: str = "action_input"  #: :meta private:
    log_key: str = "log"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Single input key holding the text to route.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Three keys: the action, the action input, and the log.

        :meta private:
        """
        return [self.action_key, self.action_input_key, self.log_key]

    @abstractmethod
    def get_action_and_input(self, text: str) -> Tuple[str, str, str]:
        """Return action, action input, and log (in that order)."""

    @property
    @abstractmethod
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""

    @property
    @abstractmethod
    def router_prefix(self) -> str:
        """Prefix to append the router call with."""

    @property
    def finish_action_name(self) -> str:
        """Action name that signals the chain is finished."""
        return "Final Answer"

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        # Route the input text, then map the three results onto the
        # configured output keys (same order as ``output_keys``).
        routed = self.get_action_and_input(inputs[self.input_key])
        return dict(zip(self.output_keys, routed))
|
||||
|
||||
|
||||
class LLMRouterChain(RouterChain, BaseModel, ABC):
    """RouterChain that uses an LLM."""

    # LLM chain used to produce the raw routing text.
    llm_chain: LLMChain
    # Stop sequences passed through to the LLM call, if any.
    stops: Optional[List[str]]

    @abstractmethod
    def _extract_action_and_input(self, text: str) -> Optional[Tuple[str, str]]:
        """Extract action and action input from llm output."""

    def _fix_text(self, text: str) -> str:
        """Fix the text."""
        raise ValueError("fix_text not implemented for this router.")

    def get_action_and_input(self, text: str) -> Tuple[str, str, str]:
        """Return action, action input, and log (in that order)."""
        key = self.llm_chain.input_keys[0]
        response = self.llm_chain.predict(**{key: text, "stop": self.stops})
        parsed = self._extract_action_and_input(response)
        while parsed is None:
            # Output was unparseable: patch it, then let the LLM continue
            # from the combined prompt until an action/input pair appears.
            response = self._fix_text(response)
            response += self.llm_chain.predict(
                **{key: text + response, "stop": self.stops}
            )
            parsed = self._extract_action_and_input(response)
        return parsed[0], parsed[1], response
|
@ -1,77 +0,0 @@
|
||||
"""Router-Expert framework."""
|
||||
from typing import Callable, Dict, List, NamedTuple
|
||||
|
||||
from pydantic import BaseModel, Extra
|
||||
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.input import ChainedInput, get_color_mapping
|
||||
from langchain.smart_chains.router import RouterChain
|
||||
|
||||
|
||||
class ExpertConfig(NamedTuple):
    """Configuration for experts."""

    # Name the router uses to refer to this expert.
    expert_name: str
    # Callable invoked with the action input string; returns an observation string.
    expert: Callable[[str], str]
|
||||
|
||||
|
||||
class RouterExpertChain(Chain, BaseModel):
    """Chain that implements the Router/Expert system."""

    router_chain: RouterChain
    """Router chain."""
    expert_configs: List[ExpertConfig]
    """Expert configs this chain has access to."""
    starter_string: str = "\n"
    """String to put after user input but before first router."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Single input key holding the user question.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Single output key holding the final answer.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        experts_by_name = {c.expert_name: c.expert for c in self.expert_configs}
        # Seed the running prompt: question, separator, then router prefix.
        prompt = (
            inputs[self.input_key]
            + self.starter_string
            + self.router_chain.router_prefix
        )
        chained_input = ChainedInput(prompt, verbose=self.verbose)
        # Green is reserved for router logs, so keep it out of the expert palette.
        colors = get_color_mapping(
            [c.expert_name for c in self.expert_configs], excluded_colors=["green"]
        )
        while True:
            action, action_input, log = self.router_chain.get_action_and_input(
                chained_input.input
            )
            chained_input.add(log, color="green")
            if action == self.router_chain.finish_action_name:
                # Router signalled completion; its input is the final answer.
                return {self.output_key: action_input}
            observation = experts_by_name[action](action_input)
            chained_input.add(f"\n{self.router_chain.observation_prefix}")
            chained_input.add(observation, color=colors[action])
            chained_input.add(f"\n{self.router_chain.router_prefix}")
|
@ -0,0 +1 @@
|
||||
"""Test routing chain functionality."""
|
@ -1 +0,0 @@
|
||||
"""Test smart chain functionality."""
|
Loading…
Reference in New Issue