forked from Archives/langchain
Compare commits: main...harrison/r

6 commits:

- 8c8eb47765
- 68eaf4e5ee
- 2a84d3d5ca
- 45ce74d0bc
- 2a2d3323c9
- 6f55fa8ba7
@@ -32,7 +32,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 2,
 "id": "07e96d99",
 "metadata": {},
 "outputs": [],
@@ -64,7 +64,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 3,
 "id": "a069c4b6",
 "metadata": {},
 "outputs": [],
@@ -74,7 +74,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 4,
 "id": "e603cd7d",
 "metadata": {},
 "outputs": [
@@ -86,32 +86,32 @@
 "\n",
 "\u001b[1m> Entering new chain...\u001b[0m\n",
 "What is the age of Olivia Wilde's boyfriend raised to the 0.23 power?\n",
-"Thought:\u001b[102m I need to find the age of Olivia Wilde's boyfriend\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to find the age of Olivia Wilde's boyfriend\n",
 "Action: Search\n",
 "Action Input: \"Olivia Wilde's boyfriend\"\u001b[0m\n",
-"Observation: \u001b[104mOlivia Wilde started dating Harry Styles after ending her years-long engagement to Jason Sudeikis — see their relationship timeline.\u001b[0m\n",
-"Thought:\u001b[102m I need to find the age of Harry Styles\n",
+"Observation: \u001b[36;1m\u001b[1;3mOlivia Wilde started dating Harry Styles after ending her years-long engagement to Jason Sudeikis — see their relationship timeline.\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to find the age of Harry Styles\n",
 "Action: Search\n",
 "Action Input: \"Harry Styles age\"\u001b[0m\n",
-"Observation: \u001b[104m28 years\u001b[0m\n",
-"Thought:\u001b[102m I need to calculate 28 to the 0.23 power\n",
+"Observation: \u001b[36;1m\u001b[1;3m28 years\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 28 to the 0.23 power\n",
 "Action: Calculator\n",
 "Action Input: 28^0.23\u001b[0m\n",
 "\n",
 "\u001b[1m> Entering new chain...\u001b[0m\n",
-"28^0.23\u001b[102m\n",
+"28^0.23\u001b[32;1m\u001b[1;3m\n",
 "\n",
 "```python\n",
 "print(28**0.23)\n",
 "```\n",
 "\u001b[0m\n",
-"Answer: \u001b[103m2.1520202182226886\n",
+"Answer: \u001b[33;1m\u001b[1;3m2.1520202182226886\n",
 "\u001b[0m\n",
 "\u001b[1m> Finished chain.\u001b[0m\n",
 "\n",
-"Observation: \u001b[103mAnswer: 2.1520202182226886\n",
+"Observation: \u001b[33;1m\u001b[1;3mAnswer: 2.1520202182226886\n",
 "\u001b[0m\n",
-"Thought:\u001b[102m I now know the final answer\n",
+"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
 "Final Answer: 2.1520202182226886\u001b[0m\n",
 "\u001b[1m> Finished chain.\u001b[0m\n"
 ]
@@ -122,7 +122,7 @@
 "'2.1520202182226886'"
 ]
 },
-"execution_count": 6,
+"execution_count": 4,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -133,7 +133,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": 5,
 "id": "a5c07010",
 "metadata": {},
 "outputs": [
@@ -145,35 +145,35 @@
 "\n",
 "\u001b[1m> Entering new chain...\u001b[0m\n",
 "Who recently released an album called 'The Storm Before the Calm' and are they in the FooBar database? If so, what albums of theirs are in the FooBar database?\n",
-"Thought:\u001b[102m I need to find an album called 'The Storm Before the Calm'\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to find an album called 'The Storm Before the Calm'\n",
 "Action: Search\n",
 "Action Input: \"The Storm Before the Calm album\"\u001b[0m\n",
-"Observation: \u001b[104mThe Storm Before the Calm (stylized in all lowercase) is the tenth (and eighth international) studio album by Canadian-American singer-songwriter Alanis ...\u001b[0m\n",
-"Thought:\u001b[102m I need to check if Alanis is in the FooBar database\n",
+"Observation: \u001b[36;1m\u001b[1;3mThe Storm Before the Calm (stylized in all lowercase) is the tenth (and eighth international) studio album by Canadian-American singer-songwriter Alanis ...\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to check if Alanis is in the FooBar database\n",
 "Action: FooBar DB\n",
 "Action Input: \"Does Alanis Morissette exist in the FooBar database?\"\u001b[0m\n",
 "\n",
 "\u001b[1m> Entering new chain...\u001b[0m\n",
 "Does Alanis Morissette exist in the FooBar database?\n",
-"SQLQuery:\u001b[102m SELECT * FROM Artist WHERE Name = 'Alanis Morissette'\u001b[0m\n",
-"SQLResult: \u001b[103m[(4, 'Alanis Morissette')]\u001b[0m\n",
-"Answer:\u001b[102m Yes\u001b[0m\n",
+"SQLQuery:\u001b[32;1m\u001b[1;3m SELECT * FROM Artist WHERE Name = 'Alanis Morissette'\u001b[0m\n",
+"SQLResult: \u001b[33;1m\u001b[1;3m[(4, 'Alanis Morissette')]\u001b[0m\n",
+"Answer:\u001b[32;1m\u001b[1;3m Yes\u001b[0m\n",
 "\u001b[1m> Finished chain.\u001b[0m\n",
 "\n",
-"Observation: \u001b[101m Yes\u001b[0m\n",
-"Thought:\u001b[102m I need to find out what albums of Alanis's are in the FooBar database\n",
+"Observation: \u001b[38;5;200m\u001b[1;3m Yes\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I need to find out what albums of Alanis's are in the FooBar database\n",
 "Action: FooBar DB\n",
 "Action Input: \"What albums by Alanis Morissette are in the FooBar database?\"\u001b[0m\n",
 "\n",
 "\u001b[1m> Entering new chain...\u001b[0m\n",
 "What albums by Alanis Morissette are in the FooBar database?\n",
-"SQLQuery:\u001b[102m SELECT Title FROM Album WHERE ArtistId = (SELECT ArtistId FROM Artist WHERE Name = 'Alanis Morissette')\u001b[0m\n",
-"SQLResult: \u001b[103m[('Jagged Little Pill',)]\u001b[0m\n",
-"Answer:\u001b[102m Jagged Little Pill\u001b[0m\n",
+"SQLQuery:\u001b[32;1m\u001b[1;3m SELECT Album.Title FROM Album JOIN Artist ON Album.ArtistId = Artist.ArtistId WHERE Artist.Name = 'Alanis Morissette'\u001b[0m\n",
+"SQLResult: \u001b[33;1m\u001b[1;3m[('Jagged Little Pill',)]\u001b[0m\n",
+"Answer:\u001b[32;1m\u001b[1;3m Jagged Little Pill\u001b[0m\n",
 "\u001b[1m> Finished chain.\u001b[0m\n",
 "\n",
-"Observation: \u001b[101m Jagged Little Pill\u001b[0m\n",
-"Thought:\u001b[102m I now know the final answer\n",
+"Observation: \u001b[38;5;200m\u001b[1;3m Jagged Little Pill\u001b[0m\n",
+"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
 "Final Answer: The album is by Alanis Morissette and the albums in the FooBar database by her are Jagged Little Pill\u001b[0m\n",
 "\u001b[1m> Finished chain.\u001b[0m\n"
 ]
@@ -184,7 +184,7 @@
 "'The album is by Alanis Morissette and the albums in the FooBar database by her are Jagged Little Pill'"
 ]
 },
-"execution_count": 10,
+"execution_count": 5,
 "metadata": {},
 "output_type": "execute_result"
 }
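Every change in this notebook hunk is the same mechanical substitution: agent traces switch from background-color ANSI escapes (`\u001b[102m`, `\u001b[103m`, `\u001b[104m`, `\u001b[101m`) to bold-italic foreground escapes. A small sketch of the standard ANSI semantics behind the new codes (illustration only, not part of the diff):

```python
# Standard ANSI escape semantics for the codes swapped in above.
OLD_THOUGHT = "\x1b[102m"                # bright green background (old style)
NEW_THOUGHT = "\x1b[32;1m\x1b[1;3m"      # green+bold, then bold+italic (new style)
NEW_OBSERVATION = "\x1b[36;1m\x1b[1;3m"  # cyan+bold, then bold+italic
RESET = "\x1b[0m"

print(f"Thought:{NEW_THOUGHT} I now know the final answer{RESET}")
```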
@@ -37,14 +37,14 @@
 "\n",
 "\u001b[1m> Entering new chain...\u001b[0m\n",
 "Author David Chanoff has collaborated with a U.S. Navy admiral who served as the ambassador to the United Kingdom under which President?\n",
-"Thought 1:\u001b[102m I need to search David Chanoff and find the U.S. Navy admiral he\n",
-"collaborated with.\n",
+"Thought 1:\u001b[32;1m\u001b[1;3m I need to search David Chanoff and find the U.S. Navy admiral he collaborated\n",
+"with.\n",
 "Action 1: Search[David Chanoff]\u001b[0m\n",
-"Observation 1: \u001b[103mDavid Chanoff is a noted author of non-fiction work. His work has typically involved collaborations with the principal protagonist of the work concerned. His collaborators have included; Augustus A. White, Joycelyn Elders, Đoàn Văn Toại, William J. Crowe, Ariel Sharon, Kenneth Good and Felix Zandman. He has also written about a wide range of subjects including literary history, education and foreign for The Washington Post, The New Republic and The New York Times Magazine. He has published more than twelve books.\u001b[0m\n",
-"Thought 2:\u001b[102m The U.S. Navy admiral David Chanoff collaborated with is William J. Crowe.\n",
+"Observation 1: \u001b[33;1m\u001b[1;3mDavid Chanoff is a noted author of non-fiction work. His work has typically involved collaborations with the principal protagonist of the work concerned. His collaborators have included; Augustus A. White, Joycelyn Elders, Đoàn Văn Toại, William J. Crowe, Ariel Sharon, Kenneth Good and Felix Zandman. He has also written about a wide range of subjects including literary history, education and foreign for The Washington Post, The New Republic and The New York Times Magazine. He has published more than twelve books.\u001b[0m\n",
+"Thought 2:\u001b[32;1m\u001b[1;3m The U.S. Navy admiral David Chanoff collaborated with is William J. Crowe.\n",
 "Action 2: Search[William J. Crowe]\u001b[0m\n",
-"Observation 2: \u001b[103mWilliam James Crowe Jr. (January 2, 1925 – October 18, 2007) was a United States Navy admiral and diplomat who served as the 11th chairman of the Joint Chiefs of Staff under Presidents Ronald Reagan and George H. W. Bush, and as the ambassador to the United Kingdom and Chair of the Intelligence Oversight Board under President Bill Clinton.\u001b[0m\n",
-"Thought 3:\u001b[102m William J. Crowe served as the ambassador to the United Kingdom under President Bill Clinton. So the answer is Bill Clinton.\n",
+"Observation 2: \u001b[33;1m\u001b[1;3mWilliam James Crowe Jr. (January 2, 1925 – October 18, 2007) was a United States Navy admiral and diplomat who served as the 11th chairman of the Joint Chiefs of Staff under Presidents Ronald Reagan and George H. W. Bush, and as the ambassador to the United Kingdom and Chair of the Intelligence Oversight Board under President Bill Clinton.\u001b[0m\n",
+"Thought 3:\u001b[32;1m\u001b[1;3m William J. Crowe served as the ambassador to the United Kingdom under President Bill Clinton.\n",
 "Action 3: Finish[Bill Clinton]\u001b[0m\n",
 "\u001b[1m> Finished chain.\u001b[0m\n"
 ]
@@ -68,7 +68,7 @@
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "0a6bd3b4",
+"id": "2d6f4b1d",
 "metadata": {},
 "outputs": [],
 "source": []
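The `Action N: Tool[input]` lines in this trace use the bracketed grammar that `ReActRouterChain` parses later in this diff with `re.search(r"(.*?)\[(.*?)\]", ...)`. A quick standalone illustration:

```python
import re

# Same pattern the ReAct code in this diff uses to split "Action" lines.
action_str = "Search[William J. Crowe]"
match = re.search(r"(.*?)\[(.*?)\]", action_str)
assert match is not None
print(match.group(1))  # "Search"            (the tool / expert name)
print(match.group(2))  # "William J. Crowe"  (the directive)
```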
@@ -24,19 +24,19 @@
 "\n",
 "\u001b[1m> Entering new chain...\u001b[0m\n",
 "What is the hometown of the reigning men's U.S. Open champion?\n",
-"Are follow up questions needed here:\u001b[102m Yes.\n",
+"Are follow up questions needed here:\u001b[32;1m\u001b[1;3m Yes.\n",
 "Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
-"Intermediate answer: \u001b[103mCarlos Alcaraz won the 2022 Men's single title while Poland's Iga Swiatek won the Women's single title defeating Tunisian's Ons Jabeur..\u001b[0m\u001b[102m\n",
-"Follow up: Where is Carlos Alcaraz from?\u001b[0m\n",
-"Intermediate answer: \u001b[103mEl Palmar, Murcia, Spain.\u001b[0m\u001b[102m\n",
-"So the final answer is: El Palmar, Murcia, Spain\u001b[0m\n",
+"Intermediate answer: \u001b[36;1m\u001b[1;3mCarlos Alcaraz\u001b[0m\n",
+"\u001b[32;1m\u001b[1;3mFollow up: Where is Carlos Alcaraz from?\u001b[0m\n",
+"Intermediate answer: \u001b[36;1m\u001b[1;3mEl Palmar, Spain\u001b[0m\n",
+"\u001b[32;1m\u001b[1;3mSo the final answer is: El Palmar, Spain\u001b[0m\n",
 "\u001b[1m> Finished chain.\u001b[0m\n"
 ]
 },
 {
 "data": {
 "text/plain": [
-"'\\nSo the final answer is: El Palmar, Murcia, Spain'"
+"'El Palmar, Spain'"
 ]
 },
 "execution_count": 1,
langchain/__init__.py
@@ -8,10 +8,7 @@ with open(Path(__file__).absolute().parents[0] / "VERSION") as _f:
 from langchain.chains import (
     LLMChain,
     LLMMathChain,
-    MRKLChain,
     PythonChain,
-    ReActChain,
-    SelfAskWithSearchChain,
     SerpAPIChain,
     SQLDatabaseChain,
     VectorDBQA,
@@ -24,6 +21,7 @@ from langchain.prompts import (
     Prompt,
     PromptTemplate,
 )
+from langchain.smart_chains import MRKLChain, ReActChain, SelfAskWithSearchChain
 from langchain.sql_database import SQLDatabase
 from langchain.vectorstores import FAISS, ElasticVectorSearch
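The net effect of these two hunks is that the three agent-style chains move to `langchain.smart_chains` while the top-level import surface stays the same. A small sanity-check sketch (assuming the package at this commit):

```python
# Top-level re-exports are preserved by the added smart_chains import.
from langchain import MRKLChain, ReActChain, SelfAskWithSearchChain

# The same names resolve from the new subpackage:
from langchain.smart_chains import MRKLChain as SmartMRKLChain
assert MRKLChain is SmartMRKLChain
```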
langchain/chains/__init__.py
@@ -1,10 +1,7 @@
 """Chains are easily reusable components which can be linked together."""
 from langchain.chains.llm import LLMChain
 from langchain.chains.llm_math.base import LLMMathChain
-from langchain.chains.mrkl.base import MRKLChain
 from langchain.chains.python import PythonChain
-from langchain.chains.react.base import ReActChain
-from langchain.chains.self_ask_with_search.base import SelfAskWithSearchChain
 from langchain.chains.serpapi import SerpAPIChain
 from langchain.chains.sql_database.base import SQLDatabaseChain
 from langchain.chains.vector_db_qa.base import VectorDBQA
@@ -13,10 +10,7 @@ __all__ = [
     "LLMChain",
     "LLMMathChain",
     "PythonChain",
-    "SelfAskWithSearchChain",
     "SerpAPIChain",
-    "ReActChain",
     "SQLDatabaseChain",
-    "MRKLChain",
     "VectorDBQA",
 ]
langchain/chains/react/base.py (deleted)
@@ -1,107 +0,0 @@
"""Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
import re
from typing import Any, Dict, List, Tuple

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.react.prompt import PROMPT
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
from langchain.input import ChainedInput
from langchain.llms.base import LLM


def predict_until_observation(
    llm_chain: LLMChain, prompt: str, i: int
) -> Tuple[str, str, str]:
    """Generate text until an observation is needed."""
    action_prefix = f"Action {i}: "
    stop_seq = f"\nObservation {i}:"
    ret_text = llm_chain.predict(input=prompt, stop=[stop_seq])
    # Sometimes the LLM forgets to take an action, so we prompt it to.
    while not ret_text.split("\n")[-1].startswith(action_prefix):
        ret_text += f"\nAction {i}:"
        new_text = llm_chain.predict(input=prompt + ret_text, stop=[stop_seq])
        ret_text += new_text
    # The action block should be the last line.
    action_block = ret_text.split("\n")[-1]
    action_str = action_block[len(action_prefix) :]
    # Parse out the action and the directive.
    re_matches = re.search(r"(.*?)\[(.*?)\]", action_str)
    if re_matches is None:
        raise ValueError(f"Could not parse action directive: {action_str}")
    return ret_text, re_matches.group(1), re_matches.group(2)


class ReActChain(Chain, BaseModel):
    """Chain that implements the ReAct paper.

    Example:
        .. code-block:: python

            from langchain import ReActChain, OpenAI
            react = ReAct(llm=OpenAI())
    """

    llm: LLM
    """LLM wrapper to use."""
    docstore: Docstore
    """Docstore to use."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Expect output key.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        question = inputs[self.input_key]
        llm_chain = LLMChain(llm=self.llm, prompt=PROMPT)
        chained_input = ChainedInput(f"{question}\nThought 1:", verbose=self.verbose)
        i = 1
        document = None
        while True:
            ret_text, action, directive = predict_until_observation(
                llm_chain, chained_input.input, i
            )
            chained_input.add(ret_text, color="green")
            if action == "Search":
                result = self.docstore.search(directive)
                if isinstance(result, Document):
                    document = result
                    observation = document.summary
                else:
                    document = None
                    observation = result
            elif action == "Lookup":
                if document is None:
                    raise ValueError("Cannot lookup without a successful search first")
                observation = document.lookup(directive)
            elif action == "Finish":
                return {self.output_key: directive}
            else:
                raise ValueError(f"Got unknown action directive: {action}")
            chained_input.add(f"\nObservation {i}: ")
            chained_input.add(observation, color="yellow")
            chained_input.add(f"\nThought {i + 1}:")
            i += 1
langchain/chains/self_ask_with_search/base.py (deleted)
@@ -1,149 +0,0 @@
"""Chain that does self ask with search."""
from typing import Any, Dict, List

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.self_ask_with_search.prompt import PROMPT
from langchain.chains.serpapi import SerpAPIChain
from langchain.input import ChainedInput
from langchain.llms.base import LLM


def extract_answer(generated: str) -> str:
    """Extract answer from text."""
    if "\n" not in generated:
        last_line = generated
    else:
        last_line = generated.split("\n")[-1]

    if ":" not in last_line:
        after_colon = last_line
    else:
        after_colon = generated.split(":")[-1]

    if " " == after_colon[0]:
        after_colon = after_colon[1:]
    if "." == after_colon[-1]:
        after_colon = after_colon[:-1]

    return after_colon


def extract_question(generated: str, followup: str) -> str:
    """Extract question from text."""
    if "\n" not in generated:
        last_line = generated
    else:
        last_line = generated.split("\n")[-1]

    if followup not in last_line:
        print("we probably should never get here..." + generated)

    if ":" not in last_line:
        after_colon = last_line
    else:
        after_colon = generated.split(":")[-1]

    if " " == after_colon[0]:
        after_colon = after_colon[1:]
    if "?" != after_colon[-1]:
        print("we probably should never get here..." + generated)

    return after_colon


def get_last_line(generated: str) -> str:
    """Get the last line in text."""
    if "\n" not in generated:
        last_line = generated
    else:
        last_line = generated.split("\n")[-1]

    return last_line


def greenify(_input: str) -> str:
    """Add green highlighting to text."""
    return "\x1b[102m" + _input + "\x1b[0m"


def yellowfy(_input: str) -> str:
    """Add yellow highlighting to text."""
    return "\x1b[106m" + _input + "\x1b[0m"


class SelfAskWithSearchChain(Chain, BaseModel):
    """Chain that does self ask with search.

    Example:
        .. code-block:: python

            from langchain import SelfAskWithSearchChain, OpenAI, SerpAPIChain
            search_chain = SerpAPIChain()
            self_ask = SelfAskWithSearchChain(llm=OpenAI(), search_chain=search_chain)
    """

    llm: LLM
    """LLM wrapper to use."""
    search_chain: SerpAPIChain
    """Search chain to use."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Expect output key.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        chained_input = ChainedInput(inputs[self.input_key], verbose=self.verbose)
        chained_input.add("\nAre follow up questions needed here:")
        llm_chain = LLMChain(llm=self.llm, prompt=PROMPT)
        intermediate = "\nIntermediate answer:"
        followup = "Follow up:"
        finalans = "\nSo the final answer is:"
        ret_text = llm_chain.predict(input=chained_input.input, stop=[intermediate])
        chained_input.add(ret_text, color="green")
        while followup in get_last_line(ret_text):
            question = extract_question(ret_text, followup)
            external_answer = self.search_chain.run(question)
            if external_answer is not None:
                chained_input.add(intermediate + " ")
                chained_input.add(external_answer + ".", color="yellow")
                ret_text = llm_chain.predict(
                    input=chained_input.input, stop=["\nIntermediate answer:"]
                )
                chained_input.add(ret_text, color="green")
            else:
                # We only get here in the very rare case that Google returns no answer.
                chained_input.add(intermediate + " ")
                preds = llm_chain.predict(
                    input=chained_input.input, stop=["\n" + followup, finalans]
                )
                chained_input.add(preds, color="green")

        if finalans not in ret_text:
            chained_input.add(finalans)
            ret_text = llm_chain.predict(input=chained_input.input, stop=["\n"])
            chained_input.add(ret_text, color="green")

        return {self.output_key: ret_text}
langchain/smart_chains/__init__.py (new file, 6 lines)
@@ -0,0 +1,6 @@
"""Smart chains."""
from langchain.smart_chains.mrkl.base import MRKLChain
from langchain.smart_chains.react.base import ReActChain
from langchain.smart_chains.self_ask_with_search.base import SelfAskWithSearchChain

__all__ = ["MRKLChain", "SelfAskWithSearchChain", "ReActChain"]
langchain/smart_chains/mrkl/base.py
@@ -1,14 +1,12 @@
 """Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
-from typing import Any, Callable, Dict, List, NamedTuple, Tuple
+from typing import Any, Callable, List, NamedTuple, Optional, Tuple

-from pydantic import BaseModel, Extra
-
-from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
-from langchain.chains.mrkl.prompt import BASE_TEMPLATE
-from langchain.input import ChainedInput, get_color_mapping
 from langchain.llms.base import LLM
-from langchain.prompts import BasePromptTemplate, PromptTemplate
+from langchain.prompts import PromptTemplate
+from langchain.smart_chains.mrkl.prompt import BASE_TEMPLATE
+from langchain.smart_chains.router import LLMRouterChain
+from langchain.smart_chains.router_expert import ExpertConfig, RouterExpertChain

 FINAL_ANSWER_ACTION = "Final Answer: "

@@ -30,9 +28,9 @@ class ChainConfig(NamedTuple):
 def get_action_and_input(llm_output: str) -> Tuple[str, str]:
     """Parse out the action and input from the LLM output."""
     ps = [p for p in llm_output.split("\n") if p]
-    if ps[-1].startswith(FINAL_ANSWER_ACTION):
+    if ps[-1].startswith("Final Answer"):
         directive = ps[-1][len(FINAL_ANSWER_ACTION) :]
-        return FINAL_ANSWER_ACTION, directive
+        return "Final Answer", directive
     if not ps[-1].startswith("Action Input: "):
         raise ValueError(
             "The last line does not have an action input, "
@@ -48,33 +46,49 @@ def get_action_and_input(llm_output: str) -> Tuple[str, str]:
     return action, action_input.strip(" ").strip('"')


-class MRKLChain(Chain, BaseModel):
+class MRKLRouterChain(LLMRouterChain):
+    """Router for the MRKL chain."""
+
+    @property
+    def observation_prefix(self) -> str:
+        """Prefix to append the observation with."""
+        return "Observation: "
+
+    @property
+    def router_prefix(self) -> str:
+        """Prefix to append the router call with."""
+        return "Thought:"
+
+    def __init__(self, llm: LLM, chain_configs: List[ChainConfig], **kwargs: Any):
+        """Initialize with an LLM and the chain configs it has access to."""
+        tools = "\n".join(
+            [f"{c.action_name}: {c.action_description}" for c in chain_configs]
+        )
+        tool_names = ", ".join([chain.action_name for chain in chain_configs])
+        template = BASE_TEMPLATE.format(tools=tools, tool_names=tool_names)
+        prompt = PromptTemplate(template=template, input_variables=["input"])
+        llm_chain = LLMChain(llm=llm, prompt=prompt)
+        stops = ["\nObservation"]
+        super().__init__(llm_chain=llm_chain, stops=stops, **kwargs)
+
+    def _extract_action_and_input(self, text: str) -> Optional[Tuple[str, str]]:
+        return get_action_and_input(text)
+
+
+class MRKLChain(RouterExpertChain):
     """Chain that implements the MRKL system.

     Example:
         .. code-block:: python

-            from langchain import OpenAI, Prompt, MRKLChain
+            from langchain import OpenAI, MRKLChain
             from langchain.chains.mrkl.base import ChainConfig
             llm = OpenAI(temperature=0)
-            prompt = PromptTemplate(...)
-            action_to_chain_map = {...}
-            mrkl = MRKLChain(
-                llm=llm,
-                prompt=prompt,
-                action_to_chain_map=action_to_chain_map
-            )
+            chains = [...]
+            mrkl = MRKLChain.from_chains(llm=llm, chains=chains)
     """

-    llm: LLM
-    """LLM wrapper to use as router."""
-    prompt: BasePromptTemplate
-    """Prompt to use as router."""
-    action_to_chain_map: Dict[str, Callable]
-    """Mapping from action name to chain to execute."""
-    input_key: str = "question"  #: :meta private:
-    output_key: str = "answer"  #: :meta private:
-
     @classmethod
     def from_chains(
         cls, llm: LLM, chains: List[ChainConfig], **kwargs: Any
@@ -114,57 +128,8 @@
             ]
             mrkl = MRKLChain.from_chains(llm, chains)
         """
-        tools = "\n".join(
-            [f"{chain.action_name}: {chain.action_description}" for chain in chains]
-        )
-        tool_names = ", ".join([chain.action_name for chain in chains])
-        template = BASE_TEMPLATE.format(tools=tools, tool_names=tool_names)
-        prompt = PromptTemplate(template=template, input_variables=["input"])
-        action_to_chain_map = {chain.action_name: chain.action for chain in chains}
-        return cls(
-            llm=llm, prompt=prompt, action_to_chain_map=action_to_chain_map, **kwargs
-        )
-
-    class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
-        arbitrary_types_allowed = True
-
-    @property
-    def input_keys(self) -> List[str]:
-        """Expect input key.
-
-        :meta private:
-        """
-        return [self.input_key]
-
-    @property
-    def output_keys(self) -> List[str]:
-        """Expect output key.
-
-        :meta private:
-        """
-        return [self.output_key]
-
-    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
-        llm_chain = LLMChain(llm=self.llm, prompt=self.prompt)
-        chained_input = ChainedInput(
-            f"{inputs[self.input_key]}\nThought:", verbose=self.verbose
-        )
-        color_mapping = get_color_mapping(
-            list(self.action_to_chain_map.keys()), excluded_colors=["green"]
-        )
-        while True:
-            thought = llm_chain.predict(
-                input=chained_input.input, stop=["\nObservation"]
-            )
-            chained_input.add(thought, color="green")
-            action, action_input = get_action_and_input(thought)
-            if action == FINAL_ANSWER_ACTION:
-                return {self.output_key: action_input}
-            chain = self.action_to_chain_map[action]
-            ca = chain(action_input)
-            chained_input.add("\nObservation: ")
-            chained_input.add(ca, color=color_mapping[action])
-            chained_input.add("\nThought:")
+        router_chain = MRKLRouterChain(llm, chains)
+        expert_configs = [
+            ExpertConfig(expert_name=c.action_name, expert=c.action) for c in chains
+        ]
+        return cls(router_chain=router_chain, expert_configs=expert_configs, **kwargs)
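After this refactor, `get_action_and_input` still does the parsing and `MRKLChain.from_chains` just wires a `MRKLRouterChain` to the tool callables. A hedged sketch of both contracts; the lambdas and the `llm` handle are hypothetical stand-ins, and the expected outputs are inferred from the parser above:

```python
from langchain.smart_chains.mrkl.base import ChainConfig, MRKLChain, get_action_and_input

# Parsing contract: the last non-empty line must be an "Action Input: ..."
# line, or a "Final Answer: ..." line.
text = 'Thought: I should look it up\nAction: Search\nAction Input: "Harry Styles age"'
print(get_action_and_input(text))  # expected: ("Search", "Harry Styles age")
done = "Thought: I now know the final answer\nFinal Answer: 2.152"
print(get_action_and_input(done))  # expected: ("Final Answer", "2.152")

# Wiring contract, mirroring the from_chains docstring above; the lambdas
# are hypothetical experts standing in for real search/calculator chains.
chains = [
    ChainConfig(action_name="Search", action=lambda q: "28 years", action_description="useful for lookups"),
    ChainConfig(action_name="Calculator", action=lambda q: "2.152", action_description="useful for math"),
]
# mrkl = MRKLChain.from_chains(llm, chains)  # llm: any langchain LLM wrapper
```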
langchain/smart_chains/react/base.py (new file, 106 lines)
@@ -0,0 +1,106 @@
"""Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
import re
from typing import Any, Optional, Tuple

from pydantic import BaseModel

from langchain.chains.llm import LLMChain
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
from langchain.llms.base import LLM
from langchain.smart_chains.react.prompt import PROMPT
from langchain.smart_chains.router import LLMRouterChain
from langchain.smart_chains.router_expert import ExpertConfig, RouterExpertChain


class ReActRouterChain(LLMRouterChain, BaseModel):
    """Router for the ReAct chain."""

    i: int = 1

    def __init__(self, llm: LLM, **kwargs: Any):
        """Initialize with the language model."""
        llm_chain = LLMChain(llm=llm, prompt=PROMPT)
        stops = ["\nObservation 1:"]
        super().__init__(llm_chain=llm_chain, stops=stops, **kwargs)

    def _fix_text(self, text: str) -> str:
        return text + f"\nAction {self.i}:"

    def _extract_action_and_input(self, text: str) -> Optional[Tuple[str, str]]:
        action_prefix = f"Action {self.i}: "
        if not text.split("\n")[-1].startswith(action_prefix):
            return None
        self.i += 1
        self.stops = [f"\nObservation {self.i}:"]
        action_block = text.split("\n")[-1]

        action_str = action_block[len(action_prefix) :]
        # Parse out the action and the directive.
        re_matches = re.search(r"(.*?)\[(.*?)\]", action_str)
        if re_matches is None:
            raise ValueError(f"Could not parse action directive: {action_str}")
        return re_matches.group(1), re_matches.group(2)

    @property
    def finish_action_name(self) -> str:
        """Name of the action that finishes the chain."""
        return "Finish"

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return f"Observation {self.i - 1}: "

    @property
    def router_prefix(self) -> str:
        """Prefix to append the router call with."""
        return f"Thought {self.i}:"


class DocstoreExplorer:
    """Class to assist with exploration of a document store."""

    def __init__(self, docstore: Docstore):
        """Initialize with a docstore, and set initial document to None."""
        self.docstore = docstore
        self.document: Optional[Document] = None

    def search(self, term: str) -> str:
        """Search for a term in the docstore, and if found save."""
        result = self.docstore.search(term)
        if isinstance(result, Document):
            self.document = result
            return self.document.summary
        else:
            self.document = None
            return result

    def lookup(self, term: str) -> str:
        """Lookup a term in document (if saved)."""
        if self.document is None:
            raise ValueError("Cannot lookup without a successful search first")
        return self.document.lookup(term)


class ReActChain(RouterExpertChain):
    """Chain that implements the ReAct paper.

    Example:
        .. code-block:: python

            from langchain import ReActChain, OpenAI
            from langchain.docstore.wikipedia import Wikipedia
            react = ReActChain(llm=OpenAI(), docstore=Wikipedia())
    """

    def __init__(self, llm: LLM, docstore: Docstore, **kwargs: Any):
        """Initialize with the LLM and a docstore."""
        router_chain = ReActRouterChain(llm)
        docstore_explorer = DocstoreExplorer(docstore)
        expert_configs = [
            ExpertConfig(expert_name="Search", expert=docstore_explorer.search),
            ExpertConfig(expert_name="Lookup", expert=docstore_explorer.lookup),
        ]
        super().__init__(
            router_chain=router_chain, expert_configs=expert_configs, **kwargs
        )
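A minimal usage sketch, mirroring the integration test for this module later in the diff. The `run` call assumes the base `Chain` interface and is commented out since it needs OpenAI credentials and network access:

```python
from langchain.docstore.wikipedia import Wikipedia
from langchain.llms.openai import OpenAI
from langchain.smart_chains.react.base import ReActChain

react = ReActChain(llm=OpenAI(temperature=0), docstore=Wikipedia())
question = (
    "Author David Chanoff has collaborated with a U.S. Navy admiral who served "
    "as the ambassador to the United Kingdom under which President?"
)
# answer = react.run(question)  # expected, per the notebook trace: "Bill Clinton"
```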
langchain/smart_chains/router.py (new file, 90 lines)
@@ -0,0 +1,90 @@
"""Chain that takes in an input and produces an action and action input."""
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Tuple

from pydantic import BaseModel

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain


class RouterChain(Chain, BaseModel, ABC):
    """Chain responsible for deciding the action to take."""

    input_key: str = "input_text"  #: :meta private:
    action_key: str = "action"  #: :meta private:
    action_input_key: str = "action_input"  #: :meta private:
    log_key: str = "log"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Will be the input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return three keys: the action, the action input, and the log.

        :meta private:
        """
        return [self.action_key, self.action_input_key, self.log_key]

    @abstractmethod
    def get_action_and_input(self, text: str) -> Tuple[str, str, str]:
        """Return action, action input, and log (in that order)."""

    @property
    @abstractmethod
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""

    @property
    @abstractmethod
    def router_prefix(self) -> str:
        """Prefix to append the router call with."""

    @property
    def finish_action_name(self) -> str:
        """Name of the action that finishes the chain."""
        return "Final Answer"

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        action, action_input, log = self.get_action_and_input(inputs[self.input_key])
        return {
            self.action_key: action,
            self.action_input_key: action_input,
            self.log_key: log,
        }


class LLMRouterChain(RouterChain, BaseModel, ABC):
    """RouterChain that uses an LLM."""

    llm_chain: LLMChain
    stops: Optional[List[str]]

    @abstractmethod
    def _extract_action_and_input(self, text: str) -> Optional[Tuple[str, str]]:
        """Extract action and action input from llm output."""

    def _fix_text(self, text: str) -> str:
        """Fix the text."""
        raise ValueError("fix_text not implemented for this router.")

    def get_action_and_input(self, text: str) -> Tuple[str, str, str]:
        """Return action, action input, and log (in that order)."""
        input_key = self.llm_chain.input_keys[0]
        inputs = {input_key: text, "stop": self.stops}
        full_output = self.llm_chain.predict(**inputs)
        parsed_output = self._extract_action_and_input(full_output)
        while parsed_output is None:
            full_output = self._fix_text(full_output)
            inputs = {input_key: text + full_output, "stop": self.stops}
            output = self.llm_chain.predict(**inputs)
            full_output += output
            parsed_output = self._extract_action_and_input(full_output)
        action, action_input = parsed_output
        return action, action_input, full_output
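Concrete routers in this diff (MRKL, ReAct, self-ask) each fill in the three holes left open above: `_extract_action_and_input`, `observation_prefix`, and `router_prefix`. A hedged sketch of that minimal subclass surface; `EchoRouterChain` is hypothetical and exists only to illustrate the contract:

```python
from typing import Optional, Tuple

from langchain.smart_chains.router import LLMRouterChain


class EchoRouterChain(LLMRouterChain):  # hypothetical, for illustration only
    def _extract_action_and_input(self, text: str) -> Optional[Tuple[str, str]]:
        # Expect "<action>: <input>" on the last line; returning None makes
        # get_action_and_input retry the LLM call via _fix_text.
        last = text.split("\n")[-1]
        if ": " not in last:
            return None
        action, action_input = last.split(": ", 1)
        return action, action_input

    @property
    def observation_prefix(self) -> str:
        return "Observation: "

    @property
    def router_prefix(self) -> str:
        return "Thought:"
```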
langchain/smart_chains/router_expert.py (new file, 77 lines)
@@ -0,0 +1,77 @@
"""Router-Expert framework."""
from typing import Callable, Dict, List, NamedTuple

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.input import ChainedInput, get_color_mapping
from langchain.smart_chains.router import RouterChain


class ExpertConfig(NamedTuple):
    """Configuration for experts."""

    expert_name: str
    expert: Callable[[str], str]


class RouterExpertChain(Chain, BaseModel):
    """Chain that implements the Router/Expert system."""

    router_chain: RouterChain
    """Router chain."""
    expert_configs: List[ExpertConfig]
    """Expert configs this chain has access to."""
    starter_string: str = "\n"
    """String to put after user input but before first router."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Expect output key.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        action_to_chain_map = {e.expert_name: e.expert for e in self.expert_configs}
        starter_string = (
            inputs[self.input_key]
            + self.starter_string
            + self.router_chain.router_prefix
        )
        chained_input = ChainedInput(
            starter_string,
            verbose=self.verbose,
        )
        color_mapping = get_color_mapping(
            [c.expert_name for c in self.expert_configs], excluded_colors=["green"]
        )
        while True:
            action, action_input, log = self.router_chain.get_action_and_input(
                chained_input.input
            )
            chained_input.add(log, color="green")
            if action == self.router_chain.finish_action_name:
                return {self.output_key: action_input}
            chain = action_to_chain_map[action]
            ca = chain(action_input)
            chained_input.add(f"\n{self.router_chain.observation_prefix}")
            chained_input.add(ca, color=color_mapping[action])
            chained_input.add(f"\n{self.router_chain.router_prefix}")
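This generic loop is what MRKLChain, ReActChain, and SelfAskWithSearchChain now share; each supplies a router plus a list of experts. A hedged sketch of the wiring, with hypothetical stand-in experts:

```python
from langchain.smart_chains.router_expert import ExpertConfig, RouterExpertChain

expert_configs = [
    ExpertConfig(expert_name="Search", expert=lambda q: "stub search result"),
    ExpertConfig(expert_name="Lookup", expert=lambda q: "stub lookup result"),
]
# some_router must be a concrete RouterChain (e.g. the EchoRouterChain sketch above):
# chain = RouterExpertChain(router_chain=some_router, expert_configs=expert_configs)
```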
langchain/smart_chains/self_ask_with_search/base.py (new file, 79 lines)
@@ -0,0 +1,79 @@
"""Chain that does self ask with search."""
from typing import Any, Tuple

from langchain.chains.llm import LLMChain
from langchain.chains.serpapi import SerpAPIChain
from langchain.llms.base import LLM
from langchain.smart_chains.router import LLMRouterChain
from langchain.smart_chains.router_expert import ExpertConfig, RouterExpertChain
from langchain.smart_chains.self_ask_with_search.prompt import PROMPT


class SelfAskWithSearchRouter(LLMRouterChain):
    """Router for the self-ask-with-search paper."""

    def __init__(self, llm: LLM, **kwargs: Any):
        """Initialize with an LLM."""
        llm_chain = LLMChain(llm=llm, prompt=PROMPT)
        super().__init__(llm_chain=llm_chain, **kwargs)

    def _extract_action_and_input(self, text: str) -> Tuple[str, str]:
        followup = "Follow up:"
        if "\n" not in text:
            last_line = text
        else:
            last_line = text.split("\n")[-1]

        if followup not in last_line:
            finish_string = "So the final answer is: "
            if finish_string not in last_line:
                raise ValueError("We should probably never get here")
            return "Final Answer", text[len(finish_string) :]

        if ":" not in last_line:
            after_colon = last_line
        else:
            after_colon = text.split(":")[-1]

        if " " == after_colon[0]:
            after_colon = after_colon[1:]
        if "?" != after_colon[-1]:
            print("we probably should never get here..." + text)

        return "Intermediate Answer", after_colon

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Intermediate answer: "

    @property
    def router_prefix(self) -> str:
        """Prefix to append the router call with."""
        return ""


class SelfAskWithSearchChain(RouterExpertChain):
    """Chain that does self ask with search.

    Example:
        .. code-block:: python

            from langchain import SelfAskWithSearchChain, OpenAI, SerpAPIChain
            search_chain = SerpAPIChain()
            self_ask = SelfAskWithSearchChain(llm=OpenAI(), search_chain=search_chain)
    """

    def __init__(self, llm: LLM, search_chain: SerpAPIChain, **kwargs: Any):
        """Initialize with just an LLM and a search chain."""
        intermediate = "\nIntermediate answer:"
        router = SelfAskWithSearchRouter(llm, stops=[intermediate])
        expert_configs = [
            ExpertConfig(expert_name="Intermediate Answer", expert=search_chain.run)
        ]
        super().__init__(
            router_chain=router,
            expert_configs=expert_configs,
            starter_string="\nAre follow up questions needed here:",
            **kwargs
        )
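Usage matches the class docstring and the self-ask notebook earlier in the diff. A hedged sketch; the `run` call assumes the base `Chain` interface and needs OpenAI and SerpAPI credentials at runtime:

```python
from langchain import OpenAI, SelfAskWithSearchChain, SerpAPIChain

search_chain = SerpAPIChain()
self_ask = SelfAskWithSearchChain(llm=OpenAI(temperature=0), search_chain=search_chain)
# self_ask.run("What is the hometown of the reigning men's U.S. Open champion?")
# expected, per the notebook output above: "El Palmar, Spain"
```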
@@ -1,8 +1,8 @@
 """Integration test for self ask with search."""

-from langchain.chains.react.base import ReActChain
 from langchain.docstore.wikipedia import Wikipedia
 from langchain.llms.openai import OpenAI
+from langchain.smart_chains.react.base import ReActChain


 def test_react() -> None:
@@ -1,7 +1,7 @@
 """Integration test for self ask with search."""
-from langchain.chains.self_ask_with_search.base import SelfAskWithSearchChain
 from langchain.chains.serpapi import SerpAPIChain
 from langchain.llms.openai import OpenAI
+from langchain.smart_chains.self_ask_with_search.base import SelfAskWithSearchChain


 def test_self_ask_with_search() -> None:
tests/unit_tests/smart_chains/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
"""Test smart chain functionality."""
@@ -2,9 +2,13 @@

 import pytest

-from langchain.chains.mrkl.base import ChainConfig, MRKLChain, get_action_and_input
-from langchain.chains.mrkl.prompt import BASE_TEMPLATE
 from langchain.prompts import PromptTemplate
+from langchain.smart_chains.mrkl.base import (
+    ChainConfig,
+    MRKLRouterChain,
+    get_action_and_input,
+)
+from langchain.smart_chains.mrkl.prompt import BASE_TEMPLATE
 from tests.unit_tests.llms.fake_llm import FakeLLM


@@ -59,12 +63,12 @@ def test_from_chains() -> None:
             action_name="bar", action=lambda x: "bar", action_description="foobar2"
         ),
     ]
-    mrkl_chain = MRKLChain.from_chains(FakeLLM(), chain_configs)
+    router_chain = MRKLRouterChain(FakeLLM(), chain_configs)
     expected_tools_prompt = "foo: foobar1\nbar: foobar2"
     expected_tool_names = "foo, bar"
     expected_template = BASE_TEMPLATE.format(
         tools=expected_tools_prompt, tool_names=expected_tool_names
     )
-    prompt = mrkl_chain.prompt
+    prompt = router_chain.llm_chain.prompt
     assert isinstance(prompt, PromptTemplate)
     assert prompt.template == expected_template
@@ -4,12 +4,11 @@ from typing import Any, List, Mapping, Optional, Union

 import pytest

-from langchain.chains.llm import LLMChain
-from langchain.chains.react.base import ReActChain, predict_until_observation
 from langchain.docstore.base import Docstore
 from langchain.docstore.document import Document
 from langchain.llms.base import LLM
 from langchain.prompts.prompt import PromptTemplate
+from langchain.smart_chains.react.base import ReActChain, ReActRouterChain

 _PAGE_CONTENT = """This is a page about LangChain.

@@ -53,8 +52,8 @@ def test_predict_until_observation_normal() -> None:
     """Test predict_until_observation when observation is made normally."""
     outputs = ["foo\nAction 1: search[foo]"]
     fake_llm = FakeListLLM(outputs)
-    fake_llm_chain = LLMChain(llm=fake_llm, prompt=_FAKE_PROMPT)
-    ret_text, action, directive = predict_until_observation(fake_llm_chain, "", 1)
+    router_chain = ReActRouterChain(llm=fake_llm)
+    action, directive, ret_text = router_chain.get_action_and_input("")
     assert ret_text == outputs[0]
     assert action == "search"
     assert directive == "foo"
@@ -64,22 +63,13 @@ def test_predict_until_observation_repeat() -> None:
     """Test when no action is generated initially."""
     outputs = ["foo", " search[foo]"]
     fake_llm = FakeListLLM(outputs)
-    fake_llm_chain = LLMChain(llm=fake_llm, prompt=_FAKE_PROMPT)
-    ret_text, action, directive = predict_until_observation(fake_llm_chain, "", 1)
+    router_chain = ReActRouterChain(llm=fake_llm)
+    action, directive, ret_text = router_chain.get_action_and_input("")
     assert ret_text == "foo\nAction 1: search[foo]"
     assert action == "search"
     assert directive == "foo"


-def test_predict_until_observation_error() -> None:
-    """Test handling of generation of text that cannot be parsed."""
-    outputs = ["foo\nAction 1: foo"]
-    fake_llm = FakeListLLM(outputs)
-    fake_llm_chain = LLMChain(llm=fake_llm, prompt=_FAKE_PROMPT)
-    with pytest.raises(ValueError):
-        predict_until_observation(fake_llm_chain, "", 1)
-
-
 def test_react_chain() -> None:
     """Test react chain."""
     responses = [