forked from Archives/langchain
Compare commits: main...william/co (3 commits)
Author | SHA1 | Date |
---|---|---|
William Fu-Hinthorn | 9814161edb | 2 years ago |
William Fu-Hinthorn | 86fdeaf4ec | 2 years ago |
William Fu-Hinthorn | ad53a2ef81 | 2 years ago |
@@ -0,0 +1,116 @@
"""Implement an LLM chain that reasons in a self-consistent manner."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.llm import SelfConsistencyLLMChain
from langchain.chains.self_consistency.prompts.anli_prompt import ANLI_PROMPT
from langchain.chains.self_consistency.prompts.aqua_rat_prompt import AQUA_RAT_PROMPT
from langchain.chains.self_consistency.prompts.arc_prompt import ARC_PROMPT
from langchain.chains.self_consistency.prompts.arithmetic_reasoning_prompt import (
    ARITHMETIC_REASONING_PROMPT,
)
from langchain.chains.self_consistency.prompts.boolq_prompt import BOOLQ_PROMPT
from langchain.chains.self_consistency.prompts.esnli_prompt import ESNLI_PROMPT
from langchain.chains.self_consistency.prompts.hotpotqa_prompt import HOTPOTQA_PROMPT
from langchain.llms.base import LLM
from langchain.llms.openai import OpenAI
from langchain.prompt import Prompt

# Map each supported task name to its few-shot prompt.
_CLASS_TO_PROMPT: Dict[str, Prompt] = {
    "anli": ANLI_PROMPT,
    "aqua_rat": AQUA_RAT_PROMPT,
    "arc": ARC_PROMPT,
    "arithmetic_reasoning": ARITHMETIC_REASONING_PROMPT,
    "boolq": BOOLQ_PROMPT,
    "esnli": ESNLI_PROMPT,
    "hotpotqa": HOTPOTQA_PROMPT,
}

# TODO: Add auto-routing and more prompts
# Common aliases that fall back to one of the prompts above.
_FALLBACK_MAP: Dict[str, str] = {
    "nli": "anli",
    "natural_language_inference": "anli",
    "rte": "anli",
    "math": "aqua_rat",
    "qna": "hotpotqa",
}


class SelfConsistencyChain(Chain, BaseModel):
    """Implement an LLM chain to reason in a self-consistent manner.

    Based on "Self-Consistency Improves Chain of Thought Reasoning in
    Language Models" (https://arxiv.org/abs/2203.11171).

    Example:
        .. code-block:: python

            from langchain import SelfConsistencyChain, OpenAI
            chain = SelfConsistencyChain(llm=OpenAI(), default_task="arithmetic_reasoning")
    """

    llm: LLM
    """LLM wrapper to use."""
    default_task: str
    """The default task to run."""
    input_key: str = "prompt_inputs"  #: :meta private:
    output_key: str = "answer"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @classmethod
    def from_default(cls, default_task: str) -> "SelfConsistencyChain":
        """Load with a default LLM configured to sample multiple completions."""
        llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)
        return cls(llm=llm, default_task=default_task)

    @property
    def input_keys(self) -> List[str]:
        """Expect different keys depending on the task.

        :meta private:
        """
        return [self.input_key, "task"]

    @property
    def output_keys(self) -> List[str]:
        """Return the answer key.

        :meta private:
        """
        return [self.output_key]

    def _get_prompt(self, task: Optional[str]) -> Prompt:
        """Get the prompt for the task."""
        if task in _CLASS_TO_PROMPT:
            return _CLASS_TO_PROMPT[task]
        if task in _FALLBACK_MAP:
            return _CLASS_TO_PROMPT[_FALLBACK_MAP[task]]
        raise ValueError(f"Unknown task {task}")

    def _run(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        task = inputs["task"]
        prompt = self._get_prompt(task)
        llm_executor = SelfConsistencyLLMChain(prompt=prompt, llm=self.llm)
        llm_inputs = inputs[self.input_key]
        # Render a list of answer choices as "(a) ... (b) ..." for the prompt.
        if "choices" in llm_inputs and isinstance(llm_inputs["choices"], list):
            llm_inputs["choices"] = " ".join(
                f"({chr(97 + i)}) {choice}"
                for i, choice in enumerate(llm_inputs["choices"])
            )
        answer = llm_executor.predict(**llm_inputs)
        return {self.output_key: answer}

    def run(self, **kwargs: str) -> str:
        """Run the chain, routing to the prompt for the requested task."""
        task = kwargs.pop("task", self.default_task)
        _inputs = {
            self.input_key: kwargs,
            "task": task,
        }
        return self(_inputs)[self.output_key]
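A minimal usage sketch of the chain above. The module path, the example question, and the choices are assumptions for illustration (the diff view omits file names); `question` and `choices` are the input variables of the AQUA_RAT prompt that the `math` alias falls back to:

```python
# Sketch only: the module path is a guess, since the diff omits file names.
from langchain.chains.self_consistency.base import SelfConsistencyChain

# "math" routes through _FALLBACK_MAP to the AQUA_RAT prompt; from_default
# configures an OpenAI LLM, so an API key must be set in the environment.
chain = SelfConsistencyChain.from_default(default_task="math")
answer = chain.run(
    question="A car travels at 60 km/hr for 2.5 hours. How far does it go?",
    choices=["120 km", "150 km", "180 km", "90 km"],  # joined to "(a) 120 km (b) 150 km ..."
)
print(answer)  # the aggregated completion text
```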
@@ -0,0 +1,29 @@
"""Prompt for the AQuA-RAT algebraic word-problem dataset."""
# From https://arxiv.org/pdf/2203.11171.pdf

from langchain.prompt import Prompt

_PROMPT_TEMPLATE = """Q: John found that the average of 15 numbers is 40. If 10 is added to each number then the mean of the
numbers is? Answer Choices: (a) 50 (b) 45 (c) 65 (d) 78 (e) 64
A: If 10 is added to each number, then the mean of the numbers also increases by 10. So the new mean
would be 50. The answer is (a).
Q: If a / b = 3/4 and 8a + 5b = 22, then find the value of a. Answer Choices: (a) 1/2 (b) 3/2 (c) 5/2 (d) 4/2 (e)
7/2
A: If a / b = 3/4, then b = 4a / 3. So 8a + 5(4a / 3) = 22. This simplifies to 8a + 20a / 3 = 22, which means
44a / 3 = 22. So a is equal to 3/2. The answer is (b).
Q: A person is traveling at 20 km/hr and reached his destiny in 2.5 hr then find the distance? Answer Choices:
(a) 53 km (b) 55 km (c) 52 km (d) 60 km (e) 50 km
A: The distance that the person traveled would have been 20 km/hr * 2.5 hrs = 50 km. The answer is (e).
Q: How many keystrokes are needed to type the numbers from 1 to 500? Answer Choices: (a) 1156 (b) 1392
(c) 1480 (d) 1562 (e) 1788
A: There are 9 one-digit numbers from 1 to 9. There are 90 two-digit numbers from 10 to 99. There are 401
three-digit numbers from 100 to 500. 9 + 90(2) + 401(3) = 1392. The answer is (b).
Q: {question} Answer Choices: {choices}
A:"""

AQUA_RAT_PROMPT = Prompt(
    input_variables=["question", "choices"],
    template=_PROMPT_TEMPLATE,
)
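A quick sketch of how this template gets filled (the sample values are invented; `Prompt.format` applies Python `str.format` over the template):

```python
from langchain.chains.self_consistency.prompts.aqua_rat_prompt import AQUA_RAT_PROMPT

# `choices` must already be a single string; SelfConsistencyChain._run builds
# it from a list as "(a) ... (b) ...".
filled = AQUA_RAT_PROMPT.format(
    question="What is 15% of 200?",
    choices="(a) 20 (b) 25 (c) 30 (d) 35 (e) 40",
)
print(filled.endswith("A:"))  # True; the model completes the final answer line
```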
@@ -0,0 +1,28 @@
"""Prompt for the ARC dataset."""
# From https://arxiv.org/pdf/2203.11171.pdf

from langchain.prompt import Prompt

_PROMPT_TEMPLATE = """Q: George wants to warm his hands quickly by rubbing them. Which skin surface will produce the most
heat? (a) dry palms. (b) wet palms. (c) palms covered with oil. (d) palms covered with lotion.
A: Dry surfaces will more likely cause more friction via rubbing than other smoother surfaces, hence dry
palms will produce the most heat. The answer is (a).
Q: Which factor will most likely cause a person to develop a fever? (a) a leg muscle relaxing after exercise.
(b) a bacterial population in the bloodstream. (c) several viral particles on the skin. (d) carbohydrates being
digested in the stomach.
A: Option (b), bacterial population, is the most likely cause for a person developing fever. The answer is (b).
Q: Which change in the state of water particles causes the particles to become arranged in a fixed position?
(a) boiling. (b) melting. (c) freezing. (d) evaporating.
A: When water freezes, the particles are arranged in a fixed position; the particles are still moving for all
other options. The answer is (c).
Q: When a switch is used in an electrical circuit, the switch can (a) cause the charge to build. (b) increase
and decrease the voltage. (c) cause the current to change direction. (d) stop and start the flow of current.
A: The function of a switch is to start and stop the flow of a current. The answer is (d).
Q: {question} {choices}
A:"""

ARC_PROMPT = Prompt(
    input_variables=["question", "choices"],
    template=_PROMPT_TEMPLATE,
)
@@ -0,0 +1,24 @@
"""Prompt for BoolQ."""
# From https://arxiv.org/pdf/2203.11171.pdf

from langchain.prompt import Prompt

_PROMPT_TEMPLATE = """Q: does system of a down have 2 singers?
A: System of a Down currently consists of Serj Tankian, Daron Malakian, Shavo Odadjian and John Dolmayan.
Serj and Daron do vocals, so the band does have two singers. The answer is yes.
Q: do iran and afghanistan speak the same language?
A: Iran and Afghanistan both speak the Indo-European language Persian. The answer is yes.
Q: is a cello and a bass the same thing?
A: The cello is played sitting down with the instrument between the knees, whereas the double bass is played
standing or sitting on a stool. The answer is no.
Q: can you use oyster card at epsom station?
A: Epsom railway station serves the town of Epsom in Surrey and is not in the London Oyster card zone. The
answer is no.
Q: {question}
A:"""

BOOLQ_PROMPT = Prompt(
    input_variables=["question"],
    template=_PROMPT_TEMPLATE,
)
@@ -0,0 +1,66 @@
"""Prompt for e-SNLI."""
# From https://arxiv.org/pdf/2203.11171.pdf

from langchain.prompt import Prompt

_PROMPT_TEMPLATE = """Premise:
"A person on a horse jumps over a broken down airplane."
Based on this premise, can we conclude the hypothesis "A person is training his horse for a competition." is true?
OPTIONS:
- yes
- no
- it is not possible to tell
A: The person is not necessarily training his horse. The answer is it is not possible to tell.
Premise:
"A person on a horse jumps over a broken down airplane."
Based on this premise, can we conclude the hypothesis "A person is at a diner, ordering an omelette." is true?
OPTIONS:
- yes
- no
- it is not possible to tell
A: One jumping horse cannot be in a diner ordering food. The answer is no.
Premise:
"A person on a horse jumps over a broken down airplane."
Based on this premise, can we conclude the hypothesis "A person is outdoors, on a horse." is true?
OPTIONS:
- yes
- no
- it is not possible to tell
A: A broken down airplane is outdoors. The answer is yes.
Premise:
"Children smiling and waving at camera."
Based on this premise, can we conclude the hypothesis "They are smiling at their parents." is true?
OPTIONS:
- yes
- no
- it is not possible to tell
A: Just because they are smiling and waving at a camera does not imply their parents or anyone is behind
it. The answer is it is not possible to tell.
Premise:
"Children smiling and waving at camera."
Based on this premise, can we conclude the hypothesis "The kids are frowning." is true?
OPTIONS:
- yes
- no
- it is not possible to tell
A: One cannot be smiling and frowning at the same time. The answer is no.
Premise:
"Children smiling and waving at camera."
Based on this premise, can we conclude the hypothesis "There are children present." is true?
OPTIONS:
- yes
- no
- it is not possible to tell
A: The children must be present to see them smiling and waving. The answer is yes.
Premise:
"{premise}"
Based on this premise, can we conclude the hypothesis "{hypothesis}" is true?
OPTIONS:
- yes
- no
- it is not possible to tell
A: """

ESNLI_PROMPT = Prompt(
    input_variables=["premise", "hypothesis"],
    template=_PROMPT_TEMPLATE,
)
@@ -1,11 +1,25 @@
"""Base interface for large language models to expose."""
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class CompletionOutput:
    """A completion output."""

    text: str
    """The generated text."""
    logprobs: Optional[List[float]] = None
    """The log probabilities assigned to the generated tokens, if available."""


class LLM(ABC):
    """LLM wrapper that should take in a prompt and return a string."""

    @abstractmethod
    def generate(
        self, prompt: str, stop: Optional[List[str]] = None
    ) -> List[CompletionOutput]:
        """Generate completions for the given prompt and stop sequences."""

    def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Run the LLM on the given prompt and return the first completion's text."""
        return self.generate(prompt=prompt, stop=stop)[0].text
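`SelfConsistencyLLMChain` itself is not shown in this diff. As a rough, hypothetical sketch of the aggregation it implies, one can majority-vote over the `CompletionOutput` list returned by `generate`, assuming each completion ends with "The answer is …" as in the few-shot prompts above:

```python
from collections import Counter


def majority_vote_answer(llm: LLM, prompt: str) -> str:
    """Hypothetical helper, not part of this diff: vote over sampled completions."""
    votes: Counter = Counter()
    marker = "The answer is"
    for completion in llm.generate(prompt):  # one entry per sampled reasoning path
        if marker in completion.text:
            # Keep only the final answer so distinct reasoning paths can agree.
            votes[completion.text.split(marker)[-1].strip().rstrip(".")] += 1
    return votes.most_common(1)[0][0] if votes else ""
```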