Harrison/more documentation (#19)

harrison/add_dependencies
Harrison Chase authored 2 years ago, committed by GitHub
parent 1ef3ab4d0e
commit 5621ca7b07

@@ -24,6 +24,13 @@ It aims to create:
2. a flexible interface for combining pieces into a single comprehensive "chain"
3. a schema for easily saving and sharing those chains
## Setting up your environment
Besides installing this Python package, you will also need to set environment variables for the services that call out to authenticated APIs. You do not need to set an environment variable unless you plan on using that API. The list below covers the APIs that require an API key and the environment variable to set for each; a short sketch of setting one follows the list.
- OpenAI: `OPENAI_API_KEY`
- Cohere: `COHERE_API_KEY`
- SerpAPI (Google Search): `SERPAPI_API_KEY`
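As a minimal sketch, here is one way to set such a variable from Python (the key below is a placeholder, not a real credential; substitute the key from your own account):

```python
import os

# Placeholder value; replace with the API key from your OpenAI account.
# Equivalent to running `export OPENAI_API_KEY="sk-..."` in your shell
# before starting Python.
os.environ["OPENAI_API_KEY"] = "sk-..."
```

Exporting the variable in your shell profile works just as well; the wrappers only need it present in the environment at the time the API is called.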
## 🚀 What can I do with this
This project was largely inspired by a few projects seen on Twitter that we thought would benefit from more explicit tooling. Much of the initial functionality was built in an attempt to recreate those projects:

@@ -37,10 +37,19 @@ extensions = [
"sphinx.ext.autodoc.typehints",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinxcontrib.autodoc_pydantic",
]
autodoc_pydantic_model_show_json = False
autodoc_pydantic_field_list_validators = False
autodoc_pydantic_config_members = False
autodoc_pydantic_model_show_config_summary = False
autodoc_pydantic_model_show_validator_members = False
autodoc_pydantic_model_show_field_summary = False
autodoc_pydantic_model_members = False
autodoc_pydantic_model_undoc_members = False
# autodoc_typehints = "signature"
autodoc_typehints = "description"
# autodoc_typehints = "description"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
@@ -62,4 +71,4 @@ html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_static_path: list = []

@@ -3,4 +3,4 @@
.. automodule:: langchain.llms
:members:
:undoc-members:
:special-members: __call__

@@ -3,4 +3,3 @@
.. automodule:: langchain.prompt
:members:
:undoc-members:

@@ -2,3 +2,4 @@ sphinx==4.5.0
sphinx-autobuild==2021.3.14
sphinx_rtd_theme==1.0.0
sphinx-typlog-theme==0.8.0
autodoc_pydantic==1.8.0

@@ -9,11 +9,22 @@ from langchain.prompt import Prompt
class LLMChain(Chain, BaseModel):
"""Chain to run queries against LLMs."""
"""Chain to run queries against LLMs.
Example:
.. code-block:: python
from langchain import LLMChain, OpenAI, Prompt
prompt_template = "Tell me a {adjective} joke"
prompt = Prompt(input_variables=["adjective"], template=prompt_template)
llm = LLMChain(llm=OpenAI(), prompt=prompt)
"""
prompt: Prompt
"""Prompt object to use."""
llm: LLM
output_key: str = "text"
"""LLM wrapper to use."""
output_key: str = "text" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
@@ -23,12 +34,18 @@ class LLMChain(Chain, BaseModel):
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects."""
"""Will be whatever keys the prompt expects.
:meta private:
"""
return self.prompt.input_variables
@property
def output_keys(self) -> List[str]:
"""Will always return text key."""
"""Will always return text key.
:meta private:
"""
return [self.output_key]
def _run(self, inputs: Dict[str, Any]) -> Dict[str, str]:
@@ -42,5 +59,17 @@ class LLMChain(Chain, BaseModel):
return {self.output_key: response}
def predict(self, **kwargs: Any) -> str:
"""More user-friendly interface for interacting with LLMs."""
"""Format prompt with kwargs and pass to LLM.
Args:
**kwargs: Keys to pass to prompt template.
Returns:
Completion from LLM.
Example:
.. code-block:: python
completion = llm.predict(adjective="funny")
"""
return self(kwargs)[self.output_key]

@@ -11,12 +11,21 @@ from langchain.llms.base import LLM
class LLMMathChain(Chain, BaseModel):
"""Chain that interprets a prompt and executes python code to do math."""
"""Chain that interprets a prompt and executes python code to do math.
Example:
.. code-block:: python
from langchain import LLMMathChain, OpenAI
llm_math = LLMMathChain(llm=OpenAI())
"""
llm: LLM
"""LLM wrapper to use."""
verbose: bool = False
input_key: str = "question"
output_key: str = "answer"
"""Whether to print out the code that was executed."""
input_key: str = "question" #: :meta private:
output_key: str = "answer" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
@@ -26,12 +35,18 @@ class LLMMathChain(Chain, BaseModel):
@property
def input_keys(self) -> List[str]:
"""Expect input key."""
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key."""
"""Expect output key.
:meta private:
"""
return [self.output_key]
def _run(self, inputs: Dict[str, str]) -> Dict[str, str]:
@@ -53,5 +68,17 @@ class LLMMathChain(Chain, BaseModel):
return {self.output_key: answer}
def run(self, question: str) -> str:
"""More user-friendly interface for interfacing with LLM math."""
"""Understand user question and execute math in Python if necessary.
Args:
question: User question containing a math problem to parse and answer.
Returns:
The answer to the question.
Example:
.. code-block:: python
answer = llm_math.run("What is one plus one?")
"""
return self({self.input_key: question})[self.output_key]

@@ -1,4 +1,4 @@
"""Implement a GPT-3 driven browser."""
"""Implement an LLM driven browser."""
from typing import Dict, List
from pydantic import BaseModel, Extra
@@ -11,14 +11,23 @@ from langchain.llms.openai import OpenAI
class NatBotChain(Chain, BaseModel):
"""Implement a GPT-3 driven browser."""
"""Implement an LLM driven browser.
Example:
.. code-block:: python
from langchain import NatBotChain, OpenAI
natbot = NatBotChain(llm=OpenAI(), objective="Buy me a new hat.")
"""
llm: LLM
"""LLM wrapper to use."""
objective: str
input_url_key: str = "url"
input_browser_content_key: str = "browser_content"
previous_command: str = ""
output_key: str = "command"
"""Objective that NatBot is tasked with completing."""
input_url_key: str = "url" #: :meta private:
input_browser_content_key: str = "browser_content" #: :meta private:
previous_command: str = "" #: :meta private:
output_key: str = "command" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
@@ -34,12 +43,18 @@ class NatBotChain(Chain, BaseModel):
@property
def input_keys(self) -> List[str]:
"""Expect url and browser content."""
"""Expect url and browser content.
:meta private:
"""
return [self.input_url_key, self.input_browser_content_key]
@property
def output_keys(self) -> List[str]:
"""Return command."""
"""Return command.
:meta private:
"""
return [self.output_key]
def _run(self, inputs: Dict[str, str]) -> Dict[str, str]:
@@ -57,7 +72,21 @@ class NatBotChain(Chain, BaseModel):
return {self.output_key: llm_cmd}
def run(self, url: str, browser_content: str) -> str:
"""More user-friendly interface for interfacing with natbot."""
"""Figure out next browser command to run.
Args:
url: URL of the site the browser is currently on.
browser_content: Content of the page as currently displayed by the browser.
Returns:
Next browser command to run.
Example:
.. code-block:: python
browser_content = "...."
llm_command = natbot.run("www.google.com", browser_content)
"""
_inputs = {
self.input_url_key: url,
self.input_browser_content_key: browser_content,

@@ -12,19 +12,32 @@ from langchain.chains.base import Chain
class PythonChain(Chain, BaseModel):
"""Chain to run python code."""
"""Chain to run python code.
input_key: str = "code"
output_key: str = "output"
Example:
.. code-block:: python
from langchain import PythonChain
python_chain = PythonChain()
"""
input_key: str = "code" #: :meta private:
output_key: str = "output" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Expect input key."""
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key."""
"""Return output key.
:meta private:
"""
return [self.output_key]
def _run(self, inputs: Dict[str, str]) -> Dict[str, str]:
@@ -36,5 +49,18 @@ class PythonChain(Chain, BaseModel):
return {self.output_key: output}
def run(self, code: str) -> str:
"""More user-friendly interface for interfacing with python."""
"""Run code in python interpreter.
Args:
code: Code snippet to execute; it should print out the answer.
Returns:
Answer obtained by running the code and printing the result.
Example:
.. code-block:: python
answer = python_chain.run("print(1+1)")
"""
return self({self.input_key: code})[self.output_key]

@@ -74,12 +74,22 @@ def yellowfy(_input: str) -> str:
class SelfAskWithSearchChain(Chain, BaseModel):
"""Chain that does self ask with search."""
"""Chain that does self ask with search.
Example:
.. code-block:: python
from langchain import SelfAskWithSearchChain, OpenAI, SerpAPIChain
search_chain = SerpAPIChain()
self_ask = SelfAskWithSearchChain(llm=OpenAI(), search_chain=search_chain)
"""
llm: LLM
"""LLM wrapper to use."""
search_chain: SerpAPIChain
input_key: str = "question"
output_key: str = "answer"
"""Search chain to use."""
input_key: str = "question" #: :meta private:
output_key: str = "answer" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
@@ -89,12 +99,18 @@ class SelfAskWithSearchChain(Chain, BaseModel):
@property
def input_keys(self) -> List[str]:
"""Expect input key."""
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Expect output key."""
"""Expect output key.
:meta private:
"""
return [self.output_key]
def _run(self, inputs: Dict[str, Any]) -> Dict[str, str]:
@@ -138,5 +154,17 @@ class SelfAskWithSearchChain(Chain, BaseModel):
return {self.output_key: cur_prompt + ret_text}
def run(self, question: str) -> str:
"""More user-friendly interface for interfacing with self ask with search."""
"""Run self ask with search chain.
Args:
question: Question to run self-ask-with-search with.
Returns:
The final answer.
Example:
.. code-block:: python
answer = selfask.run("What is the capital of Idaho?")
"""
return self({self.input_key: question})[self.output_key]

@@ -26,11 +26,21 @@ class HiddenPrints:
class SerpAPIChain(Chain, BaseModel):
"""Chain that calls SerpAPI."""
"""Chain that calls SerpAPI.
search_engine: Any
input_key: str = "search_query"
output_key: str = "search_result"
To use, you should have the ``google-search-results`` python package installed,
and the environment variable ``SERPAPI_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain import SerpAPIChain
serpapi = SerpAPIChain()
"""
search_engine: Any #: :meta private:
input_key: str = "search_query" #: :meta private:
output_key: str = "search_result" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
@@ -39,12 +49,18 @@ class SerpAPIChain(Chain, BaseModel):
@property
def input_keys(self) -> List[str]:
"""Return the singular input key."""
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key."""
"""Return the singular output key.
:meta private:
"""
return [self.output_key]
@root_validator()
@@ -95,5 +111,17 @@ class SerpAPIChain(Chain, BaseModel):
return {self.output_key: toret}
def search(self, search_question: str) -> str:
"""More user-friendly interface for interfacing with search."""
"""Run search query against SerpAPI.
Args:
search_question: Question to run against the SerpAPI.
Returns:
Answer from the search engine.
Example:
.. code-block:: python
answer = serpapi.search("What is the capital of Idaho?")
"""
return self({self.input_key: search_question})[self.output_key]

@@ -1,4 +1,4 @@
"""Wrappers on top of large language models."""
"""Wrappers on top of large language models APIs."""
from langchain.llms.cohere import Cohere
from langchain.llms.openai import OpenAI

@@ -16,16 +16,39 @@ def remove_stop_tokens(text: str, stop: List[str]) -> str:
class Cohere(BaseModel, LLM):
"""Wrapper around Cohere large language models."""
"""Wrapper around Cohere large language models.
client: Any
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain import Cohere
cohere = Cohere(model="gptd-instruct-tft")
"""
client: Any #: :meta private:
model: str = "gptd-instruct-tft"
"""Model name to use."""
max_tokens: int = 256
temperature: float = 0.6
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.75
"""A non-negative float that tunes the degree of randomness in generation."""
k: int = 0
"""Number of most likely tokens to consider at each step."""
p: int = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: int = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: int = 0
"""Penalizes repeated tokens."""
class Config:
"""Configuration for this pydantic object."""
@@ -52,7 +75,20 @@ class Cohere(BaseModel, LLM):
return values
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to Cohere's generate endpoint."""
"""Call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = cohere("Tell me a joke.")
"""
response = self.client.generate(
model=self.model,
prompt=prompt,

@@ -8,17 +8,35 @@ from langchain.llms.base import LLM
class OpenAI(BaseModel, LLM):
"""Wrapper around OpenAI large language models."""
"""Wrapper around OpenAI large language models.
client: Any
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain import OpenAI
openai = OpenAI(model="text-davinci-002")
"""
client: Any #: :meta private:
model_name: str = "text-davinci-002"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion."""
top_p: int = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: int = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: int = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"."""
class Config:
"""Configuration for this pydantic object."""
@@ -45,7 +63,7 @@ class OpenAI(BaseModel, LLM):
return values
@property
def default_params(self) -> Mapping[str, Any]:
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"temperature": self.temperature,
@@ -58,8 +76,21 @@ class OpenAI(BaseModel, LLM):
}
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to OpenAI's create endpoint."""
"""Call out to OpenAI's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = openai("Tell me a joke.")
"""
response = self.client.create(
model=self.model_name, prompt=prompt, stop=stop, **self.default_params
model=self.model_name, prompt=prompt, stop=stop, **self._default_params
)
return response["choices"][0]["text"]

@@ -11,11 +11,23 @@ _FORMATTER_MAPPING = {
class Prompt(BaseModel):
"""Schema to represent a prompt for an LLM."""
"""Schema to represent a prompt for an LLM.
Example:
.. code-block:: python
from langchain import Prompt
prompt = Prompt(input_variables=["foo"], template="Say {foo}")
"""
input_variables: List[str]
"""A list of the names of the variables the prompt template expects."""
template: str
"""The prompt template."""
template_format: str = "f-string"
"""The format of the prompt template. Options are: 'f-string'."""
class Config:
"""Configuration for this pydantic object."""
@@ -23,7 +35,20 @@ class Prompt(BaseModel):
extra = Extra.forbid
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs."""
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
return _FORMATTER_MAPPING[self.template_format](self.template, **kwargs)
@root_validator()
