docs[minor]: Add chat model tabs to docs pages (#19589)

Brace Sproul authored 4 months ago, committed by GitHub
parent bd02b83acd
commit ce0a588ae6

@@ -40,6 +40,33 @@
    "%pip install --upgrade --quiet langchain-core langchain-community langchain-openai"
   ]
  },
+ {
+  "cell_type": "markdown",
+  "id": "c3d54f72",
+  "metadata": {},
+  "source": [
+   "```{=mdx}\n",
+   "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
+   "\n",
+   "<ChatModelTabs openaiParams={`model=\"gpt-4\"`} />\n",
+   "```"
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": null,
+  "id": "f9eed8e8",
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "# | output: false\n",
+   "# | echo: false\n",
+   "\n",
+   "from langchain_openai import ChatOpenAI\n",
+   "\n",
+   "model = ChatOpenAI(model=\"gpt-4\")"
+  ]
+ },
  {
   "cell_type": "code",
   "execution_count": 1,
@@ -60,10 +87,8 @@
   "source": [
    "from langchain_core.output_parsers import StrOutputParser\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
-   "from langchain_openai import ChatOpenAI\n",
    "\n",
    "prompt = ChatPromptTemplate.from_template(\"tell me a short joke about {topic}\")\n",
-   "model = ChatOpenAI(model=\"gpt-4\")\n",
    "output_parser = StrOutputParser()\n",
    "\n",
    "chain = prompt | model | output_parser\n",
@@ -324,6 +349,16 @@
    "For our next example, we want to run a retrieval-augmented generation chain to add some context when responding to questions."
   ]
  },
+ {
+  "cell_type": "markdown",
+  "id": "b8fe8eb4",
+  "metadata": {},
+  "source": [
+   "```{=mdx}\n",
+   "<ChatModelTabs />\n",
+   "```"
+  ]
+ },
  {
   "cell_type": "code",
   "execution_count": null,
@@ -338,7 +373,7 @@
   "from langchain_core.output_parsers import StrOutputParser\n",
   "from langchain_core.prompts import ChatPromptTemplate\n",
   "from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
-  "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
+  "from langchain_openai import OpenAIEmbeddings\n",
   "\n",
   "vectorstore = DocArrayInMemorySearch.from_texts(\n",
   "    [\"harrison worked at kensho\", \"bears like to eat honey\"],\n",
@@ -352,7 +387,6 @@
   "Question: {question}\n",
   "\"\"\"\n",
   "prompt = ChatPromptTemplate.from_template(template)\n",
-  "model = ChatOpenAI()\n",
   "output_parser = StrOutputParser()\n",
   "\n",
   "setup_and_retrieval = RunnableParallel(\n",
@@ -495,7 +529,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.11.4"
+  "version": "3.11.0"
  }
 },
 "nbformat": 4,

File diff suppressed because it is too large

@@ -12,19 +12,44 @@
    "It can speed up your application by reducing the number of API calls you make to the LLM provider.\n"
   ]
  },
+ {
+  "cell_type": "markdown",
+  "id": "289b31de",
+  "metadata": {},
+  "source": [
+   "```{=mdx}\n",
+   "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
+   "\n",
+   "<ChatModelTabs customVarName=\"llm\" />\n",
+   "```"
+  ]
+ },
  {
   "cell_type": "code",
-  "execution_count": 1,
-  "id": "5472a032",
+  "execution_count": null,
+  "id": "c6641f37",
   "metadata": {},
   "outputs": [],
   "source": [
-   "from langchain.globals import set_llm_cache\n",
+   "# | output: false\n",
+   "# | echo: false\n",
+   "\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI()"
   ]
  },
+ {
+  "cell_type": "code",
+  "execution_count": 1,
+  "id": "5472a032",
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "# <!-- ruff: noqa: F821 -->\n",
+   "from langchain.globals import set_llm_cache"
+  ]
+ },
  {
   "cell_type": "markdown",
   "id": "357b89a8",

@@ -18,13 +18,13 @@ structured outputs from models more generally.
LangChain comes with a number of utilities to make function-calling
easy. Namely, it comes with:

- simple syntax for binding functions to models
- converters for formatting various types of objects to the expected
  function schemas
- output parsers for extracting the function invocations from API
  responses
- chains for getting structured outputs from a model, built on top of
  function calling

We'll focus here on the first two points. For a detailed guide on output
parsing check out the [OpenAI Tools output
@@ -38,7 +38,6 @@ Before getting started make sure you have `langchain-core` installed.
%pip install -qU langchain-core langchain-openai
```

```python
import getpass
import os
@@ -64,38 +63,26 @@ class Multiply(BaseModel):
    b: int = Field(..., description="Second integer")
```

-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+import ChatModelTabs from "@theme/ChatModelTabs";

-<Tabs>
-  <TabItem value="openai" label="OpenAI" default>
-
-Set up dependencies and API keys:
-
-```python
-%pip install -qU langchain-openai
-```
-
-```python
-os.environ["OPENAI_API_KEY"] = getpass.getpass()
-```
+<ChatModelTabs
+  customVarName="llm"
+  fireworksParams={`model="accounts/fireworks/models/firefunction-v1", temperature=0`}
+/>

-We can use the `ChatOpenAI.bind_tools()` method to handle converting
-`Multiply` to an OpenAI function and binding it to the model (i.e.,
+We can use the `bind_tools()` method to handle converting
+`Multiply` to a "function" and binding it to the model (i.e.,
passing it in each time the model is invoked).

```python
-from langchain_openai import ChatOpenAI
-
-llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
llm_with_tools = llm.bind_tools([Multiply])
llm_with_tools.invoke("what's 3 * 12")
```

-``` text
+```text
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Q8ZQ97Qrj5zalugSkYMGV1Uo', 'function': {'arguments': '{"a":3,"b":12}', 'name': 'Multiply'}, 'type': 'function'}]})
```
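
After this change the snippet assumes `llm` comes from the tab component. A self-contained sketch, with `ChatOpenAI` standing in for the tab selection:

```python
# Runnable sketch of the bind_tools flow above; the ChatOpenAI instance is
# an assumption standing in for whichever provider tab is selected.
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class Multiply(BaseModel):
    """Multiply two integers together."""

    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")


llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
llm_with_tools = llm.bind_tools([Multiply])

msg = llm_with_tools.invoke("what's 3 * 12")
print(msg.additional_kwargs["tool_calls"])
```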
@@ -109,7 +96,7 @@ tool_chain = llm_with_tools | JsonOutputToolsParser()
tool_chain.invoke("what's 3 * 12")
```

-``` text
+```text
[{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]
```
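
Continuing the sketch above, both parsers this page discusses can be piped directly onto the tool-bound model:

```python
# Continues the previous sketch (reuses `llm_with_tools` and `Multiply`).
from langchain_core.output_parsers.openai_tools import (
    JsonOutputToolsParser,
    PydanticToolsParser,
)

json_chain = llm_with_tools | JsonOutputToolsParser()
print(json_chain.invoke("what's 3 * 12"))  # [{'type': 'Multiply', 'args': ...}]

pydantic_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
print(pydantic_chain.invoke("what's 3 * 12"))  # [Multiply(a=3, b=12)]
```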
@@ -122,57 +109,10 @@ tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
tool_chain.invoke("what's 3 * 12")
```

-``` text
+```text
[Multiply(a=3, b=12)]
```

-If we wanted to force that a tool is used (and that it is used only
-once), we can set the `tool_choice` argument:
-
-```python
-llm_with_multiply = llm.bind_tools([Multiply], tool_choice="Multiply")
-llm_with_multiply.invoke(
-    "make up some numbers if you really want but I'm not forcing you"
-)
-```
-
-``` text
-AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_f3DApOzb60iYjTfOhVFhDRMI', 'function': {'arguments': '{"a":5,"b":10}', 'name': 'Multiply'}, 'type': 'function'}]})
-```
-
-For more see the [ChatOpenAI API
-reference](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html#langchain_openai.chat_models.base.ChatOpenAI.bind_tools).
-
-</TabItem>
-<TabItem value="fireworks" label="Fireworks">
-
-Install dependencies and set API keys:
-
-```python
-%pip install -qU langchain-fireworks
-```
-
-```python
-os.environ["FIREWORKS_API_KEY"] = getpass.getpass()
-```
-
-We can use the `ChatFireworks.bind_tools()` method to handle converting
-`Multiply` to a valid function schema and binding it to the model (i.e.,
-passing it in each time the model is invoked).
-
-```python
-from langchain_fireworks import ChatFireworks
-
-llm = ChatFireworks(model="accounts/fireworks/models/firefunction-v1", temperature=0)
-llm_with_tools = llm.bind_tools([Multiply])
-llm_with_tools.invoke("what's 3 * 12")
-```
-
-``` text
-AIMessage(content='Three multiplied by twelve is 36.')
-```
If our model isn't using the tool, as is the case here, we can force
tool usage by specifying `tool_choice="any"` or by specifying the name
of the specific tool we want used:
@@ -182,175 +122,12 @@ llm_with_tools = llm.bind_tools([Multiply], tool_choice="Multiply")
llm_with_tools.invoke("what's 3 * 12")
```

-``` text
+```text
AIMessage(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_qIP2bJugb67LGvc6Zhwkvfqc', 'type': 'function', 'function': {'name': 'Multiply', 'arguments': '{"a": 3, "b": 12}'}}]})
```
-We can add a tool parser to extract the tool calls from the generated
-message to JSON:
-
-```python
-from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser
-
-tool_chain = llm_with_tools | JsonOutputToolsParser()
-tool_chain.invoke("what's 3 * 12")
-```
-
-``` text
-[{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]
-```
-
-Or back to the original Pydantic class:
-
-```python
-from langchain_core.output_parsers.openai_tools import PydanticToolsParser
-
-tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
-tool_chain.invoke("what's 3 * 12")
-```
-
-``` text
-[Multiply(a=3, b=12)]
-```
-
-For more see the [ChatFireworks](https://api.python.langchain.com/en/latest/chat_models/langchain_fireworks.chat_models.ChatFireworks.html#langchain_fireworks.chat_models.ChatFireworks.bind_tools) reference.
-
-</TabItem>
-<TabItem value="mistral" label="Mistral">
-
-Install dependencies and set API keys:
-
-```python
-%pip install -qU langchain-mistralai
-```
-
-```python
-os.environ["MISTRAL_API_KEY"] = getpass.getpass()
-```
-
-We can use the `ChatMistralAI.bind_tools()` method to handle converting
-`Multiply` to a valid function schema and binding it to the model (i.e.,
-passing it in each time the model is invoked).
-
-```python
-from langchain_mistralai import ChatMistralAI
-
-llm = ChatMistralAI(model="mistral-large-latest", temperature=0)
-llm_with_tools = llm.bind_tools([Multiply])
-llm_with_tools.invoke("what's 3 * 12")
-```
-
-``` text
-AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'null', 'type': <ToolType.function: 'function'>, 'function': {'name': 'Multiply', 'arguments': '{"a": 3, "b": 12}'}}]})
-```
-
-We can add a tool parser to extract the tool calls from the generated
-message to JSON:
-
-```python
-from langchain_core.output_parsers.openai_tools import JsonOutputToolsParser
-
-tool_chain = llm_with_tools | JsonOutputToolsParser()
-tool_chain.invoke("what's 3 * 12")
-```
-
-``` text
-[{'type': 'Multiply', 'args': {'a': 3, 'b': 12}}]
-```
-
-Or back to the original Pydantic class:
-
-```python
-from langchain_core.output_parsers.openai_tools import PydanticToolsParser
-
-tool_chain = llm_with_tools | PydanticToolsParser(tools=[Multiply])
-tool_chain.invoke("what's 3 * 12")
-```
-
-``` text
-[Multiply(a=3, b=12)]
-```
-
-We can force tool usage by specifying `tool_choice="any"`:
-
-```python
-llm_with_tools = llm.bind_tools([Multiply], tool_choice="any")
-llm_with_tools.invoke("I don't even want you to use the tool")
-```
-
-``` text
-AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'null', 'type': <ToolType.function: 'function'>, 'function': {'name': 'Multiply', 'arguments': '{"a": 5, "b": 7}'}}]})
-```
-
-For more see the [ChatMistralAI API reference](https://api.python.langchain.com/en/latest/chat_models/langchain_mistralai.chat_models.ChatMistralAI.html#langchain_mistralai.chat_models.ChatMistralAI).
-
-</TabItem>
-<TabItem value="together" label="Together">
-
-Since TogetherAI is a drop-in replacement for OpenAI, we can just use
-the OpenAI integration.
-
-Install dependencies and set API keys:
-
-```python
-%pip install -qU langchain-openai
-```
-
-```python
-os.environ["TOGETHER_API_KEY"] = getpass.getpass()
-```
-
-We can use the `ChatOpenAI.bind_tools()` method to handle converting
-`Multiply` to a valid function schema and binding it to the model (i.e.,
-passing it in each time the model is invoked).
-
-```python
-from langchain_openai import ChatOpenAI
-
-llm = ChatOpenAI(
-    base_url="https://api.together.xyz/v1",
-    api_key=os.environ["TOGETHER_API_KEY"],
-    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
-)
-llm_with_tools = llm.bind_tools([Multiply])
-llm_with_tools.invoke("what's 3 * 12")
-```
-
-``` text
-AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_4tc61dp0478zafqe33hfriee', 'function': {'arguments': '{"a":3,"b":12}', 'name': 'Multiply'}, 'type': 'function'}]})
-```
If we wanted to force that a tool is used (and that it is used only
-once), we can set the `tool_choice` argument:
+once), we can set the `tool_choice` argument to the name of the tool:

```python
llm_with_multiply = llm.bind_tools([Multiply], tool_choice="Multiply")
@@ -359,16 +136,13 @@ llm_with_multiply.invoke(
)
```

-``` text
-AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_6k6d0gr3jhqil2kqf7sgeusl', 'function': {'arguments': '{"a":5,"b":7}', 'name': 'Multiply'}, 'type': 'function'}]})
+```text
+AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_f3DApOzb60iYjTfOhVFhDRMI', 'function': {'arguments': '{"a":5,"b":10}', 'name': 'Multiply'}, 'type': 'function'}]})
```
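
A hedged follow-on to the snippet above, going one step further than the page by parsing the forced call and executing it locally (reuses `llm` and `Multiply` from the earlier sketch):

```python
# Parse the forced Multiply call and execute it; assumes the `llm` and
# `Multiply` definitions from the sketches above.
from langchain_core.output_parsers.openai_tools import PydanticToolsParser

llm_with_multiply = llm.bind_tools([Multiply], tool_choice="Multiply")
chain = llm_with_multiply | PydanticToolsParser(tools=[Multiply])

call = chain.invoke("pick any two numbers")[0]  # a Multiply instance
print(call.a * call.b)
```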
For more see the [ChatOpenAI API
reference](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html#langchain_openai.chat_models.base.ChatOpenAI.bind_tools).

-</TabItem>
-</Tabs>

## Defining function schemas

In case you need to access function schemas directly, LangChain has a built-in converter that can turn
@@ -395,7 +169,7 @@ def multiply(a: int, b: int) -> int:
print(json.dumps(convert_to_openai_tool(multiply), indent=2))
```

-``` text
+```text
{
  "type": "function",
  "function": {
@@ -438,7 +212,7 @@ class multiply(BaseModel):
print(json.dumps(convert_to_openai_tool(multiply), indent=2))
```

-``` text
+```text
{
  "type": "function",
  "function": {
@@ -493,7 +267,7 @@ class Multiply(BaseTool):
print(json.dumps(convert_to_openai_tool(Multiply()), indent=2))
```

-``` text
+```text
{
  "type": "function",
  "function": {
@@ -522,14 +296,14 @@ print(json.dumps(convert_to_openai_tool(Multiply()), indent=2))
## Next steps

- **Output parsing**: See [OpenAI Tools output
  parsers](../../../../docs/modules/model_io/output_parsers/types/openai_tools)
  and [OpenAI Functions output
  parsers](../../../../docs/modules/model_io/output_parsers/types/openai_functions)
  to learn about extracting the function calling API responses into
  various formats.
- **Structured output chains**: [Some models have constructors](../../../../docs/guides/structured_output) that
  handle creating a structured output chain for you.
- **Tool use**: See how to construct chains and agents that actually
  call the invoked tools in [these
  guides](../../../../docs/use_cases/tool_use/).

@@ -22,32 +22,19 @@
    "While chat models use language models under the hood, the interface they use is a bit different.\n",
    "Rather than using a \"text in, text out\" API, they use an interface where \"chat messages\" are the inputs and outputs.\n",
    "\n",
-   "## Setup\n",
-   "\n",
-   "For this example we'll need to install the OpenAI partner package:\n",
-   "\n",
-   "```bash\n",
-   "pip install langchain-openai\n",
-   "```\n",
-   "\n",
-   "Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running:\n",
-   "\n",
-   "```bash\n",
-   "export OPENAI_API_KEY=\"...\"\n",
-   "```\n",
-   "\n",
-   "If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:\n"
+   "## Setup\n"
   ]
  },
  {
-  "cell_type": "code",
-  "execution_count": null,
+  "cell_type": "markdown",
   "id": "e230abb2-bc84-438b-b9ff-dd124acb1375",
   "metadata": {},
-  "outputs": [],
   "source": [
-   "from langchain_openai import ChatOpenAI\n",
+   "```{=mdx}\n",
+   "import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
    "\n",
-   "chat = ChatOpenAI(openai_api_key=\"...\")"
+   "<ChatModelTabs customVarName=\"chat\" />\n",
+   "```"
   ]
  },
  {
@@ -55,19 +42,25 @@
   "id": "609bbd5c-e5a1-4166-89e1-d6c52054860d",
   "metadata": {},
   "source": [
-   "Otherwise you can initialize without any params:"
+   "If you'd prefer not to set an environment variable, you can pass the key in directly via the API key named parameter when initializing the chat model class:"
   ]
  },
  {
-  "cell_type": "code",
-  "execution_count": 1,
+  "cell_type": "markdown",
   "id": "3d9dbf70-2397-4d6b-87ec-3e6d4699f3df",
   "metadata": {},
-  "outputs": [],
   "source": [
-   "from langchain_openai import ChatOpenAI\n",
-   "\n",
-   "chat = ChatOpenAI()"
+   "```{=mdx}\n",
+   "<ChatModelTabs\n",
+   "  openaiParams={`model=\"gpt-3.5-turbo-0125\", openai_api_key=\"...\"`}\n",
+   "  anthropicParams={`model=\"claude-3-sonnet-20240229\", anthropic_api_key=\"...\"`}\n",
+   "  fireworksParams={`model=\"accounts/fireworks/models/mixtral-8x7b-instruct\", fireworks_api_key=\"...\"`}\n",
+   "  mistralParams={`model=\"mistral-large-latest\", mistral_api_key=\"...\"`}\n",
+   "  googleParams={`model=\"gemini-pro\", google_api_key=\"...\"`}\n",
+   "  togetherParams={`, together_api_key=\"...\"`}\n",
+   "  customVarName=\"chat\"\n",
+   "/>\n",
+   "```"
   ]
  },
  {
@@ -108,6 +101,21 @@
    "]"
   ]
  },
+ {
+  "cell_type": "code",
+  "execution_count": null,
+  "id": "570dae71",
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "# | output: false\n",
+   "# | echo: false\n",
+   "\n",
+   "from langchain_openai import ChatOpenAI\n",
+   "\n",
+   "chat = ChatOpenAI()"
+  ]
+ },
  {
   "cell_type": "code",
   "execution_count": 11,

@@ -516,7 +516,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.10.4"
+  "version": "3.11.0"
  }
 },
 "nbformat": 4,

@@ -24,7 +24,7 @@ introduction](../../../docs/use_cases/question_answering/), which has
two main components:

**Indexing**: a pipeline for ingesting data from a source and indexing
-it. *This usually happens offline.*
+it. _This usually happens offline._

**Retrieval and generation**: the actual RAG chain, which takes the user
query at run time and retrieves the relevant data from the index, then
@@ -77,7 +77,7 @@ We'll use the following packages:
%pip install --upgrade --quiet langchain langchain-community langchainhub langchain-openai chromadb bs4
```

-We need to set environment variable `OPENAI_API_KEY`, which can be done
+We need to set environment variable `OPENAI_API_KEY` for the embeddings model, which can be done
directly or loaded from a `.env` file like so:

```python
@@ -125,10 +125,13 @@ from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
-from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
```

+import ChatModelTabs from "@theme/ChatModelTabs";
+
+<ChatModelTabs customVarName="llm" />

```python
# Load, chunk and index the contents of the blog.
@@ -149,8 +152,6 @@ vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings
# Retrieve and generate using the relevant snippets of the blog.
retriever = vectorstore.as_retriever()
prompt = hub.pull("rlm/rag-prompt")
-llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)
@@ -164,12 +165,11 @@ rag_chain = (
)
```

```python
rag_chain.invoke("What is Task Decomposition?")
```

-``` text
+```text
'Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. It can be done through prompting techniques like Chain of Thought or Tree of Thoughts, or by using task-specific instructions or human inputs. Task decomposition helps agents plan ahead and manage complicated tasks more effectively.'
```
@@ -219,12 +219,11 @@ loader = WebBaseLoader(
docs = loader.load()
```

```python
len(docs[0].page_content)
```

-``` text
+```text
42824
```
@@ -232,7 +231,7 @@ len(docs[0].page_content)
print(docs[0].page_content[:500])
```

-``` text
+```text
LLM Powered Autonomous Agents
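
For reviewers wanting to run the refactored pipeline end to end, a hedged, condensed sketch follows; `ChatOpenAI` stands in for the tab selection, and a tiny `Chroma.from_texts` index replaces the page's blog-post index:

```python
# Condensed sketch of the chain this page assembles; the model and the
# one-document index are assumptions made to keep the example small.
from langchain import hub
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
vectorstore = Chroma.from_texts(
    ["Task decomposition breaks big tasks into smaller steps."],
    embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()
prompt = hub.pull("rlm/rag-prompt")  # the hub prompt used on this page


def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)


rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
print(rag_chain.invoke("What is Task Decomposition?"))
```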
@@ -249,12 +248,13 @@ In

`DocumentLoader`: Object that loads data from a source as list of
`Documents`.

- [Docs](../../../docs/modules/data_connection/document_loaders/):
  Detailed documentation on how to use `DocumentLoaders`.
- [Integrations](../../../docs/integrations/document_loaders/): 160+
  integrations to choose from.
- [Interface](https://api.python.langchain.com/en/latest/document_loaders/langchain_core.document_loaders.base.BaseLoader.html):
  API reference for the base interface.

## 2. Indexing: Split {#indexing-split}
@@ -289,12 +289,11 @@ text_splitter = RecursiveCharacterTextSplitter(
all_splits = text_splitter.split_documents(docs)
```

```python
len(all_splits)
```

-``` text
+```text
66
```
@@ -302,7 +301,7 @@ len(all_splits)
len(all_splits[0].page_content)
```

-``` text
+```text
969
```
@@ -310,7 +309,7 @@ len(all_splits[0].page_content)
all_splits[10].metadata
```

-``` text
+```text
{'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/',
 'start_index': 7056}
```
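
These hunks only retouch output fences; for reference, the splitting step they surround looks roughly like this (parameters taken from the page, input document invented so the sketch runs offline):

```python
# Standalone sketch of the indexing/split step; the synthetic Document is
# an assumption replacing the page's WebBaseLoader output.
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter

docs = [Document(page_content="word " * 2000, metadata={"source": "example"})]

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=200, add_start_index=True
)
all_splits = text_splitter.split_documents(docs)

print(len(all_splits), all_splits[0].metadata)  # chunk count, source + start_index
```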
@@ -319,15 +318,17 @@ all_splits[10].metadata

`TextSplitter`: Object that splits a list of `Document`s into smaller
chunks. Subclass of `DocumentTransformer`s.

- Explore `Context-aware splitters`, which keep the location (“context”) of each
  split in the original `Document`:
  - [Markdown files](../../../docs/modules/data_connection/document_transformers/markdown_header_metadata)
  - [Code (py or js)](../../../docs/integrations/document_loaders/source_code)
  - [Scientific papers](../../../docs/integrations/document_loaders/grobid)
- [Interface](https://api.python.langchain.com/en/latest/base/langchain_text_splitters.base.TextSplitter.html): API reference for the base interface.

`DocumentTransformer`: Object that performs a transformation on a list
of `Document`s.

- [Docs](../../../docs/modules/data_connection/document_transformers/): Detailed documentation on how to use `DocumentTransformers`
- [Integrations](../../../docs/integrations/document_transformers/)
- [Interface](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.transformers.BaseDocumentTransformer.html): API reference for the base interface.
@@ -361,12 +362,14 @@ vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbedd

`Embeddings`: Wrapper around a text embedding model, used for converting
text to embeddings.

- [Docs](../../../docs/modules/data_connection/text_embedding): Detailed documentation on how to use embeddings.
- [Integrations](../../../docs/integrations/text_embedding/): 30+ integrations to choose from.
- [Interface](https://api.python.langchain.com/en/latest/embeddings/langchain_core.embeddings.Embeddings.html): API reference for the base interface.

`VectorStore`: Wrapper around a vector database, used for storing and
querying embeddings.

- [Docs](../../../docs/modules/data_connection/vectorstores/): Detailed documentation on how to use vector stores.
- [Integrations](../../../docs/integrations/vectorstores/): 40+ integrations to choose from.
- [Interface](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html): API reference for the base interface.
@@ -399,17 +402,15 @@ facilitate retrieval. Any `VectorStore` can easily be turned into a
retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 6})
```

```python
retrieved_docs = retriever.invoke("What are the approaches to Task Decomposition?")
```

```python
len(retrieved_docs)
```

-``` text
+```text
6
```
@@ -417,7 +418,7 @@ len(retrieved_docs)
print(retrieved_docs[0].page_content)
```

-``` text
+```text
Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.
Task decomposition can be done (1) by LLM with simple prompting like "Steps for XYZ.\n1.", "What are the subgoals for achieving XYZ?", (2) by using task-specific instructions; e.g. "Write a story outline." for writing a novel, or (3) with human inputs.
```
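
The retrieval step exercised above, as a hedged standalone sketch; the in-memory store is an assumption replacing the page's Chroma index:

```python
# Standalone sketch of the retrieval step; DocArrayInMemorySearch stands in
# for the page's Chroma index, and OpenAIEmbeddings assumes OPENAI_API_KEY.
from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain_openai import OpenAIEmbeddings

vectorstore = DocArrayInMemorySearch.from_texts(
    ["Task decomposition splits a big task into smaller steps."] * 6,
    embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 6})

retrieved_docs = retriever.invoke("What are the approaches to Task Decomposition?")
print(len(retrieved_docs))
```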
@@ -429,27 +430,27 @@ to do retrieval, too.

`Retriever`: An object that returns `Document`s given a text query

- [Docs](../../../docs/modules/data_connection/retrievers/): Further
  documentation on the interface and built-in retrieval techniques.
  Some of which include:
  - `MultiQueryRetriever` [generates variants of the input
    question](../../../docs/modules/data_connection/retrievers/MultiQueryRetriever)
    to improve retrieval hit rate.
  - `MultiVectorRetriever` (diagram below) instead generates
    [variants of the
    embeddings](../../../docs/modules/data_connection/retrievers/multi_vector),
    also in order to improve retrieval hit rate.
  - `Max marginal relevance` selects for [relevance and
    diversity](https://www.cs.cmu.edu/~jgc/publication/The_Use_MMR_Diversity_Based_LTMIR_1998.pdf)
    among the retrieved documents to avoid passing in duplicate
    context.
  - Documents can be filtered during vector store retrieval using
    metadata filters, such as with a [Self Query
    Retriever](../../../docs/modules/data_connection/retrievers/self_query).
- [Integrations](../../../docs/integrations/retrievers/): Integrations
  with retrieval services.
- [Interface](https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html):
  API reference for the base interface.

## 5. Retrieval and Generation: Generate {#retrieval-and-generation-generate}
@@ -460,34 +461,13 @@ parses the output.

We'll use the gpt-3.5-turbo OpenAI chat model, but any LangChain `LLM`
or `ChatModel` could be substituted in.

-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";

-<Tabs>
-  <TabItem value="openai" label="OpenAI" default>
-
-```python
-from langchain_openai import ChatOpenAI
-
-llm = ChatOpenAI(model_name="gpt-3.5-turbo-0125", temperature=0)
-```
-
-</TabItem>
-<TabItem value="local" label="Anthropic">
-
-```python
-%pip install -qU langchain-anthropic
-```
-
-```python
-from langchain_anthropic import ChatAnthropic
-
-llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0.2, max_tokens=1024)
-```
-
-</TabItem>
-</Tabs>
+<ChatModelTabs
+  customVarName="llm"
+  anthropicParams={`"model="claude-3-sonnet-20240229", temperature=0.2, max_tokens=1024"`}
+/>

We'll use a prompt for RAG that is checked into the LangChain prompt hub
([here](https://smith.langchain.com/hub/rlm/rag-prompt)).
@@ -498,7 +478,6 @@ from langchain import hub
prompt = hub.pull("rlm/rag-prompt")
```

```python
example_messages = prompt.invoke(
    {"context": "filler context", "question": "filler question"}
@@ -506,7 +485,7 @@ example_messages = prompt.invoke(
example_messages
```

-``` text
+```text
[HumanMessage(content="You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.\nQuestion: filler question \nContext: filler context \nAnswer:")]
```
@@ -514,7 +493,7 @@ example_messages
print(example_messages[0].content)
```

-``` text
+```text
You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
Question: filler question
Context: filler context
@@ -543,13 +522,12 @@ rag_chain = (
)
```

```python
for chunk in rag_chain.stream("What is Task Decomposition?"):
    print(chunk, end="", flush=True)
```

-``` text
+```text
Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. It involves transforming big tasks into multiple manageable tasks, allowing for easier interpretation and execution by autonomous agents or models. Task decomposition can be done through various methods, such as using prompting techniques, task-specific instructions, or human inputs.
```
@@ -562,11 +540,13 @@ trace](https://smith.langchain.com/public/1799e8db-8a6d-4eb2-84d5-46e8d7d5a99b/r

`ChatModel`: An LLM-backed chat model. Takes in a sequence of messages
and returns a message.

- [Docs](../../../docs/modules/model_io/chat/)
- [Integrations](../../../docs/integrations/chat/): 25+ integrations to choose from.
- [Interface](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.chat_models.BaseChatModel.html): API reference for the base interface.

`LLM`: A text-in-text-out LLM. Takes in a string and returns a string.

- [Docs](../../../docs/modules/model_io/llms)
- [Integrations](../../../docs/integrations/llms): 75+ integrations to choose from.
- [Interface](https://api.python.langchain.com/en/latest/language_models/langchain_core.language_models.llms.BaseLLM.html): API reference for the base interface.
@@ -605,7 +585,7 @@ rag_chain = (
rag_chain.invoke("What is Task Decomposition?")
```

-``` text
+```text
'Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. It involves transforming big tasks into multiple manageable tasks, allowing for a more systematic and organized approach to problem-solving. Thanks for asking!'
```
@@ -619,11 +599,11 @@ plenty of features, integrations, and extensions to explore in each of
the above sections. Along with the **Go deeper** sources mentioned
above, good next steps include:

- [Return
  sources](../../../docs/use_cases/question_answering/sources): Learn
  how to return source documents
- [Streaming](../../../docs/use_cases/question_answering/streaming):
  Learn how to stream outputs and intermediate steps
- [Add chat
  history](../../../docs/use_cases/question_answering/chat_history):
  Learn how to add chat history to your app

@@ -1,4 +1,4 @@
-/* eslint-disable react/jsx-props-no-spreading */
+/* eslint-disable react/jsx-props-no-spreading, react/destructuring-assignment */
import React from "react";
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
@@ -20,7 +20,24 @@ os.environ["${apiKeyName}"] = getpass.getpass()`;
}

/**
- * @param {{ openaiParams?: string, anthropicParams?: string, fireworksParams?: string, mistralParams?: string, googleParams?: string, hideOpenai?: boolean, hideAnthropic?: boolean, hideFireworks?: boolean, hideMistral?: boolean, hideGoogle?: boolean }} props
+ * @typedef {Object} ChatModelTabsProps - Component props.
+ * @property {string} [openaiParams] - Parameters for OpenAI chat model. Defaults to `model="gpt-3.5-turbo-0125"`
+ * @property {string} [anthropicParams] - Parameters for Anthropic chat model. Defaults to `model="claude-3-sonnet-20240229"`
+ * @property {string} [fireworksParams] - Parameters for Fireworks chat model. Defaults to `model="accounts/fireworks/models/mixtral-8x7b-instruct"`
+ * @property {string} [mistralParams] - Parameters for Mistral chat model. Defaults to `model="mistral-large-latest"`
+ * @property {string} [googleParams] - Parameters for Google chat model. Defaults to `model="gemini-pro"`
+ * @property {string} [togetherParams] - Parameters for Together chat model. Defaults to the Together `base_url`/`api_key`/`model` snippet below.
+ * @property {boolean} [hideOpenai] - Whether or not to hide OpenAI chat model.
+ * @property {boolean} [hideAnthropic] - Whether or not to hide Anthropic chat model.
+ * @property {boolean} [hideFireworks] - Whether or not to hide Fireworks chat model.
+ * @property {boolean} [hideMistral] - Whether or not to hide Mistral chat model.
+ * @property {boolean} [hideGoogle] - Whether or not to hide Google chat model.
+ * @property {boolean} [hideTogether] - Whether or not to hide Together chat model.
+ * @property {string} [customVarName] - Custom variable name for the model. Defaults to `model`.
+ */
+
+/**
+ * @param {ChatModelTabsProps} props - Component props.
 */
export default function ChatModelTabs(props) {
  const {
@@ -29,24 +46,36 @@ export default function ChatModelTabs(props) {
    fireworksParams,
    mistralParams,
    googleParams,
+   togetherParams,
    hideOpenai,
    hideAnthropic,
    hideFireworks,
    hideMistral,
    hideGoogle,
+   hideTogether,
+   customVarName,
  } = props;

-  const openAIParamsOrDefault = openaiParams ?? `model="gpt-3.5-turbo-0125"`
-  const anthropicParamsOrDefault = anthropicParams ?? `model="claude-3-sonnet-20240229"`
-  const fireworksParamsOrDefault = fireworksParams ?? `model="accounts/fireworks/models/mixtral-8x7b-instruct"`
-  const mistralParamsOrDefault = mistralParams ?? `model="mistral-large-latest"`
-  const googleParamsOrDefault = googleParams ?? `model="gemini-pro"`
+  const openAIParamsOrDefault = openaiParams ?? `model="gpt-3.5-turbo-0125"`;
+  const anthropicParamsOrDefault =
+    anthropicParams ?? `model="claude-3-sonnet-20240229"`;
+  const fireworksParamsOrDefault =
+    fireworksParams ??
+    `model="accounts/fireworks/models/mixtral-8x7b-instruct"`;
+  const mistralParamsOrDefault =
+    mistralParams ?? `model="mistral-large-latest"`;
+  const googleParamsOrDefault = googleParams ?? `model="gemini-pro"`;
+  const togetherParamsOrDefault =
+    togetherParams ??
+    `\n    base_url="https://api.together.xyz/v1",\n    api_key=os.environ["TOGETHER_API_KEY"],\n    model="mistralai/Mixtral-8x7B-Instruct-v0.1",`;
+
+  const llmVarName = customVarName ?? "model";

  const tabItems = [
    {
      value: "OpenAI",
      label: "OpenAI",
-      text: `from langchain_openai import ChatOpenAI\n\nmodel = ChatOpenAI(${openAIParamsOrDefault})`,
+      text: `from langchain_openai import ChatOpenAI\n\n${llmVarName} = ChatOpenAI(${openAIParamsOrDefault})`,
      apiKeyName: "OPENAI_API_KEY",
      packageName: "langchain-openai",
      default: true,
@@ -55,7 +84,7 @@ export default function ChatModelTabs(props) {
    {
      value: "Anthropic",
      label: "Anthropic",
-      text: `from langchain_anthropic import ChatAnthropic\n\nmodel = ChatAnthropic(${anthropicParamsOrDefault})`,
+      text: `from langchain_anthropic import ChatAnthropic\n\n${llmVarName} = ChatAnthropic(${anthropicParamsOrDefault})`,
      apiKeyName: "ANTHROPIC_API_KEY",
      packageName: "langchain-anthropic",
      default: false,
@@ -64,7 +93,7 @@ export default function ChatModelTabs(props) {
    {
      value: "FireworksAI",
      label: "FireworksAI",
-      text: `from langchain_fireworks import ChatFireworks\n\nmodel = ChatFireworks(${fireworksParamsOrDefault})`,
+      text: `from langchain_fireworks import ChatFireworks\n\n${llmVarName} = ChatFireworks(${fireworksParamsOrDefault})`,
      apiKeyName: "FIREWORKS_API_KEY",
      packageName: "langchain-fireworks",
      default: false,
@@ -73,7 +102,7 @@ export default function ChatModelTabs(props) {
    {
      value: "MistralAI",
      label: "MistralAI",
-      text: `from langchain_mistralai import ChatMistralAI\n\nmodel = ChatMistralAI(${mistralParamsOrDefault})`,
+      text: `from langchain_mistralai import ChatMistralAI\n\n${llmVarName} = ChatMistralAI(${mistralParamsOrDefault})`,
      apiKeyName: "MISTRAL_API_KEY",
      packageName: "langchain-mistralai",
      default: false,
@@ -82,22 +111,40 @@ export default function ChatModelTabs(props) {
    {
      value: "Google",
      label: "Google",
-      text: `from langchain_google_genai import ChatGoogleGenerativeAI\n\nmodel = ChatGoogleGenerativeAI(${googleParamsOrDefault})`,
+      text: `from langchain_google_genai import ChatGoogleGenerativeAI\n\n${llmVarName} = ChatGoogleGenerativeAI(${googleParamsOrDefault})`,
      apiKeyName: "GOOGLE_API_KEY",
      packageName: "langchain-google-genai",
      default: false,
      shouldHide: hideGoogle,
-    }
-  ]
+    },
+    {
+      value: "TogetherAI",
+      label: "TogetherAI",
+      text: `from langchain_openai import ChatOpenAI\n\n${llmVarName} = ChatOpenAI(${togetherParamsOrDefault})`,
+      apiKeyName: "TOGETHER_API_KEY",
+      packageName: "langchain-openai",
+      default: false,
+      shouldHide: hideTogether,
+    },
+  ];

  return (
    <Tabs groupId="modelTabs">
-      {tabItems.filter((tabItem) => !tabItem.shouldHide).map((tabItem) => (
-        <TabItem value={tabItem.value} label={tabItem.label} default={tabItem.default}>
-          <Setup apiKeyName={tabItem.apiKeyName} packageName={tabItem.packageName} />
-          <CodeBlock language="python">{tabItem.text}</CodeBlock>
-        </TabItem>
-      ))}
+      {tabItems
+        .filter((tabItem) => !tabItem.shouldHide)
+        .map((tabItem) => (
+          <TabItem
+            value={tabItem.value}
+            label={tabItem.label}
+            default={tabItem.default}
+          >
+            <Setup
+              apiKeyName={tabItem.apiKeyName}
+              packageName={tabItem.packageName}
+            />
+            <CodeBlock language="python">{tabItem.text}</CodeBlock>
+          </TabItem>
+        ))}
    </Tabs>
  );
}
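
The new TogetherAI tab reuses the OpenAI integration against Together's OpenAI-compatible endpoint; per the defaults above, it renders roughly the following Python:

```python
# Roughly what the TogetherAI tab renders; assumes TOGETHER_API_KEY is set.
import os

from langchain_openai import ChatOpenAI

model = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ["TOGETHER_API_KEY"],
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
)
print(model.invoke("what's 3 * 12").content)
```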

@@ -4,9 +4,9 @@ yum -y update
yum install gcc bzip2-devel libffi-devel zlib-devel wget tar gzip -y

# install quarto
-wget -q https://github.com/quarto-dev/quarto-cli/releases/download/v1.3.450/quarto-1.3.450-linux-amd64.tar.gz
-tar -xzf quarto-1.3.450-linux-amd64.tar.gz
-export PATH=$PATH:$(pwd)/quarto-1.3.450/bin/
+wget -q https://github.com/quarto-dev/quarto-cli/releases/download/v1.4.552/quarto-1.4.552-linux-amd64.tar.gz
+tar -xzf quarto-1.4.552-linux-amd64.tar.gz
+export PATH=$PATH:$(pwd)/quarto-1.4.552/bin/

# setup python env