add new chain howtos (#15430)
@@ -0,0 +1,126 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "b45110ef",
   "metadata": {},
   "source": [
    "# Create a runnable with the `@chain` decorator\n",
    "\n",
    "You can also turn an arbitrary function into a chain by adding a `@chain` decorator. This is functionally equivalent to wrapping the function in a [`RunnableLambda`](./functions) (a sketch of that equivalent is shown below, after the decorated definition).\n",
    "\n",
    "This will have the benefit of improved observability, since your chain will be traced correctly. Any calls to runnables inside this function will be traced as nested children.\n",
    "\n",
    "It will also allow you to use this like any other runnable and compose it into chains.\n",
    "\n",
    "Let's take a look at this in action!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "d9370420",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.chat_models import ChatOpenAI\n",
    "from langchain_core.output_parsers import StrOutputParser\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "from langchain_core.runnables import chain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "b7f74f7e",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompt1 = ChatPromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
    "prompt2 = ChatPromptTemplate.from_template(\"What is the subject of this joke: {joke}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "2b0365c4",
   "metadata": {},
   "outputs": [],
   "source": [
    "@chain\n",
    "def custom_chain(text):\n",
    "    prompt_val1 = prompt1.invoke({\"topic\": text})\n",
    "    output1 = ChatOpenAI().invoke(prompt_val1)\n",
    "    parsed_output1 = StrOutputParser().invoke(output1)\n",
    "    chain2 = prompt2 | ChatOpenAI() | StrOutputParser()\n",
    "    return chain2.invoke({\"joke\": parsed_output1})"
   ]
  },
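  {
   "cell_type": "markdown",
   "id": "chain-vs-runnablelambda-note",
   "metadata": {},
   "source": [
    "As a rough sketch of the equivalence mentioned at the top, the same function can be wrapped in a `RunnableLambda` instead of being decorated. The names `_custom_chain_fn` and `custom_chain_lambda` below are purely illustrative."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "chain-vs-runnablelambda-code",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.runnables import RunnableLambda\n",
    "\n",
    "\n",
    "def _custom_chain_fn(text):\n",
    "    # Same body as custom_chain above, just without the decorator\n",
    "    prompt_val1 = prompt1.invoke({\"topic\": text})\n",
    "    output1 = ChatOpenAI().invoke(prompt_val1)\n",
    "    parsed_output1 = StrOutputParser().invoke(output1)\n",
    "    chain2 = prompt2 | ChatOpenAI() | StrOutputParser()\n",
    "    return chain2.invoke({\"joke\": parsed_output1})\n",
    "\n",
    "\n",
    "# Wrapping the plain function produces the same kind of runnable as @chain\n",
    "custom_chain_lambda = RunnableLambda(_custom_chain_fn)"
   ]
  },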
  {
   "cell_type": "markdown",
   "id": "904d6872",
   "metadata": {},
   "source": [
    "`custom_chain` is now a runnable, meaning you will need to use `invoke` to call it."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "6448bdd3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'The subject of this joke is bears.'"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "custom_chain.invoke(\"bears\")"
   ]
  },
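  {
   "cell_type": "markdown",
   "id": "runnable-methods-note",
   "metadata": {},
   "source": [
    "Because `custom_chain` is a runnable, the other standard runnable methods and composition also apply. The cell below is an illustrative sketch (not part of the original how-to); `shout_chain` is a made-up name, and `batch` simply calls the function once per input."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "runnable-methods-code",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.runnables import RunnableLambda\n",
    "\n",
    "# Runnables support batching over multiple inputs\n",
    "custom_chain.batch([\"bears\", \"cats\"])\n",
    "\n",
    "# They also compose with other runnables via the | operator\n",
    "shout_chain = custom_chain | RunnableLambda(lambda subject: subject.upper())\n",
    "shout_chain.invoke(\"bears\")"
   ]
  },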
  {
   "cell_type": "markdown",
   "id": "aa767ea9",
   "metadata": {},
   "source": [
    "If you check out your LangSmith traces, you should see a `custom_chain` trace in there, with the calls to OpenAI nested underneath."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f1245bdc",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}

@@ -0,0 +1,237 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "8c5eb99a",
   "metadata": {},
   "source": [
    "# Inspect your runnables\n",
    "\n",
    "Once you create a runnable with LCEL, you may often want to inspect it to get a better sense for what is going on. This notebook covers some methods for doing so.\n",
    "\n",
    "First, let's create an example LCEL chain. We will create one that does retrieval."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8bc5d235",
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install langchain openai faiss-cpu tiktoken"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a88f4b24",
   "metadata": {},
   "outputs": [],
   "source": [
    "from operator import itemgetter\n",
    "\n",
    "from langchain.prompts import ChatPromptTemplate\n",
    "from langchain.vectorstores import FAISS\n",
    "from langchain_community.chat_models import ChatOpenAI\n",
    "from langchain_community.embeddings import OpenAIEmbeddings\n",
    "from langchain_core.output_parsers import StrOutputParser\n",
    "from langchain_core.runnables import RunnableLambda, RunnablePassthrough"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "139228c2",
   "metadata": {},
   "outputs": [],
   "source": [
    "vectorstore = FAISS.from_texts(\n",
    "    [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
    ")\n",
    "retriever = vectorstore.as_retriever()\n",
    "\n",
    "template = \"\"\"Answer the question based only on the following context:\n",
    "{context}\n",
    "\n",
    "Question: {question}\n",
    "\"\"\"\n",
    "prompt = ChatPromptTemplate.from_template(template)\n",
    "\n",
    "model = ChatOpenAI()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "70e3fe93",
   "metadata": {},
   "outputs": [],
   "source": [
    "chain = (\n",
    "    {\"context\": retriever, \"question\": RunnablePassthrough()}\n",
    "    | prompt\n",
    "    | model\n",
    "    | StrOutputParser()\n",
    ")"
   ]
  },
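  {
   "cell_type": "markdown",
   "id": "chain-sanity-check-note",
   "metadata": {},
   "source": [
    "As an illustrative aside (not part of the original notebook), you can sanity-check that the chain runs end to end before inspecting it; the question below is just an example, and the call requires an OpenAI API key."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "chain-sanity-check-code",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sanity check that the chain runs end to end\n",
    "chain.invoke(\"where did harrison work?\")"
   ]
  },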
  {
   "cell_type": "markdown",
   "id": "849e3c42",
   "metadata": {},
   "source": [
    "## Get a graph\n",
    "\n",
    "You can get a graph of the runnable"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "2448b6c2",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
"Graph(nodes={'7308e6063c6d40818c5a0cc1cc7444f2': Node(id='7308e6063c6d40818c5a0cc1cc7444f2', data=<class 'pydantic.main.RunnableParallel<context,question>Input'>), '292bbd8021d44ec3a31fbe724d9002c1': Node(id='292bbd8021d44ec3a31fbe724d9002c1', data=<class 'pydantic.main.RunnableParallel<context,question>Output'>), '9212f219cf05488f95229c56ea02b192': Node(id='9212f219cf05488f95229c56ea02b192', data=VectorStoreRetriever(tags=['FAISS', 'OpenAIEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x117334f70>)), 'c7a8e65fa5cf44b99dbe7d1d6e36886f': Node(id='c7a8e65fa5cf44b99dbe7d1d6e36886f', data=RunnablePassthrough()), '818b9bfd40a341008373d5b9f9d0784b': Node(id='818b9bfd40a341008373d5b9f9d0784b', data=ChatPromptTemplate(input_variables=['context', 'question'], messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template='Answer the question based only on the following context:\\n{context}\\n\\nQuestion: {question}\\n'))])), 'b9f1d3ddfa6b4334a16ea439df22b11e': Node(id='b9f1d3ddfa6b4334a16ea439df22b11e', data=ChatOpenAI(client=<class 'openai.api_resources.chat_completion.ChatCompletion'>, openai_api_key='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', openai_proxy='')), '2bf84f6355c44731848345ca7d0f8ab9': Node(id='2bf84f6355c44731848345ca7d0f8ab9', data=StrOutputParser()), '1aeb2da5da5a43bb8771d3f338a473a2': Node(id='1aeb2da5da5a43bb8771d3f338a473a2', data=<class 'pydantic.main.StrOutputParserOutput'>)}, edges=[Edge(source='7308e6063c6d40818c5a0cc1cc7444f2', target='9212f219cf05488f95229c56ea02b192'), Edge(source='9212f219cf05488f95229c56ea02b192', target='292bbd8021d44ec3a31fbe724d9002c1'), Edge(source='7308e6063c6d40818c5a0cc1cc7444f2', target='c7a8e65fa5cf44b99dbe7d1d6e36886f'), Edge(source='c7a8e65fa5cf44b99dbe7d1d6e36886f', target='292bbd8021d44ec3a31fbe724d9002c1'), Edge(source='292bbd8021d44ec3a31fbe724d9002c1', target='818b9bfd40a341008373d5b9f9d0784b'), Edge(source='818b9bfd40a341008373d5b9f9d0784b', target='b9f1d3ddfa6b4334a16ea439df22b11e'), Edge(source='2bf84f6355c44731848345ca7d0f8ab9', target='1aeb2da5da5a43bb8771d3f338a473a2'), Edge(source='b9f1d3ddfa6b4334a16ea439df22b11e', target='2bf84f6355c44731848345ca7d0f8ab9')])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "chain.get_graph()"
   ]
  },
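  {
   "cell_type": "markdown",
   "id": "graph-walk-note",
   "metadata": {},
   "source": [
    "The repr above shows that the graph is made of `nodes` (a dict of `Node` objects) and `edges` (a list of `Edge` objects), so you can also walk it programmatically. The cell below is an illustrative sketch based on those attributes."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "graph-walk-code",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative: iterate over the graph's nodes and edges directly\n",
    "graph = chain.get_graph()\n",
    "for node in graph.nodes.values():\n",
    "    print(node.id, node.data)\n",
    "for edge in graph.edges:\n",
    "    print(edge.source, \"->\", edge.target)"
   ]
  },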
  {
   "cell_type": "markdown",
   "id": "065b02fb",
   "metadata": {},
   "source": [
    "## Print a graph\n",
    "\n",
    "While that is not super legible, you can print it to get a display that's easier to understand"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "d5ab1515",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "           +---------------------------------+\n",
      "           | Parallel<context,question>Input |\n",
      "           +---------------------------------+\n",
      "                   **                **\n",
      "                ***                    ***\n",
      "              **                          **\n",
      "+----------------------+              +-------------+\n",
      "| VectorStoreRetriever |              | Passthrough |\n",
      "+----------------------+              +-------------+\n",
      "                   **                **\n",
      "                     ***           ***\n",
      "                        **       **\n",
      "          +----------------------------------+\n",
      "          | Parallel<context,question>Output |\n",
      "          +----------------------------------+\n",
      "                            *\n",
      "                            *\n",
      "                            *\n",
      "                 +--------------------+\n",
      "                 | ChatPromptTemplate |\n",
      "                 +--------------------+\n",
      "                            *\n",
      "                            *\n",
      "                            *\n",
      "                     +------------+\n",
      "                     | ChatOpenAI |\n",
      "                     +------------+\n",
      "                            *\n",
      "                            *\n",
      "                            *\n",
      "                  +-----------------+\n",
      "                  | StrOutputParser |\n",
      "                  +-----------------+\n",
      "                            *\n",
      "                            *\n",
      "                            *\n",
      "               +-----------------------+\n",
      "               | StrOutputParserOutput |\n",
      "               +-----------------------+\n"
     ]
    }
   ],
   "source": [
    "chain.get_graph().print_ascii()"
   ]
  },
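  {
   "cell_type": "markdown",
   "id": "draw-ascii-note",
   "metadata": {},
   "source": [
    "If you want the diagram as a string (for example, to write it to a file) rather than printed, the sketch below assumes the graph also exposes a `draw_ascii()` method that returns the same text that `print_ascii()` prints."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "draw-ascii-code",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: capture the ASCII diagram as a string instead of printing it\n",
    "ascii_diagram = chain.get_graph().draw_ascii()"
   ]
  },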
  {
   "cell_type": "markdown",
   "id": "2babf851",
   "metadata": {},
   "source": [
    "## Get the prompts\n",
    "\n",
    "An important part of every chain is the prompts that are used. You can get the prompts present in the chain:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "34b2118d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
"[ChatPromptTemplate(input_variables=['context', 'question'], messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template='Answer the question based only on the following context:\\n{context}\\n\\nQuestion: {question}\\n'))])]"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "chain.get_prompts()"
   ]
  },
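  {
   "cell_type": "markdown",
   "id": "prompt-inputs-note",
   "metadata": {},
   "source": [
    "As an illustrative follow-up, each returned prompt template exposes `input_variables` (visible in the repr above), which tells you what inputs the chain expects you to supply."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "prompt-inputs-code",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative: list the variables each prompt in the chain expects\n",
    "for prompt_template in chain.get_prompts():\n",
    "    print(prompt_template.input_variables)"
   ]
  },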
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ed965769",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}