mirror of https://github.com/hwchase17/langchain
Merge branch 'master' into deepsense/text-to-speech
commit 69fe0621d4
@@ -0,0 +1,119 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "f09fd305",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Code writing\n",
|
||||||
|
"\n",
|
||||||
|
"Example of how to use LCEL to write Python code."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 11,
|
||||||
|
"id": "bd7c259a",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.chat_models import ChatOpenAI\n",
|
||||||
|
"from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n",
|
||||||
|
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||||
|
"from langchain.utilities import PythonREPL"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 12,
|
||||||
|
"id": "73795d2d",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"template = \"\"\"Write some python code to solve the user's problem. \n",
|
||||||
|
"\n",
|
||||||
|
"Return only python code in Markdown format, e.g.:\n",
|
||||||
|
"\n",
|
||||||
|
"```python\n",
|
||||||
|
"....\n",
|
||||||
|
"```\"\"\"\n",
|
||||||
|
"prompt = ChatPromptTemplate.from_messages(\n",
|
||||||
|
" [(\"system\", template), (\"human\", \"{input}\")]\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"model = ChatOpenAI()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 13,
|
||||||
|
"id": "42859e8a",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def _sanitize_output(text: str):\n",
|
||||||
|
" _, after = text.split(\"```python\")\n",
|
||||||
|
" return after.split(\"```\")[0]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 14,
|
||||||
|
"id": "5ded1a86",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"chain = prompt | model | StrOutputParser() | _sanitize_output | PythonREPL().run"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 15,
|
||||||
|
"id": "208c2b75",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stderr",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Python REPL can execute arbitrary code. Use with caution.\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"'4\\n'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 15,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"input\": \"whats 2 plus 2\"})"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
@@ -0,0 +1,11 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 2
|
||||||
|
---
|
||||||
|
|
||||||
|
# Cookbook
|
||||||
|
|
||||||
|
import DocCardList from "@theme/DocCardList";
|
||||||
|
|
||||||
|
Example code for accomplishing common tasks with the LangChain Expression Language (LCEL). These examples show how to compose different Runnable (the core LCEL interface) components to achieve various tasks. If you're just getting acquainted with LCEL, the [Prompt + LLM](/docs/expression_language/cookbook/prompt_llm_parser) page is a good place to start.
|
||||||
|
|
||||||
|
<DocCardList />
|
@@ -0,0 +1,180 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "5062941a",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Adding memory\n",
|
||||||
|
"\n",
|
||||||
|
"This shows how to add memory to an arbitrary chain. Right now, you can use the memory classes but need to hook it up manually"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 1,
|
||||||
|
"id": "7998efd8",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.chat_models import ChatOpenAI\n",
|
||||||
|
"from langchain.memory import ConversationBufferMemory\n",
|
||||||
|
"from langchain.schema.runnable import RunnableMap\n",
|
||||||
|
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
|
||||||
|
"\n",
|
||||||
|
"model = ChatOpenAI()\n",
|
||||||
|
"prompt = ChatPromptTemplate.from_messages([\n",
|
||||||
|
" (\"system\", \"You are a helpful chatbot\"),\n",
|
||||||
|
" MessagesPlaceholder(variable_name=\"history\"),\n",
|
||||||
|
" (\"human\", \"{input}\")\n",
|
||||||
|
"])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"id": "fa0087f3",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"memory = ConversationBufferMemory(return_messages=True)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 3,
|
||||||
|
"id": "06b531ae",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"{'history': []}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 3,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"memory.load_memory_variables({})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"id": "d9437af6",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"chain = RunnableMap({\n",
|
||||||
|
" \"input\": lambda x: x[\"input\"],\n",
|
||||||
|
" \"memory\": memory.load_memory_variables\n",
|
||||||
|
"}) | {\n",
|
||||||
|
" \"input\": lambda x: x[\"input\"],\n",
|
||||||
|
" \"history\": lambda x: x[\"memory\"][\"history\"]\n",
|
||||||
|
"} | prompt | model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 5,
|
||||||
|
"id": "bed1e260",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"AIMessage(content='Hello Bob! How can I assist you today?', additional_kwargs={}, example=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 5,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"inputs = {\"input\": \"hi im bob\"}\n",
|
||||||
|
"response = chain.invoke(inputs)\n",
|
||||||
|
"response"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"id": "890475b4",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"memory.save_context(inputs, {\"output\": response.content})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"id": "e8fcb77f",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"{'history': [HumanMessage(content='hi im bob', additional_kwargs={}, example=False),\n",
|
||||||
|
" AIMessage(content='Hello Bob! How can I assist you today?', additional_kwargs={}, example=False)]}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 7,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"memory.load_memory_variables({})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"id": "d837d5c3",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"AIMessage(content='Your name is Bob.', additional_kwargs={}, example=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 8,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"inputs = {\"input\": \"whats my name\"}\n",
|
||||||
|
"response = chain.invoke(inputs)\n",
|
||||||
|
"response"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
@@ -0,0 +1,133 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "4927a727-b4c8-453c-8c83-bd87b4fcac14",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Adding moderation\n",
|
||||||
|
"\n",
|
||||||
|
"This shows how to add in moderation (or other safeguards) around your LLM application."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 20,
|
||||||
|
"id": "4f5f6449-940a-4f5c-97c0-39b71c3e2a68",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.chains import OpenAIModerationChain\n",
|
||||||
|
"from langchain.llms import OpenAI\n",
|
||||||
|
"from langchain.prompts import ChatPromptTemplate"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"id": "fcb8312b-7e7a-424f-a3ec-76738c9a9d21",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"moderate = OpenAIModerationChain()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 21,
|
||||||
|
"id": "b24b9148-f6b0-4091-8ea8-d3fb281bd950",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"model = OpenAI()\n",
|
||||||
|
"prompt = ChatPromptTemplate.from_messages([\n",
|
||||||
|
" (\"system\", \"repeat after me: {input}\")\n",
|
||||||
|
"])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 22,
|
||||||
|
"id": "1c8ed87c-9ca6-4559-bf60-d40e94a0af08",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"chain = prompt | model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 23,
|
||||||
|
"id": "5256b9bd-381a-42b0-bfa8-7e6d18f853cb",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"'\\n\\nYou are stupid.'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 23,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"input\": \"you are stupid\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 24,
|
||||||
|
"id": "fe6e3b33-dc9a-49d5-b194-ba750c58a628",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"moderated_chain = chain | moderate"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 25,
|
||||||
|
"id": "d8ba0cbd-c739-4d23-be9f-6ae092bd5ffb",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"{'input': '\\n\\nYou are stupid',\n",
|
||||||
|
" 'output': \"Text was found that violates OpenAI's content policy.\"}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 25,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"moderated_chain.invoke({\"input\": \"you are stupid\"})"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
@@ -0,0 +1,240 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "raw",
|
||||||
|
"id": "877102d1-02ea-4fa3-8ec7-a08e242b95b3",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"---\n",
|
||||||
|
"sidebar_position: 2\n",
|
||||||
|
"title: Multiple chains\n",
|
||||||
|
"---"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "0f2bf8d3",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Runnables can easily be used to string together multiple Chains"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"id": "d65d4e9e",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"'El país donde se encuentra la ciudad de Honolulu, donde nació Barack Obama, el 44º Presidente de los Estados Unidos, es Estados Unidos. Honolulu se encuentra en la isla de Oahu, en el estado de Hawái.'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 4,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"from operator import itemgetter\n",
|
||||||
|
"\n",
|
||||||
|
"from langchain.chat_models import ChatOpenAI\n",
|
||||||
|
"from langchain.prompts import ChatPromptTemplate\n",
|
||||||
|
"from langchain.schema import StrOutputParser\n",
|
||||||
|
"\n",
|
||||||
|
"prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
|
||||||
|
"prompt2 = ChatPromptTemplate.from_template(\"what country is the city {city} in? respond in {language}\")\n",
|
||||||
|
"\n",
|
||||||
|
"model = ChatOpenAI()\n",
|
||||||
|
"\n",
|
||||||
|
"chain1 = prompt1 | model | StrOutputParser()\n",
|
||||||
|
"\n",
|
||||||
|
"chain2 = {\"city\": chain1, \"language\": itemgetter(\"language\")} | prompt2 | model | StrOutputParser()\n",
|
||||||
|
"\n",
|
||||||
|
"chain2.invoke({\"person\": \"obama\", \"language\": \"spanish\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"id": "878f8176",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.schema.runnable import RunnableMap, RunnablePassthrough\n",
|
||||||
|
"\n",
|
||||||
|
"prompt1 = ChatPromptTemplate.from_template(\"generate a {attribute} color. Return the name of the color and nothing else:\")\n",
|
||||||
|
"prompt2 = ChatPromptTemplate.from_template(\"what is a fruit of color: {color}. Return the name of the fruit and nothing else:\")\n",
|
||||||
|
"prompt3 = ChatPromptTemplate.from_template(\"what is a country with a flag that has the color: {color}. Return the name of the country and nothing else:\")\n",
|
||||||
|
"prompt4 = ChatPromptTemplate.from_template(\"What is the color of {fruit} and the flag of {country}?\")\n",
|
||||||
|
"\n",
|
||||||
|
"model_parser = model | StrOutputParser()\n",
|
||||||
|
"\n",
|
||||||
|
"color_generator = {\"attribute\": RunnablePassthrough()} | prompt1 | {\"color\": model_parser}\n",
|
||||||
|
"color_to_fruit = prompt2 | model_parser\n",
|
||||||
|
"color_to_country = prompt3 | model_parser\n",
|
||||||
|
"question_generator = color_generator | {\"fruit\": color_to_fruit, \"country\": color_to_country} | prompt4"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"id": "d621a870",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"ChatPromptValue(messages=[HumanMessage(content='What is the color of strawberry and the flag of China?', additional_kwargs={}, example=False)])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 9,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"question_generator.invoke({\"warm\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 10,
|
||||||
|
"id": "b4a9812b-bead-4fd9-ae27-0b8be57e5dc1",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"AIMessage(content='The color of an apple is typically red or green. The flag of China is predominantly red with a large yellow star in the upper left corner and four smaller yellow stars surrounding it.', additional_kwargs={}, example=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 10,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"prompt = question_generator.invoke({\"warm\"})\n",
|
||||||
|
"model.invoke(prompt)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "6d75a313-f1c8-4e94-9a17-24e0bf4a2bdc",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Branching and Merging\n",
|
||||||
|
"\n",
|
||||||
|
"You may want the output of one component to be processed by 2 or more other components. [RunnableMaps](https://api.python.langchain.com/en/latest/schema/langchain.schema.runnable.base.RunnableMap.html) let you split or fork the chain so multiple components can process the input in parallel. Later, other components can join or merge the results to synthesize a final response. This type of chain creates a computation graph that looks like the following:\n",
|
||||||
|
"\n",
|
||||||
|
"```text\n",
|
||||||
|
" Input\n",
|
||||||
|
" / \\\n",
|
||||||
|
" / \\\n",
|
||||||
|
" Branch1 Branch2\n",
|
||||||
|
" \\ /\n",
|
||||||
|
" \\ /\n",
|
||||||
|
" Combine\n",
|
||||||
|
"```"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 11,
|
||||||
|
"id": "247fa0bd-4596-4063-8cb3-1d7fc119d982",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"planner = (\n",
|
||||||
|
" ChatPromptTemplate.from_template(\n",
|
||||||
|
" \"Generate an argument about: {input}\"\n",
|
||||||
|
" )\n",
|
||||||
|
" | ChatOpenAI()\n",
|
||||||
|
" | StrOutputParser()\n",
|
||||||
|
" | {\"base_response\": RunnablePassthrough()}\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"arguments_for = (\n",
|
||||||
|
" ChatPromptTemplate.from_template(\n",
|
||||||
|
" \"List the pros or positive aspects of {base_response}\"\n",
|
||||||
|
" )\n",
|
||||||
|
" | ChatOpenAI()\n",
|
||||||
|
" | StrOutputParser()\n",
|
||||||
|
")\n",
|
||||||
|
"arguments_against = (\n",
|
||||||
|
" ChatPromptTemplate.from_template(\n",
|
||||||
|
" \"List the cons or negative aspects of {base_response}\"\n",
|
||||||
|
" )\n",
|
||||||
|
" | ChatOpenAI()\n",
|
||||||
|
" | StrOutputParser()\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"final_responder = (\n",
|
||||||
|
" ChatPromptTemplate.from_messages(\n",
|
||||||
|
" [\n",
|
||||||
|
" (\"ai\", \"{original_response}\"),\n",
|
||||||
|
" (\"human\", \"Pros:\\n{results_1}\\n\\nCons:\\n{results_2}\"),\n",
|
||||||
|
" (\"system\", \"Generate a final response given the critique\"),\n",
|
||||||
|
" ]\n",
|
||||||
|
" )\n",
|
||||||
|
" | ChatOpenAI()\n",
|
||||||
|
" | StrOutputParser()\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"chain = (\n",
|
||||||
|
" planner \n",
|
||||||
|
" | {\n",
|
||||||
|
" \"results_1\": arguments_for,\n",
|
||||||
|
" \"results_2\": arguments_against,\n",
|
||||||
|
" \"original_response\": itemgetter(\"base_response\"),\n",
|
||||||
|
" }\n",
|
||||||
|
" | final_responder\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 12,
|
||||||
|
"id": "2564f310-0674-4bb1-9c4e-d7848ca73511",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"'While Scrum has its potential cons and challenges, many organizations have successfully embraced and implemented this project management framework to great effect. The cons mentioned above can be mitigated or overcome with proper training, support, and a commitment to continuous improvement. It is also important to note that not all cons may be applicable to every organization or project.\\n\\nFor example, while Scrum may be complex initially, with proper training and guidance, teams can quickly grasp the concepts and practices. The lack of predictability can be mitigated by implementing techniques such as velocity tracking and release planning. The limited documentation can be addressed by maintaining a balance between lightweight documentation and clear communication among team members. The dependency on team collaboration can be improved through effective communication channels and regular team-building activities.\\n\\nScrum can be scaled and adapted to larger projects by using frameworks like Scrum of Scrums or LeSS (Large Scale Scrum). Concerns about speed versus quality can be addressed by incorporating quality assurance practices, such as continuous integration and automated testing, into the Scrum process. Scope creep can be managed by having a well-defined and prioritized product backlog, and a strong product owner can be developed through training and mentorship.\\n\\nResistance to change can be overcome by providing proper education and communication to stakeholders and involving them in the decision-making process. Ultimately, the cons of Scrum can be seen as opportunities for growth and improvement, and with the right mindset and support, they can be effectively managed.\\n\\nIn conclusion, while Scrum may have its challenges and potential cons, the benefits and advantages it offers in terms of collaboration, flexibility, adaptability, transparency, and customer satisfaction make it a widely adopted and successful project management framework. With proper implementation and continuous improvement, organizations can leverage Scrum to drive innovation, efficiency, and project success.'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 12,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"input\": \"scrum\"})"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "poetry-venv",
|
||||||
|
"language": "python",
|
||||||
|
"name": "poetry-venv"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
@@ -0,0 +1,431 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "raw",
|
||||||
|
"id": "abf7263d-3a62-4016-b5d5-b157f92f2070",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"---\n",
|
||||||
|
"sidebar_position: 0\n",
|
||||||
|
"title: Prompt + LLM\n",
|
||||||
|
"---"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "9a434f2b-9405-468c-9dfd-254d456b57a6",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The most common and valuable composition is taking:\n",
|
||||||
|
"\n",
|
||||||
|
"``PromptTemplate`` / ``ChatPromptTemplate`` -> ``LLM`` / ``ChatModel`` -> ``OutputParser``\n",
|
||||||
|
"\n",
|
||||||
|
"Almost any other chains you build will use this building block."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "93aa2c87",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## PromptTemplate + LLM\n",
|
||||||
|
"\n",
|
||||||
|
"The simplest composition is just combing a prompt and model to create a chain that takes user input, adds it to a prompt, passes it to a model, and returns the raw model input.\n",
|
||||||
|
"\n",
|
||||||
|
"Note, you can mix and match PromptTemplate/ChatPromptTemplates and LLMs/ChatModels as you like here."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 1,
|
||||||
|
"id": "466b65b3",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.prompts import ChatPromptTemplate\n",
|
||||||
|
"from langchain.chat_models import ChatOpenAI\n",
|
||||||
|
"\n",
|
||||||
|
"prompt = ChatPromptTemplate.from_template(\"tell me a joke about {foo}\")\n",
|
||||||
|
"model = ChatOpenAI()\n",
|
||||||
|
"chain = prompt | model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"id": "e3d0a6cd",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"AIMessage(content=\"Why don't bears wear shoes?\\n\\nBecause they have bear feet!\", additional_kwargs={}, example=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 2,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"foo\": \"bears\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "7eb9ef50",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Often times we want to attach kwargs that'll be passed to each model call. Here's a few examples of that:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "0b1d8f88",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Attaching Stop Sequences"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 3,
|
||||||
|
"id": "562a06bf",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"chain = prompt | model.bind(stop=[\"\\n\"])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"id": "43f5d04c",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"AIMessage(content='Why did the bear never wear shoes?', additional_kwargs={}, example=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 4,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"foo\": \"bears\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "f3eaf88a",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Attaching Function Call information"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 5,
|
||||||
|
"id": "f94b71b2",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"functions = [\n",
|
||||||
|
" {\n",
|
||||||
|
" \"name\": \"joke\",\n",
|
||||||
|
" \"description\": \"A joke\",\n",
|
||||||
|
" \"parameters\": {\n",
|
||||||
|
" \"type\": \"object\",\n",
|
||||||
|
" \"properties\": {\n",
|
||||||
|
" \"setup\": {\n",
|
||||||
|
" \"type\": \"string\",\n",
|
||||||
|
" \"description\": \"The setup for the joke\"\n",
|
||||||
|
" },\n",
|
||||||
|
" \"punchline\": {\n",
|
||||||
|
" \"type\": \"string\",\n",
|
||||||
|
" \"description\": \"The punchline for the joke\"\n",
|
||||||
|
" }\n",
|
||||||
|
" },\n",
|
||||||
|
" \"required\": [\"setup\", \"punchline\"]\n",
|
||||||
|
" }\n",
|
||||||
|
" }\n",
|
||||||
|
" ]\n",
|
||||||
|
"chain = prompt | model.bind(function_call= {\"name\": \"joke\"}, functions= functions)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"id": "decf7710",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'joke', 'arguments': '{\\n \"setup\": \"Why don\\'t bears wear shoes?\",\\n \"punchline\": \"Because they have bear feet!\"\\n}'}}, example=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 6,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"foo\": \"bears\"}, config={})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "9098c5ed",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## PromptTemplate + LLM + OutputParser\n",
|
||||||
|
"\n",
|
||||||
|
"We can also add in an output parser to easily trasform the raw LLM/ChatModel output into a more workable format"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"id": "cc194c78",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||||
|
"\n",
|
||||||
|
"chain = prompt | model | StrOutputParser()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "77acf448",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Notice that this now returns a string - a much more workable format for downstream tasks"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"id": "e3d69a18",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"\"Why don't bears wear shoes?\\n\\nBecause they have bear feet!\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 8,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"foo\": \"bears\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "c01864e5",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Functions Output Parser\n",
|
||||||
|
"\n",
|
||||||
|
"When you specify the function to return, you may just want to parse that directly"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"id": "ad0dd88e",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser\n",
|
||||||
|
"\n",
|
||||||
|
"chain = (\n",
|
||||||
|
" prompt \n",
|
||||||
|
" | model.bind(function_call= {\"name\": \"joke\"}, functions= functions) \n",
|
||||||
|
" | JsonOutputFunctionsParser()\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 10,
|
||||||
|
"id": "1e7aa8eb",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"{'setup': \"Why don't bears like fast food?\",\n",
|
||||||
|
" 'punchline': \"Because they can't catch it!\"}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 10,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"foo\": \"bears\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 11,
|
||||||
|
"id": "d4aa1a01",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser\n",
|
||||||
|
"\n",
|
||||||
|
"chain = (\n",
|
||||||
|
" prompt \n",
|
||||||
|
" | model.bind(function_call= {\"name\": \"joke\"}, functions= functions) \n",
|
||||||
|
" | JsonKeyOutputFunctionsParser(key_name=\"setup\")\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 12,
|
||||||
|
"id": "8b6df9ba",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"\"Why don't bears wear shoes?\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 12,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"foo\": \"bears\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "023fbccb-ef7d-489e-a9ba-f98e17283d51",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Simplifying input\n",
|
||||||
|
"\n",
|
||||||
|
"To make invocation even simpler, we can add a `RunnableMap` to take care of creating the prompt input dict for us:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 13,
|
||||||
|
"id": "9601c0f0-71f9-4bd4-a672-7bd04084b018",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.schema.runnable import RunnableMap, RunnablePassthrough\n",
|
||||||
|
"\n",
|
||||||
|
"map_ = RunnableMap({\"foo\": RunnablePassthrough()})\n",
|
||||||
|
"chain = (\n",
|
||||||
|
" map_ \n",
|
||||||
|
" | prompt\n",
|
||||||
|
" | model.bind(function_call= {\"name\": \"joke\"}, functions= functions) \n",
|
||||||
|
" | JsonKeyOutputFunctionsParser(key_name=\"setup\")\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 14,
|
||||||
|
"id": "7ec4f154-fda5-4847-9220-41aa902fdc33",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"\"Why don't bears wear shoes?\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 14,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke(\"bears\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "def00bfe-0f83-4805-8c8f-8a53f99fa8ea",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Since we're composing our map with another Runnable, we can even use some syntactic sugar and just use a dict:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 21,
|
||||||
|
"id": "7bf3846a-02ee-41a3-ba1b-a708827d4f3a",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"chain = (\n",
|
||||||
|
" {\"foo\": RunnablePassthrough()} \n",
|
||||||
|
" | prompt\n",
|
||||||
|
" | model.bind(function_call= {\"name\": \"joke\"}, functions= functions) \n",
|
||||||
|
" | JsonKeyOutputFunctionsParser(key_name=\"setup\")\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 22,
|
||||||
|
"id": "e566d6a1-538d-4cb5-a210-a63e082e4c74",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"\"Why don't bears like fast food?\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 22,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke(\"bears\")"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
@@ -0,0 +1,461 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "raw",
|
||||||
|
"id": "abe47592-909c-4844-bf44-9e55c2fb4bfa",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"---\n",
|
||||||
|
"sidebar_position: 1\n",
|
||||||
|
"title: RAG\n",
|
||||||
|
"---"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "91c5ef3d",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Let's look at adding in a retrieval step to a prompt and LLM, which adds up to a \"retrieval-augmented generation\" chain"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "7f25d9e9-d192-42e9-af50-5660a4bfb0d9",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"!pip install langchain openai faiss-cpu"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"id": "33be32af",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from operator import itemgetter\n",
|
||||||
|
"\n",
|
||||||
|
"from langchain.prompts import ChatPromptTemplate\n",
|
||||||
|
"from langchain.chat_models import ChatOpenAI\n",
|
||||||
|
"from langchain.embeddings import OpenAIEmbeddings\n",
|
||||||
|
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||||
|
"from langchain.schema.runnable import RunnablePassthrough\n",
|
||||||
|
"from langchain.vectorstores import FAISS"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 3,
|
||||||
|
"id": "bfc47ec1",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"vectorstore = FAISS.from_texts([\"harrison worked at kensho\"], embedding=OpenAIEmbeddings())\n",
|
||||||
|
"retriever = vectorstore.as_retriever()\n",
|
||||||
|
"\n",
|
||||||
|
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||||
|
"{context}\n",
|
||||||
|
"\n",
|
||||||
|
"Question: {question}\n",
|
||||||
|
"\"\"\"\n",
|
||||||
|
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||||
|
"\n",
|
||||||
|
"model = ChatOpenAI()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"id": "eae31755",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"chain = (\n",
|
||||||
|
" {\"context\": retriever, \"question\": RunnablePassthrough()} \n",
|
||||||
|
" | prompt \n",
|
||||||
|
" | model \n",
|
||||||
|
" | StrOutputParser()\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 5,
|
||||||
|
"id": "f3040b0c",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"'Harrison worked at Kensho.'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 5,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke(\"where did harrison work?\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"id": "e1d20c7c",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||||
|
"{context}\n",
|
||||||
|
"\n",
|
||||||
|
"Question: {question}\n",
|
||||||
|
"\n",
|
||||||
|
"Answer in the following language: {language}\n",
|
||||||
|
"\"\"\"\n",
|
||||||
|
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||||
|
"\n",
|
||||||
|
"chain = {\n",
|
||||||
|
" \"context\": itemgetter(\"question\") | retriever, \n",
|
||||||
|
" \"question\": itemgetter(\"question\"), \n",
|
||||||
|
" \"language\": itemgetter(\"language\")\n",
|
||||||
|
"} | prompt | model | StrOutputParser()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"id": "7ee8b2d4",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"'Harrison ha lavorato a Kensho.'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 7,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"question\": \"where did harrison work\", \"language\": \"italian\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "f007669c",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Conversational Retrieval Chain\n",
|
||||||
|
"\n",
|
||||||
|
"We can easily add in conversation history. This primarily means adding in chat_message_history"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"id": "3f30c348",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.schema.runnable import RunnableMap\n",
|
||||||
|
"from langchain.schema import format_document"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"id": "64ab1dbf",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.prompts.prompt import PromptTemplate\n",
|
||||||
|
"\n",
|
||||||
|
"_template = \"\"\"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n",
|
||||||
|
"\n",
|
||||||
|
"Chat History:\n",
|
||||||
|
"{chat_history}\n",
|
||||||
|
"Follow Up Input: {question}\n",
|
||||||
|
"Standalone question:\"\"\"\n",
|
||||||
|
"CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 10,
|
||||||
|
"id": "7d628c97",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"template = \"\"\"Answer the question based only on the following context:\n",
|
||||||
|
"{context}\n",
|
||||||
|
"\n",
|
||||||
|
"Question: {question}\n",
|
||||||
|
"\"\"\"\n",
|
||||||
|
"ANSWER_PROMPT = ChatPromptTemplate.from_template(template)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 11,
|
||||||
|
"id": "f60a5d0f",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
|
||||||
|
"def _combine_documents(docs, document_prompt = DEFAULT_DOCUMENT_PROMPT, document_separator=\"\\n\\n\"):\n",
|
||||||
|
" doc_strings = [format_document(doc, document_prompt) for doc in docs]\n",
|
||||||
|
" return document_separator.join(doc_strings)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 12,
|
||||||
|
"id": "7d007db6",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from typing import Tuple, List\n",
|
||||||
|
"def _format_chat_history(chat_history: List[Tuple]) -> str:\n",
|
||||||
|
" buffer = \"\"\n",
|
||||||
|
" for dialogue_turn in chat_history:\n",
|
||||||
|
" human = \"Human: \" + dialogue_turn[0]\n",
|
||||||
|
" ai = \"Assistant: \" + dialogue_turn[1]\n",
|
||||||
|
" buffer += \"\\n\" + \"\\n\".join([human, ai])\n",
|
||||||
|
" return buffer"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 13,
|
||||||
|
"id": "5c32cc89",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"_inputs = RunnableMap(\n",
|
||||||
|
" {\n",
|
||||||
|
" \"standalone_question\": {\n",
|
||||||
|
" \"question\": lambda x: x[\"question\"],\n",
|
||||||
|
" \"chat_history\": lambda x: _format_chat_history(x['chat_history'])\n",
|
||||||
|
" } | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(),\n",
|
||||||
|
" }\n",
|
||||||
|
")\n",
|
||||||
|
"_context = {\n",
|
||||||
|
" \"context\": itemgetter(\"standalone_question\") | retriever | _combine_documents,\n",
|
||||||
|
" \"question\": lambda x: x[\"standalone_question\"]\n",
|
||||||
|
"}\n",
|
||||||
|
"conversational_qa_chain = _inputs | _context | ANSWER_PROMPT | ChatOpenAI()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 14,
|
||||||
|
"id": "135c8205",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"AIMessage(content='Harrison was employed at Kensho.', additional_kwargs={}, example=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 14,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"conversational_qa_chain.invoke({\n",
|
||||||
|
" \"question\": \"where did harrison work?\",\n",
|
||||||
|
" \"chat_history\": [],\n",
|
||||||
|
"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 15,
|
||||||
|
"id": "424e7e7a",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"AIMessage(content='Harrison worked at Kensho.', additional_kwargs={}, example=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 15,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"conversational_qa_chain.invoke({\n",
|
||||||
|
" \"question\": \"where did he work?\",\n",
|
||||||
|
" \"chat_history\": [(\"Who wrote this notebook?\", \"Harrison\")],\n",
|
||||||
|
"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "c5543183",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### With Memory and returning source documents\n",
|
||||||
|
"\n",
|
||||||
|
"This shows how to use memory with the above. For memory, we need to manage that outside at the memory. For returning the retrieved documents, we just need to pass them through all the way."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 16,
|
||||||
|
"id": "e31dd17c",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.memory import ConversationBufferMemory"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 17,
|
||||||
|
"id": "d4bffe94",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"memory = ConversationBufferMemory(return_messages=True, output_key=\"answer\", input_key=\"question\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 18,
|
||||||
|
"id": "733be985",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# First we add a step to load memory\n",
|
||||||
|
"# This needs to be a RunnableMap because its the first input\n",
|
||||||
|
"loaded_memory = RunnableMap(\n",
|
||||||
|
" {\n",
|
||||||
|
" \"question\": itemgetter(\"question\"),\n",
|
||||||
|
" \"memory\": memory.load_memory_variables,\n",
|
||||||
|
" }\n",
|
||||||
|
")\n",
|
||||||
|
"# Next we add a step to expand memory into the variables\n",
|
||||||
|
"expanded_memory = {\n",
|
||||||
|
" \"question\": itemgetter(\"question\"),\n",
|
||||||
|
" \"chat_history\": lambda x: x[\"memory\"][\"history\"]\n",
|
||||||
|
"}\n",
|
||||||
|
"\n",
|
||||||
|
"# Now we calculate the standalone question\n",
|
||||||
|
"standalone_question = {\n",
|
||||||
|
" \"standalone_question\": {\n",
|
||||||
|
" \"question\": lambda x: x[\"question\"],\n",
|
||||||
|
" \"chat_history\": lambda x: _format_chat_history(x['chat_history'])\n",
|
||||||
|
" } | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(),\n",
|
||||||
|
"}\n",
|
||||||
|
"# Now we retrieve the documents\n",
|
||||||
|
"retrieved_documents = {\n",
|
||||||
|
" \"docs\": itemgetter(\"standalone_question\") | retriever,\n",
|
||||||
|
" \"question\": lambda x: x[\"standalone_question\"]\n",
|
||||||
|
"}\n",
|
||||||
|
"# Now we construct the inputs for the final prompt\n",
|
||||||
|
"final_inputs = {\n",
|
||||||
|
" \"context\": lambda x: _combine_documents(x[\"docs\"]),\n",
|
||||||
|
" \"question\": itemgetter(\"question\")\n",
|
||||||
|
"}\n",
|
||||||
|
"# And finally, we do the part that returns the answers\n",
|
||||||
|
"answer = {\n",
|
||||||
|
" \"answer\": final_inputs | ANSWER_PROMPT | ChatOpenAI(),\n",
|
||||||
|
" \"docs\": itemgetter(\"docs\"),\n",
|
||||||
|
"}\n",
|
||||||
|
"# And now we put it all together!\n",
|
||||||
|
"final_chain = loaded_memory | expanded_memory | standalone_question | retrieved_documents | answer"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 19,
|
||||||
|
"id": "806e390c",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"{'answer': AIMessage(content='Harrison was employed at Kensho.', additional_kwargs={}, example=False),\n",
|
||||||
|
" 'docs': [Document(page_content='harrison worked at kensho', metadata={})]}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 19,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"inputs = {\"question\": \"where did harrison work?\"}\n",
|
||||||
|
"result = final_chain.invoke(inputs)\n",
|
||||||
|
"result"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 20,
|
||||||
|
"id": "977399fd",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Note that the memory does not save automatically\n",
|
||||||
|
"# This will be improved in the future\n",
|
||||||
|
"# For now you need to save it yourself\n",
|
||||||
|
"memory.save_context(inputs, {\"answer\": result[\"answer\"].content})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 21,
|
||||||
|
"id": "f94f7de4",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"{'history': [HumanMessage(content='where did harrison work?', additional_kwargs={}, example=False),\n",
|
||||||
|
" AIMessage(content='Harrison was employed at Kensho.', additional_kwargs={}, example=False)]}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 21,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"memory.load_memory_variables({})"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "poetry-venv",
|
||||||
|
"language": "python",
|
||||||
|
"name": "poetry-venv"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
@@ -0,0 +1,227 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "raw",
|
||||||
|
"id": "c14da114-1a4a-487d-9cff-e0e8c30ba366",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"---\n",
|
||||||
|
"sidebar_position: 3\n",
|
||||||
|
"title: Querying a SQL DB\n",
|
||||||
|
"---"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "506e9636",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"We can replicate our SQLDatabaseChain with Runnables."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 1,
|
||||||
|
"id": "7a927516",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.prompts import ChatPromptTemplate\n",
|
||||||
|
"\n",
|
||||||
|
"template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
|
||||||
|
"{schema}\n",
|
||||||
|
"\n",
|
||||||
|
"Question: {question}\n",
|
||||||
|
"SQL Query:\"\"\"\n",
|
||||||
|
"prompt = ChatPromptTemplate.from_template(template)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"id": "3f51f386",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.utilities import SQLDatabase"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "7c3449d6-684b-416e-ba16-90a035835a88",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"We'll need the Chinook sample DB for this example. There's many places to download it from, e.g. https://database.guide/2-sample-databases-sqlite/"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 20,
|
||||||
|
"id": "2ccca6fc",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"db = SQLDatabase.from_uri(\"sqlite:///./Chinook.db\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 21,
|
||||||
|
"id": "05ba88ee",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def get_schema(_):\n",
|
||||||
|
" return db.get_table_info()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 22,
|
||||||
|
"id": "a4eda902",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"def run_query(query):\n",
|
||||||
|
" return db.run(query)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 23,
|
||||||
|
"id": "5046cb17",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from operator import itemgetter\n",
|
||||||
|
"\n",
|
||||||
|
"from langchain.chat_models import ChatOpenAI\n",
|
||||||
|
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||||
|
"from langchain.schema.runnable import RunnableLambda, RunnableMap\n",
|
||||||
|
"\n",
|
||||||
|
"model = ChatOpenAI()\n",
|
||||||
|
"\n",
|
||||||
|
"inputs = {\n",
|
||||||
|
" \"schema\": RunnableLambda(get_schema),\n",
|
||||||
|
" \"question\": itemgetter(\"question\")\n",
|
||||||
|
"}\n",
|
||||||
|
"sql_response = (\n",
|
||||||
|
" RunnableMap(inputs)\n",
|
||||||
|
" | prompt\n",
|
||||||
|
" | model.bind(stop=[\"\\nSQLResult:\"])\n",
|
||||||
|
" | StrOutputParser()\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 24,
|
||||||
|
"id": "a5552039",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"'SELECT COUNT(*) FROM Employee'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 24,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"sql_response.invoke({\"question\": \"How many employees are there?\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 25,
|
||||||
|
"id": "d6fee130",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"template = \"\"\"Based on the table schema below, question, sql query, and sql response, write a natural language response:\n",
|
||||||
|
"{schema}\n",
|
||||||
|
"\n",
|
||||||
|
"Question: {question}\n",
|
||||||
|
"SQL Query: {query}\n",
|
||||||
|
"SQL Response: {response}\"\"\"\n",
|
||||||
|
"prompt_response = ChatPromptTemplate.from_template(template)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 26,
|
||||||
|
"id": "923aa634",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"full_chain = (\n",
|
||||||
|
" RunnableMap({\n",
|
||||||
|
" \"question\": itemgetter(\"question\"),\n",
|
||||||
|
" \"query\": sql_response,\n",
|
||||||
|
" }) \n",
|
||||||
|
" | {\n",
|
||||||
|
" \"schema\": RunnableLambda(get_schema),\n",
|
||||||
|
" \"question\": itemgetter(\"question\"),\n",
|
||||||
|
" \"query\": itemgetter(\"query\"),\n",
|
||||||
|
" \"response\": lambda x: db.run(x[\"query\"]) \n",
|
||||||
|
" } \n",
|
||||||
|
" | prompt_response \n",
|
||||||
|
" | model\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 27,
|
||||||
|
"id": "e94963d8",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"AIMessage(content='There are 8 employees.', additional_kwargs={}, example=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 27,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"full_chain.invoke({\"question\": \"How many employees are there?\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "4f358d7b-a721-4db3-9f92-f06913428afc",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
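To make the data flow in the two chains above explicit, here is a minimal sketch of the same two-step flow written as plain function calls. It is an illustration added here, not part of the original notebook, and it assumes the `prompt`, `prompt_response`, `model`, and `db` objects defined in the cells above.

```python
from langchain.schema.output_parser import StrOutputParser

def answer_with_sql(question: str) -> str:
    # Same inputs the RunnableMap assembles: the table schema and the question.
    schema = db.get_table_info()

    # Step 1 (mirrors `sql_response`): ask the model for a SQL query,
    # stopping before it starts inventing a "SQLResult:" section.
    sql_message = model.invoke(
        prompt.format_messages(schema=schema, question=question),
        stop=["\nSQLResult:"],
    )
    query = StrOutputParser().invoke(sql_message)

    # Step 2 (mirrors `full_chain`): run the query and phrase the answer.
    response = db.run(query)
    answer = model.invoke(
        prompt_response.format_messages(
            schema=schema, question=question, query=query, response=response
        )
    )
    return answer.content

# answer_with_sql("How many employees are there?")
```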
|
@ -0,0 +1,122 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "29781123",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Using tools\n",
|
||||||
|
"\n",
|
||||||
|
"You can use any Tools with Runnables easily."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "a5c579dd-2e22-41b0-a789-346dfdecb5a2",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"!pip install duckduckgo-search"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"id": "9232d2a9",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.chat_models import ChatOpenAI\n",
|
||||||
|
"from langchain.prompts import ChatPromptTemplate\n",
|
||||||
|
"from langchain.schema.output_parser import StrOutputParser\n",
|
||||||
|
"from langchain.tools import DuckDuckGoSearchRun"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"id": "a0c64d2c",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"search = DuckDuckGoSearchRun()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"id": "391969b6",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"template = \"\"\"turn the following user input into a search query for a search engine:\n",
|
||||||
|
"\n",
|
||||||
|
"{input}\"\"\"\n",
|
||||||
|
"prompt = ChatPromptTemplate.from_template(template)\n",
|
||||||
|
"\n",
|
||||||
|
"model = ChatOpenAI()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"id": "e3d9d20d",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"chain = prompt | model | StrOutputParser() | search"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"id": "55f2967d",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"'What sports games are on TV today & tonight? Watch and stream live sports on TV today, tonight, tomorrow. Today\\'s 2023 sports TV schedule includes football, basketball, baseball, hockey, motorsports, soccer and more. Watch on TV or stream online on ESPN, FOX, FS1, CBS, NBC, ABC, Peacock, Paramount+, fuboTV, local channels and many other networks. MLB Games Tonight: How to Watch on TV, Streaming & Odds - Thursday, September 7. Seattle Mariners\\' Julio Rodriguez greets teammates in the dugout after scoring against the Oakland Athletics in a ... Circle - Country Music and Lifestyle. Live coverage of all the MLB action today is available to you, with the information provided below. The Brewers will look to pick up a road win at PNC Park against the Pirates on Wednesday at 12:35 PM ET. Check out the latest odds and with BetMGM Sportsbook. Use bonus code \"GNPLAY\" for special offers! MLB Games Tonight: How to Watch on TV, Streaming & Odds - Tuesday, September 5. Houston Astros\\' Kyle Tucker runs after hitting a double during the fourth inning of a baseball game against the Los Angeles Angels, Sunday, Aug. 13, 2023, in Houston. (AP Photo/Eric Christian Smith) (APMedia) The Houston Astros versus the Texas Rangers is one of ... The second half of tonight\\'s college football schedule still has some good games remaining to watch on your television.. We\\'ve already seen an exciting one when Colorado upset TCU. And we saw some ...'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 9,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"input\": \"I'd like to figure out what games are tonight\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "a16949cf-00ea-43c6-a6aa-797ad4f6918d",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "poetry-venv",
|
||||||
|
"language": "python",
|
||||||
|
"name": "poetry-venv"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
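As a hedged extension of the chain above (not part of the original notebook), the raw search results can themselves be piped into a second prompt to produce a condensed answer; `chain` and `model` are the objects defined in the preceding cells.

```python
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

summarize_prompt = ChatPromptTemplate.from_template(
    "Summarize the following search results in two sentences:\n\n{results}"
)

# The dict is coerced into a RunnableMap, so the original search chain
# supplies the `results` variable for the summarization prompt.
summarizing_chain = {"results": chain} | summarize_prompt | model | StrOutputParser()

# summarizing_chain.invoke({"input": "I'd like to figure out what games are tonight"})
```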
|
@ -0,0 +1,2 @@
|
|||||||
|
label: 'How to'
|
||||||
|
position: 1
|
@ -0,0 +1,158 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "fbc4bf6e",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Run arbitrary functions\n",
|
||||||
|
"\n",
|
||||||
|
"You can use arbitrary functions in the pipeline\n",
|
||||||
|
"\n",
|
||||||
|
"Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single input and unpacks it into multiple argument."
|
||||||
|
]
|
||||||
|
},
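A minimal, generic version of that wrapper pattern, added here as an illustration (not part of the original notebook): it adapts any keyword-argument function to the single-dict input that `RunnableLambda` expects.

```python
from langchain.schema.runnable import RunnableLambda

def as_single_input(fn):
    """Wrap a multi-argument function so it can be invoked with one dict."""
    return RunnableLambda(lambda inputs: fn(**inputs))

def multiply_lengths(text1: str, text2: str) -> int:
    return len(text1) * len(text2)

runnable = as_single_input(multiply_lengths)
# runnable.invoke({"text1": "foo", "text2": "barbaz"})  # -> 18
```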
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 77,
|
||||||
|
"id": "6bb221b3",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.schema.runnable import RunnableLambda\n",
|
||||||
|
"\n",
|
||||||
|
"def length_function(text):\n",
|
||||||
|
" return len(text)\n",
|
||||||
|
"\n",
|
||||||
|
"def _multiple_length_function(text1, text2):\n",
|
||||||
|
" return len(text1) * len(text2)\n",
|
||||||
|
"\n",
|
||||||
|
"def multiple_length_function(_dict):\n",
|
||||||
|
" return _multiple_length_function(_dict[\"text1\"], _dict[\"text2\"])\n",
|
||||||
|
"\n",
|
||||||
|
"prompt = ChatPromptTemplate.from_template(\"what is {a} + {b}\")\n",
|
||||||
|
"\n",
|
||||||
|
"chain1 = prompt | model\n",
|
||||||
|
"\n",
|
||||||
|
"chain = {\n",
|
||||||
|
" \"a\": itemgetter(\"foo\") | RunnableLambda(length_function),\n",
|
||||||
|
" \"b\": {\"text1\": itemgetter(\"foo\"), \"text2\": itemgetter(\"bar\")} | RunnableLambda(multiple_length_function)\n",
|
||||||
|
"} | prompt | model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 78,
|
||||||
|
"id": "5488ec85",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"AIMessage(content='3 + 9 equals 12.', additional_kwargs={}, example=False)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 78,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"chain.invoke({\"foo\": \"bar\", \"bar\": \"gah\"})"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "4728ddd9-914d-42ce-ae9b-72c9ce8ec940",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Accepting a Runnable Config\n",
|
||||||
|
"\n",
|
||||||
|
"Runnable lambdas can optionally accept a [RunnableConfig](https://api.python.langchain.com/en/latest/schema/langchain.schema.runnable.config.RunnableConfig.html?highlight=runnableconfig#langchain.schema.runnable.config.RunnableConfig), which they can use to pass callbacks, tags, and other configuration information to nested runs."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 139,
|
||||||
|
"id": "80b3b5f6-5d58-44b9-807e-cce9a46bf49f",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.schema.runnable import RunnableConfig"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 149,
|
||||||
|
"id": "ff0daf0c-49dd-4d21-9772-e5fa133c5f36",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import json\n",
|
||||||
|
"\n",
|
||||||
|
"def parse_or_fix(text: str, config: RunnableConfig):\n",
|
||||||
|
" fixing_chain = (\n",
|
||||||
|
" ChatPromptTemplate.from_template(\n",
|
||||||
|
" \"Fix the following text:\\n\\n```text\\n{input}\\n```\\nError: {error}\"\n",
|
||||||
|
" \" Don't narrate, just respond with the fixed data.\"\n",
|
||||||
|
" )\n",
|
||||||
|
" | ChatOpenAI()\n",
|
||||||
|
" | StrOutputParser()\n",
|
||||||
|
" )\n",
|
||||||
|
" for _ in range(3):\n",
|
||||||
|
" try:\n",
|
||||||
|
" return json.loads(text)\n",
|
||||||
|
" except Exception as e:\n",
|
||||||
|
" text = fixing_chain.invoke({\"input\": text, \"error\": e}, config)\n",
|
||||||
|
" return \"Failed to parse\""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 152,
|
||||||
|
"id": "1a5e709e-9d75-48c7-bb9c-503251990505",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Tokens Used: 65\n",
|
||||||
|
"\tPrompt Tokens: 56\n",
|
||||||
|
"\tCompletion Tokens: 9\n",
|
||||||
|
"Successful Requests: 1\n",
|
||||||
|
"Total Cost (USD): $0.00010200000000000001\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"from langchain.callbacks import get_openai_callback\n",
|
||||||
|
"\n",
|
||||||
|
"with get_openai_callback() as cb:\n",
|
||||||
|
" RunnableLambda(parse_or_fix).invoke(\"{foo: bar}\", {\"tags\": [\"my-tag\"], \"callbacks\": [cb]})\n",
|
||||||
|
" print(cb)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
@ -0,0 +1,520 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Mutli-language data anonymization with Microsoft Presidio\n",
|
||||||
|
"\n",
|
||||||
|
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/guides/privacy/presidio_data_anonymization/multi_language.ipynb)\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"## Use case\n",
|
||||||
|
"\n",
|
||||||
|
"Multi-language support in data pseudonymization is essential due to differences in language structures and cultural contexts. Different languages may have varying formats for personal identifiers. For example, the structure of names, locations and dates can differ greatly between languages and regions. Furthermore, non-alphanumeric characters, accents, and the direction of writing can impact pseudonymization processes. Without multi-language support, data could remain identifiable or be misinterpreted, compromising data privacy and accuracy. Hence, it enables effective and precise pseudonymization suited for global operations.\n",
|
||||||
|
"\n",
|
||||||
|
"## Overview\n",
|
||||||
|
"\n",
|
||||||
|
"PII detection in Microsoft Presidio relies on several components - in addition to the usual pattern matching (e.g. using regex), the analyser uses a model for Named Entity Recognition (NER) to extract entities such as:\n",
|
||||||
|
"- `PERSON`\n",
|
||||||
|
"- `LOCATION`\n",
|
||||||
|
"- `DATE_TIME`\n",
|
||||||
|
"- `NRP`\n",
|
||||||
|
"- `ORGANIZATION`\n",
|
||||||
|
"\n",
|
||||||
|
"[[Source]](https://github.com/microsoft/presidio/blob/main/presidio-analyzer/presidio_analyzer/predefined_recognizers/spacy_recognizer.py)\n",
|
||||||
|
"\n",
|
||||||
|
"To handle NER in specific languages, we utilize unique models from the `spaCy` library, recognized for its extensive selection covering multiple languages and sizes. However, it's not restrictive, allowing for integration of alternative frameworks such as [Stanza](https://microsoft.github.io/presidio/analyzer/nlp_engines/spacy_stanza/) or [transformers](https://microsoft.github.io/presidio/analyzer/nlp_engines/transformers/) when necessary.\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"## Quickstart\n",
|
||||||
|
"\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 1,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Install necessary packages\n",
|
||||||
|
"# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker\n",
|
||||||
|
"# ! python -m spacy download en_core_web_lg"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer\n",
|
||||||
|
"\n",
|
||||||
|
"anonymizer = PresidioReversibleAnonymizer(\n",
|
||||||
|
" analyzed_fields=[\"PERSON\"],\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"By default, `PresidioAnonymizer` and `PresidioReversibleAnonymizer` use a model trained on English texts, so they handle other languages moderately well. \n",
|
||||||
|
"\n",
|
||||||
|
"For example, here the model did not detect the person:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 3,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"'Me llamo Sofía'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 3,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"anonymizer.anonymize(\"Me llamo Sofía\") # \"My name is Sofía\" in Spanish"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"They may also take words from another language as actual entities. Here, both the word *'Yo'* (*'I'* in Spanish) and *Sofía* have been classified as `PERSON`:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"'Bridget Kirk soy Sally Knight'"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 4,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"anonymizer.anonymize(\"Yo soy Sofía\") # \"I am Sofía\" in Spanish"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"If you want to anonymise texts from other languages, you need to download other models and add them to the anonymiser configuration:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 5,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# Download the models for the languages you want to use\n",
|
||||||
|
"# ! python -m spacy download en_core_web_md\n",
|
||||||
|
"# ! python -m spacy download es_core_news_md"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"nlp_config = {\n",
|
||||||
|
" \"nlp_engine_name\": \"spacy\",\n",
|
||||||
|
" \"models\": [\n",
|
||||||
|
" {\"lang_code\": \"en\", \"model_name\": \"en_core_web_md\"},\n",
|
||||||
|
" {\"lang_code\": \"es\", \"model_name\": \"es_core_news_md\"},\n",
|
||||||
|
" ],\n",
|
||||||
|
"}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"We have therefore added a Spanish language model. Note also that we have downloaded an alternative model for English as well - in this case we have replaced the large model `en_core_web_lg` (560MB) with its smaller version `en_core_web_md` (40MB) - the size is therefore reduced by 14 times! If you care about the speed of anonymisation, it is worth considering it.\n",
|
||||||
|
"\n",
|
||||||
|
"All models for the different languages can be found in the [spaCy documentation](https://spacy.io/usage/models).\n",
|
||||||
|
"\n",
|
||||||
|
"Now pass the configuration as the `languages_config` parameter to Anonymiser. As you can see, both previous examples work flawlessly:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Me llamo Michelle Smith\n",
|
||||||
|
"Yo soy Rachel Wright\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"anonymizer = PresidioReversibleAnonymizer(\n",
|
||||||
|
" analyzed_fields=[\"PERSON\"],\n",
|
||||||
|
" languages_config=nlp_config,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\n",
|
||||||
|
" anonymizer.anonymize(\"Me llamo Sofía\", language=\"es\")\n",
|
||||||
|
") # \"My name is Sofía\" in Spanish\n",
|
||||||
|
"print(anonymizer.anonymize(\"Yo soy Sofía\", language=\"es\")) # \"I am Sofía\" in Spanish"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"By default, the language indicated first in the configuration will be used when anonymising text (in this case English):"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"My name is Ronnie Ayala\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"print(anonymizer.anonymize(\"My name is John\"))"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Advanced usage\n",
|
||||||
|
"\n",
|
||||||
|
"### Custom labels in NER model"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"It may be that the spaCy model has different class names than those supported by the Microsoft Presidio by default. Take Polish, for example:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Text: Wiktoria, Start: 12, End: 20, Label: persName\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# ! python -m spacy download pl_core_news_md\n",
|
||||||
|
"\n",
|
||||||
|
"import spacy\n",
|
||||||
|
"\n",
|
||||||
|
"nlp = spacy.load(\"pl_core_news_md\")\n",
|
||||||
|
"doc = nlp(\"Nazywam się Wiktoria\") # \"My name is Wiktoria\" in Polish\n",
|
||||||
|
"\n",
|
||||||
|
"for ent in doc.ents:\n",
|
||||||
|
" print(\n",
|
||||||
|
" f\"Text: {ent.text}, Start: {ent.start_char}, End: {ent.end_char}, Label: {ent.label_}\"\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The name *Victoria* was classified as `persName`, which does not correspond to the default class names `PERSON`/`PER` implemented in Microsoft Presidio (look for `CHECK_LABEL_GROUPS` in [SpacyRecognizer implementation](https://github.com/microsoft/presidio/blob/main/presidio-analyzer/presidio_analyzer/predefined_recognizers/spacy_recognizer.py)). \n",
|
||||||
|
"\n",
|
||||||
|
"You can find out more about custom labels in spaCy models (including your own, trained ones) in [this thread](https://github.com/microsoft/presidio/issues/851).\n",
|
||||||
|
"\n",
|
||||||
|
"That's why our sentence will not be anonymized:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 10,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Nazywam się Wiktoria\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"nlp_config = {\n",
|
||||||
|
" \"nlp_engine_name\": \"spacy\",\n",
|
||||||
|
" \"models\": [\n",
|
||||||
|
" {\"lang_code\": \"en\", \"model_name\": \"en_core_web_md\"},\n",
|
||||||
|
" {\"lang_code\": \"es\", \"model_name\": \"es_core_news_md\"},\n",
|
||||||
|
" {\"lang_code\": \"pl\", \"model_name\": \"pl_core_news_md\"},\n",
|
||||||
|
" ],\n",
|
||||||
|
"}\n",
|
||||||
|
"\n",
|
||||||
|
"anonymizer = PresidioReversibleAnonymizer(\n",
|
||||||
|
" analyzed_fields=[\"PERSON\", \"LOCATION\", \"DATE_TIME\"],\n",
|
||||||
|
" languages_config=nlp_config,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\n",
|
||||||
|
" anonymizer.anonymize(\"Nazywam się Wiktoria\", language=\"pl\")\n",
|
||||||
|
") # \"My name is Wiktoria\" in Polish"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"To address this, create your own `SpacyRecognizer` with your own class mapping and add it to the anonymizer:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 11,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from presidio_analyzer.predefined_recognizers import SpacyRecognizer\n",
|
||||||
|
"\n",
|
||||||
|
"polish_check_label_groups = [\n",
|
||||||
|
" ({\"LOCATION\"}, {\"placeName\", \"geogName\"}),\n",
|
||||||
|
" ({\"PERSON\"}, {\"persName\"}),\n",
|
||||||
|
" ({\"DATE_TIME\"}, {\"date\", \"time\"}),\n",
|
||||||
|
"]\n",
|
||||||
|
"\n",
|
||||||
|
"spacy_recognizer = SpacyRecognizer(\n",
|
||||||
|
" supported_language=\"pl\",\n",
|
||||||
|
" check_label_groups=polish_check_label_groups,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"anonymizer.add_recognizer(spacy_recognizer)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Now everything works smoothly:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 12,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Nazywam się Morgan Walters\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"print(\n",
|
||||||
|
" anonymizer.anonymize(\"Nazywam się Wiktoria\", language=\"pl\")\n",
|
||||||
|
") # \"My name is Wiktoria\" in Polish"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Let's try on more complex example:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 13,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Nazywam się Ernest Liu. New Taylorburgh to moje miasto rodzinne. Urodziłam się 1987-01-19\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"print(\n",
|
||||||
|
" anonymizer.anonymize(\n",
|
||||||
|
" \"Nazywam się Wiktoria. Płock to moje miasto rodzinne. Urodziłam się dnia 6 kwietnia 2001 roku\",\n",
|
||||||
|
" language=\"pl\",\n",
|
||||||
|
" )\n",
|
||||||
|
") # \"My name is Wiktoria. Płock is my home town. I was born on 6 April 2001\" in Polish"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"As you can see, thanks to class mapping, the anonymiser can cope with different types of entities. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Custom language-specific operators\n",
|
||||||
|
"\n",
|
||||||
|
"In the example above, the sentence has been anonymised correctly, but the fake data does not fit the Polish language at all. Custom operators can therefore be added, which will resolve the issue:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 14,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from faker import Faker\n",
|
||||||
|
"from presidio_anonymizer.entities import OperatorConfig\n",
|
||||||
|
"\n",
|
||||||
|
"fake = Faker(locale=\"pl_PL\") # Setting faker to provide Polish data\n",
|
||||||
|
"\n",
|
||||||
|
"new_operators = {\n",
|
||||||
|
" \"PERSON\": OperatorConfig(\"custom\", {\"lambda\": lambda _: fake.first_name_female()}),\n",
|
||||||
|
" \"LOCATION\": OperatorConfig(\"custom\", {\"lambda\": lambda _: fake.city()}),\n",
|
||||||
|
"}\n",
|
||||||
|
"\n",
|
||||||
|
"anonymizer.add_operators(new_operators)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 15,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Nazywam się Marianna. Szczecin to moje miasto rodzinne. Urodziłam się 1976-11-16\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"print(\n",
|
||||||
|
" anonymizer.anonymize(\n",
|
||||||
|
" \"Nazywam się Wiktoria. Płock to moje miasto rodzinne. Urodziłam się dnia 6 kwietnia 2001 roku\",\n",
|
||||||
|
" language=\"pl\",\n",
|
||||||
|
" )\n",
|
||||||
|
") # \"My name is Wiktoria. Płock is my home town. I was born on 6 April 2001\" in Polish"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Limitations\n",
|
||||||
|
"\n",
|
||||||
|
"Remember - results are as good as your recognizers and as your NER models!\n",
|
||||||
|
"\n",
|
||||||
|
"Look at the example below - we downloaded the small model for Spanish (12MB) and it no longer performs as well as the medium version (40MB):"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 16,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Model: es_core_news_sm. Result: Me llamo Sofía\n",
|
||||||
|
"Model: es_core_news_md. Result: Me llamo Lawrence Davis\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# ! python -m spacy download es_core_news_sm\n",
|
||||||
|
"\n",
|
||||||
|
"for model in [\"es_core_news_sm\", \"es_core_news_md\"]:\n",
|
||||||
|
" nlp_config = {\n",
|
||||||
|
" \"nlp_engine_name\": \"spacy\",\n",
|
||||||
|
" \"models\": [\n",
|
||||||
|
" {\"lang_code\": \"es\", \"model_name\": model},\n",
|
||||||
|
" ],\n",
|
||||||
|
" }\n",
|
||||||
|
"\n",
|
||||||
|
" anonymizer = PresidioReversibleAnonymizer(\n",
|
||||||
|
" analyzed_fields=[\"PERSON\"],\n",
|
||||||
|
" languages_config=nlp_config,\n",
|
||||||
|
" )\n",
|
||||||
|
"\n",
|
||||||
|
" print(\n",
|
||||||
|
" f\"Model: {model}. Result: {anonymizer.anonymize('Me llamo Sofía', language='es')}\"\n",
|
||||||
|
" )"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"In many cases, even the larger models from spaCy will not be sufficient - there are already other, more complex and better methods of detecting named entities, based on transformers. You can read more about this [here](https://microsoft.github.io/presidio/analyzer/nlp_engines/transformers/)."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Future works\n",
|
||||||
|
"\n",
|
||||||
|
"- **automatic language detection** - instead of passing the language as a parameter in `anonymizer.anonymize`, we could detect the language/s beforehand and then use the corresponding NER model."
|
||||||
|
]
|
||||||
|
}
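A minimal sketch of that idea, added here as an illustration (not part of the original notebook). It uses the `langdetect` package as an assumed language detector and the `anonymizer`/`nlp_config` set up above.

```python
# ! pip install langdetect
from langdetect import detect

SUPPORTED_LANGUAGES = {"en", "es", "pl"}  # the languages configured in nlp_config

def anonymize_auto(text: str) -> str:
    detected = detect(text)  # e.g. "en", "es", "pl"
    language = detected if detected in SUPPORTED_LANGUAGES else "en"
    return anonymizer.anonymize(text, language=language)

# anonymize_auto("Nazywam się Wiktoria")  # detected as "pl"
```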
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.9.1"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}
|
@ -1,20 +1,24 @@
|
|||||||
# ModelScope
|
# ModelScope
|
||||||
|
|
||||||
|
>[ModelScope](https://www.modelscope.cn/home) is a large repository of models and datasets.
|
||||||
|
|
||||||
This page covers how to use the modelscope ecosystem within LangChain.
|
This page covers how to use the modelscope ecosystem within LangChain.
|
||||||
It is broken into two parts: installation and setup, and then references to specific modelscope wrappers.
|
It is broken into two parts: installation and setup, and then references to specific modelscope wrappers.
|
||||||
|
|
||||||
## Installation and Setup
|
## Installation and Setup
|
||||||
|
|
||||||
* Install the Python SDK with `pip install modelscope`
|
Install the `modelscope` package.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install modelscope
|
||||||
|
```
|
||||||
|
|
||||||
## Wrappers
|
|
||||||
|
|
||||||
### Embeddings
|
## Text Embedding Models
|
||||||
|
|
||||||
There exists a modelscope Embeddings wrapper, which you can access with
|
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from langchain.embeddings import ModelScopeEmbeddings
|
from langchain.embeddings import ModelScopeEmbeddings
|
||||||
```
|
```
|
||||||
|
|
||||||
For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/modelscope_hub.html)
|
For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/modelscope_hub)
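A short usage sketch (the `model_id` shown is an assumption - pick whichever ModelScope embedding model you want to use):

```python
from langchain.embeddings import ModelScopeEmbeddings

embeddings = ModelScopeEmbeddings(model_id="damo/nlp_corom_sentence-embedding_english-base")

query_vector = embeddings.embed_query("What is ModelScope?")
doc_vectors = embeddings.embed_documents(["ModelScope hosts models.", "It also hosts datasets."])
```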
|
||||||
|
@ -1,17 +1,31 @@
|
|||||||
# NLPCloud
|
# NLPCloud
|
||||||
|
|
||||||
This page covers how to use the NLPCloud ecosystem within LangChain.
|
>[NLP Cloud](https://docs.nlpcloud.com/#introduction) is an artificial intelligence platform that allows you to use the most advanced AI engines, and even train your own engines with your own data.
|
||||||
It is broken into two parts: installation and setup, and then references to specific NLPCloud wrappers.
|
|
||||||
|
|
||||||
## Installation and Setup
|
## Installation and Setup
|
||||||
- Install the Python SDK with `pip install nlpcloud`
|
|
||||||
|
- Install the `nlpcloud` package.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install nlpcloud
|
||||||
|
```
|
||||||
|
|
||||||
- Get an NLPCloud api key and set it as an environment variable (`NLPCLOUD_API_KEY`)
|
- Get an NLPCloud api key and set it as an environment variable (`NLPCLOUD_API_KEY`)
|
||||||
|
|
||||||
## Wrappers
|
|
||||||
|
|
||||||
### LLM
|
## LLM
|
||||||
|
|
||||||
|
See a [usage example](/docs/integrations/llms/nlpcloud).
|
||||||
|
|
||||||
There exists an NLPCloud LLM wrapper, which you can access with
|
|
||||||
```python
|
```python
|
||||||
from langchain.llms import NLPCloud
|
from langchain.llms import NLPCloud
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Text Embedding Models
|
||||||
|
|
||||||
|
See a [usage example](/docs/integrations/text_embedding/nlp_cloud)
|
||||||
|
|
||||||
|
```python
|
||||||
|
from langchain.embeddings import NLPCloudEmbeddings
|
||||||
|
```
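A short usage sketch for both wrappers (illustrative only; it assumes the `NLPCLOUD_API_KEY` environment variable is already set and relies on the wrappers' default model choices):

```python
from langchain.llms import NLPCloud
from langchain.embeddings import NLPCloudEmbeddings

llm = NLPCloud()  # reads NLPCLOUD_API_KEY from the environment
print(llm("Write a one-line summary of what NLP Cloud does."))

embeddings = NLPCloudEmbeddings()
vector = embeddings.embed_query("NLP Cloud provides hosted AI engines.")
```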
|
||||||
|
@ -0,0 +1,587 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "13afcae7",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Supabase Vector self-querying \n",
|
||||||
|
"\n",
|
||||||
|
">[Supabase](https://supabase.com/docs) is an open source `Firebase` alternative. \n",
|
||||||
|
"> `Supabase` is built on top of `PostgreSQL`, which offers strong `SQL` \n",
|
||||||
|
"> querying capabilities and enables a simple interface with already-existing tools and frameworks.\n",
|
||||||
|
"\n",
|
||||||
|
">[PostgreSQL](https://en.wikipedia.org/wiki/PostgreSQL) also known as `Postgres`,\n",
|
||||||
|
"> is a free and open-source relational database management system (RDBMS) \n",
|
||||||
|
"> emphasizing extensibility and `SQL` compliance.\n",
|
||||||
|
"\n",
|
||||||
|
"In the notebook we'll demo the `SelfQueryRetriever` wrapped around a Supabase vector store.\n",
|
||||||
|
"\n",
|
||||||
|
"Specifically we will:\n",
|
||||||
|
"1. Create a Supabase database\n",
|
||||||
|
"2. Enable the `pgvector` extension\n",
|
||||||
|
"3. Create a `documents` table and `match_documents` function that will be used by `SupabaseVectorStore`\n",
|
||||||
|
"4. Load sample documents into the vector store (database table)\n",
|
||||||
|
"5. Build and test a self-querying retriever"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "347935ad",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Setup Supabase Database\n",
|
||||||
|
"\n",
|
||||||
|
"1. Head over to https://database.new to provision your Supabase database.\n",
|
||||||
|
"2. In the studio, jump to the [SQL editor](https://supabase.com/dashboard/project/_/sql/new) and run the following script to enable `pgvector` and setup your database as a vector store:\n",
|
||||||
|
" ```sql\n",
|
||||||
|
" -- Enable the pgvector extension to work with embedding vectors\n",
|
||||||
|
" create extension if not exists vector;\n",
|
||||||
|
"\n",
|
||||||
|
" -- Create a table to store your documents\n",
|
||||||
|
" create table\n",
|
||||||
|
" documents (\n",
|
||||||
|
" id uuid primary key,\n",
|
||||||
|
" content text, -- corresponds to Document.pageContent\n",
|
||||||
|
" metadata jsonb, -- corresponds to Document.metadata\n",
|
||||||
|
" embedding vector (1536) -- 1536 works for OpenAI embeddings, change if needed\n",
|
||||||
|
" );\n",
|
||||||
|
"\n",
|
||||||
|
" -- Create a function to search for documents\n",
|
||||||
|
" create function match_documents (\n",
|
||||||
|
" query_embedding vector (1536),\n",
|
||||||
|
" filter jsonb default '{}'\n",
|
||||||
|
" ) returns table (\n",
|
||||||
|
" id uuid,\n",
|
||||||
|
" content text,\n",
|
||||||
|
" metadata jsonb,\n",
|
||||||
|
" similarity float\n",
|
||||||
|
" ) language plpgsql as $$\n",
|
||||||
|
" #variable_conflict use_column\n",
|
||||||
|
" begin\n",
|
||||||
|
" return query\n",
|
||||||
|
" select\n",
|
||||||
|
" id,\n",
|
||||||
|
" content,\n",
|
||||||
|
" metadata,\n",
|
||||||
|
" 1 - (documents.embedding <=> query_embedding) as similarity\n",
|
||||||
|
" from documents\n",
|
||||||
|
" where metadata @> filter\n",
|
||||||
|
" order by documents.embedding <=> query_embedding;\n",
|
||||||
|
" end;\n",
|
||||||
|
" $$;\n",
|
||||||
|
" ```"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "68e75fb9",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Creating a Supabase vector store\n",
|
||||||
|
"Next we'll want to create a Supabase vector store and seed it with some data. We've created a small demo set of documents that contain summaries of movies.\n",
|
||||||
|
"\n",
|
||||||
|
"Be sure to install the latest version of `langchain`:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "78546fd7",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%pip install langchain"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "e06df198",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The self-query retriever requires you to have `lark` installed:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "63a8af5b",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%pip install lark"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "114f768f",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"We also need the `openai` and `supabase` packages:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "434ae558",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%pip install openai"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "22431060-52c4-48a7-a97b-9f542b8b0928",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%pip install supabase==1.0.0"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "83811610-7df3-4ede-b268-68a6a83ba9e2",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Since we are using `SupabaseVectorStore` and `OpenAIEmbeddings`, we have to load their API keys.\n",
|
||||||
|
"\n",
|
||||||
|
"- To find your `SUPABASE_URL` and `SUPABASE_SERVICE_KEY`, head to your Supabase project's [API settings](https://supabase.com/dashboard/project/_/settings/api).\n",
|
||||||
|
" - `SUPABASE_URL` corresponds to the Project URL\n",
|
||||||
|
" - `SUPABASE_SERVICE_KEY` corresponds to the `service_role` API key\n",
|
||||||
|
"\n",
|
||||||
|
"- To get your `OPENAI_API_KEY`, navigate to [API keys](https://platform.openai.com/account/api-keys) on your OpenAI account and create a new secret key."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 1,
|
||||||
|
"id": "dd01b61b-7d32-4a55-85d6-b2d2d4f18840",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"import getpass\n",
|
||||||
|
"\n",
|
||||||
|
"os.environ[\"SUPABASE_URL\"] = getpass.getpass(\"Supabase URL:\")\n",
|
||||||
|
"os.environ[\"SUPABASE_SERVICE_KEY\"] = getpass.getpass(\"Supabase Service Key:\")\n",
|
||||||
|
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "3aaf5075",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"_Optional:_ If you're storing your Supabase and OpenAI API keys in a `.env` file, you can load them with [`dotenv`](https://github.com/theskumar/python-dotenv)."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "e0089221",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"%pip install python-dotenv"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "3d56c5ef",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from dotenv import load_dotenv\n",
|
||||||
|
"\n",
|
||||||
|
"load_dotenv()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "f6dd9aef",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"First we'll create a Supabase client and instantiate a OpenAI embeddings class."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"id": "cb4a5787",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"from supabase.client import Client, create_client\n",
|
||||||
|
"from langchain.schema import Document\n",
|
||||||
|
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||||
|
"from langchain.vectorstores import SupabaseVectorStore\n",
|
||||||
|
"\n",
|
||||||
|
"supabase_url = os.environ.get(\"SUPABASE_URL\")\n",
|
||||||
|
"supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n",
|
||||||
|
"supabase: Client = create_client(supabase_url, supabase_key)\n",
|
||||||
|
"\n",
|
||||||
|
"embeddings = OpenAIEmbeddings()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "0fca9b0b",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Next let's create our documents."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 3,
|
||||||
|
"id": "bcbe04d9",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"docs = [\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n",
|
||||||
|
" metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": \"science fiction\"},\n",
|
||||||
|
" ),\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\",\n",
|
||||||
|
" metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2},\n",
|
||||||
|
" ),\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n",
|
||||||
|
" metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6},\n",
|
||||||
|
" ),\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n",
|
||||||
|
" metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3},\n",
|
||||||
|
" ),\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"Toys come alive and have a blast doing so\",\n",
|
||||||
|
" metadata={\"year\": 1995, \"genre\": \"animated\"},\n",
|
||||||
|
" ),\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"Three men walk into the Zone, three men walk out of the Zone\",\n",
|
||||||
|
" metadata={\n",
|
||||||
|
" \"year\": 1979,\n",
|
||||||
|
" \"rating\": 9.9,\n",
|
||||||
|
" \"director\": \"Andrei Tarkovsky\",\n",
|
||||||
|
" \"genre\": \"science fiction\",\n",
|
||||||
|
" \"rating\": 9.9,\n",
|
||||||
|
" },\n",
|
||||||
|
" ),\n",
|
||||||
|
"]\n",
|
||||||
|
"\n",
|
||||||
|
"vectorstore = SupabaseVectorStore.from_documents(docs, embeddings, client=supabase, table_name=\"documents\", query_name=\"match_documents\")"
|
||||||
|
]
|
||||||
|
},
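As a quick sanity check (an illustrative addition, not part of the original notebook), you can query the seeded vector store directly before building the self-query retriever:

```python
# Plain similarity search against the `documents` table we just populated.
matched_docs = vectorstore.similarity_search("dinosaur", k=2)
for doc in matched_docs:
    print(doc.page_content, doc.metadata)
```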
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "5ecaab6d",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Creating our self-querying retriever\n",
|
||||||
|
"Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"id": "86e34dbf",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.llms import OpenAI\n",
|
||||||
|
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||||
|
"from langchain.chains.query_constructor.base import AttributeInfo\n",
|
||||||
|
"\n",
|
||||||
|
"metadata_field_info = [\n",
|
||||||
|
" AttributeInfo(\n",
|
||||||
|
" name=\"genre\",\n",
|
||||||
|
" description=\"The genre of the movie\",\n",
|
||||||
|
" type=\"string or list[string]\",\n",
|
||||||
|
" ),\n",
|
||||||
|
" AttributeInfo(\n",
|
||||||
|
" name=\"year\",\n",
|
||||||
|
" description=\"The year the movie was released\",\n",
|
||||||
|
" type=\"integer\",\n",
|
||||||
|
" ),\n",
|
||||||
|
" AttributeInfo(\n",
|
||||||
|
" name=\"director\",\n",
|
||||||
|
" description=\"The name of the movie director\",\n",
|
||||||
|
" type=\"string\",\n",
|
||||||
|
" ),\n",
|
||||||
|
" AttributeInfo(\n",
|
||||||
|
" name=\"rating\", description=\"A 1-10 rating for the movie\", type=\"float\"\n",
|
||||||
|
" ),\n",
|
||||||
|
"]\n",
|
||||||
|
"document_content_description = \"Brief summary of a movie\"\n",
|
||||||
|
"llm = OpenAI(temperature=0)\n",
|
||||||
|
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||||
|
" llm, vectorstore, document_content_description, metadata_field_info, verbose=True\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "ea9df8d4",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Testing it out\n",
|
||||||
|
"And now we can try actually using our retriever!"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 5,
|
||||||
|
"id": "38a126e9",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query='dinosaur' filter=None limit=None\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'genre': 'science fiction', 'rating': 7.7}),\n",
|
||||||
|
" Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'}),\n",
|
||||||
|
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'genre': 'science fiction', 'rating': 9.9, 'director': 'Andrei Tarkovsky'}),\n",
|
||||||
|
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'rating': 8.6, 'director': 'Satoshi Kon'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 5,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example only specifies a relevant query\n",
|
||||||
|
"retriever.get_relevant_documents(\"What are some movies about dinosaurs\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"id": "fc3f1e6e",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query=' ' filter=Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5) limit=None\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'genre': 'science fiction', 'rating': 9.9, 'director': 'Andrei Tarkovsky'}),\n",
|
||||||
|
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'rating': 8.6, 'director': 'Satoshi Kon'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 7,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example only specifies a filter\n",
|
||||||
|
"retriever.get_relevant_documents(\"I want to watch a movie rated higher than 8.5\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"id": "b19d4da0",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig') limit=None\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'year': 2019, 'rating': 8.3, 'director': 'Greta Gerwig'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 9,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example specifies a query and a filter\n",
|
||||||
|
"retriever.get_relevant_documents(\"Has Greta Gerwig directed any movies about women?\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"id": "f900e40e",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query=' ' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GTE: 'gte'>, attribute='rating', value=8.5), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='science fiction')]) limit=None\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'genre': 'science fiction', 'rating': 9.9, 'director': 'Andrei Tarkovsky'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 8,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example specifies a composite filter\n",
|
||||||
|
"retriever.get_relevant_documents(\n",
|
||||||
|
" \"What's a highly rated (above 8.5) science fiction film?\"\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"id": "12a51522",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query='toys' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GT: 'gt'>, attribute='year', value=1990), Comparison(comparator=<Comparator.LTE: 'lte'>, attribute='year', value=2005), Comparison(comparator=<Comparator.LIKE: 'like'>, attribute='genre', value='animated')]) limit=None\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 9,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example specifies a query and composite filter\n",
|
||||||
|
"retriever.get_relevant_documents(\n",
|
||||||
|
" \"What's a movie after 1990 but before (or on) 2005 that's all about toys, and preferably is animated\"\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "39bd1de1-b9fe-4a98-89da-58d8a7a6ae51",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Filter k\n",
|
||||||
|
"\n",
|
||||||
|
"We can also use the self query retriever to specify `k`: the number of documents to fetch.\n",
|
||||||
|
"\n",
|
||||||
|
"We can do this by passing `enable_limit=True` to the constructor."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 10,
|
||||||
|
"id": "bff36b88-b506-4877-9c63-e5a1a8d78e64",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||||
|
" llm,\n",
|
||||||
|
" vectorstore,\n",
|
||||||
|
" document_content_description,\n",
|
||||||
|
" metadata_field_info,\n",
|
||||||
|
" enable_limit=True,\n",
|
||||||
|
" verbose=True,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 11,
|
||||||
|
"id": "2758d229-4f97-499c-819f-888acaf8ee10",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query='dinosaur' filter=None limit=2\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'genre': 'science fiction', 'rating': 7.7}),\n",
|
||||||
|
" Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 11,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example only specifies a relevant query\n",
|
||||||
|
"retriever.get_relevant_documents(\"what are two movies about dinosaurs\")"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.10.12"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
@ -0,0 +1,440 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "13afcae7",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Vectara self-querying \n",
|
||||||
|
"\n",
|
||||||
|
">[Vectara](https://docs.vectara.com/docs/) is a GenAI platform for developers. It provides a simple API to build Grounded Generation (aka Retrieval-augmented-generation) applications.\n",
|
||||||
|
"\n",
|
||||||
|
"In the notebook we'll demo the `SelfQueryRetriever` wrapped around a Vectara vector store. "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "68e75fb9",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Setup\n",
|
||||||
|
"\n",
|
||||||
|
"You will need a Vectara account to use Vectara with LangChain. To get started, use the following steps (see our [quickstart](https://docs.vectara.com/docs/quickstart) guide):\n",
|
||||||
|
"1. [Sign up](https://console.vectara.com/signup) for a Vectara account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
|
||||||
|
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
|
||||||
|
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Authorization\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
|
||||||
|
"\n",
|
||||||
|
"To use LangChain with Vectara, you'll need to have these three values: customer ID, corpus ID and api_key.\n",
|
||||||
|
"You can provide those to LangChain in two ways:\n",
|
||||||
|
"\n",
|
||||||
|
"1. Include in your environment these three variables: `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`.\n",
|
||||||
|
"\n",
|
||||||
|
"> For example, you can set these variables using os.environ and getpass as follows:\n",
|
||||||
|
"\n",
|
||||||
|
"```python\n",
|
||||||
|
"import os\n",
|
||||||
|
"import getpass\n",
|
||||||
|
"\n",
|
||||||
|
"os.environ[\"VECTARA_CUSTOMER_ID\"] = getpass.getpass(\"Vectara Customer ID:\")\n",
|
||||||
|
"os.environ[\"VECTARA_CORPUS_ID\"] = getpass.getpass(\"Vectara Corpus ID:\")\n",
|
||||||
|
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
|
||||||
|
"```\n",
|
||||||
|
"\n",
|
||||||
|
"1. Provide them as arguments when creating the Vectara vectorstore object:\n",
|
||||||
|
"\n",
|
||||||
|
"```python\n",
|
||||||
|
"vectorstore = Vectara(\n",
|
||||||
|
" vectara_customer_id=vectara_customer_id,\n",
|
||||||
|
" vectara_corpus_id=vectara_corpus_id,\n",
|
||||||
|
" vectara_api_key=vectara_api_key\n",
|
||||||
|
" )\n",
|
||||||
|
"```\n",
|
||||||
|
"\n",
|
||||||
|
"**Note:** The self-query retriever requires you to have `lark` installed (`pip install lark`). "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "742ac16d",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Connecting to Vectara from LangChain\n",
|
||||||
|
"\n",
|
||||||
|
"In this example, we assume that you've created an account and a corpus, and added your VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and VECTARA_API_KEY (created with permissions for both indexing and query) as environment variables.\n",
|
||||||
|
"\n",
|
||||||
|
"The corpus has 4 fields defined as metadata for filtering: year, director, rating, and genre\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"id": "cb4a5787",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.embeddings import FakeEmbeddings\n",
|
||||||
|
"from langchain.schema import Document\n",
|
||||||
|
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||||
|
"from langchain.vectorstores import Vectara\n",
|
||||||
|
"from langchain.document_loaders import TextLoader\n",
|
||||||
|
"\n",
|
||||||
|
"from langchain.llms import OpenAI\n",
|
||||||
|
"from langchain.chains import ConversationalRetrievalChain\n",
|
||||||
|
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||||
|
"from langchain.chains.query_constructor.base import AttributeInfo\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 3,
|
||||||
|
"id": "bcbe04d9",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"docs = [\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n",
|
||||||
|
" metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": \"science fiction\"},\n",
|
||||||
|
" ),\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\",\n",
|
||||||
|
" metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2},\n",
|
||||||
|
" ),\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n",
|
||||||
|
" metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6},\n",
|
||||||
|
" ),\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n",
|
||||||
|
" metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3},\n",
|
||||||
|
" ),\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"Toys come alive and have a blast doing so\",\n",
|
||||||
|
" metadata={\"year\": 1995, \"genre\": \"animated\"},\n",
|
||||||
|
" ),\n",
|
||||||
|
" Document(\n",
|
||||||
|
" page_content=\"Three men walk into the Zone, three men walk out of the Zone\",\n",
|
||||||
|
" metadata={\n",
|
||||||
|
" \"year\": 1979,\n",
|
||||||
|
" \"rating\": 9.9,\n",
|
||||||
|
" \"director\": \"Andrei Tarkovsky\",\n",
|
||||||
|
" \"genre\": \"science fiction\",\n",
|
||||||
|
" },\n",
|
||||||
|
" ),\n",
|
||||||
|
"]\n",
|
||||||
|
"\n",
|
||||||
|
"vectara = Vectara()\n",
|
||||||
|
"for doc in docs:\n",
|
||||||
|
" vectara.add_texts([doc.page_content], embedding=FakeEmbeddings(size=768), doc_metadata=doc.metadata)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "5ecaab6d",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Creating our self-querying retriever\n",
|
||||||
|
"Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"id": "86e34dbf",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.llms import OpenAI\n",
|
||||||
|
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
|
||||||
|
"from langchain.chains.query_constructor.base import AttributeInfo\n",
|
||||||
|
"\n",
|
||||||
|
"metadata_field_info = [\n",
|
||||||
|
" AttributeInfo(\n",
|
||||||
|
" name=\"genre\",\n",
|
||||||
|
" description=\"The genre of the movie\",\n",
|
||||||
|
" type=\"string or list[string]\",\n",
|
||||||
|
" ),\n",
|
||||||
|
" AttributeInfo(\n",
|
||||||
|
" name=\"year\",\n",
|
||||||
|
" description=\"The year the movie was released\",\n",
|
||||||
|
" type=\"integer\",\n",
|
||||||
|
" ),\n",
|
||||||
|
" AttributeInfo(\n",
|
||||||
|
" name=\"director\",\n",
|
||||||
|
" description=\"The name of the movie director\",\n",
|
||||||
|
" type=\"string\",\n",
|
||||||
|
" ),\n",
|
||||||
|
" AttributeInfo(\n",
|
||||||
|
" name=\"rating\", description=\"A 1-10 rating for the movie\", type=\"float\"\n",
|
||||||
|
" ),\n",
|
||||||
|
"]\n",
|
||||||
|
"document_content_description = \"Brief summary of a movie\"\n",
|
||||||
|
"llm = OpenAI(temperature=0)\n",
|
||||||
|
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||||
|
" llm, vectara, document_content_description, metadata_field_info, verbose=True\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "ea9df8d4",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Testing it out\n",
|
||||||
|
"And now we can try actually using our retriever!"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 5,
|
||||||
|
"id": "38a126e9",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stderr",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"/Users/ofer/dev/langchain/libs/langchain/langchain/chains/llm.py:278: UserWarning: The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.\n",
|
||||||
|
" warnings.warn(\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query='dinosaur' filter=None limit=None\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'lang': 'eng', 'offset': '0', 'len': '66', 'year': '1993', 'rating': '7.7', 'genre': 'science fiction', 'source': 'langchain'}),\n",
|
||||||
|
" Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'}),\n",
|
||||||
|
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'}),\n",
|
||||||
|
" Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'lang': 'eng', 'offset': '0', 'len': '76', 'year': '2010', 'director': 'Christopher Nolan', 'rating': '8.2', 'source': 'langchain'}),\n",
|
||||||
|
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 5,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example only specifies a relevant query\n",
|
||||||
|
"retriever.get_relevant_documents(\"What are some movies about dinosaurs\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"id": "fc3f1e6e",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query=' ' filter=Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5) limit=None\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'}),\n",
|
||||||
|
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 6,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example only specifies a filter\n",
|
||||||
|
"retriever.get_relevant_documents(\"I want to watch a movie rated higher than 8.5\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"id": "b19d4da0",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig') limit=None\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'lang': 'eng', 'offset': '0', 'len': '82', 'year': '2019', 'director': 'Greta Gerwig', 'rating': '8.3', 'source': 'langchain'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 7,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example specifies a query and a filter\n",
|
||||||
|
"retriever.get_relevant_documents(\"Has Greta Gerwig directed any movies about women\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"id": "f900e40e",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query=' ' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GTE: 'gte'>, attribute='rating', value=8.5), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='science fiction')]) limit=None\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 8,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example specifies a composite filter\n",
|
||||||
|
"retriever.get_relevant_documents(\n",
|
||||||
|
" \"What's a highly rated (above 8.5) science fiction film?\"\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"id": "12a51522",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query='toys' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GT: 'gt'>, attribute='year', value=1990), Comparison(comparator=<Comparator.LT: 'lt'>, attribute='year', value=2005), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='animated')]) limit=None\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 9,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example specifies a query and composite filter\n",
|
||||||
|
"retriever.get_relevant_documents(\n",
|
||||||
|
" \"What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated\"\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "39bd1de1-b9fe-4a98-89da-58d8a7a6ae51",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Filter k\n",
|
||||||
|
"\n",
|
||||||
|
"We can also use the self query retriever to specify `k`: the number of documents to fetch.\n",
|
||||||
|
"\n",
|
||||||
|
"We can do this by passing `enable_limit=True` to the constructor."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 10,
|
||||||
|
"id": "bff36b88-b506-4877-9c63-e5a1a8d78e64",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"retriever = SelfQueryRetriever.from_llm(\n",
|
||||||
|
" llm,\n",
|
||||||
|
" vectara,\n",
|
||||||
|
" document_content_description,\n",
|
||||||
|
" metadata_field_info,\n",
|
||||||
|
" enable_limit=True,\n",
|
||||||
|
" verbose=True,\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 11,
|
||||||
|
"id": "2758d229-4f97-499c-819f-888acaf8ee10",
|
||||||
|
"metadata": {
|
||||||
|
"tags": []
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"query='dinosaur' filter=None limit=2\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'lang': 'eng', 'offset': '0', 'len': '66', 'year': '1993', 'rating': '7.7', 'genre': 'science fiction', 'source': 'langchain'}),\n",
|
||||||
|
" Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'})]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 11,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# This example only specifies a relevant query\n",
|
||||||
|
"retriever.get_relevant_documents(\"what are two movies about dinosaurs\")"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.10.9"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
@ -1,6 +1,19 @@
|
|||||||
"""Load chat messages from common communications platforms for finetuning.
|
"""**Chat Loaders** load chat messages from common communications platforms.
|
||||||
|
|
||||||
This module provides functions to load chat messages from various
|
Load chat messages from various
|
||||||
communications platforms such as Facebook Messenger, Telegram, and
|
communications platforms such as Facebook Messenger, Telegram, and
|
||||||
WhatsApp. The loaded chat messages can be used for finetuning models.
|
WhatsApp. The loaded chat messages can be used for fine-tuning models.
|
||||||
"""
|
|
||||||
|
**Class hierarchy:**
|
||||||
|
|
||||||
|
.. code-block::
|
||||||
|
|
||||||
|
BaseChatLoader --> <name>ChatLoader # Examples: WhatsAppChatLoader, IMessageChatLoader
|
||||||
|
|
||||||
|
**Main helpers:**
|
||||||
|
|
||||||
|
.. code-block::
|
||||||
|
|
||||||
|
ChatSession
|
||||||
|
|
||||||
|
""" # noqa: E501
|
||||||
|
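To make the class hierarchy above concrete, here is a minimal usage sketch. It assumes a WhatsApp export at a placeholder path and that the loader follows the `BaseChatLoader --> WhatsAppChatLoader` pattern named in the docstring; the exact constructor argument shown is an assumption.

```python
# Minimal sketch (assumptions: import path and the `path` argument as shown).
from langchain.chat_loaders.whatsapp import WhatsAppChatLoader

loader = WhatsAppChatLoader(path="./whatsapp_chat.txt")  # placeholder export path
chat_sessions = loader.load()  # each ChatSession bundles the messages of one conversation

for session in chat_sessions:
    for message in session["messages"]:
        # messages are LangChain chat messages, usable for building fine-tuning datasets
        print(type(message).__name__, message.content[:60])
```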
@ -0,0 +1,97 @@
|
|||||||
|
from typing import Any, Dict, Tuple
|
||||||
|
|
||||||
|
from langchain.chains.query_constructor.ir import (
|
||||||
|
Comparator,
|
||||||
|
Comparison,
|
||||||
|
Operation,
|
||||||
|
Operator,
|
||||||
|
StructuredQuery,
|
||||||
|
Visitor,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class SupabaseVectorTranslator(Visitor):
|
||||||
|
"""Translate Langchain filters to Supabase PostgREST filters."""
|
||||||
|
|
||||||
|
allowed_operators = [Operator.AND, Operator.OR]
|
||||||
|
"""Subset of allowed logical operators."""
|
||||||
|
|
||||||
|
allowed_comparators = [
|
||||||
|
Comparator.EQ,
|
||||||
|
Comparator.NE,
|
||||||
|
Comparator.GT,
|
||||||
|
Comparator.GTE,
|
||||||
|
Comparator.LT,
|
||||||
|
Comparator.LTE,
|
||||||
|
Comparator.LIKE,
|
||||||
|
]
|
||||||
|
"""Subset of allowed logical comparators."""
|
||||||
|
|
||||||
|
metadata_column = "metadata"
|
||||||
|
|
||||||
|
def _map_comparator(self, comparator: Comparator) -> str:
|
||||||
|
"""
|
||||||
|
Maps Langchain comparator to PostgREST comparator:
|
||||||
|
|
||||||
|
https://postgrest.org/en/stable/references/api/tables_views.html#operators
|
||||||
|
"""
|
||||||
|
postgrest_comparator = {
|
||||||
|
Comparator.EQ: "eq",
|
||||||
|
Comparator.NE: "neq",
|
||||||
|
Comparator.GT: "gt",
|
||||||
|
Comparator.GTE: "gte",
|
||||||
|
Comparator.LT: "lt",
|
||||||
|
Comparator.LTE: "lte",
|
||||||
|
Comparator.LIKE: "like",
|
||||||
|
}.get(comparator)
|
||||||
|
|
||||||
|
if postgrest_comparator is None:
|
||||||
|
raise Exception(
|
||||||
|
f"Comparator '{comparator}' is not currently "
|
||||||
|
"supported in Supabase Vector"
|
||||||
|
)
|
||||||
|
|
||||||
|
return postgrest_comparator
|
||||||
|
|
||||||
|
def _get_json_operator(self, value: Any) -> str:
|
||||||
|
if isinstance(value, str):
|
||||||
|
return "->>"
|
||||||
|
else:
|
||||||
|
return "->"
|
||||||
|
|
||||||
|
def visit_operation(self, operation: Operation) -> str:
|
||||||
|
args = [arg.accept(self) for arg in operation.arguments]
|
||||||
|
return f"{operation.operator.value}({','.join(args)})"
|
||||||
|
|
||||||
|
def visit_comparison(self, comparison: Comparison) -> str:
|
||||||
|
if isinstance(comparison.value, list):
|
||||||
|
return self.visit_operation(
|
||||||
|
Operation(
|
||||||
|
operator=Operator.AND,
|
||||||
|
arguments=(
|
||||||
|
Comparison(
|
||||||
|
comparator=comparison.comparator,
|
||||||
|
attribute=comparison.attribute,
|
||||||
|
value=value,
|
||||||
|
)
|
||||||
|
for value in comparison.value
|
||||||
|
),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
return ".".join(
|
||||||
|
[
|
||||||
|
f"{self.metadata_column}{self._get_json_operator(comparison.value)}{comparison.attribute}",
|
||||||
|
f"{self._map_comparator(comparison.comparator)}",
|
||||||
|
f"{comparison.value}",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
|
||||||
|
def visit_structured_query(
|
||||||
|
self, structured_query: StructuredQuery
|
||||||
|
) -> Tuple[str, Dict[str, str]]:
|
||||||
|
if structured_query.filter is None:
|
||||||
|
kwargs = {}
|
||||||
|
else:
|
||||||
|
kwargs = {"postgrest_filter": structured_query.filter.accept(self)}
|
||||||
|
return structured_query.query, kwargs
|
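As a quick sanity check of the translator above, the sketch below (assuming the module lands at `langchain.retrievers.self_query.supabase`, as the tests further down do) shows how a list-valued comparison is expanded into an `and(...)` of PostgREST filters and how `visit_structured_query` packages the result as search kwargs.

```python
from langchain.chains.query_constructor.ir import Comparator, Comparison, StructuredQuery
from langchain.retrievers.self_query.supabase import SupabaseVectorTranslator

translator = SupabaseVectorTranslator()

# A list value is split into one comparison per element, joined with and(...).
comparison = Comparison(comparator=Comparator.LTE, attribute="year", value=[2005, 1995])
print(translator.visit_comparison(comparison))
# and(metadata->year.lte.2005,metadata->year.lte.1995)

# The structured query is returned as (free-text query, search kwargs).
query, kwargs = translator.visit_structured_query(
    StructuredQuery(query="toys", filter=comparison, limit=None)
)
print(query, kwargs)
# toys {'postgrest_filter': 'and(metadata->year.lte.2005,metadata->year.lte.1995)'}
```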
@ -0,0 +1,69 @@
|
|||||||
|
from typing import Tuple, Union
|
||||||
|
|
||||||
|
from langchain.chains.query_constructor.ir import (
|
||||||
|
Comparator,
|
||||||
|
Comparison,
|
||||||
|
Operation,
|
||||||
|
Operator,
|
||||||
|
StructuredQuery,
|
||||||
|
Visitor,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def process_value(value: Union[int, float, str]) -> str:
|
||||||
|
if isinstance(value, str):
|
||||||
|
return f"'{value}'"
|
||||||
|
else:
|
||||||
|
return str(value)
|
||||||
|
|
||||||
|
|
||||||
|
class VectaraTranslator(Visitor):
|
||||||
|
"""Translate `Vectara` internal query language elements to valid filters."""
|
||||||
|
|
||||||
|
allowed_operators = [Operator.AND, Operator.OR]
|
||||||
|
"""Subset of allowed logical operators."""
|
||||||
|
allowed_comparators = [
|
||||||
|
Comparator.EQ,
|
||||||
|
Comparator.NE,
|
||||||
|
Comparator.GT,
|
||||||
|
Comparator.GTE,
|
||||||
|
Comparator.LT,
|
||||||
|
Comparator.LTE,
|
||||||
|
]
|
||||||
|
"""Subset of allowed logical comparators."""
|
||||||
|
|
||||||
|
def _format_func(self, func: Union[Operator, Comparator]) -> str:
|
||||||
|
map_dict = {
|
||||||
|
Operator.AND: " and ",
|
||||||
|
Operator.OR: " or ",
|
||||||
|
Comparator.EQ: "=",
|
||||||
|
Comparator.NE: "!=",
|
||||||
|
Comparator.GT: ">",
|
||||||
|
Comparator.GTE: ">=",
|
||||||
|
Comparator.LT: "<",
|
||||||
|
Comparator.LTE: "<=",
|
||||||
|
}
|
||||||
|
self._validate_func(func)
|
||||||
|
return map_dict[func]
|
||||||
|
|
||||||
|
def visit_operation(self, operation: Operation) -> str:
|
||||||
|
args = [arg.accept(self) for arg in operation.arguments]
|
||||||
|
operator = self._format_func(operation.operator)
|
||||||
|
return "( " + operator.join(args) + " )"
|
||||||
|
|
||||||
|
def visit_comparison(self, comparison: Comparison) -> str:
|
||||||
|
comparator = self._format_func(comparison.comparator)
|
||||||
|
processed_value = process_value(comparison.value)
|
||||||
|
attribute = comparison.attribute
|
||||||
|
return (
|
||||||
|
"( " + "doc." + attribute + " " + comparator + " " + processed_value + " )"
|
||||||
|
)
|
||||||
|
|
||||||
|
def visit_structured_query(
|
||||||
|
self, structured_query: StructuredQuery
|
||||||
|
) -> Tuple[str, dict]:
|
||||||
|
if structured_query.filter is None:
|
||||||
|
kwargs = {}
|
||||||
|
else:
|
||||||
|
kwargs = {"filter": structured_query.filter.accept(self)}
|
||||||
|
return structured_query.query, kwargs
|
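To tie this back to the Vectara notebook above: the `filter=Operation(...)` objects printed there are what this translator turns into Vectara's `doc.`-prefixed filter expressions. A small sketch follows, assuming the import path used by the tests below.

```python
from langchain.chains.query_constructor.ir import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
)
from langchain.retrievers.self_query.vectara import VectaraTranslator

translator = VectaraTranslator()

# Filter from the "highly rated science fiction" example in the notebook.
flt = Operation(
    operator=Operator.AND,
    arguments=[
        Comparison(comparator=Comparator.GTE, attribute="rating", value=8.5),
        Comparison(comparator=Comparator.EQ, attribute="genre", value="science fiction"),
    ],
)
_, kwargs = translator.visit_structured_query(
    StructuredQuery(query=" ", filter=flt, limit=None)
)
print(kwargs["filter"])
# ( ( doc.rating >= 8.5 ) and ( doc.genre = 'science fiction' ) )
```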
@ -0,0 +1,85 @@
|
|||||||
|
from typing import Dict, Tuple
|
||||||
|
|
||||||
|
from langchain.chains.query_constructor.ir import (
|
||||||
|
Comparator,
|
||||||
|
Comparison,
|
||||||
|
Operation,
|
||||||
|
Operator,
|
||||||
|
StructuredQuery,
|
||||||
|
)
|
||||||
|
from langchain.retrievers.self_query.supabase import SupabaseVectorTranslator
|
||||||
|
|
||||||
|
DEFAULT_TRANSLATOR = SupabaseVectorTranslator()
|
||||||
|
|
||||||
|
|
||||||
|
def test_visit_comparison() -> None:
|
||||||
|
comp = Comparison(comparator=Comparator.LT, attribute="foo", value=["1", "2"])
|
||||||
|
expected = "and(metadata->>foo.lt.1,metadata->>foo.lt.2)"
|
||||||
|
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
|
||||||
|
assert expected == actual
|
||||||
|
|
||||||
|
|
||||||
|
def test_visit_operation() -> None:
|
||||||
|
op = Operation(
|
||||||
|
operator=Operator.AND,
|
||||||
|
arguments=[
|
||||||
|
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
|
||||||
|
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
|
||||||
|
Comparison(comparator=Comparator.LT, attribute="abc", value=["1", "2"]),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
expected = (
|
||||||
|
"and("
|
||||||
|
"metadata->foo.lt.2,"
|
||||||
|
"metadata->>bar.eq.baz,"
|
||||||
|
"and(metadata->>abc.lt.1,metadata->>abc.lt.2)"
|
||||||
|
")"
|
||||||
|
)
|
||||||
|
actual = DEFAULT_TRANSLATOR.visit_operation(op)
|
||||||
|
assert expected == actual
|
||||||
|
|
||||||
|
|
||||||
|
def test_visit_structured_query() -> None:
|
||||||
|
query = "What is the capital of France?"
|
||||||
|
structured_query = StructuredQuery(
|
||||||
|
query=query,
|
||||||
|
filter=None,
|
||||||
|
)
|
||||||
|
expected: Tuple[str, Dict] = (query, {})
|
||||||
|
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
|
||||||
|
assert expected == actual
|
||||||
|
|
||||||
|
comp = Comparison(comparator=Comparator.LT, attribute="foo", value=["1", "2"])
|
||||||
|
expected = (
|
||||||
|
query,
|
||||||
|
{"postgrest_filter": "and(metadata->>foo.lt.1,metadata->>foo.lt.2)"},
|
||||||
|
)
|
||||||
|
structured_query = StructuredQuery(
|
||||||
|
query=query,
|
||||||
|
filter=comp,
|
||||||
|
)
|
||||||
|
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
|
||||||
|
assert expected == actual
|
||||||
|
|
||||||
|
op = Operation(
|
||||||
|
operator=Operator.AND,
|
||||||
|
arguments=[
|
||||||
|
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
|
||||||
|
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
|
||||||
|
Comparison(comparator=Comparator.LT, attribute="abc", value=["1", "2"]),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
structured_query = StructuredQuery(
|
||||||
|
query=query,
|
||||||
|
filter=op,
|
||||||
|
)
|
||||||
|
expected = (
|
||||||
|
query,
|
||||||
|
{
|
||||||
|
"postgrest_filter": (
|
||||||
|
"and(metadata->foo.lt.2,metadata->>bar.eq.baz,and(metadata->>abc.lt.1,metadata->>abc.lt.2))"
|
||||||
|
)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
|
||||||
|
assert expected == actual
|
@ -0,0 +1,71 @@
|
|||||||
|
from typing import Dict, Tuple
|
||||||
|
|
||||||
|
from langchain.chains.query_constructor.ir import (
|
||||||
|
Comparator,
|
||||||
|
Comparison,
|
||||||
|
Operation,
|
||||||
|
Operator,
|
||||||
|
StructuredQuery,
|
||||||
|
)
|
||||||
|
from langchain.retrievers.self_query.vectara import VectaraTranslator
|
||||||
|
|
||||||
|
DEFAULT_TRANSLATOR = VectaraTranslator()
|
||||||
|
|
||||||
|
|
||||||
|
def test_visit_comparison() -> None:
|
||||||
|
comp = Comparison(comparator=Comparator.LT, attribute="foo", value="1")
|
||||||
|
expected = "( doc.foo < '1' )"
|
||||||
|
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
|
||||||
|
assert expected == actual
|
||||||
|
|
||||||
|
|
||||||
|
def test_visit_operation() -> None:
|
||||||
|
op = Operation(
|
||||||
|
operator=Operator.AND,
|
||||||
|
arguments=[
|
||||||
|
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
|
||||||
|
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
|
||||||
|
Comparison(comparator=Comparator.LT, attribute="abc", value=1),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
expected = "( ( doc.foo < 2 ) and ( doc.bar = 'baz' ) and ( doc.abc < 1 ) )"
|
||||||
|
actual = DEFAULT_TRANSLATOR.visit_operation(op)
|
||||||
|
assert expected == actual
|
||||||
|
|
||||||
|
|
||||||
|
def test_visit_structured_query() -> None:
|
||||||
|
query = "What is the capital of France?"
|
||||||
|
structured_query = StructuredQuery(
|
||||||
|
query=query,
|
||||||
|
filter=None,
|
||||||
|
limit=None,
|
||||||
|
)
|
||||||
|
expected: Tuple[str, Dict] = (query, {})
|
||||||
|
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
|
||||||
|
assert expected == actual
|
||||||
|
|
||||||
|
comp = Comparison(comparator=Comparator.LT, attribute="foo", value=1)
|
||||||
|
expected = (query, {"filter": "( doc.foo < 1 )"})
|
||||||
|
structured_query = StructuredQuery(
|
||||||
|
query=query,
|
||||||
|
filter=comp,
|
||||||
|
limit=None,
|
||||||
|
)
|
||||||
|
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
|
||||||
|
assert expected == actual
|
||||||
|
|
||||||
|
op = Operation(
|
||||||
|
operator=Operator.AND,
|
||||||
|
arguments=[
|
||||||
|
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
|
||||||
|
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
|
||||||
|
Comparison(comparator=Comparator.LT, attribute="abc", value=1),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
structured_query = StructuredQuery(query=query, filter=op, limit=None)
|
||||||
|
expected = (
|
||||||
|
query,
|
||||||
|
{"filter": "( ( doc.foo < 2 ) and ( doc.bar = 'baz' ) and ( doc.abc < 1 ) )"},
|
||||||
|
)
|
||||||
|
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
|
||||||
|
assert expected == actual
|