Konko fix dependency

This commit is contained in:
Bagatur 2023-09-08 10:06:37 -07:00
parent c6b27b3692
commit 9095dc69ac
138 changed files with 10707 additions and 2862 deletions

View File

@@ -87,7 +87,7 @@ jobs:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: ${{ inputs.working-directory }}
- cache-key: lint
+ cache-key: lint-with-extras
- name: Check Poetry File
shell: bash
@@ -102,9 +102,17 @@ jobs:
poetry lock --check
- name: Install dependencies
# Also installs dev/lint/test/typing dependencies, to ensure we have
# type hints for as many of our libraries as possible.
# This helps catch errors that can only be spotted when dependencies are installed, for example:
# https://github.com/langchain-ai/langchain/pull/10249/files#diff-935185cd488d015f026dcd9e19616ff62863e8cde8c0bee70318d3ccbca98341
#
# If you change this configuration, make sure to change the `cache-key`
# in the `poetry_setup` action above to stop using the old cache.
# It doesn't matter how you change it, any change will cause a cache-bust.
working-directory: ${{ inputs.working-directory }}
run: |
- poetry install
+ poetry install --with dev,lint,test,typing
- name: Install langchain editable
working-directory: ${{ inputs.working-directory }}

View File

@@ -79,3 +79,15 @@ jobs:
- name: Run pydantic compatibility tests
shell: bash
run: make test
- name: Ensure the tests did not create any additional files
shell: bash
run: |
set -eu
STATUS="$(git status)"
echo "$STATUS"
# grep will exit non-zero if the target message isn't found,
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'

View File

@@ -43,3 +43,15 @@ jobs:
- name: Run core tests
shell: bash
run: make test
- name: Ensure the tests did not create any additional files
shell: bash
run: |
set -eu
STATUS="$(git status)"
echo "$STATUS"
# grep will exit non-zero if the target message isn't found,
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'

View File

@@ -83,3 +83,15 @@ jobs:
- name: Run extended tests
run: make extended_tests
- name: Ensure the tests did not create any additional files
shell: bash
run: |
set -eu
STATUS="$(git status)"
echo "$STATUS"
# grep will exit non-zero if the target message isn't found,
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'

View File

@@ -115,3 +115,15 @@ jobs:
- name: Run extended tests
run: make extended_tests
- name: Ensure the tests did not create any additional files
shell: bash
run: |
set -eu
STATUS="$(git status)"
echo "$STATUS"
# grep will exit non-zero if the target message isn't found,
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'

View File

@@ -47,3 +47,15 @@ jobs:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
make scheduled_tests
- name: Ensure the tests did not create any additional files
shell: bash
run: |
set -eu
STATUS="$(git status)"
echo "$STATUS"
# grep will exit non-zero if the target message isn't found,
# and `set -e` above will cause the step to fail.
echo "$STATUS" | grep 'nothing to commit, working tree clean'

View File

@@ -317,7 +317,7 @@
"Chatbots": "https://python.langchain.com/docs/use_cases/chatbots",
"Summarization": "https://python.langchain.com/docs/use_cases/summarization",
"Extraction": "https://python.langchain.com/docs/use_cases/extraction",
- "SQL": "https://python.langchain.com/docs/use_cases/sql",
+ "SQL": "https://python.langchain.com/docs/use_cases/qa_structured/sql",
"Tagging": "https://python.langchain.com/docs/use_cases/tagging",
"Code Understanding": "https://python.langchain.com/docs/use_cases/code_understanding",
"AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt",
@@ -400,7 +400,7 @@
"Summarization": "https://python.langchain.com/docs/use_cases/summarization",
"Extraction": "https://python.langchain.com/docs/use_cases/extraction",
"Interacting with APIs": "https://python.langchain.com/docs/use_cases/apis",
- "SQL": "https://python.langchain.com/docs/use_cases/sql",
+ "SQL": "https://python.langchain.com/docs/use_cases/qa_structured/sql",
"QA over Documents": "https://python.langchain.com/docs/use_cases/question_answering/index",
"Retrieve from vector stores directly": "https://python.langchain.com/docs/use_cases/question_answering/how_to/vector_db_text_generation",
"Improve document indexing with HyDE": "https://python.langchain.com/docs/use_cases/question_answering/how_to/hyde",
@@ -641,7 +641,7 @@
"Chatbots": "https://python.langchain.com/docs/use_cases/chatbots",
"Extraction": "https://python.langchain.com/docs/use_cases/extraction",
"Interacting with APIs": "https://python.langchain.com/docs/use_cases/apis",
- "SQL": "https://python.langchain.com/docs/use_cases/sql",
+ "SQL": "https://python.langchain.com/docs/use_cases/qa_structured/sql",
"HuggingGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/hugginggpt",
"Perform context-aware text splitting": "https://python.langchain.com/docs/use_cases/question_answering/how_to/document-context-aware-QA",
"Retrieve from vector stores directly": "https://python.langchain.com/docs/use_cases/question_answering/how_to/vector_db_text_generation",
@@ -1009,7 +1009,7 @@
"LangSmith Walkthrough": "https://python.langchain.com/docs/guides/langsmith/walkthrough",
"Comparing Chain Outputs": "https://python.langchain.com/docs/guides/evaluation/examples/comparisons",
"Agent Trajectory": "https://python.langchain.com/docs/guides/evaluation/trajectory/trajectory_eval",
- "SQL": "https://python.langchain.com/docs/use_cases/sql",
+ "SQL": "https://python.langchain.com/docs/use_cases/qa_structured/sql",
"Multi-modal outputs: Image & Text": "https://python.langchain.com/docs/use_cases/multi_modal/image_agent",
"Agent Debates with Tools": "https://python.langchain.com/docs/use_cases/agent_simulations/two_agent_debate_tools",
"Multiple callback handlers": "https://python.langchain.com/docs/modules/callbacks/multiple_callbacks",
@@ -1268,7 +1268,7 @@
"SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database",
"JSON Agent": "https://python.langchain.com/docs/integrations/toolkits/json",
"NIBittensorLLM": "https://python.langchain.com/docs/integrations/llms/bittensor",
- "SQL": "https://python.langchain.com/docs/use_cases/sql",
+ "SQL": "https://python.langchain.com/docs/use_cases/qa_structured/sql",
"BabyAGI with Tools": "https://python.langchain.com/docs/use_cases/agents/baby_agi_with_agent",
"Conversational Retrieval Agent": "https://python.langchain.com/docs/use_cases/question_answering/how_to/conversational_retrieval_agents",
"Plug-and-Plai": "https://python.langchain.com/docs/use_cases/agents/custom_agent_with_plugin_retrieval_using_plugnplai",
@@ -1832,12 +1832,12 @@
"create_sql_agent": {
"CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb",
"SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database",
- "SQL": "https://python.langchain.com/docs/use_cases/sql"
+ "SQL": "https://python.langchain.com/docs/use_cases/qa_structured/sql"
},
"SQLDatabaseToolkit": {
"CnosDB": "https://python.langchain.com/docs/integrations/providers/cnosdb",
"SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database",
- "SQL": "https://python.langchain.com/docs/use_cases/sql",
+ "SQL": "https://python.langchain.com/docs/use_cases/qa_structured/sql",
"Use ToolKits with OpenAI Functions": "https://python.langchain.com/docs/modules/agents/how_to/use_toolkits_with_openai_functions"
},
"SageMakerCallbackHandler": {
@@ -1899,7 +1899,7 @@
"Rebuff": "https://python.langchain.com/docs/integrations/providers/rebuff",
"SQL Database Agent": "https://python.langchain.com/docs/integrations/toolkits/sql_database",
"Cookbook": "https://python.langchain.com/docs/guides/expression_language/cookbook",
- "SQL": "https://python.langchain.com/docs/use_cases/sql",
+ "SQL": "https://python.langchain.com/docs/use_cases/qa_structured/sql",
"Multiple Retrieval Sources": "https://python.langchain.com/docs/use_cases/question_answering/how_to/multiple_retrieval"
},
"Weaviate": {
@@ -3035,11 +3035,11 @@
"Interacting with APIs": "https://python.langchain.com/docs/use_cases/apis"
},
"create_sql_query_chain": {
- "SQL": "https://python.langchain.com/docs/use_cases/sql",
+ "SQL": "https://python.langchain.com/docs/use_cases/qa_structured/sql",
"Multiple Retrieval Sources": "https://python.langchain.com/docs/use_cases/question_answering/how_to/multiple_retrieval"
},
"ElasticsearchDatabaseChain": {
- "SQL": "https://python.langchain.com/docs/use_cases/sql"
+ "SQL": "https://python.langchain.com/docs/use_cases/qa_structured/sql"
},
"FileChatMessageHistory": {
"AutoGPT": "https://python.langchain.com/docs/use_cases/autonomous_agents/autogpt"

View File

@@ -12,7 +12,7 @@ Output parsers are classes that help structure language model responses. There a
And then one optional one:
- - "Parse with prompt": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.
+ - "Parse with prompt": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.
## Get started

View File

@@ -1 +1,2 @@
position: 0
collapsed: false

View File

@@ -1,5 +0,0 @@
# Web Scraping
Web scraping has historically been a challenging endeavor due to the ever-changing nature of website structures, making it tedious for developers to maintain their scraping scripts. Traditional methods often rely on specific HTML tags and patterns which, when altered, can disrupt data extraction processes.
Enter the LLM-based method for parsing HTML: By leveraging the capabilities of LLMs, and especially OpenAI Functions in LangChain's extraction chain, developers can instruct the model to extract only the desired data in a specified format. This method not only streamlines the extraction process but also significantly reduces the time spent on manual debugging and script modifications. Its adaptability means that even if websites undergo significant design changes, the extraction remains consistent and robust. This level of resilience translates to reduced maintenance efforts, cost savings, and ensures a higher quality of extracted data. Compared to its predecessors, the LLM-based approach wins out in the web scraping domain by transforming a historically cumbersome task into a more automated and efficient process.

View File

@@ -1076,6 +1076,10 @@
"source": "/docs/modules/agents/tools/integrations/zapier",
"destination": "/docs/integrations/tools/zapier"
},
{
"source": "/docs/integrations/tools/sqlite",
"destination": "/docs/use_cases/sql/sqlite"
},
{
"source": "/en/latest/modules/callbacks/filecallbackhandler.html",
"destination": "/docs/modules/callbacks/how_to/filecallbackhandler"
@@ -2216,6 +2220,10 @@
"source": "/docs/modules/data_connection/text_embedding/integrations/tensorflowhub",
"destination": "/docs/integrations/text_embedding/tensorflowhub"
},
{
"source": "/docs/integrations/text_embedding/Awa",
"destination": "/docs/integrations/text_embedding/awadb"
},
{
"source": "/en/latest/modules/indexes/vectorstores/examples/analyticdb.html",
"destination": "/docs/integrations/vectorstores/analyticdb"
@@ -3178,7 +3186,11 @@
},
{
"source": "/en/latest/use_cases/tabular.html",
- "destination": "/docs/use_cases/tabular"
+ "destination": "/docs/use_cases/qa_structured"
},
{
"source": "/docs/use_cases/sql(/?)",
"destination": "/docs/use_cases/qa_structured/sql"
},
{
"source": "/en/latest/youtube.html",
@@ -3370,7 +3382,7 @@
},
{
"source": "/docs/modules/chains/popular/sqlite",
- "destination": "/docs/use_cases/tabular/sqlite"
+ "destination": "/docs/use_cases/qa_structured/sql"
},
{
"source": "/docs/modules/chains/popular/openai_functions",
@@ -3582,7 +3594,7 @@
},
{
"source": "/docs/modules/chains/additional/elasticsearch_database",
- "destination": "/docs/use_cases/tabular/elasticsearch_database"
+ "destination": "/docs/use_cases/qa_structured/integrations/elasticsearch"
},
{
"source": "/docs/modules/chains/additional/tagging",

View File

@@ -1,6 +1,6 @@
# YouTube videos
- ⛓ icon marks a new addition [last update 2023-06-20]
+ ⛓ icon marks a new addition [last update 2023-09-05]
### [Official LangChain YouTube channel](https://www.youtube.com/@LangChain)
@@ -86,20 +86,20 @@
- [`Llama Index`: Chat with Documentation using URL Loader](https://youtu.be/XJRoDEctAwA) by [Merk](https://www.youtube.com/@merksworld)
- [Using OpenAI, LangChain, and `Gradio` to Build Custom GenAI Applications](https://youtu.be/1MsmqMg3yUc) by [David Hundley](https://www.youtube.com/@dkhundley)
- [LangChain, Chroma DB, OpenAI Beginner Guide | ChatGPT with your PDF](https://youtu.be/FuqdVNB_8c0)
- [Build AI chatbot with custom knowledge base using OpenAI API and GPT Index](https://youtu.be/vDZAZuaXf48) by [Irina Nik](https://www.youtube.com/@irina_nik)
- [Build Your Own Auto-GPT Apps with LangChain (Python Tutorial)](https://youtu.be/NYSWn1ipbgg) by [Dave Ebbelaar](https://www.youtube.com/@daveebbelaar)
- [Chat with Multiple `PDFs` | LangChain App Tutorial in Python (Free LLMs and Embeddings)](https://youtu.be/dXxQ0LR-3Hg) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
- [Chat with a `CSV` | `LangChain Agents` Tutorial (Beginners)](https://youtu.be/tjeti5vXWOU) by [Alejandro AO - Software & Ai](https://www.youtube.com/@alejandro_ao)
- [Create Your Own ChatGPT with `PDF` Data in 5 Minutes (LangChain Tutorial)](https://youtu.be/au2WVVGUvc8) by [Liam Ottley](https://www.youtube.com/@LiamOttley)
- [Using ChatGPT with YOUR OWN Data. This is magical. (LangChain OpenAI API)](https://youtu.be/9AXP7tCI9PI) by [TechLead](https://www.youtube.com/@TechLead)
- [Build a Custom Chatbot with OpenAI: `GPT-Index` & LangChain | Step-by-Step Tutorial](https://youtu.be/FIDv6nc4CgU) by [Fabrikod](https://www.youtube.com/@fabrikod)
- [`Flowise` is an open source no-code UI visual tool to build 🦜🔗LangChain applications](https://youtu.be/CovAPtQPU0k) by [Cobus Greyling](https://www.youtube.com/@CobusGreylingZA)
- [LangChain & GPT 4 For Data Analysis: The `Pandas` Dataframe Agent](https://youtu.be/rFQ5Kmkd4jc) by [Rabbitmetrics](https://www.youtube.com/@rabbitmetrics)
- [`GirlfriendGPT` - AI girlfriend with LangChain](https://youtu.be/LiN3D1QZGQw) by [Toolfinder AI](https://www.youtube.com/@toolfinderai)
- [`PrivateGPT`: Chat to your FILES OFFLINE and FREE [Installation and Tutorial]](https://youtu.be/G7iLllmx4qc) by [Prompt Engineering](https://www.youtube.com/@engineerprompt)
- [How to build with Langchain 10x easier | ⛓️ LangFlow & `Flowise`](https://youtu.be/Ya1oGL7ZTvU) by [AI Jason](https://www.youtube.com/@AIJasonZ)
- [Getting Started With LangChain In 20 Minutes- Build Celebrity Search Application](https://youtu.be/_FpT1cwcSLg) by [Krish Naik](https://www.youtube.com/@krishnaik06)
- ⛓ [LangChain HowTo and Guides YouTube playlist](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ) by [Sam Witteveen](https://www.youtube.com/@samwitteveenai/)
### [Prompt Engineering and LangChain](https://www.youtube.com/watch?v=muXbPpG_ys4&list=PLEJK-H61Xlwzm5FYLDdKt_6yibO33zoMW) by [Venelin Valkov](https://www.youtube.com/@venelin_valkov)

File diff suppressed because it is too large

View File

@@ -0,0 +1,119 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "f09fd305",
"metadata": {},
"source": [
"# Code writing\n",
"\n",
"Example of how to use LCEL to write Python code."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "bd7c259a",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.utilities import PythonREPL"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "73795d2d",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Write some python code to solve the user's problem. \n",
"\n",
"Return only python code in Markdown format, e.g.:\n",
"\n",
"```python\n",
"....\n",
"```\"\"\"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", template), (\"human\", \"{input}\")]\n",
")\n",
"\n",
"model = ChatOpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "42859e8a",
"metadata": {},
"outputs": [],
"source": [
"def _sanitize_output(text: str):\n",
" _, after = text.split(\"```python\")\n",
" return after.split(\"```\")[0]"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "5ded1a86",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model | StrOutputParser() | _sanitize_output | PythonREPL().run"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "208c2b75",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Python REPL can execute arbitrary code. Use with caution.\n"
]
},
{
"data": {
"text/plain": [
"'4\\n'"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"input\": \"whats 2 plus 2\"})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,11 @@
---
sidebar_position: 2
---
# Cookbook
import DocCardList from "@theme/DocCardList";
Example code for accomplishing common tasks with the LangChain Expression Language (LCEL). These examples show how to compose different Runnables (the core LCEL interface) to achieve various tasks. If you're just getting acquainted with LCEL, the [Prompt + LLM](/docs/expression_language/cookbook/prompt_llm_parser) page is a good place to start.
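For orientation, a minimal composition looks like the sketch below (assuming an OpenAI API key is configured; the prompt text is illustrative):

```python
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser

# Runnables compose left to right with the | operator:
# prompt -> chat model -> output parser.
prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | ChatOpenAI() | StrOutputParser()

chain.invoke({"topic": "bears"})
```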
<DocCardList />

View File

@@ -0,0 +1,180 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "5062941a",
"metadata": {},
"source": [
"# Adding memory\n",
"\n",
"This shows how to add memory to an arbitrary chain. Right now, you can use the memory classes but need to hook it up manually"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "7998efd8",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.schema.runnable import RunnableMap\n",
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"\n",
"model = ChatOpenAI()\n",
"prompt = ChatPromptTemplate.from_messages([\n",
" (\"system\", \"You are a helpful chatbot\"),\n",
" MessagesPlaceholder(variable_name=\"history\"),\n",
" (\"human\", \"{input}\")\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "fa0087f3",
"metadata": {},
"outputs": [],
"source": [
"memory = ConversationBufferMemory(return_messages=True)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "06b531ae",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history': []}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"memory.load_memory_variables({})"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "d9437af6",
"metadata": {},
"outputs": [],
"source": [
"chain = RunnableMap({\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"memory\": memory.load_memory_variables\n",
"}) | {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"history\": lambda x: x[\"memory\"][\"history\"]\n",
"} | prompt | model"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "bed1e260",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Hello Bob! How can I assist you today?', additional_kwargs={}, example=False)"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inputs = {\"input\": \"hi im bob\"}\n",
"response = chain.invoke(inputs)\n",
"response"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "890475b4",
"metadata": {},
"outputs": [],
"source": [
"memory.save_context(inputs, {\"output\": response.content})"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e8fcb77f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history': [HumanMessage(content='hi im bob', additional_kwargs={}, example=False),\n",
" AIMessage(content='Hello Bob! How can I assist you today?', additional_kwargs={}, example=False)]}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"memory.load_memory_variables({})"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "d837d5c3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Your name is Bob.', additional_kwargs={}, example=False)"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inputs = {\"input\": \"whats my name\"}\n",
"response = chain.invoke(inputs)\n",
"response"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,133 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "4927a727-b4c8-453c-8c83-bd87b4fcac14",
"metadata": {},
"source": [
"# Adding moderation\n",
"\n",
"This shows how to add in moderation (or other safeguards) around your LLM application."
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "4f5f6449-940a-4f5c-97c0-39b71c3e2a68",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import OpenAIModerationChain\n",
"from langchain.llms import OpenAI\n",
"from langchain.prompts import ChatPromptTemplate"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "fcb8312b-7e7a-424f-a3ec-76738c9a9d21",
"metadata": {},
"outputs": [],
"source": [
"moderate = OpenAIModerationChain()"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "b24b9148-f6b0-4091-8ea8-d3fb281bd950",
"metadata": {},
"outputs": [],
"source": [
"model = OpenAI()\n",
"prompt = ChatPromptTemplate.from_messages([\n",
" (\"system\", \"repeat after me: {input}\")\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "1c8ed87c-9ca6-4559-bf60-d40e94a0af08",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "5256b9bd-381a-42b0-bfa8-7e6d18f853cb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\n\\nYou are stupid.'"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"input\": \"you are stupid\"})"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "fe6e3b33-dc9a-49d5-b194-ba750c58a628",
"metadata": {},
"outputs": [],
"source": [
"moderated_chain = chain | moderate"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "d8ba0cbd-c739-4d23-be9f-6ae092bd5ffb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'input': '\\n\\nYou are stupid',\n",
" 'output': \"Text was found that violates OpenAI's content policy.\"}"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"moderated_chain.invoke({\"input\": \"you are stupid\"})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,240 @@
{
"cells": [
{
"cell_type": "raw",
"id": "877102d1-02ea-4fa3-8ec7-a08e242b95b3",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 2\n",
"title: Multiple chains\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "0f2bf8d3",
"metadata": {},
"source": [
"Runnables can easily be used to string together multiple Chains"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "d65d4e9e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'El país donde se encuentra la ciudad de Honolulu, donde nació Barack Obama, el 44º Presidente de los Estados Unidos, es Estados Unidos. Honolulu se encuentra en la isla de Oahu, en el estado de Hawái.'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema import StrOutputParser\n",
"\n",
"prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n",
"prompt2 = ChatPromptTemplate.from_template(\"what country is the city {city} in? respond in {language}\")\n",
"\n",
"model = ChatOpenAI()\n",
"\n",
"chain1 = prompt1 | model | StrOutputParser()\n",
"\n",
"chain2 = {\"city\": chain1, \"language\": itemgetter(\"language\")} | prompt2 | model | StrOutputParser()\n",
"\n",
"chain2.invoke({\"person\": \"obama\", \"language\": \"spanish\"})"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "878f8176",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnableMap, RunnablePassthrough\n",
"\n",
"prompt1 = ChatPromptTemplate.from_template(\"generate a {attribute} color. Return the name of the color and nothing else:\")\n",
"prompt2 = ChatPromptTemplate.from_template(\"what is a fruit of color: {color}. Return the name of the fruit and nothing else:\")\n",
"prompt3 = ChatPromptTemplate.from_template(\"what is a country with a flag that has the color: {color}. Return the name of the country and nothing else:\")\n",
"prompt4 = ChatPromptTemplate.from_template(\"What is the color of {fruit} and the flag of {country}?\")\n",
"\n",
"model_parser = model | StrOutputParser()\n",
"\n",
"color_generator = {\"attribute\": RunnablePassthrough()} | prompt1 | {\"color\": model_parser}\n",
"color_to_fruit = prompt2 | model_parser\n",
"color_to_country = prompt3 | model_parser\n",
"question_generator = color_generator | {\"fruit\": color_to_fruit, \"country\": color_to_country} | prompt4"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "d621a870",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"ChatPromptValue(messages=[HumanMessage(content='What is the color of strawberry and the flag of China?', additional_kwargs={}, example=False)])"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"question_generator.invoke({\"warm\"})"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "b4a9812b-bead-4fd9-ae27-0b8be57e5dc1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='The color of an apple is typically red or green. The flag of China is predominantly red with a large yellow star in the upper left corner and four smaller yellow stars surrounding it.', additional_kwargs={}, example=False)"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt = question_generator.invoke({\"warm\"})\n",
"model.invoke(prompt)"
]
},
{
"cell_type": "markdown",
"id": "6d75a313-f1c8-4e94-9a17-24e0bf4a2bdc",
"metadata": {},
"source": [
"### Branching and Merging\n",
"\n",
"You may want the output of one component to be processed by 2 or more other components. [RunnableMaps](https://api.python.langchain.com/en/latest/schema/langchain.schema.runnable.base.RunnableMap.html) let you split or fork the chain so multiple components can process the input in parallel. Later, other components can join or merge the results to synthesize a final response. This type of chain creates a computation graph that looks like the following:\n",
"\n",
"```text\n",
" Input\n",
" / \\\n",
" / \\\n",
" Branch1 Branch2\n",
" \\ /\n",
" \\ /\n",
" Combine\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "247fa0bd-4596-4063-8cb3-1d7fc119d982",
"metadata": {},
"outputs": [],
"source": [
"planner = (\n",
" ChatPromptTemplate.from_template(\n",
" \"Generate an argument about: {input}\"\n",
" )\n",
" | ChatOpenAI()\n",
" | StrOutputParser()\n",
" | {\"base_response\": RunnablePassthrough()}\n",
")\n",
"\n",
"arguments_for = (\n",
" ChatPromptTemplate.from_template(\n",
" \"List the pros or positive aspects of {base_response}\"\n",
" )\n",
" | ChatOpenAI()\n",
" | StrOutputParser()\n",
")\n",
"arguments_against = (\n",
" ChatPromptTemplate.from_template(\n",
" \"List the cons or negative aspects of {base_response}\"\n",
" )\n",
" | ChatOpenAI()\n",
" | StrOutputParser()\n",
")\n",
"\n",
"final_responder = (\n",
" ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"ai\", \"{original_response}\"),\n",
" (\"human\", \"Pros:\\n{results_1}\\n\\nCons:\\n{results_2}\"),\n",
" (\"system\", \"Generate a final response given the critique\"),\n",
" ]\n",
" )\n",
" | ChatOpenAI()\n",
" | StrOutputParser()\n",
")\n",
"\n",
"chain = (\n",
" planner \n",
" | {\n",
" \"results_1\": arguments_for,\n",
" \"results_2\": arguments_against,\n",
" \"original_response\": itemgetter(\"base_response\"),\n",
" }\n",
" | final_responder\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "2564f310-0674-4bb1-9c4e-d7848ca73511",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'While Scrum has its potential cons and challenges, many organizations have successfully embraced and implemented this project management framework to great effect. The cons mentioned above can be mitigated or overcome with proper training, support, and a commitment to continuous improvement. It is also important to note that not all cons may be applicable to every organization or project.\\n\\nFor example, while Scrum may be complex initially, with proper training and guidance, teams can quickly grasp the concepts and practices. The lack of predictability can be mitigated by implementing techniques such as velocity tracking and release planning. The limited documentation can be addressed by maintaining a balance between lightweight documentation and clear communication among team members. The dependency on team collaboration can be improved through effective communication channels and regular team-building activities.\\n\\nScrum can be scaled and adapted to larger projects by using frameworks like Scrum of Scrums or LeSS (Large Scale Scrum). Concerns about speed versus quality can be addressed by incorporating quality assurance practices, such as continuous integration and automated testing, into the Scrum process. Scope creep can be managed by having a well-defined and prioritized product backlog, and a strong product owner can be developed through training and mentorship.\\n\\nResistance to change can be overcome by providing proper education and communication to stakeholders and involving them in the decision-making process. Ultimately, the cons of Scrum can be seen as opportunities for growth and improvement, and with the right mindset and support, they can be effectively managed.\\n\\nIn conclusion, while Scrum may have its challenges and potential cons, the benefits and advantages it offers in terms of collaboration, flexibility, adaptability, transparency, and customer satisfaction make it a widely adopted and successful project management framework. With proper implementation and continuous improvement, organizations can leverage Scrum to drive innovation, efficiency, and project success.'"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"input\": \"scrum\"})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv",
"language": "python",
"name": "poetry-venv"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,431 @@
{
"cells": [
{
"cell_type": "raw",
"id": "abf7263d-3a62-4016-b5d5-b157f92f2070",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 0\n",
"title: Prompt + LLM\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "9a434f2b-9405-468c-9dfd-254d456b57a6",
"metadata": {},
"source": [
"The most common and valuable composition is taking:\n",
"\n",
"``PromptTemplate`` / ``ChatPromptTemplate`` -> ``LLM`` / ``ChatModel`` -> ``OutputParser``\n",
"\n",
"Almost any other chains you build will use this building block."
]
},
{
"cell_type": "markdown",
"id": "93aa2c87",
"metadata": {},
"source": [
"## PromptTemplate + LLM\n",
"\n",
"The simplest composition is just combing a prompt and model to create a chain that takes user input, adds it to a prompt, passes it to a model, and returns the raw model input.\n",
"\n",
"Note, you can mix and match PromptTemplate/ChatPromptTemplates and LLMs/ChatModels as you like here."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "466b65b3",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"tell me a joke about {foo}\")\n",
"model = ChatOpenAI()\n",
"chain = prompt | model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "e3d0a6cd",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"Why don't bears wear shoes?\\n\\nBecause they have bear feet!\", additional_kwargs={}, example=False)"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"})"
]
},
{
"cell_type": "markdown",
"id": "7eb9ef50",
"metadata": {},
"source": [
"Often times we want to attach kwargs that'll be passed to each model call. Here's a few examples of that:"
]
},
{
"cell_type": "markdown",
"id": "0b1d8f88",
"metadata": {},
"source": [
"### Attaching Stop Sequences"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "562a06bf",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model.bind(stop=[\"\\n\"])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "43f5d04c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Why did the bear never wear shoes?', additional_kwargs={}, example=False)"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"})"
]
},
{
"cell_type": "markdown",
"id": "f3eaf88a",
"metadata": {},
"source": [
"### Attaching Function Call information"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "f94b71b2",
"metadata": {},
"outputs": [],
"source": [
"functions = [\n",
" {\n",
" \"name\": \"joke\",\n",
" \"description\": \"A joke\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"setup\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The setup for the joke\"\n",
" },\n",
" \"punchline\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The punchline for the joke\"\n",
" }\n",
" },\n",
" \"required\": [\"setup\", \"punchline\"]\n",
" }\n",
" }\n",
" ]\n",
"chain = prompt | model.bind(function_call= {\"name\": \"joke\"}, functions= functions)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "decf7710",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'joke', 'arguments': '{\\n \"setup\": \"Why don\\'t bears wear shoes?\",\\n \"punchline\": \"Because they have bear feet!\"\\n}'}}, example=False)"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"}, config={})"
]
},
{
"cell_type": "markdown",
"id": "9098c5ed",
"metadata": {},
"source": [
"## PromptTemplate + LLM + OutputParser\n",
"\n",
"We can also add in an output parser to easily trasform the raw LLM/ChatModel output into a more workable format"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "cc194c78",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.output_parser import StrOutputParser\n",
"\n",
"chain = prompt | model | StrOutputParser()"
]
},
{
"cell_type": "markdown",
"id": "77acf448",
"metadata": {},
"source": [
"Notice that this now returns a string - a much more workable format for downstream tasks"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e3d69a18",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Why don't bears wear shoes?\\n\\nBecause they have bear feet!\""
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"})"
]
},
{
"cell_type": "markdown",
"id": "c01864e5",
"metadata": {},
"source": [
"### Functions Output Parser\n",
"\n",
"When you specify the function to return, you may just want to parse that directly"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "ad0dd88e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser\n",
"\n",
"chain = (\n",
" prompt \n",
" | model.bind(function_call= {\"name\": \"joke\"}, functions= functions) \n",
" | JsonOutputFunctionsParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "1e7aa8eb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'setup': \"Why don't bears like fast food?\",\n",
" 'punchline': \"Because they can't catch it!\"}"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"})"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "d4aa1a01",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser\n",
"\n",
"chain = (\n",
" prompt \n",
" | model.bind(function_call= {\"name\": \"joke\"}, functions= functions) \n",
" | JsonKeyOutputFunctionsParser(key_name=\"setup\")\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "8b6df9ba",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Why don't bears wear shoes?\""
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"foo\": \"bears\"})"
]
},
{
"cell_type": "markdown",
"id": "023fbccb-ef7d-489e-a9ba-f98e17283d51",
"metadata": {},
"source": [
"## Simplifying input\n",
"\n",
"To make invocation even simpler, we can add a `RunnableMap` to take care of creating the prompt input dict for us:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "9601c0f0-71f9-4bd4-a672-7bd04084b018",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnableMap, RunnablePassthrough\n",
"\n",
"map_ = RunnableMap({\"foo\": RunnablePassthrough()})\n",
"chain = (\n",
" map_ \n",
" | prompt\n",
" | model.bind(function_call= {\"name\": \"joke\"}, functions= functions) \n",
" | JsonKeyOutputFunctionsParser(key_name=\"setup\")\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "7ec4f154-fda5-4847-9220-41aa902fdc33",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Why don't bears wear shoes?\""
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"bears\")"
]
},
{
"cell_type": "markdown",
"id": "def00bfe-0f83-4805-8c8f-8a53f99fa8ea",
"metadata": {},
"source": [
"Since we're composing our map with another Runnable, we can even use some syntactic sugar and just use a dict:"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "7bf3846a-02ee-41a3-ba1b-a708827d4f3a",
"metadata": {},
"outputs": [],
"source": [
"chain = (\n",
" {\"foo\": RunnablePassthrough()} \n",
" | prompt\n",
" | model.bind(function_call= {\"name\": \"joke\"}, functions= functions) \n",
" | JsonKeyOutputFunctionsParser(key_name=\"setup\")\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "e566d6a1-538d-4cb5-a210-a63e082e4c74",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Why don't bears like fast food?\""
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"bears\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,461 @@
{
"cells": [
{
"cell_type": "raw",
"id": "abe47592-909c-4844-bf44-9e55c2fb4bfa",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: RAG\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "91c5ef3d",
"metadata": {},
"source": [
"Let's look at adding in a retrieval step to a prompt and LLM, which adds up to a \"retrieval-augmented generation\" chain"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7f25d9e9-d192-42e9-af50-5660a4bfb0d9",
"metadata": {},
"outputs": [],
"source": [
"!pip install langchain openai faiss-cpu"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "33be32af",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain.vectorstores import FAISS"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "bfc47ec1",
"metadata": {},
"outputs": [],
"source": [
"vectorstore = FAISS.from_texts([\"harrison worked at kensho\"], embedding=OpenAIEmbeddings())\n",
"retriever = vectorstore.as_retriever()\n",
"\n",
"template = \"\"\"Answer the question based only on the following context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\"\"\"\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"\n",
"model = ChatOpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "eae31755",
"metadata": {},
"outputs": [],
"source": [
"chain = (\n",
" {\"context\": retriever, \"question\": RunnablePassthrough()} \n",
" | prompt \n",
" | model \n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "f3040b0c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Harrison worked at Kensho.'"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"where did harrison work?\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "e1d20c7c",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Answer the question based only on the following context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\n",
"Answer in the following language: {language}\n",
"\"\"\"\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"\n",
"chain = {\n",
" \"context\": itemgetter(\"question\") | retriever, \n",
" \"question\": itemgetter(\"question\"), \n",
" \"language\": itemgetter(\"language\")\n",
"} | prompt | model | StrOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "7ee8b2d4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Harrison ha lavorato a Kensho.'"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"question\": \"where did harrison work\", \"language\": \"italian\"})"
]
},
{
"cell_type": "markdown",
"id": "f007669c",
"metadata": {},
"source": [
"## Conversational Retrieval Chain\n",
"\n",
"We can easily add in conversation history. This primarily means adding in chat_message_history"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "3f30c348",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnableMap\n",
"from langchain.schema import format_document"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "64ab1dbf",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"\n",
"_template = \"\"\"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\n",
"\n",
"Chat History:\n",
"{chat_history}\n",
"Follow Up Input: {question}\n",
"Standalone question:\"\"\"\n",
"CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "7d628c97",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Answer the question based only on the following context:\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\"\"\"\n",
"ANSWER_PROMPT = ChatPromptTemplate.from_template(template)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "f60a5d0f",
"metadata": {},
"outputs": [],
"source": [
"DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
"def _combine_documents(docs, document_prompt = DEFAULT_DOCUMENT_PROMPT, document_separator=\"\\n\\n\"):\n",
" doc_strings = [format_document(doc, document_prompt) for doc in docs]\n",
" return document_separator.join(doc_strings)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "7d007db6",
"metadata": {},
"outputs": [],
"source": [
"from typing import Tuple, List\n",
"def _format_chat_history(chat_history: List[Tuple]) -> str:\n",
" buffer = \"\"\n",
" for dialogue_turn in chat_history:\n",
" human = \"Human: \" + dialogue_turn[0]\n",
" ai = \"Assistant: \" + dialogue_turn[1]\n",
" buffer += \"\\n\" + \"\\n\".join([human, ai])\n",
" return buffer"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "5c32cc89",
"metadata": {},
"outputs": [],
"source": [
"_inputs = RunnableMap(\n",
" {\n",
" \"standalone_question\": {\n",
" \"question\": lambda x: x[\"question\"],\n",
" \"chat_history\": lambda x: _format_chat_history(x['chat_history'])\n",
" } | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(),\n",
" }\n",
")\n",
"_context = {\n",
" \"context\": itemgetter(\"standalone_question\") | retriever | _combine_documents,\n",
" \"question\": lambda x: x[\"standalone_question\"]\n",
"}\n",
"conversational_qa_chain = _inputs | _context | ANSWER_PROMPT | ChatOpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "135c8205",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Harrison was employed at Kensho.', additional_kwargs={}, example=False)"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversational_qa_chain.invoke({\n",
" \"question\": \"where did harrison work?\",\n",
" \"chat_history\": [],\n",
"})"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "424e7e7a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Harrison worked at Kensho.', additional_kwargs={}, example=False)"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversational_qa_chain.invoke({\n",
" \"question\": \"where did he work?\",\n",
" \"chat_history\": [(\"Who wrote this notebook?\", \"Harrison\")],\n",
"})"
]
},
{
"cell_type": "markdown",
"id": "c5543183",
"metadata": {},
"source": [
"### With Memory and returning source documents\n",
"\n",
"This shows how to use memory with the above. For memory, we need to manage that outside at the memory. For returning the retrieved documents, we just need to pass them through all the way."
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "e31dd17c",
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ConversationBufferMemory"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "d4bffe94",
"metadata": {},
"outputs": [],
"source": [
"memory = ConversationBufferMemory(return_messages=True, output_key=\"answer\", input_key=\"question\")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "733be985",
"metadata": {},
"outputs": [],
"source": [
"# First we add a step to load memory\n",
"# This needs to be a RunnableMap because its the first input\n",
"loaded_memory = RunnableMap(\n",
" {\n",
" \"question\": itemgetter(\"question\"),\n",
" \"memory\": memory.load_memory_variables,\n",
" }\n",
")\n",
"# Next we add a step to expand memory into the variables\n",
"expanded_memory = {\n",
" \"question\": itemgetter(\"question\"),\n",
" \"chat_history\": lambda x: x[\"memory\"][\"history\"]\n",
"}\n",
"\n",
"# Now we calculate the standalone question\n",
"standalone_question = {\n",
" \"standalone_question\": {\n",
" \"question\": lambda x: x[\"question\"],\n",
" \"chat_history\": lambda x: _format_chat_history(x['chat_history'])\n",
" } | CONDENSE_QUESTION_PROMPT | ChatOpenAI(temperature=0) | StrOutputParser(),\n",
"}\n",
"# Now we retrieve the documents\n",
"retrieved_documents = {\n",
" \"docs\": itemgetter(\"standalone_question\") | retriever,\n",
" \"question\": lambda x: x[\"standalone_question\"]\n",
"}\n",
"# Now we construct the inputs for the final prompt\n",
"final_inputs = {\n",
" \"context\": lambda x: _combine_documents(x[\"docs\"]),\n",
" \"question\": itemgetter(\"question\")\n",
"}\n",
"# And finally, we do the part that returns the answers\n",
"answer = {\n",
" \"answer\": final_inputs | ANSWER_PROMPT | ChatOpenAI(),\n",
" \"docs\": itemgetter(\"docs\"),\n",
"}\n",
"# And now we put it all together!\n",
"final_chain = loaded_memory | expanded_memory | standalone_question | retrieved_documents | answer"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "806e390c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'answer': AIMessage(content='Harrison was employed at Kensho.', additional_kwargs={}, example=False),\n",
" 'docs': [Document(page_content='harrison worked at kensho', metadata={})]}"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inputs = {\"question\": \"where did harrison work?\"}\n",
"result = final_chain.invoke(inputs)\n",
"result"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "977399fd",
"metadata": {},
"outputs": [],
"source": [
"# Note that the memory does not save automatically\n",
"# This will be improved in the future\n",
"# For now you need to save it yourself\n",
"memory.save_context(inputs, {\"answer\": result[\"answer\"].content})"
]
},
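Since saving is manual for now, a small convenience wrapper of our own (a sketch, not part of LangChain) can bundle the invoke-and-save steps:

```python
def ask(question: str) -> str:
    """Run the chain and persist the turn into memory in one call."""
    inputs = {"question": question}
    result = final_chain.invoke(inputs)
    memory.save_context(inputs, {"answer": result["answer"].content})
    return result["answer"].content
```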
{
"cell_type": "code",
"execution_count": 21,
"id": "f94f7de4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history': [HumanMessage(content='where did harrison work?', additional_kwargs={}, example=False),\n",
" AIMessage(content='Harrison was employed at Kensho.', additional_kwargs={}, example=False)]}"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"memory.load_memory_variables({})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv",
"language": "python",
"name": "poetry-venv"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -0,0 +1,227 @@
{
"cells": [
{
"cell_type": "raw",
"id": "c14da114-1a4a-487d-9cff-e0e8c30ba366",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 3\n",
"title: Querying a SQL DB\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "506e9636",
"metadata": {},
"source": [
"We can replicate our SQLDatabaseChain with Runnables."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "7a927516",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"\n",
"template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n",
"{schema}\n",
"\n",
"Question: {question}\n",
"SQL Query:\"\"\"\n",
"prompt = ChatPromptTemplate.from_template(template)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "3f51f386",
"metadata": {},
"outputs": [],
"source": [
"from langchain.utilities import SQLDatabase"
]
},
{
"cell_type": "markdown",
"id": "7c3449d6-684b-416e-ba16-90a035835a88",
"metadata": {},
"source": [
"We'll need the Chinook sample DB for this example. There's many places to download it from, e.g. https://database.guide/2-sample-databases-sqlite/"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "2ccca6fc",
"metadata": {},
"outputs": [],
"source": [
"db = SQLDatabase.from_uri(\"sqlite:///./Chinook.db\")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "05ba88ee",
"metadata": {},
"outputs": [],
"source": [
"def get_schema(_):\n",
" return db.get_table_info()"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "a4eda902",
"metadata": {},
"outputs": [],
"source": [
"def run_query(query):\n",
" return db.run(query)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "5046cb17",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnableLambda, RunnableMap\n",
"\n",
"model = ChatOpenAI()\n",
"\n",
"inputs = {\n",
" \"schema\": RunnableLambda(get_schema),\n",
" \"question\": itemgetter(\"question\")\n",
"}\n",
"sql_response = (\n",
" RunnableMap(inputs)\n",
" | prompt\n",
" | model.bind(stop=[\"\\nSQLResult:\"])\n",
" | StrOutputParser()\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "a5552039",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'SELECT COUNT(*) FROM Employee'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"sql_response.invoke({\"question\": \"How many employees are there?\"})"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "d6fee130",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Based on the table schema below, question, sql query, and sql response, write a natural language response:\n",
"{schema}\n",
"\n",
"Question: {question}\n",
"SQL Query: {query}\n",
"SQL Response: {response}\"\"\"\n",
"prompt_response = ChatPromptTemplate.from_template(template)"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "923aa634",
"metadata": {},
"outputs": [],
"source": [
"full_chain = (\n",
" RunnableMap({\n",
" \"question\": itemgetter(\"question\"),\n",
" \"query\": sql_response,\n",
" }) \n",
" | {\n",
" \"schema\": RunnableLambda(get_schema),\n",
" \"question\": itemgetter(\"question\"),\n",
" \"query\": itemgetter(\"query\"),\n",
" \"response\": lambda x: db.run(x[\"query\"]) \n",
" } \n",
" | prompt_response \n",
" | model\n",
")"
]
},
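Note that the chain executes model-generated SQL directly via `db.run`. A more cautious variant might gate execution on a simple read-only check first; this is a sketch of our own, assuming only SELECT statements should ever run:

```python
def safe_run(query: str) -> str:
    # Naive guard: only allow read-only SELECT statements.
    # A real system would use proper SQL parsing and database permissions.
    if not query.strip().lower().startswith("select"):
        raise ValueError(f"Refusing to execute non-SELECT query: {query!r}")
    return db.run(query)
```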
{
"cell_type": "code",
"execution_count": 27,
"id": "e94963d8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='There are 8 employees.', additional_kwargs={}, example=False)"
]
},
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"full_chain.invoke({\"question\": \"How many employees are there?\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4f358d7b-a721-4db3-9f92-f06913428afc",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -0,0 +1,122 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "29781123",
"metadata": {},
"source": [
"# Using tools\n",
"\n",
"You can use any Tools with Runnables easily."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a5c579dd-2e22-41b0-a789-346dfdecb5a2",
"metadata": {},
"outputs": [],
"source": [
"!pip install duckduckgo-search"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "9232d2a9",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.tools import DuckDuckGoSearchRun"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a0c64d2c",
"metadata": {},
"outputs": [],
"source": [
"search = DuckDuckGoSearchRun()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "391969b6",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"turn the following user input into a search query for a search engine:\n",
"\n",
"{input}\"\"\"\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"\n",
"model = ChatOpenAI()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e3d9d20d",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model | StrOutputParser() | search"
]
},
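To inspect the intermediate search query before it is sent to DuckDuckGo, one can invoke just the first part of the pipeline (a small sketch using the objects defined above; the printed query will vary):

```python
query_chain = prompt | model | StrOutputParser()
print(query_chain.invoke({"input": "I'd like to figure out what games are tonight"}))
# e.g. something like: sports games on TV tonight
```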
{
"cell_type": "code",
"execution_count": 9,
"id": "55f2967d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'What sports games are on TV today & tonight? Watch and stream live sports on TV today, tonight, tomorrow. Today\\'s 2023 sports TV schedule includes football, basketball, baseball, hockey, motorsports, soccer and more. Watch on TV or stream online on ESPN, FOX, FS1, CBS, NBC, ABC, Peacock, Paramount+, fuboTV, local channels and many other networks. MLB Games Tonight: How to Watch on TV, Streaming & Odds - Thursday, September 7. Seattle Mariners\\' Julio Rodriguez greets teammates in the dugout after scoring against the Oakland Athletics in a ... Circle - Country Music and Lifestyle. Live coverage of all the MLB action today is available to you, with the information provided below. The Brewers will look to pick up a road win at PNC Park against the Pirates on Wednesday at 12:35 PM ET. Check out the latest odds and with BetMGM Sportsbook. Use bonus code \"GNPLAY\" for special offers! MLB Games Tonight: How to Watch on TV, Streaming & Odds - Tuesday, September 5. Houston Astros\\' Kyle Tucker runs after hitting a double during the fourth inning of a baseball game against the Los Angeles Angels, Sunday, Aug. 13, 2023, in Houston. (AP Photo/Eric Christian Smith) (APMedia) The Houston Astros versus the Texas Rangers is one of ... The second half of tonight\\'s college football schedule still has some good games remaining to watch on your television.. We\\'ve already seen an exciting one when Colorado upset TCU. And we saw some ...'"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"input\": \"I'd like to figure out what games are tonight\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a16949cf-00ea-43c6-a6aa-797ad4f6918d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv",
"language": "python",
"name": "poetry-venv"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -0,0 +1,2 @@
label: 'How to'
position: 1

View File

@ -0,0 +1,158 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "fbc4bf6e",
"metadata": {},
"source": [
"# Run arbitrary functions\n",
"\n",
"You can use arbitrary functions in the pipeline\n",
"\n",
"Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single input and unpacks it into multiple argument."
]
},
{
"cell_type": "code",
"execution_count": 77,
"id": "6bb221b3",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnableLambda\n",
"\n",
"def length_function(text):\n",
" return len(text)\n",
"\n",
"def _multiple_length_function(text1, text2):\n",
" return len(text1) * len(text2)\n",
"\n",
"def multiple_length_function(_dict):\n",
" return _multiple_length_function(_dict[\"text1\"], _dict[\"text2\"])\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"what is {a} + {b}\")\n",
"\n",
"chain1 = prompt | model\n",
"\n",
"chain = {\n",
" \"a\": itemgetter(\"foo\") | RunnableLambda(length_function),\n",
" \"b\": {\"text1\": itemgetter(\"foo\"), \"text2\": itemgetter(\"bar\")} | RunnableLambda(multiple_length_function)\n",
"} | prompt | model"
]
},
{
"cell_type": "code",
"execution_count": 78,
"id": "5488ec85",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='3 + 9 equals 12.', additional_kwargs={}, example=False)"
]
},
"execution_count": 78,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"foo\": \"bar\", \"bar\": \"gah\"})"
]
},
{
"cell_type": "markdown",
"id": "4728ddd9-914d-42ce-ae9b-72c9ce8ec940",
"metadata": {},
"source": [
"## Accepting a Runnable Config\n",
"\n",
"Runnable lambdas can optionally accept a [RunnableConfig](https://api.python.langchain.com/en/latest/schema/langchain.schema.runnable.config.RunnableConfig.html?highlight=runnableconfig#langchain.schema.runnable.config.RunnableConfig), which they can use to pass callbacks, tags, and other configuration information to nested runs."
]
},
{
"cell_type": "code",
"execution_count": 139,
"id": "80b3b5f6-5d58-44b9-807e-cce9a46bf49f",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnableConfig"
]
},
{
"cell_type": "code",
"execution_count": 149,
"id": "ff0daf0c-49dd-4d21-9772-e5fa133c5f36",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"\n",
"def parse_or_fix(text: str, config: RunnableConfig):\n",
" fixing_chain = (\n",
" ChatPromptTemplate.from_template(\n",
" \"Fix the following text:\\n\\n```text\\n{input}\\n```\\nError: {error}\"\n",
" \" Don't narrate, just respond with the fixed data.\"\n",
" )\n",
" | ChatOpenAI()\n",
" | StrOutputParser()\n",
" )\n",
" for _ in range(3):\n",
" try:\n",
" return json.loads(text)\n",
" except Exception as e:\n",
" text = fixing_chain.invoke({\"input\": text, \"error\": e}, config)\n",
" return \"Failed to parse\""
]
},
{
"cell_type": "code",
"execution_count": 152,
"id": "1a5e709e-9d75-48c7-bb9c-503251990505",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tokens Used: 65\n",
"\tPrompt Tokens: 56\n",
"\tCompletion Tokens: 9\n",
"Successful Requests: 1\n",
"Total Cost (USD): $0.00010200000000000001\n"
]
}
],
"source": [
"from langchain.callbacks import get_openai_callback\n",
"\n",
"with get_openai_callback() as cb:\n",
" RunnableLambda(parse_or_fix).invoke(\"{foo: bar}\", {\"tags\": [\"my-tag\"], \"callbacks\": [cb]})\n",
" print(cb)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -1,12 +1,21 @@
{ {
"cells": [ "cells": [
{
"cell_type": "raw",
"id": "366a0e68-fd67-4fe5-a292-5c33733339ea",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 0\n",
"title: Interface\n",
"---"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "9a9acd2e", "id": "9a9acd2e",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Interface\n",
"\n",
"In an effort to make it as easy as possible to create custom chains, we've implemented a [\"Runnable\"](https://api.python.langchain.com/en/latest/schema/langchain.schema.runnable.Runnable.html#langchain.schema.runnable.Runnable) protocol that most components implement. This is a standard interface with a few different methods, which makes it easy to define custom chains as well as making it possible to invoke them in a standard way. The standard interface exposed includes:\n", "In an effort to make it as easy as possible to create custom chains, we've implemented a [\"Runnable\"](https://api.python.langchain.com/en/latest/schema/langchain.schema.runnable.Runnable.html#langchain.schema.runnable.Runnable) protocol that most components implement. This is a standard interface with a few different methods, which makes it easy to define custom chains as well as making it possible to invoke them in a standard way. The standard interface exposed includes:\n",
"\n", "\n",
"- `stream`: stream back chunks of the response\n", "- `stream`: stream back chunks of the response\n",
@ -429,7 +438,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.10.1" "version": "3.9.1"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@ -6,7 +6,7 @@
"source": [ "source": [
"# Data anonymization with Microsoft Presidio\n", "# Data anonymization with Microsoft Presidio\n",
"\n", "\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/guides/privacy/presidio_data_anonymization.ipynb)\n", "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/guides/privacy/presidio_data_anonymization/index.ipynb)\n",
"\n", "\n",
"## Use case\n", "## Use case\n",
"\n", "\n",
@ -28,7 +28,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 1,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -47,16 +47,16 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 14, "execution_count": 2,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"'My name is Mrs. Rachel Chen DDS, call me at 849-829-7628x073 or email me at christopherfrey@example.org'" "'My name is Laura Ruiz, call me at +1-412-982-8374x13414 or email me at javierwatkins@example.net'"
] ]
}, },
"execution_count": 14, "execution_count": 2,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -82,7 +82,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 3,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -94,35 +94,53 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 16, "execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"text = f\"\"\"Slim Shady recently lost his wallet. \n",
"Inside is some cash and his credit card with the number 4916 0387 9536 0861. \n",
"If you would find it, please call at 313-666-7440 or write an email here: real.slim.shady@gmail.com.\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"data": { "name": "stdout",
"text/plain": [ "output_type": "stream",
"AIMessage(content='You can find our super secret data at https://www.ross.com/', additional_kwargs={}, example=False)" "text": [
"Dear Sir/Madam,\n",
"\n",
"We regret to inform you that Richard Fields has recently misplaced his wallet, which contains a sum of cash and his credit card bearing the number 30479847307774. \n",
"\n",
"Should you happen to come across it, we kindly request that you contact us immediately at 6439182672 or via email at frank45@example.com.\n",
"\n",
"Thank you for your attention to this matter.\n",
"\n",
"Yours faithfully,\n",
"\n",
"[Your Name]\n"
] ]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
} }
], ],
"source": [ "source": [
"from langchain.prompts.prompt import PromptTemplate\n", "from langchain.prompts.prompt import PromptTemplate\n",
"from langchain.chat_models import ChatOpenAI\n", "from langchain.chat_models import ChatOpenAI\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"\n", "\n",
"template = \"\"\"According to this text, where can you find our super secret data?\n", "anonymizer = PresidioAnonymizer()\n",
"\n", "\n",
"{anonymized_text}\n", "template = \"\"\"Rewrite this text into an official, short email:\n",
"\n", "\n",
"Answer:\"\"\"\n", "{anonymized_text}\"\"\"\n",
"prompt = PromptTemplate.from_template(template)\n", "prompt = PromptTemplate.from_template(template)\n",
"llm = ChatOpenAI()\n", "llm = ChatOpenAI(temperature=0)\n",
"\n", "\n",
"chain = {\"anonymized_text\": anonymizer.anonymize} | prompt | llm\n", "chain = {\"anonymized_text\": anonymizer.anonymize} | prompt | llm\n",
"chain.invoke(\"You can find our super secret data at https://supersecretdata.com\")" "response = chain.invoke(text)\n",
"print(response.content)"
] ]
}, },
{ {
@ -135,16 +153,16 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 18, "execution_count": 6,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"'My name is Gabrielle Edwards, call me at 313-666-7440 or email me at real.slim.shady@gmail.com'" "'My name is Adrian Fleming, call me at 313-666-7440 or email me at real.slim.shady@gmail.com'"
] ]
}, },
"execution_count": 18, "execution_count": 6,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -166,16 +184,16 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 3, "execution_count": 7,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"'My name is Victoria Mckinney, call me at 713-549-8623 or email me at real.slim.shady@gmail.com'" "'My name is Justin Miller, call me at 761-824-1889 or email me at real.slim.shady@gmail.com'"
] ]
}, },
"execution_count": 3, "execution_count": 7,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -201,16 +219,16 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 4, "execution_count": 8,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"'My name is Billy Russo, call me at 970-996-9453x038 or email me at jamie80@example.org'" "'My name is Dr. Jennifer Baker, call me at (508)839-9329x232 or email me at ehamilton@example.com'"
] ]
}, },
"execution_count": 4, "execution_count": 8,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -232,16 +250,16 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 5, "execution_count": 9,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"'My polish phone number is EVIA70648911396944'" "'My polish phone number is NRGN41434238921378'"
] ]
}, },
"execution_count": 5, "execution_count": 9,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -261,7 +279,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 6, "execution_count": 10,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -291,7 +309,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 7, "execution_count": 11,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -308,7 +326,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 8, "execution_count": 12,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
@ -337,16 +355,16 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 9, "execution_count": 13,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"'+48 533 220 543'" "'511 622 683'"
] ]
}, },
"execution_count": 9, "execution_count": 13,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -374,7 +392,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 10, "execution_count": 14,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -389,7 +407,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 11, "execution_count": 15,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -398,16 +416,16 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 12, "execution_count": 16,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"'My polish phone number is +48 692 715 636'" "'My polish phone number is +48 734 630 977'"
] ]
}, },
"execution_count": 12, "execution_count": 16,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -421,8 +439,6 @@
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Future works\n", "## Future works\n",
"\n",
"- **deanonymization** - add the ability to reverse anonymization. For example, the workflow could look like this: `anonymize -> LLMChain -> deanonymize`. By doing this, we will retain anonymity in requests to, for example, OpenAI, and then be able restore the original data.\n",
"- **instance anonymization** - at this point, each occurrence of PII is treated as a separate entity and separately anonymized. Therefore, two occurrences of the name John Doe in the text will be changed to two different names. It is therefore worth introducing support for full instance detection, so that repeated occurrences are treated as a single object." "- **instance anonymization** - at this point, each occurrence of PII is treated as a separate entity and separately anonymized. Therefore, two occurrences of the name John Doe in the text will be changed to two different names. It is therefore worth introducing support for full instance detection, so that repeated occurrences are treated as a single object."
] ]
} }

View File

@ -0,0 +1,520 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Mutli-language data anonymization with Microsoft Presidio\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/guides/privacy/presidio_data_anonymization/multi_language.ipynb)\n",
"\n",
"\n",
"## Use case\n",
"\n",
"Multi-language support in data pseudonymization is essential due to differences in language structures and cultural contexts. Different languages may have varying formats for personal identifiers. For example, the structure of names, locations and dates can differ greatly between languages and regions. Furthermore, non-alphanumeric characters, accents, and the direction of writing can impact pseudonymization processes. Without multi-language support, data could remain identifiable or be misinterpreted, compromising data privacy and accuracy. Hence, it enables effective and precise pseudonymization suited for global operations.\n",
"\n",
"## Overview\n",
"\n",
"PII detection in Microsoft Presidio relies on several components - in addition to the usual pattern matching (e.g. using regex), the analyser uses a model for Named Entity Recognition (NER) to extract entities such as:\n",
"- `PERSON`\n",
"- `LOCATION`\n",
"- `DATE_TIME`\n",
"- `NRP`\n",
"- `ORGANIZATION`\n",
"\n",
"[[Source]](https://github.com/microsoft/presidio/blob/main/presidio-analyzer/presidio_analyzer/predefined_recognizers/spacy_recognizer.py)\n",
"\n",
"To handle NER in specific languages, we utilize unique models from the `spaCy` library, recognized for its extensive selection covering multiple languages and sizes. However, it's not restrictive, allowing for integration of alternative frameworks such as [Stanza](https://microsoft.github.io/presidio/analyzer/nlp_engines/spacy_stanza/) or [transformers](https://microsoft.github.io/presidio/analyzer/nlp_engines/transformers/) when necessary.\n",
"\n",
"\n",
"## Quickstart\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# Install necessary packages\n",
"# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker\n",
"# ! python -m spacy download en_core_web_lg"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer\n",
"\n",
"anonymizer = PresidioReversibleAnonymizer(\n",
" analyzed_fields=[\"PERSON\"],\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"By default, `PresidioAnonymizer` and `PresidioReversibleAnonymizer` use a model trained on English texts, so they handle other languages moderately well. \n",
"\n",
"For example, here the model did not detect the person:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Me llamo Sofía'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"anonymizer.anonymize(\"Me llamo Sofía\") # \"My name is Sofía\" in Spanish"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"They may also take words from another language as actual entities. Here, both the word *'Yo'* (*'I'* in Spanish) and *Sofía* have been classified as `PERSON`:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Bridget Kirk soy Sally Knight'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"anonymizer.anonymize(\"Yo soy Sofía\") # \"I am Sofía\" in Spanish"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you want to anonymise texts from other languages, you need to download other models and add them to the anonymiser configuration:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"# Download the models for the languages you want to use\n",
"# ! python -m spacy download en_core_web_md\n",
"# ! python -m spacy download es_core_news_md"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"nlp_config = {\n",
" \"nlp_engine_name\": \"spacy\",\n",
" \"models\": [\n",
" {\"lang_code\": \"en\", \"model_name\": \"en_core_web_md\"},\n",
" {\"lang_code\": \"es\", \"model_name\": \"es_core_news_md\"},\n",
" ],\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We have therefore added a Spanish language model. Note also that we have downloaded an alternative model for English as well - in this case we have replaced the large model `en_core_web_lg` (560MB) with its smaller version `en_core_web_md` (40MB) - the size is therefore reduced by 14 times! If you care about the speed of anonymisation, it is worth considering it.\n",
"\n",
"All models for the different languages can be found in the [spaCy documentation](https://spacy.io/usage/models).\n",
"\n",
"Now pass the configuration as the `languages_config` parameter to Anonymiser. As you can see, both previous examples work flawlessly:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Me llamo Michelle Smith\n",
"Yo soy Rachel Wright\n"
]
}
],
"source": [
"anonymizer = PresidioReversibleAnonymizer(\n",
" analyzed_fields=[\"PERSON\"],\n",
" languages_config=nlp_config,\n",
")\n",
"\n",
"print(\n",
" anonymizer.anonymize(\"Me llamo Sofía\", language=\"es\")\n",
") # \"My name is Sofía\" in Spanish\n",
"print(anonymizer.anonymize(\"Yo soy Sofía\", language=\"es\")) # \"I am Sofía\" in Spanish"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"By default, the language indicated first in the configuration will be used when anonymising text (in this case English):"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"My name is Ronnie Ayala\n"
]
}
],
"source": [
"print(anonymizer.anonymize(\"My name is John\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Advanced usage\n",
"\n",
"### Custom labels in NER model"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"It may be that the spaCy model has different class names than those supported by the Microsoft Presidio by default. Take Polish, for example:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Text: Wiktoria, Start: 12, End: 20, Label: persName\n"
]
}
],
"source": [
"# ! python -m spacy download pl_core_news_md\n",
"\n",
"import spacy\n",
"\n",
"nlp = spacy.load(\"pl_core_news_md\")\n",
"doc = nlp(\"Nazywam się Wiktoria\") # \"My name is Wiktoria\" in Polish\n",
"\n",
"for ent in doc.ents:\n",
" print(\n",
" f\"Text: {ent.text}, Start: {ent.start_char}, End: {ent.end_char}, Label: {ent.label_}\"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The name *Victoria* was classified as `persName`, which does not correspond to the default class names `PERSON`/`PER` implemented in Microsoft Presidio (look for `CHECK_LABEL_GROUPS` in [SpacyRecognizer implementation](https://github.com/microsoft/presidio/blob/main/presidio-analyzer/presidio_analyzer/predefined_recognizers/spacy_recognizer.py)). \n",
"\n",
"You can find out more about custom labels in spaCy models (including your own, trained ones) in [this thread](https://github.com/microsoft/presidio/issues/851).\n",
"\n",
"That's why our sentence will not be anonymized:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Nazywam się Wiktoria\n"
]
}
],
"source": [
"nlp_config = {\n",
" \"nlp_engine_name\": \"spacy\",\n",
" \"models\": [\n",
" {\"lang_code\": \"en\", \"model_name\": \"en_core_web_md\"},\n",
" {\"lang_code\": \"es\", \"model_name\": \"es_core_news_md\"},\n",
" {\"lang_code\": \"pl\", \"model_name\": \"pl_core_news_md\"},\n",
" ],\n",
"}\n",
"\n",
"anonymizer = PresidioReversibleAnonymizer(\n",
" analyzed_fields=[\"PERSON\", \"LOCATION\", \"DATE_TIME\"],\n",
" languages_config=nlp_config,\n",
")\n",
"\n",
"print(\n",
" anonymizer.anonymize(\"Nazywam się Wiktoria\", language=\"pl\")\n",
") # \"My name is Wiktoria\" in Polish"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To address this, create your own `SpacyRecognizer` with your own class mapping and add it to the anonymizer:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"from presidio_analyzer.predefined_recognizers import SpacyRecognizer\n",
"\n",
"polish_check_label_groups = [\n",
" ({\"LOCATION\"}, {\"placeName\", \"geogName\"}),\n",
" ({\"PERSON\"}, {\"persName\"}),\n",
" ({\"DATE_TIME\"}, {\"date\", \"time\"}),\n",
"]\n",
"\n",
"spacy_recognizer = SpacyRecognizer(\n",
" supported_language=\"pl\",\n",
" check_label_groups=polish_check_label_groups,\n",
")\n",
"\n",
"anonymizer.add_recognizer(spacy_recognizer)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now everything works smoothly:"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Nazywam się Morgan Walters\n"
]
}
],
"source": [
"print(\n",
" anonymizer.anonymize(\"Nazywam się Wiktoria\", language=\"pl\")\n",
") # \"My name is Wiktoria\" in Polish"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's try on more complex example:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Nazywam się Ernest Liu. New Taylorburgh to moje miasto rodzinne. Urodziłam się 1987-01-19\n"
]
}
],
"source": [
"print(\n",
" anonymizer.anonymize(\n",
" \"Nazywam się Wiktoria. Płock to moje miasto rodzinne. Urodziłam się dnia 6 kwietnia 2001 roku\",\n",
" language=\"pl\",\n",
" )\n",
") # \"My name is Wiktoria. Płock is my home town. I was born on 6 April 2001\" in Polish"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As you can see, thanks to class mapping, the anonymiser can cope with different types of entities. "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Custom language-specific operators\n",
"\n",
"In the example above, the sentence has been anonymised correctly, but the fake data does not fit the Polish language at all. Custom operators can therefore be added, which will resolve the issue:"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"from faker import Faker\n",
"from presidio_anonymizer.entities import OperatorConfig\n",
"\n",
"fake = Faker(locale=\"pl_PL\") # Setting faker to provide Polish data\n",
"\n",
"new_operators = {\n",
" \"PERSON\": OperatorConfig(\"custom\", {\"lambda\": lambda _: fake.first_name_female()}),\n",
" \"LOCATION\": OperatorConfig(\"custom\", {\"lambda\": lambda _: fake.city()}),\n",
"}\n",
"\n",
"anonymizer.add_operators(new_operators)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Nazywam się Marianna. Szczecin to moje miasto rodzinne. Urodziłam się 1976-11-16\n"
]
}
],
"source": [
"print(\n",
" anonymizer.anonymize(\n",
" \"Nazywam się Wiktoria. Płock to moje miasto rodzinne. Urodziłam się dnia 6 kwietnia 2001 roku\",\n",
" language=\"pl\",\n",
" )\n",
") # \"My name is Wiktoria. Płock is my home town. I was born on 6 April 2001\" in Polish"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Limitations\n",
"\n",
"Remember - results are as good as your recognizers and as your NER models!\n",
"\n",
"Look at the example below - we downloaded the small model for Spanish (12MB) and it no longer performs as well as the medium version (40MB):"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: es_core_news_sm. Result: Me llamo Sofía\n",
"Model: es_core_news_md. Result: Me llamo Lawrence Davis\n"
]
}
],
"source": [
"# ! python -m spacy download es_core_news_sm\n",
"\n",
"for model in [\"es_core_news_sm\", \"es_core_news_md\"]:\n",
" nlp_config = {\n",
" \"nlp_engine_name\": \"spacy\",\n",
" \"models\": [\n",
" {\"lang_code\": \"es\", \"model_name\": model},\n",
" ],\n",
" }\n",
"\n",
" anonymizer = PresidioReversibleAnonymizer(\n",
" analyzed_fields=[\"PERSON\"],\n",
" languages_config=nlp_config,\n",
" )\n",
"\n",
" print(\n",
" f\"Model: {model}. Result: {anonymizer.anonymize('Me llamo Sofía', language='es')}\"\n",
" )"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In many cases, even the larger models from spaCy will not be sufficient - there are already other, more complex and better methods of detecting named entities, based on transformers. You can read more about this [here](https://microsoft.github.io/presidio/analyzer/nlp_engines/transformers/)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Future works\n",
"\n",
"- **automatic language detection** - instead of passing the language as a parameter in `anonymizer.anonymize`, we could detect the language/s beforehand and then use the corresponding NER model."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@ -0,0 +1,461 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Reversible data anonymization with Microsoft Presidio\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/guides/privacy/presidio_data_anonymization/reversible.ipynb)\n",
"\n",
"\n",
"## Use case\n",
"\n",
"We have already written about the importance of anonymizing sensitive data in the previous section. **Reversible Anonymization** is an equally essential technology while sharing information with language models, as it balances data protection with data usability. This technique involves masking sensitive personally identifiable information (PII), yet it can be reversed and original data can be restored when authorized users need it. Its main advantage lies in the fact that while it conceals individual identities to prevent misuse, it also allows the concealed data to be accurately unmasked should it be necessary for legal or compliance purposes. \n",
"\n",
"## Overview\n",
"\n",
"We implemented the `PresidioReversibleAnonymizer`, which consists of two parts:\n",
"\n",
"1. anonymization - it works the same way as `PresidioAnonymizer`, plus the object itself stores a mapping of made-up values to original ones, for example:\n",
"```\n",
" {\n",
" \"PERSON\": {\n",
" \"<anonymized>\": \"<original>\",\n",
" \"John Doe\": \"Slim Shady\"\n",
" },\n",
" \"PHONE_NUMBER\": {\n",
" \"111-111-1111\": \"555-555-5555\"\n",
" }\n",
" ...\n",
" }\n",
"```\n",
"\n",
"2. deanonymization - using the mapping described above, it matches fake data with original data and then substitutes it.\n",
"\n",
"Between anonymization and deanonymization user can perform different operations, for example, passing the output to LLM.\n",
"\n",
"## Quickstart\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# Install necessary packages\n",
"# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker\n",
"# ! python -m spacy download en_core_web_lg"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`PresidioReversibleAnonymizer` is not significantly different from its predecessor (`PresidioAnonymizer`) in terms of anonymization:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'My name is Maria Lynch, call me at 7344131647 or email me at jamesmichael@example.com. By the way, my card number is: 4838637940262'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer\n",
"\n",
"anonymizer = PresidioReversibleAnonymizer(\n",
" analyzed_fields=[\"PERSON\", \"PHONE_NUMBER\", \"EMAIL_ADDRESS\", \"CREDIT_CARD\"],\n",
" # Faker seed is used here to make sure the same fake data is generated for the test purposes\n",
" # In production, it is recommended to remove the faker_seed parameter (it will default to None)\n",
" faker_seed=42,\n",
")\n",
"\n",
"anonymizer.anonymize(\n",
" \"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com. \"\n",
" \"By the way, my card number is: 4916 0387 9536 0861\"\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is what the full string we want to deanonymize looks like:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Maria Lynch recently lost his wallet. \n",
"Inside is some cash and his credit card with the number 4838637940262. \n",
"If you would find it, please call at 7344131647 or write an email here: jamesmichael@example.com.\n",
"Maria Lynch would be very grateful!\n"
]
}
],
"source": [
"# We know this data, as we set the faker_seed parameter\n",
"fake_name = \"Maria Lynch\"\n",
"fake_phone = \"7344131647\"\n",
"fake_email = \"jamesmichael@example.com\"\n",
"fake_credit_card = \"4838637940262\"\n",
"\n",
"anonymized_text = f\"\"\"{fake_name} recently lost his wallet. \n",
"Inside is some cash and his credit card with the number {fake_credit_card}. \n",
"If you would find it, please call at {fake_phone} or write an email here: {fake_email}.\n",
"{fake_name} would be very grateful!\"\"\"\n",
"\n",
"print(anonymized_text)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And now, using the `deanonymize` method, we can reverse the process:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Slim Shady recently lost his wallet. \n",
"Inside is some cash and his credit card with the number 4916 0387 9536 0861. \n",
"If you would find it, please call at 313-666-7440 or write an email here: real.slim.shady@gmail.com.\n",
"Slim Shady would be very grateful!\n"
]
}
],
"source": [
"print(anonymizer.deanonymize(anonymized_text))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using with LangChain Expression Language\n",
"\n",
"With LCEL we can easily chain together anonymization and deanonymization with the rest of our application. This is an example of using the anonymization mechanism with a query to LLM (without deanonymization for now):"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"text = f\"\"\"Slim Shady recently lost his wallet. \n",
"Inside is some cash and his credit card with the number 4916 0387 9536 0861. \n",
"If you would find it, please call at 313-666-7440 or write an email here: real.slim.shady@gmail.com.\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Dear Sir/Madam,\n",
"\n",
"We regret to inform you that Mr. Dana Rhodes has reported the loss of his wallet. The wallet contains a sum of cash and his credit card, bearing the number 4397528473885757. \n",
"\n",
"If you happen to come across the aforementioned wallet, we kindly request that you contact us immediately at 258-481-7074x714 or via email at laurengoodman@example.com.\n",
"\n",
"Your prompt assistance in this matter would be greatly appreciated.\n",
"\n",
"Yours faithfully,\n",
"\n",
"[Your Name]\n"
]
}
],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"anonymizer = PresidioReversibleAnonymizer()\n",
"\n",
"template = \"\"\"Rewrite this text into an official, short email:\n",
"\n",
"{anonymized_text}\"\"\"\n",
"prompt = PromptTemplate.from_template(template)\n",
"llm = ChatOpenAI(temperature=0)\n",
"\n",
"chain = {\"anonymized_text\": anonymizer.anonymize} | prompt | llm\n",
"response = chain.invoke(text)\n",
"print(response.content)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, let's add **deanonymization step** to our sequence:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Dear Sir/Madam,\n",
"\n",
"We regret to inform you that Mr. Slim Shady has recently misplaced his wallet. The wallet contains a sum of cash and his credit card, bearing the number 4916 0387 9536 0861. \n",
"\n",
"If by any chance you come across the lost wallet, kindly contact us immediately at 313-666-7440 or send an email to real.slim.shady@gmail.com.\n",
"\n",
"Your prompt assistance in this matter would be greatly appreciated.\n",
"\n",
"Yours faithfully,\n",
"\n",
"[Your Name]\n"
]
}
],
"source": [
"chain = chain | (lambda ai_message: anonymizer.deanonymize(ai_message.content))\n",
"response = chain.invoke(text)\n",
"print(response)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Anonymized data was given to the model itself, and therefore it was protected from being leaked to the outside world. Then, the model's response was processed, and the factual value was replaced with the real one."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Extra knowledge"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`PresidioReversibleAnonymizer` stores the mapping of the fake values to the original values in the `deanonymizer_mapping` parameter, where key is fake PII and value is the original one: "
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'PERSON': {'Maria Lynch': 'Slim Shady'},\n",
" 'PHONE_NUMBER': {'7344131647': '313-666-7440'},\n",
" 'EMAIL_ADDRESS': {'jamesmichael@example.com': 'real.slim.shady@gmail.com'},\n",
" 'CREDIT_CARD': {'4838637940262': '4916 0387 9536 0861'}}"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer\n",
"\n",
"anonymizer = PresidioReversibleAnonymizer(\n",
" analyzed_fields=[\"PERSON\", \"PHONE_NUMBER\", \"EMAIL_ADDRESS\", \"CREDIT_CARD\"],\n",
" # Faker seed is used here to make sure the same fake data is generated for the test purposes\n",
" # In production, it is recommended to remove the faker_seed parameter (it will default to None)\n",
" faker_seed=42,\n",
")\n",
"\n",
"anonymizer.anonymize(\n",
" \"My name is Slim Shady, call me at 313-666-7440 or email me at real.slim.shady@gmail.com. \"\n",
" \"By the way, my card number is: 4916 0387 9536 0861\"\n",
")\n",
"\n",
"anonymizer.deanonymizer_mapping"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Anonymizing more texts will result in new mapping entries:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Do you have his VISA card number? Yep, it's 3537672423884966. I'm William Bowman by the way.\n"
]
},
{
"data": {
"text/plain": [
"{'PERSON': {'Maria Lynch': 'Slim Shady', 'William Bowman': 'John Doe'},\n",
" 'PHONE_NUMBER': {'7344131647': '313-666-7440'},\n",
" 'EMAIL_ADDRESS': {'jamesmichael@example.com': 'real.slim.shady@gmail.com'},\n",
" 'CREDIT_CARD': {'4838637940262': '4916 0387 9536 0861',\n",
" '3537672423884966': '4001 9192 5753 7193'}}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"print(\n",
" anonymizer.anonymize(\n",
" \"Do you have his VISA card number? Yep, it's 4001 9192 5753 7193. I'm John Doe by the way.\"\n",
" )\n",
")\n",
"\n",
"anonymizer.deanonymizer_mapping"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can save the mapping itself to a file for future use: "
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# We can save the deanonymizer mapping as a JSON or YAML file\n",
"\n",
"anonymizer.save_deanonymizer_mapping(\"deanonymizer_mapping.json\")\n",
"# anonymizer.save_deanonymizer_mapping(\"deanonymizer_mapping.yaml\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And then, load it in another `PresidioReversibleAnonymizer` instance:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"anonymizer = PresidioReversibleAnonymizer()\n",
"\n",
"anonymizer.deanonymizer_mapping"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'PERSON': {'Maria Lynch': 'Slim Shady', 'William Bowman': 'John Doe'},\n",
" 'PHONE_NUMBER': {'7344131647': '313-666-7440'},\n",
" 'EMAIL_ADDRESS': {'jamesmichael@example.com': 'real.slim.shady@gmail.com'},\n",
" 'CREDIT_CARD': {'4838637940262': '4916 0387 9536 0861',\n",
" '3537672423884966': '4001 9192 5753 7193'}}"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"anonymizer.load_deanonymizer_mapping(\"deanonymizer_mapping.json\")\n",
"\n",
"anonymizer.deanonymizer_mapping"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Future works\n",
"\n",
"- **instance anonymization** - at this point, each occurrence of PII is treated as a separate entity and separately anonymized. Therefore, two occurrences of the name John Doe in the text will be changed to two different names. It is therefore worth introducing support for full instance detection, so that repeated occurrences are treated as a single object.\n",
"- **better matching and substitution of fake values for real ones** - currently the strategy is based on matching full strings and then substituting them. Due to the indeterminism of language models, it may happen that the value in the answer is slightly changed (e.g. *John Doe* -> *John* or *Main St, New York* -> *New York*) and such a substitution is then no longer possible. Therefore, it is worth adjusting the matching for your needs."
]
}
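As a rough illustration of the second point, the exact-string substitution could be relaxed to a case-insensitive pass over the stored mapping. This is a sketch of our own, not part of `PresidioReversibleAnonymizer`:

```python
import re

def fuzzy_deanonymize(text: str, deanonymizer_mapping: dict) -> str:
    # Replace each fake value with its original, ignoring case,
    # using the {entity_type: {fake: original}} mapping structure shown above.
    for entity_map in deanonymizer_mapping.values():
        for fake, original in entity_map.items():
            text = re.compile(re.escape(fake), re.IGNORECASE).sub(original, text)
    return text
```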
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@ -512,9 +512,9 @@
"# Examples\n", "# Examples\n",
"---\n", "---\n",
"\n", "\n",
"## With HuggingFace Hub Models\n", "## With Hugging Face Hub Models\n",
"\n", "\n",
"Get your API Key from Huggingface hub - https://huggingface.co/docs/api-inference/quicktour#get-your-api-token" "Get your API Key from Hugging Face hub - https://huggingface.co/docs/api-inference/quicktour#get-your-api-token"
] ]
}, },
{ {

View File

@ -18,7 +18,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 1,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -93,8 +93,22 @@
} }
], ],
"metadata": { "metadata": {
"kernelspec": {
"display_name": "langchain",
"language": "python",
"name": "python3"
},
"language_info": { "language_info": {
"name": "python" "codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}, },
"orig_nbformat": 4 "orig_nbformat": 4
}, },

View File

@ -31,11 +31,16 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"# get new tokens: https://app.banana.dev/\n", "# get new tokens: https://app.banana.dev/\n",
"# We need two tokens, not just an `api_key`: `BANANA_API_KEY` and `YOUR_MODEL_KEY`\n", "# We need three parameters to make a Banana.dev API call:\n",
"# * a team api key\n",
"# * the model's unique key\n",
"# * the model's url slug\n",
"\n", "\n",
"import os\n", "import os\n",
"from getpass import getpass\n", "from getpass import getpass\n",
"\n", "\n",
"# You can get this from the main dashboard\n",
"# at https://app.banana.dev\n",
"os.environ[\"BANANA_API_KEY\"] = \"YOUR_API_KEY\"\n", "os.environ[\"BANANA_API_KEY\"] = \"YOUR_API_KEY\"\n",
"# OR\n", "# OR\n",
"# BANANA_API_KEY = getpass()" "# BANANA_API_KEY = getpass()"
@ -70,7 +75,9 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"llm = Banana(model_key=\"YOUR_MODEL_KEY\")" "# Both of these are found in your model's \n",
"# detail page in https://app.banana.dev\n",
"llm = Banana(model_key=\"YOUR_MODEL_KEY\", model_url_slug=\"YOUR_MODEL_URL_SLUG\")"
] ]
}, },
{ {

View File

@ -9,13 +9,20 @@ pip install awadb
``` ```
## VectorStore ## Vector Store
There exists a wrapper around AwaDB vector databases, allowing you to use it as a vectorstore,
whether for semantic search or example selection.
```python ```python
from langchain.vectorstores import AwaDB from langchain.vectorstores import AwaDB
``` ```
For a more detailed walkthrough of the AwaDB wrapper, see [here](/docs/integrations/vectorstores/awadb.html). See a [usage example](/docs/integrations/vectorstores/awadb).
## Text Embedding Model
```python
from langchain.embeddings import AwaEmbeddings
```
See a [usage example](/docs/integrations/text_embedding/awadb).

View File

@ -1,79 +1,72 @@
# Banana # Banana
This page covers how to use the Banana ecosystem within LangChain. Banana provides serverless GPU inference for AI models, including a CI/CD build pipeline and a simple Python framework (Potassium) to serve your models.
It is broken into two parts: installation and setup, and then references to specific Banana wrappers.
This page covers how to use the [Banana](https://www.banana.dev) ecosystem within LangChain.
It is broken into two parts:
* installation and setup,
* and then references to specific Banana wrappers.
## Installation and Setup ## Installation and Setup
- Install with `pip install banana-dev` - Install with `pip install banana-dev`
- Get an Banana api key and set it as an environment variable (`BANANA_API_KEY`) - Get a Banana API key from the [Banana.dev dashboard](https://app.banana.dev) and set it as an environment variable (`BANANA_API_KEY`)
- Get your model's key and url slug from the model's details page
## Define your Banana Template ## Define your Banana Template
If you want to use an available language model template you can find one [here](https://app.banana.dev/templates/conceptofmind/serverless-template-palmyra-base). You'll need to set up a GitHub repo for your Banana app. You can get started in 5 minutes using [this guide](https://docs.banana.dev/banana-docs/).
This template uses the Palmyra-Base model by [Writer](https://writer.com/product/api/).
You can check out an example Banana repository [here](https://github.com/conceptofmind/serverless-template-palmyra-base). Alternatively, for a ready-to-go LLM example, you can check out Banana's [CodeLlama-7B-Instruct-GPTQ](https://github.com/bananaml/demo-codellama-7b-instruct-gptq) GitHub repository. Just fork it and deploy it within Banana.
Other starter repos are available [here](https://github.com/orgs/bananaml/repositories?q=demo-&type=all&language=&sort=).
## Build the Banana app ## Build the Banana app
Banana Apps must include the "output" key in the return json. To use Banana apps within Langchain, they must include the `outputs` key
There is a rigid response structure. in the returned json, and the value must be a string.
```python ```python
# Return the results as a dictionary # Return the results as a dictionary
result = {'output': result} result = {'outputs': result}
``` ```
An example inference function would be: An example inference function would be:
```python ```python
def inference(model_inputs:dict) -> dict: @app.handler("/")
global model def handler(context: dict, request: Request) -> Response:
global tokenizer """Handle a request to generate code from a prompt."""
model = context.get("model")
# Parse out your arguments tokenizer = context.get("tokenizer")
prompt = model_inputs.get('prompt', None) max_new_tokens = request.json.get("max_new_tokens", 512)
if prompt == None: temperature = request.json.get("temperature", 0.7)
return {'message': "No prompt provided"} prompt = request.json.get("prompt")
prompt_template=f'''[INST] Write code to solve the following coding problem that obeys the constraints and passes the example test cases. Please wrap your code answer using ```:
# Run the model {prompt}
input_ids = tokenizer.encode(prompt, return_tensors='pt').cuda() [/INST]
output = model.generate( '''
input_ids, input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
max_length=100, output = model.generate(inputs=input_ids, temperature=temperature, max_new_tokens=max_new_tokens)
do_sample=True, result = tokenizer.decode(output[0])
top_k=50, return Response(json={"outputs": result}, status=200)
top_p=0.95,
num_return_sequences=1,
temperature=0.9,
early_stopping=True,
no_repeat_ngram_size=3,
num_beams=5,
length_penalty=1.5,
repetition_penalty=1.5,
bad_words_ids=[[tokenizer.encode(' ', add_prefix_space=True)[0]]]
)
result = tokenizer.decode(output[0], skip_special_tokens=True)
# Return the results as a dictionary
result = {'output': result}
return result
``` ```
You can find a full example of a Banana app [here](https://github.com/conceptofmind/serverless-template-palmyra-base/blob/main/app.py). This example is from the `app.py` file in [CodeLlama-7B-Instruct-GPTQ](https://github.com/bananaml/demo-codellama-7b-instruct-gptq).
## Wrappers ## Wrappers
### LLM ### LLM
There exists an Banana LLM wrapper, which you can access with Within Langchain, there exists a Banana LLM wrapper, which you can access with
```python ```python
from langchain.llms import Banana from langchain.llms import Banana
``` ```
You need to provide a model key located in the dashboard: You need to provide a model key and model url slug, which you can get from the model's details page in the [Banana.dev dashboard](https://app.banana.dev).
```python ```python
llm = Banana(model_key="YOUR_MODEL_KEY") llm = Banana(model_key="YOUR_MODEL_KEY", model_url_slug="YOUR_MODEL_URL_SLUG")
``` ```
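Once constructed, the wrapper behaves like any other LangChain LLM. A minimal usage sketch follows; the prompt and chain are illustrative and not part of the Banana API:
```python
from langchain.llms import Banana
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

llm = Banana(model_key="YOUR_MODEL_KEY", model_url_slug="YOUR_MODEL_URL_SLUG")

# Illustrative prompt; any template with matching input variables works.
prompt = PromptTemplate(input_variables=["task"], template="Write code to {task}.")
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run(task="reverse a linked list"))
```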

View File

@ -1,20 +1,24 @@
# ModelScope # ModelScope
>[ModelScope](https://www.modelscope.cn/home) is a big repository of models and datasets.
This page covers how to use the modelscope ecosystem within LangChain. This page covers how to use the modelscope ecosystem within LangChain.
It is broken into two parts: installation and setup, and then references to specific modelscope wrappers. It is broken into two parts: installation and setup, and then references to specific modelscope wrappers.
## Installation and Setup ## Installation and Setup
* Install the Python SDK with `pip install modelscope` Install the `modelscope` package.
## Wrappers ```bash
pip install modelscope
```
### Embeddings
There exists a modelscope Embeddings wrapper, which you can access with ## Text Embedding Models
```python ```python
from langchain.embeddings import ModelScopeEmbeddings from langchain.embeddings import ModelScopeEmbeddings
``` ```
For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/modelscope_hub.html) For a more detailed walkthrough of this, see [this notebook](/docs/integrations/text_embedding/modelscope_hub)
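A minimal usage sketch; the `model_id` below is the default English sentence-embedding model and is an assumption you can swap for any ModelScope embedding model:
```python
from langchain.embeddings import ModelScopeEmbeddings

# model_id is an assumption; substitute the ModelScope model you actually use.
embeddings = ModelScopeEmbeddings(model_id="damo/nlp_corom_sentence-embedding_english-base")
query_vector = embeddings.embed_query("hello world")
doc_vectors = embeddings.embed_documents(["foo", "bar"])
```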

View File

@ -1,17 +1,31 @@
# NLPCloud # NLPCloud
This page covers how to use the NLPCloud ecosystem within LangChain. >[NLP Cloud](https://docs.nlpcloud.com/#introduction) is an artificial intelligence platform that allows you to use the most advanced AI engines, and even train your own engines with your own data.
It is broken into two parts: installation and setup, and then references to specific NLPCloud wrappers.
## Installation and Setup ## Installation and Setup
- Install the Python SDK with `pip install nlpcloud`
- Install the `nlpcloud` package.
```bash
pip install nlpcloud
```
- Get an NLPCloud api key and set it as an environment variable (`NLPCLOUD_API_KEY`) - Get an NLPCloud api key and set it as an environment variable (`NLPCLOUD_API_KEY`)
## Wrappers
### LLM ## LLM
See a [usage example](/docs/integrations/llms/nlpcloud).
There exists an NLPCloud LLM wrapper, which you can access with
```python ```python
from langchain.llms import NLPCloud from langchain.llms import NLPCloud
``` ```
## Text Embedding Models
See a [usage example](/docs/integrations/text_embedding/nlp_cloud)
```python
from langchain.embeddings import NLPCloudEmbeddings
```
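Both wrappers read `NLPCLOUD_API_KEY` from the environment. A minimal sketch, relying on the wrappers' default models:
```python
import os

from langchain.llms import NLPCloud
from langchain.embeddings import NLPCloudEmbeddings

os.environ["NLPCLOUD_API_KEY"] = "your-api-key"  # placeholder; use your real key

llm = NLPCloud()  # default model; pass model_name to change it
embeddings = NLPCloudEmbeddings()

print(llm("What is the capital of France?"))
print(len(embeddings.embed_query("hello")))
```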

View File

@ -1,4 +1,10 @@
# Portkey # Portkey
>[Portkey](https://docs.portkey.ai/overview/introduction) is a platform designed to streamline the deployment
> and management of Generative AI applications.
> It provides comprehensive features for monitoring, managing models,
> and improving the performance of your AI applications.
## LLMOps for Langchain ## LLMOps for Langchain
Portkey brings production readiness to Langchain. With Portkey, you can Portkey brings production readiness to Langchain. With Portkey, you can

View File

@ -1,19 +1,14 @@
{ {
"cells": [ "cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": []
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Log, Trace, and Monitor Langchain LLM Calls\n", "# Log, Trace, and Monitor\n",
"\n", "\n",
"When building apps or agents using Langchain, you end up making multiple API calls to fulfill a single user request. However, these requests are not chained when you want to analyse them. With [**Portkey**](/docs/ecosystem/integrations/portkey), all the embeddings, completion, and other requests from a single user request will get logged and traced to a common ID, enabling you to gain full visibility of user interactions.\n", "When building apps or agents using Langchain, you end up making multiple API calls to fulfill a single user request. However, these requests are not chained when you want to analyse them. With [**Portkey**](/docs/ecosystem/integrations/portkey), all the embeddings, completion, and other requests from a single user request will get logged and traced to a common ID, enabling you to gain full visibility of user interactions.\n",
"\n", "\n",
"This notebook serves as a step-by-step guide on how to integrate and use Portkey in your Langchain app." "This notebook serves as a step-by-step guide on how to log, trace, and monitor Langchain LLM calls using `Portkey` in your Langchain app."
] ]
}, },
{ {
@ -234,9 +229,9 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.11.3" "version": "3.10.12"
} }
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 2 "nbformat_minor": 4
} }

View File

@ -18,3 +18,11 @@ See a [usage example](/docs/modules/data_connection/document_transformers/text_s
```python ```python
from langchain.text_splitter import SpacyTextSplitter from langchain.text_splitter import SpacyTextSplitter
``` ```
## Text Embedding Models
See a [usage example](/docs/integrations/text_embedding/spacy_embedding)
```python
from langchain.embeddings.spacy_embeddings import SpacyEmbeddings
```
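A minimal sketch of the embedding wrapper; it assumes `spacy` is installed along with a pipeline such as `en_core_web_sm`, which the wrapper loads by default:
```python
from langchain.embeddings.spacy_embeddings import SpacyEmbeddings

# Assumes: pip install spacy && python -m spacy download en_core_web_sm
embeddings = SpacyEmbeddings()
doc_vectors = embeddings.embed_documents(["Hello, world!", "Goodbye!"])
query_vector = embeddings.embed_query("Hello")
print(len(query_vector))
```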

View File

@ -11,9 +11,10 @@ What is Vectara?
- You can use Vectara's integration with LangChain as a Vector store or using the Retriever abstraction. - You can use Vectara's integration with LangChain as a Vector store or using the Retriever abstraction.
## Installation and Setup ## Installation and Setup
To use Vectara with LangChain no special installation steps are required. You just have to provide your customer_id, corpus ID, and an API key created within the Vectara console to enable indexing and searching. To use Vectara with LangChain, no special installation steps are required.
To get started, follow our [quickstart](https://docs.vectara.com/docs/quickstart) guide to create an account, a corpus and an API key.
Once you have these, you can provide them as arguments to the Vectara vectorstore, or you can set them as environment variables.
Alternatively these can be provided as environment variables
- export `VECTARA_CUSTOMER_ID`="your_customer_id" - export `VECTARA_CUSTOMER_ID`="your_customer_id"
- export `VECTARA_CORPUS_ID`="your_corpus_id" - export `VECTARA_CORPUS_ID`="your_corpus_id"
- export `VECTARA_API_KEY`="your-vectara-api-key" - export `VECTARA_API_KEY`="your-vectara-api-key"
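For reference, a minimal construction sketch; the keyword names below follow the LangChain wrapper, and you can omit them to fall back to the environment variables above:
```python
from langchain.vectorstores import Vectara

# Credentials passed explicitly; omit them to use the VECTARA_* environment variables.
vectara = Vectara(
    vectara_customer_id="your_customer_id",
    vectara_corpus_id="your_corpus_id",
    vectara_api_key="your-vectara-api-key",
)
```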

View File

@ -5,9 +5,11 @@
"id": "b14a24db", "id": "b14a24db",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# AwaEmbedding\n", "# AwaDB\n",
"\n", "\n",
"This notebook explains how to use AwaEmbedding, which is included in [awadb](https://github.com/awa-ai/awadb), to embedding texts in langchain." ">[AwaDB](https://github.com/awa-ai/awadb) is an AI Native database for the search and storage of embedding vectors used by LLM Applications.\n",
"\n",
"This notebook explains how to use `AwaEmbeddings` in LangChain."
] ]
}, },
{ {
@ -101,7 +103,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.11.4" "version": "3.10.12"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@ -5,7 +5,9 @@
"id": "75e378f5-55d7-44b6-8e2e-6d7b8b171ec4", "id": "75e378f5-55d7-44b6-8e2e-6d7b8b171ec4",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Bedrock Embeddings" "# Bedrock\n",
"\n",
">[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that makes FMs from leading AI startups and Amazon available via an API, so you can choose from a wide range of FMs to find the model that is best suited for your use case.\n"
] ]
}, },
{ {
@ -91,7 +93,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.13" "version": "3.10.12"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@ -5,26 +5,29 @@
"id": "719619d3", "id": "719619d3",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# BGE Hugging Face Embeddings\n", "# BGE on Hugging Face\n",
"\n", "\n",
"This notebook shows how to use BGE Embeddings through Hugging Face" ">[BGE models on the HuggingFace](https://huggingface.co/BAAI/bge-large-en) are [the best open-source embedding models](https://huggingface.co/spaces/mteb/leaderboard).\n",
">BGE model is created by the [Beijing Academy of Artificial Intelligence (BAAI)](https://www.baai.ac.cn/english.html). `BAAI` is a private non-profit organization engaged in AI research and development.\n",
"\n",
"This notebook shows how to use `BGE Embeddings` through `Hugging Face`"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 8, "execution_count": null,
"id": "f7a54279", "id": "f7a54279",
"metadata": { "metadata": {
"scrolled": true "scrolled": true
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"# !pip install sentence_transformers" "#!pip install sentence_transformers"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 5, "execution_count": null,
"id": "9e1d5b6b", "id": "9e1d5b6b",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -43,12 +46,24 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 7, "execution_count": 5,
"id": "e59d1a89", "id": "e59d1a89",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/plain": [
"384"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"embedding = hf.embed_query(\"hi this is harrison\")" "embedding = hf.embed_query(\"hi this is harrison\")\n",
"len(embedding)"
] ]
}, },
{ {
@ -76,7 +91,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.10.1" "version": "3.10.12"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@ -1,13 +1,14 @@
{ {
"cells": [ "cells": [
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Google Cloud Platform Vertex AI PaLM \n", "# Google Vertex AI PaLM \n",
"\n", "\n",
"Note: This is seperate from the Google PaLM integration, it exposes [Vertex AI PaLM API](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview) on Google Cloud. \n", ">[Vertex AI PaLM API](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview) is a service on Google Cloud exposing the embedding models. \n",
"\n",
"Note: This integration is seperate from the Google PaLM integration.\n",
"\n", "\n",
"By default, Google Cloud [does not use](https://cloud.google.com/vertex-ai/docs/generative-ai/data-governance#foundation_model_development) Customer Data to train its foundation models as part of Google Cloud`s AI/ML Privacy Commitment. More details about how Google processes data can also be found in [Google's Customer Data Processing Addendum (CDPA)](https://cloud.google.com/terms/data-processing-addendum).\n", "By default, Google Cloud [does not use](https://cloud.google.com/vertex-ai/docs/generative-ai/data-governance#foundation_model_development) Customer Data to train its foundation models as part of Google Cloud`s AI/ML Privacy Commitment. More details about how Google processes data can also be found in [Google's Customer Data Processing Addendum (CDPA)](https://cloud.google.com/terms/data-processing-addendum).\n",
"\n", "\n",
@ -96,7 +97,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.1" "version": "3.10.12"
}, },
"vscode": { "vscode": {
"interpreter": { "interpreter": {

View File

@ -5,13 +5,23 @@
"id": "ed47bb62", "id": "ed47bb62",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Hugging Face Hub\n", "# Hugging Face\n",
"Let's load the Hugging Face Embedding class." "Let's load the Hugging Face Embedding class."
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 7, "execution_count": null,
"id": "16b20335-da1d-46ba-aa23-fbf3e2c6fe60",
"metadata": {},
"outputs": [],
"source": [
"!pip install langchain sentence_transformers"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "861521a9", "id": "861521a9",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -21,7 +31,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 16, "execution_count": 3,
"id": "ff9be586", "id": "ff9be586",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -31,7 +41,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 12, "execution_count": 3,
"id": "d0a98ae9", "id": "d0a98ae9",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -41,7 +51,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 13, "execution_count": 5,
"id": "5d6c682b", "id": "5d6c682b",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -51,7 +61,28 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 14, "execution_count": 6,
"id": "b57b8ce9-ef7d-4e63-979e-aa8763d1f9a8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[-0.04895168915390968, -0.03986193612217903, -0.021562768146395683]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"query_result[:3]"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "bb5e74c0", "id": "bb5e74c0",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -60,19 +91,71 @@
] ]
}, },
{ {
"cell_type": "code", "cell_type": "markdown",
"execution_count": null, "id": "92019ef1-5d30-4985-b4e6-c0d98bdfe265",
"id": "aaad49f8",
"metadata": {}, "metadata": {},
"outputs": [], "source": [
"source": [] "## Hugging Face Inference API\n",
"We can also access embedding models via the Hugging Face Inference API, which does not require us to install ``sentence_transformers`` and download models locally."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "66f5c6ba-1446-43e1-b012-800d17cef300",
"metadata": {},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
"Enter your HF Inference API Key:\n",
"\n",
" ········\n"
]
}
],
"source": [
"import getpass\n",
"\n",
"inference_api_key = getpass.getpass(\"Enter your HF Inference API Key:\\n\\n\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "d0623c1f-cd82-4862-9bce-3655cb9b66ac",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[-0.038338541984558105, 0.1234646737575531, -0.028642963618040085]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings\n",
"\n",
"embeddings = HuggingFaceInferenceAPIEmbeddings(\n",
" api_key=inference_api_key,\n",
" model_name=\"sentence-transformers/all-MiniLM-l6-v2\"\n",
")\n",
"\n",
"query_result = embeddings.embed_query(text)\n",
"query_result[:3]"
]
} }
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "Python 3 (ipykernel)", "display_name": "poetry-venv",
"language": "python", "language": "python",
"name": "python3" "name": "poetry-venv"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {

View File

@ -1,12 +1,13 @@
{ {
"cells": [ "cells": [
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# ModelScope\n", "# ModelScope\n",
"\n", "\n",
">[ModelScope](https://www.modelscope.cn/home) is big repository of the models and datasets.\n",
"\n",
"Let's load the ModelScope Embedding class." "Let's load the ModelScope Embedding class."
] ]
}, },
@ -67,16 +68,23 @@
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "chatgpt", "display_name": "Python 3 (ipykernel)",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
"language_info": { "language_info": {
"name": "python", "codemirror_mode": {
"version": "3.9.15" "name": "ipython",
"version": 3
}, },
"orig_nbformat": 4 "file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 2 "nbformat_minor": 4
} }

View File

@ -1,15 +1,14 @@
{ {
"cells": [ "cells": [
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# MosaicML embeddings\n", "# MosaicML\n",
"\n", "\n",
"[MosaicML](https://docs.mosaicml.com/en/latest/inference.html) offers a managed inference service. You can either use a variety of open source models, or deploy your own.\n", ">[MosaicML](https://docs.mosaicml.com/en/latest/inference.html) offers a managed inference service. You can either use a variety of open source models, or deploy your own.\n",
"\n", "\n",
"This example goes over how to use LangChain to interact with MosaicML Inference for text embedding." "This example goes over how to use LangChain to interact with `MosaicML` Inference for text embedding."
] ]
}, },
{ {
@ -94,6 +93,11 @@
} }
], ],
"metadata": { "metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
@ -103,9 +107,10 @@
"mimetype": "text/x-python", "mimetype": "text/x-python",
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3" "pygments_lexer": "ipython3",
"version": "3.10.12"
} }
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 2 "nbformat_minor": 4
} }

View File

@ -7,7 +7,7 @@
"source": [ "source": [
"# NLP Cloud\n", "# NLP Cloud\n",
"\n", "\n",
"NLP Cloud is an artificial intelligence platform that allows you to use the most advanced AI engines, and even train your own engines with your own data. \n", ">[NLP Cloud](https://docs.nlpcloud.com/#introduction) is an artificial intelligence platform that allows you to use the most advanced AI engines, and even train your own engines with your own data. \n",
"\n", "\n",
"The [embeddings](https://docs.nlpcloud.com/#embeddings) endpoint offers the following model:\n", "The [embeddings](https://docs.nlpcloud.com/#embeddings) endpoint offers the following model:\n",
"\n", "\n",
@ -80,7 +80,7 @@
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "Python 3.11.2 64-bit", "display_name": "Python 3 (ipykernel)",
"language": "python", "language": "python",
"name": "python3" "name": "python3"
}, },
@ -94,7 +94,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.11.2" "version": "3.10.12"
}, },
"vscode": { "vscode": {
"interpreter": { "interpreter": {

View File

@ -5,11 +5,13 @@
"id": "1f83f273", "id": "1f83f273",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# SageMaker Endpoint Embeddings\n", "# SageMaker\n",
"\n", "\n",
"Let's load the SageMaker Endpoints Embeddings class. The class can be used if you host, e.g. your own Hugging Face model on SageMaker.\n", "Let's load the `SageMaker Endpoints Embeddings` class. The class can be used if you host, e.g. your own Hugging Face model on SageMaker.\n",
"\n", "\n",
"For instructions on how to do this, please see [here](https://www.philschmid.de/custom-inference-huggingface-sagemaker). **Note**: In order to handle batched requests, you will need to adjust the return line in the `predict_fn()` function within the custom `inference.py` script:\n", "For instructions on how to do this, please see [here](https://www.philschmid.de/custom-inference-huggingface-sagemaker). \n",
"\n",
"**Note**: In order to handle batched requests, you will need to adjust the return line in the `predict_fn()` function within the custom `inference.py` script:\n",
"\n", "\n",
"Change from\n", "Change from\n",
"\n", "\n",
@ -143,7 +145,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.1" "version": "3.10.12"
}, },
"vscode": { "vscode": {
"interpreter": { "interpreter": {

View File

@ -5,8 +5,8 @@
"id": "eec4efda", "id": "eec4efda",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Self Hosted Embeddings\n", "# Self Hosted\n",
"Let's load the SelfHostedEmbeddings, SelfHostedHuggingFaceEmbeddings, and SelfHostedHuggingFaceInstructEmbeddings classes." "Let's load the `SelfHostedEmbeddings`, `SelfHostedHuggingFaceEmbeddings`, and `SelfHostedHuggingFaceInstructEmbeddings` classes."
] ]
}, },
{ {
@ -149,9 +149,7 @@
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "fc1bfd0f", "id": "fc1bfd0f",
"metadata": { "metadata": {},
"scrolled": false
},
"outputs": [], "outputs": [],
"source": [ "source": [
"query_result = embeddings.embed_query(text)" "query_result = embeddings.embed_query(text)"
@ -182,7 +180,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.1" "version": "3.10.12"
}, },
"vscode": { "vscode": {
"interpreter": { "interpreter": {

View File

@ -1,16 +1,15 @@
{ {
"cells": [ "cells": [
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"id": "ed47bb62", "id": "ed47bb62",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Sentence Transformers Embeddings\n", "# Sentence Transformers\n",
"\n", "\n",
"[SentenceTransformers](https://www.sbert.net/) embeddings are called using the `HuggingFaceEmbeddings` integration. We have also added an alias for `SentenceTransformerEmbeddings` for users who are more familiar with directly using that package.\n", ">[SentenceTransformers](https://www.sbert.net/) embeddings are called using the `HuggingFaceEmbeddings` integration. We have also added an alias for `SentenceTransformerEmbeddings` for users who are more familiar with directly using that package.\n",
"\n", "\n",
"SentenceTransformers is a python package that can generate text and image embeddings, originating from [Sentence-BERT](https://arxiv.org/abs/1908.10084)" "`SentenceTransformers` is a python package that can generate text and image embeddings, originating from [Sentence-BERT](https://arxiv.org/abs/1908.10084)"
] ]
}, },
{ {
@ -109,7 +108,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.16" "version": "3.10.12"
}, },
"vscode": { "vscode": {
"interpreter": { "interpreter": {

View File

@ -1,21 +1,31 @@
{ {
"cells": [ "cells": [
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Spacy Embedding\n", "# SpaCy\n",
"\n", "\n",
"### Loading the Spacy embedding class to generate and query embeddings" ">[spaCy](https://spacy.io/) is an open-source software library for advanced natural language processing, written in the programming languages Python and Cython.\n",
" \n",
"\n",
"## Installation and Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#!pip install spacy"
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"#### Import the necessary classes" "Import the necessary classes"
] ]
}, },
{ {
@ -28,11 +38,12 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"#### Initialize SpacyEmbeddings.This will load the Spacy model into memory." "## Example\n",
"\n",
"Initialize SpacyEmbeddings.This will load the Spacy model into memory."
] ]
}, },
{ {
@ -45,11 +56,10 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"#### Define some example texts . These could be any documents that you want to analyze - for example, news articles, social media posts, or product reviews." "Define some example texts . These could be any documents that you want to analyze - for example, news articles, social media posts, or product reviews."
] ]
}, },
{ {
@ -67,11 +77,10 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"#### Generate and print embeddings for the texts . The SpacyEmbeddings class generates an embedding for each document, which is a numerical representation of the document's content. These embeddings can be used for various natural language processing tasks, such as document similarity comparison or text classification." "Generate and print embeddings for the texts . The SpacyEmbeddings class generates an embedding for each document, which is a numerical representation of the document's content. These embeddings can be used for various natural language processing tasks, such as document similarity comparison or text classification."
] ]
}, },
{ {
@ -86,11 +95,10 @@
] ]
}, },
{ {
"attachments": {},
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"#### Generate and print an embedding for a single piece of text. You can also generate an embedding for a single piece of text, such as a search query. This can be useful for tasks like information retrieval, where you want to find documents that are similar to a given query." "Generate and print an embedding for a single piece of text. You can also generate an embedding for a single piece of text, such as a search query. This can be useful for tasks like information retrieval, where you want to find documents that are similar to a given query."
] ]
}, },
{ {
@ -106,11 +114,24 @@
} }
], ],
"metadata": { "metadata": {
"language_info": { "kernelspec": {
"name": "python" "display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
}, },
"orig_nbformat": 4 "language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 2 "nbformat_minor": 4
} }

View File

@ -0,0 +1,126 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# NucliaDB\n",
"\n",
"You can use a local NucliaDB instance or use [Nuclia Cloud](https://nuclia.cloud).\n",
"\n",
"When using a local instance, you need a Nuclia Understanding API key, so your texts are properly vectorized and indexed. You can get a key by creating a free account at [https://nuclia.cloud](https://nuclia.cloud), and then [create a NUA key](https://docs.nuclia.dev/docs/docs/using/understanding/intro)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#!pip install langchain nuclia"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Usage with nuclia.cloud"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.vectorstores.nucliadb import NucliaDB\n",
"API_KEY = \"YOUR_API_KEY\"\n",
"\n",
"ndb = NucliaDB(knowledge_box=\"YOUR_KB_ID\", local=False, api_key=API_KEY)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Usage with a local instance\n",
"\n",
"Note: By default `backend` is set to `http://localhost:8080`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.vectorstores.nucliadb import NucliaDB\n",
"\n",
"ndb = NucliaDB(knowledge_box=\"YOUR_KB_ID\", local=True, backend=\"http://my-local-server\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Add and delete texts to your Knowledge Box"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ids = ndb.add_texts([\"This is a new test\", \"This is a second test\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ndb.delete(ids=ids)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Search in your Knowledge Box"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"results = ndb.similarity_search(\"Who was inspired by Ada Lovelace?\")\n",
"print(res.page_content)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@ -0,0 +1,207 @@
{
"cells": [
{
"cell_type": "markdown",
"source": [
"# sqlite-vss\n",
"\n",
">[sqlite-vss](https://alexgarcia.xyz/sqlite-vss/) is an SQLite extension designed for vector search, emphasizing local-first operations and easy integration into applications without external servers. Leveraging the Faiss library, it offers efficient similarity search and clustering capabilities.\n",
"\n",
"This notebook shows how to use the `SQLiteVSS` vector database."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"# You need to install sqlite-vss as a dependency.\n",
"%pip install sqlite-vss"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "markdown",
"source": [
"### Quickstart"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 2,
"outputs": [
{
"data": {
"text/plain": "'Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while youre at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, Id like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nations top legal minds, who will continue Justice Breyers legacy of excellence.'"
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import SQLiteVSS\n",
"from langchain.document_loaders import TextLoader\n",
"\n",
"# load the document and split it into chunks\n",
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"\n",
"# split it into chunks\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"texts = [doc.page_content for doc in docs]\n",
"\n",
"\n",
"# create the open-source embedding function\n",
"embedding_function = SentenceTransformerEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
"\n",
"\n",
"# load it in sqlite-vss in a table named state_union.\n",
"# the db_file parameter is the name of the file you want\n",
"# as your sqlite database.\n",
"db = SQLiteVSS.from_texts(\n",
" texts=texts,\n",
" embedding=embedding_function,\n",
" table=\"state_union\",\n",
" db_file=\"/tmp/vss.db\"\n",
")\n",
"\n",
"# query it\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"data = db.similarity_search(query)\n",
"\n",
"# print results\n",
"data[0].page_content"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2023-09-06T14:55:55.370351Z",
"start_time": "2023-09-06T14:55:53.547755Z"
}
}
},
{
"cell_type": "markdown",
"source": [
"### Using existing sqlite connection"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 7,
"outputs": [
{
"data": {
"text/plain": "'Ketanji Brown Jackson is awesome'"
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import SQLiteVSS\n",
"from langchain.document_loaders import TextLoader\n",
"\n",
"# load the document and split it into chunks\n",
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
"documents = loader.load()\n",
"\n",
"# split it into chunks\n",
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"docs = text_splitter.split_documents(documents)\n",
"texts = [doc.page_content for doc in docs]\n",
"\n",
"\n",
"# create the open-source embedding function\n",
"embedding_function = SentenceTransformerEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
"connection = SQLiteVSS.create_connection(db_file=\"/tmp/vss.db\")\n",
"\n",
"db1 = SQLiteVSS(\n",
" table=\"state_union\",\n",
" embedding=embedding_function,\n",
" connection=connection\n",
")\n",
"\n",
"db1.add_texts([\"Ketanji Brown Jackson is awesome\"])\n",
"# query it again\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"data = db1.similarity_search(query)\n",
"\n",
"# print results\n",
"data[0].page_content"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2023-09-06T14:59:22.086252Z",
"start_time": "2023-09-06T14:59:21.693237Z"
}
}
},
{
"cell_type": "code",
"execution_count": 13,
"outputs": [],
"source": [
"# Cleaning up\n",
"import os\n",
"os.remove(\"/tmp/vss.db\")"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2023-09-06T15:01:15.550318Z",
"start_time": "2023-09-06T15:01:15.546428Z"
}
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [],
"metadata": {
"collapsed": false
}
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

View File

@ -28,43 +28,41 @@
"The following function determines cosine similarity, but you can adjust to your needs.\n", "The following function determines cosine similarity, but you can adjust to your needs.\n",
"\n", "\n",
"```sql\n", "```sql\n",
" -- Enable the pgvector extension to work with embedding vectors\n", "-- Enable the pgvector extension to work with embedding vectors\n",
" create extension vector;\n", "create extension if not exists vector;\n",
"\n", "\n",
" -- Create a table to store your documents\n", "-- Create a table to store your documents\n",
" create table documents (\n", "create table\n",
" documents (\n",
" id uuid primary key,\n", " id uuid primary key,\n",
" content text, -- corresponds to Document.pageContent\n", " content text, -- corresponds to Document.pageContent\n",
" metadata jsonb, -- corresponds to Document.metadata\n", " metadata jsonb, -- corresponds to Document.metadata\n",
" embedding vector(1536) -- 1536 works for OpenAI embeddings, change if needed\n", " embedding vector (1536) -- 1536 works for OpenAI embeddings, change if needed\n",
" );\n", " );\n",
"\n", "\n",
" CREATE FUNCTION match_documents(query_embedding vector(1536), match_count int)\n", "-- Create a function to search for documents\n",
" RETURNS TABLE(\n", "create function match_documents (\n",
" query_embedding vector (1536),\n",
" filter jsonb default '{}'\n",
") returns table (\n",
" id uuid,\n", " id uuid,\n",
" content text,\n", " content text,\n",
" metadata jsonb,\n", " metadata jsonb,\n",
" -- we return matched vectors to enable maximal marginal relevance searches\n", " similarity float\n",
" embedding vector(1536),\n", ") language plpgsql as $$\n",
" similarity float)\n", "#variable_conflict use_column\n",
" LANGUAGE plpgsql\n", "begin\n",
" AS $$\n", " return query\n",
" # variable_conflict use_column\n", " select\n",
" BEGIN\n",
" RETURN query\n",
" SELECT\n",
" id,\n", " id,\n",
" content,\n", " content,\n",
" metadata,\n", " metadata,\n",
" embedding,\n", " 1 - (documents.embedding <=> query_embedding) as similarity\n",
" 1 -(documents.embedding <=> query_embedding) AS similarity\n", " from documents\n",
" FROM\n", " where metadata @> filter\n",
" documents\n", " order by documents.embedding <=> query_embedding;\n",
" ORDER BY\n", "end;\n",
" documents.embedding <=> query_embedding\n", "$$;\n",
" LIMIT match_count;\n",
" END;\n",
" $$;\n",
"```" "```"
] ]
}, },

View File

@ -26,7 +26,7 @@
"source": [ "source": [
"# Setup\n", "# Setup\n",
"\n", "\n",
"You will need a Vectara account to use Vectara with LangChain. To get started, use the following steps:\n", "You will need a Vectara account to use Vectara with LangChain. To get started, use the following steps (see our [quickstart](https://docs.vectara.com/docs/quickstart) guide):\n",
"1. [Sign up](https://console.vectara.com/signup) for a Vectara account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n", "1. [Sign up](https://console.vectara.com/signup) for a Vectara account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n", "2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Authorization\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n", "3. Next you'll need to create API keys to access the corpus. Click on the **\"Authorization\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
@ -47,7 +47,7 @@
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n", "os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
"```\n", "```\n",
"\n", "\n",
"2. Add them to the Vectara vectorstore constructor:\n", "1. Provide them as arguments when creating the Vectara vectorstore object:\n",
"\n", "\n",
"```python\n", "```python\n",
"vectorstore = Vectara(\n", "vectorstore = Vectara(\n",
@ -65,13 +65,22 @@
"source": [ "source": [
"## Connecting to Vectara from LangChain\n", "## Connecting to Vectara from LangChain\n",
"\n", "\n",
"To get started, let's ingest the documents using the from_documents() method.\n", "In this example, we assume that you've created an account and a corpus, and added your VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and VECTARA_API_KEY (created with permissions for both indexing and query) as environment variables.\n",
"We assume here that you've added your VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and query+indexing VECTARA_API_KEY as environment variables." "\n",
"The corpus has 3 fields defined as metadata for filtering:\n",
"* url: a string field containing the source URL of the document (where relevant)\n",
"* speech: a string field containing the name of the speech\n",
"* author: the name of the author\n",
"\n",
"Let's start by ingesting 3 documents into the corpus:\n",
"1. The State of the Union speech from 2022, available in the LangChain repository as a text file\n",
"2. The \"I have a dream\" speech by Dr. Kind\n",
"3. The \"We shall Fight on the Beaches\" speech by Winston Churchil"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": 2,
"id": "04a1f1a0", "id": "04a1f1a0",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -79,12 +88,17 @@
"from langchain.embeddings import FakeEmbeddings\n", "from langchain.embeddings import FakeEmbeddings\n",
"from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Vectara\n", "from langchain.vectorstores import Vectara\n",
"from langchain.document_loaders import TextLoader" "from langchain.document_loaders import TextLoader\n",
"\n",
"from langchain.llms import OpenAI\n",
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
"from langchain.chains.query_constructor.base import AttributeInfo"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 2, "execution_count": 3,
"id": "be0a4973", "id": "be0a4973",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -97,7 +111,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 3, "execution_count": 4,
"id": "8429667e", "id": "8429667e",
"metadata": { "metadata": {
"ExecuteTime": { "ExecuteTime": {
@ -111,7 +125,7 @@
"vectara = Vectara.from_documents(\n", "vectara = Vectara.from_documents(\n",
" docs,\n", " docs,\n",
" embedding=FakeEmbeddings(size=768),\n", " embedding=FakeEmbeddings(size=768),\n",
" doc_metadata={\"speech\": \"state-of-the-union\"},\n", " doc_metadata={\"speech\": \"state-of-the-union\", \"author\": \"Biden\"},\n",
")" ")"
] ]
}, },
@ -130,7 +144,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 4, "execution_count": 5,
"id": "85ef3468", "id": "85ef3468",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
@ -142,14 +156,16 @@
" [\n", " [\n",
" \"https://www.gilderlehrman.org/sites/default/files/inline-pdfs/king.dreamspeech.excerpts.pdf\",\n", " \"https://www.gilderlehrman.org/sites/default/files/inline-pdfs/king.dreamspeech.excerpts.pdf\",\n",
" \"I-have-a-dream\",\n", " \"I-have-a-dream\",\n",
" \"Dr. King\"\n",
" ],\n", " ],\n",
" [\n", " [\n",
" \"https://www.parkwayschools.net/cms/lib/MO01931486/Centricity/Domain/1578/Churchill_Beaches_Speech.pdf\",\n", " \"https://www.parkwayschools.net/cms/lib/MO01931486/Centricity/Domain/1578/Churchill_Beaches_Speech.pdf\",\n",
" \"we shall fight on the beaches\",\n", " \"we shall fight on the beaches\",\n",
" \"Churchil\"\n",
" ],\n", " ],\n",
"]\n", "]\n",
"files_list = []\n", "files_list = []\n",
"for url, _ in urls:\n", "for url, _, _ in urls:\n",
" name = tempfile.NamedTemporaryFile().name\n", " name = tempfile.NamedTemporaryFile().name\n",
" urllib.request.urlretrieve(url, name)\n", " urllib.request.urlretrieve(url, name)\n",
" files_list.append(name)\n", " files_list.append(name)\n",
@ -157,7 +173,7 @@
"docsearch: Vectara = Vectara.from_files(\n", "docsearch: Vectara = Vectara.from_files(\n",
" files=files_list,\n", " files=files_list,\n",
" embedding=FakeEmbeddings(size=768),\n", " embedding=FakeEmbeddings(size=768),\n",
" metadatas=[{\"url\": url, \"speech\": title} for url, title in urls],\n", " metadatas=[{\"url\": url, \"speech\": title, \"author\": author} for url, title, author in urls],\n",
")" ")"
] ]
}, },
@ -178,7 +194,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 5, "execution_count": 6,
"id": "a8c513ab", "id": "a8c513ab",
"metadata": { "metadata": {
"ExecuteTime": { "ExecuteTime": {
@ -197,7 +213,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 6, "execution_count": 7,
"id": "fc516993", "id": "fc516993",
"metadata": { "metadata": {
"ExecuteTime": { "ExecuteTime": {
@ -231,7 +247,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 7, "execution_count": 8,
"id": "8804a21d", "id": "8804a21d",
"metadata": { "metadata": {
"ExecuteTime": { "ExecuteTime": {
@ -249,7 +265,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 8, "execution_count": 9,
"id": "756a6887", "id": "756a6887",
"metadata": { "metadata": {
"ExecuteTime": { "ExecuteTime": {
@ -264,7 +280,7 @@
"text": [ "text": [
"Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nations top legal minds, who will continue Justice Breyers legacy of excellence. A former top litigator in private practice.\n", "Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nations top legal minds, who will continue Justice Breyers legacy of excellence. A former top litigator in private practice.\n",
"\n", "\n",
"Score: 0.786569\n" "Score: 0.8299499\n"
] ]
} }
], ],
@ -284,7 +300,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 9, "execution_count": 10,
"id": "47784de5", "id": "47784de5",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
@ -307,7 +323,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 10, "execution_count": 11,
"id": "3e22949f", "id": "3e22949f",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
@ -315,7 +331,7 @@
"name": "stdout", "name": "stdout",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"With this threshold of 0.2 we have 3 documents\n" "With this threshold of 0.2 we have 5 documents\n"
] ]
} }
], ],
@ -340,7 +356,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 11, "execution_count": 12,
"id": "9427195f", "id": "9427195f",
"metadata": { "metadata": {
"ExecuteTime": { "ExecuteTime": {
@ -352,10 +368,10 @@
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"VectaraRetriever(tags=['Vectara'], metadata=None, vectorstore=<langchain.vectorstores.vectara.Vectara object at 0x1586bd330>, search_type='similarity', search_kwargs={'lambda_val': 0.025, 'k': 5, 'filter': '', 'n_sentence_context': '2'})" "VectaraRetriever(tags=['Vectara'], metadata=None, vectorstore=<langchain.vectorstores.vectara.Vectara object at 0x13b15e9b0>, search_type='similarity', search_kwargs={'lambda_val': 0.025, 'k': 5, 'filter': '', 'n_sentence_context': '2'})"
] ]
}, },
"execution_count": 11, "execution_count": 12,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -367,7 +383,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 12, "execution_count": 13,
"id": "f3c70c31", "id": "f3c70c31",
"metadata": { "metadata": {
"ExecuteTime": { "ExecuteTime": {
@ -379,10 +395,10 @@
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"Document(page_content='Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nations top legal minds, who will continue Justice Breyers legacy of excellence. A former top litigator in private practice.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '596', 'len': '97', 'speech': 'state-of-the-union'})" "Document(page_content='Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nations top legal minds, who will continue Justice Breyers legacy of excellence. A former top litigator in private practice.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '596', 'len': '97', 'speech': 'state-of-the-union', 'author': 'Biden'})"
] ]
}, },
"execution_count": 12, "execution_count": 13,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -392,10 +408,118 @@
"retriever.get_relevant_documents(query)[0]" "retriever.get_relevant_documents(query)[0]"
] ]
}, },
{
"cell_type": "markdown",
"id": "e944c26a",
"metadata": {},
"source": [
"## Using Vectara as a SelfQuery Retriever"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "8be674de",
"metadata": {},
"outputs": [],
"source": [
"metadata_field_info = [\n",
" AttributeInfo(\n",
" name=\"speech\",\n",
" description=\"what name of the speech\",\n",
" type=\"string or list[string]\",\n",
" ),\n",
" AttributeInfo(\n",
" name=\"author\",\n",
" description=\"author of the speech\",\n",
" type=\"string or list[string]\",\n",
" ),\n",
"]\n",
"document_content_description = \"the text of the speech\"\n",
"\n",
"vectordb = Vectara()\n",
"llm = OpenAI(temperature=0)\n",
"retriever = SelfQueryRetriever.from_llm(llm, vectara, \n",
" document_content_description, metadata_field_info, \n",
" verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "f8938999",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/ofer/dev/langchain/libs/langchain/langchain/chains/llm.py:278: UserWarning: The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"query='freedom' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='author', value='Biden') limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='Well I know this nation. We will meet the test. To protect freedom and liberty, to expand fairness and opportunity. We will save democracy. As hard as these times have been, I am more optimistic about America today than I have been my whole life.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '346', 'len': '67', 'speech': 'state-of-the-union', 'author': 'Biden'}),\n",
" Document(page_content='To our fellow Ukrainian Americans who forge a deep bond that connects our two nations we stand with you. Putin may circle Kyiv with tanks, but he will never gain the hearts and souls of the Ukrainian people. He will never extinguish their love of freedom. He will never weaken the resolve of the free world. We meet tonight in an America that has lived through two of the hardest years this nation has ever faced.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '740', 'len': '47', 'speech': 'state-of-the-union', 'author': 'Biden'}),\n",
" Document(page_content='But most importantly as Americans. With a duty to one another to the American people to the Constitution. And with an unwavering resolve that freedom will always triumph over tyranny. Six days ago, Russias Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '413', 'len': '77', 'speech': 'state-of-the-union', 'author': 'Biden'}),\n",
" Document(page_content='We can do this. \\n\\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. In this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. We have fought for freedom, expanded liberty, defeated totalitarianism and terror. And built the strongest, freest, and most prosperous nation the world has ever known. Now is the hour. \\n\\nOur moment of responsibility.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '906', 'len': '82', 'speech': 'state-of-the-union', 'author': 'Biden'}),\n",
" Document(page_content='In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. We cannot let this happen. Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while youre at it, pass the Disclose Act so Americans can know who is funding our elections.', metadata={'source': 'langchain', 'lang': 'eng', 'offset': '0', 'len': '63', 'speech': 'state-of-the-union', 'author': 'Biden'})]"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"retriever.get_relevant_documents(\"what did Biden say about the freedom?\")"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "a97037fb",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query='freedom' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='author', value='Dr. King') limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='And if America is to be a great nation, this must become true. So\\nlet freedom ring from the prodigious hilltops of New Hampshire. Let freedom ring from the mighty\\nmountains of New York. Let freedom ring from the heightening Alleghenies of Pennsylvania. Let\\nfreedom ring from the snowcapped Rockies of Colorado.', metadata={'lang': 'eng', 'section': '3', 'offset': '1534', 'len': '55', 'CreationDate': '1424880481', 'Producer': 'Adobe PDF Library 10.0', 'Author': 'Sasha Rolon-Pereira', 'Title': 'Martin Luther King Jr.pdf', 'Creator': 'Acrobat PDFMaker 10.1 for Word', 'ModDate': '1424880524', 'url': 'https://www.gilderlehrman.org/sites/default/files/inline-pdfs/king.dreamspeech.excerpts.pdf', 'speech': 'I-have-a-dream', 'author': 'Dr. King', 'title': 'Martin Luther King Jr.pdf'}),\n",
" Document(page_content='And if America is to be a great nation, this must become true. So\\nlet freedom ring from the prodigious hilltops of New Hampshire. Let freedom ring from the mighty\\nmountains of New York. Let freedom ring from the heightening Alleghenies of Pennsylvania. Let\\nfreedom ring from the snowcapped Rockies of Colorado.', metadata={'lang': 'eng', 'section': '3', 'offset': '1534', 'len': '55', 'CreationDate': '1424880481', 'Producer': 'Adobe PDF Library 10.0', 'Author': 'Sasha Rolon-Pereira', 'Title': 'Martin Luther King Jr.pdf', 'Creator': 'Acrobat PDFMaker 10.1 for Word', 'ModDate': '1424880524', 'url': 'https://www.gilderlehrman.org/sites/default/files/inline-pdfs/king.dreamspeech.excerpts.pdf', 'speech': 'I-have-a-dream', 'author': 'Dr. King', 'title': 'Martin Luther King Jr.pdf'}),\n",
" Document(page_content='Let freedom ring from the curvaceous slopes of\\nCalifornia. But not only that. Let freedom ring from Stone Mountain of Georgia. Let freedom ring from Lookout\\nMountain of Tennessee. Let freedom ring from every hill and molehill of Mississippi, from every\\nmountain side. Let freedom ring . . .\\nWhen we allow freedom to ring—when we let it ring from every city and every hamlet, from every state\\nand every city, we will be able to speed up that day when all of Gods children, black men and white\\nmen, Jews and Gentiles, Protestants and Catholics, will be able to join hands and sing in the words of the\\nold Negro spiritual, “Free at last, Free at last, Great God a-mighty, We are free at last.”', metadata={'lang': 'eng', 'section': '3', 'offset': '1842', 'len': '52', 'CreationDate': '1424880481', 'Producer': 'Adobe PDF Library 10.0', 'Author': 'Sasha Rolon-Pereira', 'Title': 'Martin Luther King Jr.pdf', 'Creator': 'Acrobat PDFMaker 10.1 for Word', 'ModDate': '1424880524', 'url': 'https://www.gilderlehrman.org/sites/default/files/inline-pdfs/king.dreamspeech.excerpts.pdf', 'speech': 'I-have-a-dream', 'author': 'Dr. King', 'title': 'Martin Luther King Jr.pdf'}),\n",
" Document(page_content='Let freedom ring from the curvaceous slopes of\\nCalifornia. But not only that. Let freedom ring from Stone Mountain of Georgia. Let freedom ring from Lookout\\nMountain of Tennessee. Let freedom ring from every hill and molehill of Mississippi, from every\\nmountain side. Let freedom ring . . .\\nWhen we allow freedom to ring—when we let it ring from every city and every hamlet, from every state\\nand every city, we will be able to speed up that day when all of Gods children, black men and white\\nmen, Jews and Gentiles, Protestants and Catholics, will be able to join hands and sing in the words of the\\nold Negro spiritual, “Free at last, Free at last, Great God a-mighty, We are free at last.”', metadata={'lang': 'eng', 'section': '3', 'offset': '1842', 'len': '52', 'CreationDate': '1424880481', 'Producer': 'Adobe PDF Library 10.0', 'Author': 'Sasha Rolon-Pereira', 'Title': 'Martin Luther King Jr.pdf', 'Creator': 'Acrobat PDFMaker 10.1 for Word', 'ModDate': '1424880524', 'url': 'https://www.gilderlehrman.org/sites/default/files/inline-pdfs/king.dreamspeech.excerpts.pdf', 'speech': 'I-have-a-dream', 'author': 'Dr. King', 'title': 'Martin Luther King Jr.pdf'}),\n",
" Document(page_content='Let freedom ring from the mighty\\nmountains of New York. Let freedom ring from the heightening Alleghenies of Pennsylvania. Let\\nfreedom ring from the snowcapped Rockies of Colorado. Let freedom ring from the curvaceous slopes of\\nCalifornia. But not only that. Let freedom ring from Stone Mountain of Georgia.', metadata={'lang': 'eng', 'section': '3', 'offset': '1657', 'len': '57', 'CreationDate': '1424880481', 'Producer': 'Adobe PDF Library 10.0', 'Author': 'Sasha Rolon-Pereira', 'Title': 'Martin Luther King Jr.pdf', 'Creator': 'Acrobat PDFMaker 10.1 for Word', 'ModDate': '1424880524', 'url': 'https://www.gilderlehrman.org/sites/default/files/inline-pdfs/king.dreamspeech.excerpts.pdf', 'speech': 'I-have-a-dream', 'author': 'Dr. King', 'title': 'Martin Luther King Jr.pdf'})]"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"retriever.get_relevant_documents(\"what did Dr. King say about the freedom?\")"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "2300e785", "id": "f6d17e90",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [] "source": []

View File

@ -0,0 +1,587 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "13afcae7",
"metadata": {},
"source": [
"# Supabase Vector self-querying \n",
"\n",
">[Supabase](https://supabase.com/docs) is an open source `Firebase` alternative. \n",
"> `Supabase` is built on top of `PostgreSQL`, which offers strong `SQL` \n",
"> querying capabilities and enables a simple interface with already-existing tools and frameworks.\n",
"\n",
">[PostgreSQL](https://en.wikipedia.org/wiki/PostgreSQL) also known as `Postgres`,\n",
"> is a free and open-source relational database management system (RDBMS) \n",
"> emphasizing extensibility and `SQL` compliance.\n",
"\n",
"In the notebook we'll demo the `SelfQueryRetriever` wrapped around a Supabase vector store.\n",
"\n",
"Specifically we will:\n",
"1. Create a Supabase database\n",
"2. Enable the `pgvector` extension\n",
"3. Create a `documents` table and `match_documents` function that will be used by `SupabaseVectorStore`\n",
"4. Load sample documents into the vector store (database table)\n",
"5. Build and test a self-querying retriever"
]
},
{
"cell_type": "markdown",
"id": "347935ad",
"metadata": {},
"source": [
"## Setup Supabase Database\n",
"\n",
"1. Head over to https://database.new to provision your Supabase database.\n",
"2. In the studio, jump to the [SQL editor](https://supabase.com/dashboard/project/_/sql/new) and run the following script to enable `pgvector` and setup your database as a vector store:\n",
" ```sql\n",
" -- Enable the pgvector extension to work with embedding vectors\n",
" create extension if not exists vector;\n",
"\n",
" -- Create a table to store your documents\n",
" create table\n",
" documents (\n",
" id uuid primary key,\n",
" content text, -- corresponds to Document.pageContent\n",
" metadata jsonb, -- corresponds to Document.metadata\n",
" embedding vector (1536) -- 1536 works for OpenAI embeddings, change if needed\n",
" );\n",
"\n",
" -- Create a function to search for documents\n",
" create function match_documents (\n",
" query_embedding vector (1536),\n",
" filter jsonb default '{}'\n",
" ) returns table (\n",
" id uuid,\n",
" content text,\n",
" metadata jsonb,\n",
" similarity float\n",
" ) language plpgsql as $$\n",
" #variable_conflict use_column\n",
" begin\n",
" return query\n",
" select\n",
" id,\n",
" content,\n",
" metadata,\n",
" 1 - (documents.embedding <=> query_embedding) as similarity\n",
" from documents\n",
" where metadata @> filter\n",
" order by documents.embedding <=> query_embedding;\n",
" end;\n",
" $$;\n",
" ```"
]
},
{
"cell_type": "markdown",
"id": "68e75fb9",
"metadata": {},
"source": [
"## Creating a Supabase vector store\n",
"Next we'll want to create a Supabase vector store and seed it with some data. We've created a small demo set of documents that contain summaries of movies.\n",
"\n",
"Be sure to install the latest version of `langchain`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78546fd7",
"metadata": {},
"outputs": [],
"source": [
"%pip install langchain"
]
},
{
"cell_type": "markdown",
"id": "e06df198",
"metadata": {},
"source": [
"The self-query retriever requires you to have `lark` installed:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "63a8af5b",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"%pip install lark"
]
},
{
"cell_type": "markdown",
"id": "114f768f",
"metadata": {},
"source": [
"We also need the `openai` and `supabase` packages:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "434ae558",
"metadata": {},
"outputs": [],
"source": [
"%pip install openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "22431060-52c4-48a7-a97b-9f542b8b0928",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"%pip install supabase==1.0.0"
]
},
{
"cell_type": "markdown",
"id": "83811610-7df3-4ede-b268-68a6a83ba9e2",
"metadata": {},
"source": [
"Since we are using `SupabaseVectorStore` and `OpenAIEmbeddings`, we have to load their API keys.\n",
"\n",
"- To find your `SUPABASE_URL` and `SUPABASE_SERVICE_KEY`, head to your Supabase project's [API settings](https://supabase.com/dashboard/project/_/settings/api).\n",
" - `SUPABASE_URL` corresponds to the Project URL\n",
" - `SUPABASE_SERVICE_KEY` corresponds to the `service_role` API key\n",
"\n",
"- To get your `OPENAI_API_KEY`, navigate to [API keys](https://platform.openai.com/account/api-keys) on your OpenAI account and create a new secret key."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "dd01b61b-7d32-4a55-85d6-b2d2d4f18840",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
"import getpass\n",
"\n",
"os.environ[\"SUPABASE_URL\"] = getpass.getpass(\"Supabase URL:\")\n",
"os.environ[\"SUPABASE_SERVICE_KEY\"] = getpass.getpass(\"Supabase Service Key:\")\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")"
]
},
{
"cell_type": "markdown",
"id": "3aaf5075",
"metadata": {},
"source": [
"_Optional:_ If you're storing your Supabase and OpenAI API keys in a `.env` file, you can load them with [`dotenv`](https://github.com/theskumar/python-dotenv)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0089221",
"metadata": {},
"outputs": [],
"source": [
"%pip install python-dotenv"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3d56c5ef",
"metadata": {},
"outputs": [],
"source": [
"from dotenv import load_dotenv\n",
"\n",
"load_dotenv()"
]
},
{
"cell_type": "markdown",
"id": "f6dd9aef",
"metadata": {},
"source": [
"First we'll create a Supabase client and instantiate a OpenAI embeddings class."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "cb4a5787",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
"from supabase.client import Client, create_client\n",
"from langchain.schema import Document\n",
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.vectorstores import SupabaseVectorStore\n",
"\n",
"supabase_url = os.environ.get(\"SUPABASE_URL\")\n",
"supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n",
"supabase: Client = create_client(supabase_url, supabase_key)\n",
"\n",
"embeddings = OpenAIEmbeddings()"
]
},
{
"cell_type": "markdown",
"id": "0fca9b0b",
"metadata": {},
"source": [
"Next let's create our documents."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "bcbe04d9",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"docs = [\n",
" Document(\n",
" page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n",
" metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": \"science fiction\"},\n",
" ),\n",
" Document(\n",
" page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\",\n",
" metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2},\n",
" ),\n",
" Document(\n",
" page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n",
" metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6},\n",
" ),\n",
" Document(\n",
" page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n",
" metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3},\n",
" ),\n",
" Document(\n",
" page_content=\"Toys come alive and have a blast doing so\",\n",
" metadata={\"year\": 1995, \"genre\": \"animated\"},\n",
" ),\n",
" Document(\n",
" page_content=\"Three men walk into the Zone, three men walk out of the Zone\",\n",
" metadata={\n",
" \"year\": 1979,\n",
" \"rating\": 9.9,\n",
" \"director\": \"Andrei Tarkovsky\",\n",
" \"genre\": \"science fiction\",\n",
" \"rating\": 9.9,\n",
" },\n",
" ),\n",
"]\n",
"\n",
"vectorstore = SupabaseVectorStore.from_documents(docs, embeddings, client=supabase, table_name=\"documents\", query_name=\"match_documents\")"
]
},
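{
"cell_type": "markdown",
"id": "9b2c1f6a",
"metadata": {},
"source": [
"As a quick sanity check (optional), you can query the vector store directly before building the retriever. `similarity_search` is the generic LangChain vector store method; the query below is just an illustration:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e5f6a7b",
"metadata": {},
"outputs": [],
"source": [
"# Plain similarity search against the `documents` table (no metadata filter).\n",
"vectorstore.similarity_search(\"dinosaur\")"
]
},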
{
"cell_type": "markdown",
"id": "5ecaab6d",
"metadata": {},
"source": [
"## Creating our self-querying retriever\n",
"Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "86e34dbf",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
"from langchain.chains.query_constructor.base import AttributeInfo\n",
"\n",
"metadata_field_info = [\n",
" AttributeInfo(\n",
" name=\"genre\",\n",
" description=\"The genre of the movie\",\n",
" type=\"string or list[string]\",\n",
" ),\n",
" AttributeInfo(\n",
" name=\"year\",\n",
" description=\"The year the movie was released\",\n",
" type=\"integer\",\n",
" ),\n",
" AttributeInfo(\n",
" name=\"director\",\n",
" description=\"The name of the movie director\",\n",
" type=\"string\",\n",
" ),\n",
" AttributeInfo(\n",
" name=\"rating\", description=\"A 1-10 rating for the movie\", type=\"float\"\n",
" ),\n",
"]\n",
"document_content_description = \"Brief summary of a movie\"\n",
"llm = OpenAI(temperature=0)\n",
"retriever = SelfQueryRetriever.from_llm(\n",
" llm, vectorstore, document_content_description, metadata_field_info, verbose=True\n",
")"
]
},
{
"cell_type": "markdown",
"id": "ea9df8d4",
"metadata": {},
"source": [
"## Testing it out\n",
"And now we can try actually using our retriever!"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "38a126e9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query='dinosaur' filter=None limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'genre': 'science fiction', 'rating': 7.7}),\n",
" Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'}),\n",
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'genre': 'science fiction', 'rating': 9.9, 'director': 'Andrei Tarkovsky'}),\n",
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'rating': 8.6, 'director': 'Satoshi Kon'})]"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example only specifies a relevant query\n",
"retriever.get_relevant_documents(\"What are some movies about dinosaurs\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "fc3f1e6e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query=' ' filter=Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5) limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'genre': 'science fiction', 'rating': 9.9, 'director': 'Andrei Tarkovsky'}),\n",
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'rating': 8.6, 'director': 'Satoshi Kon'})]"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example only specifies a filter\n",
"retriever.get_relevant_documents(\"I want to watch a movie rated higher than 8.5\")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "b19d4da0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig') limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'year': 2019, 'rating': 8.3, 'director': 'Greta Gerwig'})]"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example specifies a query and a filter\n",
"retriever.get_relevant_documents(\"Has Greta Gerwig directed any movies about women?\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "f900e40e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query=' ' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GTE: 'gte'>, attribute='rating', value=8.5), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='science fiction')]) limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'genre': 'science fiction', 'rating': 9.9, 'director': 'Andrei Tarkovsky'})]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example specifies a composite filter\n",
"retriever.get_relevant_documents(\n",
" \"What's a highly rated (above 8.5) science fiction film?\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "12a51522",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query='toys' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GT: 'gt'>, attribute='year', value=1990), Comparison(comparator=<Comparator.LTE: 'lte'>, attribute='year', value=2005), Comparison(comparator=<Comparator.LIKE: 'like'>, attribute='genre', value='animated')]) limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})]"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example specifies a query and composite filter\n",
"retriever.get_relevant_documents(\n",
" \"What's a movie after 1990 but before (or on) 2005 that's all about toys, and preferably is animated\"\n",
")"
]
},
{
"cell_type": "markdown",
"id": "39bd1de1-b9fe-4a98-89da-58d8a7a6ae51",
"metadata": {},
"source": [
"## Filter k\n",
"\n",
"We can also use the self query retriever to specify `k`: the number of documents to fetch.\n",
"\n",
"We can do this by passing `enable_limit=True` to the constructor."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "bff36b88-b506-4877-9c63-e5a1a8d78e64",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"retriever = SelfQueryRetriever.from_llm(\n",
" llm,\n",
" vectorstore,\n",
" document_content_description,\n",
" metadata_field_info,\n",
" enable_limit=True,\n",
" verbose=True,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "2758d229-4f97-499c-819f-888acaf8ee10",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query='dinosaur' filter=None limit=2\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'genre': 'science fiction', 'rating': 7.7}),\n",
" Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})]"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example only specifies a relevant query\n",
"retriever.get_relevant_documents(\"what are two movies about dinosaurs\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -0,0 +1,440 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "13afcae7",
"metadata": {},
"source": [
"# Vectara self-querying \n",
"\n",
">[Vectara](https://docs.vectara.com/docs/) is a GenAI platform for developers. It provides a simple API to build Grounded Generation (aka Retrieval-augmented-generation) applications.\n",
"\n",
"In the notebook we'll demo the `SelfQueryRetriever` wrapped around a Vectara vector store. "
]
},
{
"cell_type": "markdown",
"id": "68e75fb9",
"metadata": {},
"source": [
"# Setup\n",
"\n",
"You will need a Vectara account to use Vectara with LangChain. To get started, use the following steps (see our [quickstart](https://docs.vectara.com/docs/quickstart) guide):\n",
"1. [Sign up](https://console.vectara.com/signup) for a Vectara account if you don't already have one. Once you have completed your sign up you will have a Vectara customer ID. You can find your customer ID by clicking on your name, on the top-right of the Vectara console window.\n",
"2. Within your account you can create one or more corpora. Each corpus represents an area that stores text data upon ingest from input documents. To create a corpus, use the **\"Create Corpus\"** button. You then provide a name to your corpus as well as a description. Optionally you can define filtering attributes and apply some advanced options. If you click on your created corpus, you can see its name and corpus ID right on the top.\n",
"3. Next you'll need to create API keys to access the corpus. Click on the **\"Authorization\"** tab in the corpus view and then the **\"Create API Key\"** button. Give your key a name, and choose whether you want query only or query+index for your key. Click \"Create\" and you now have an active API key. Keep this key confidential. \n",
"\n",
"To use LangChain with Vectara, you'll need to have these three values: customer ID, corpus ID and api_key.\n",
"You can provide those to LangChain in two ways:\n",
"\n",
"1. Include in your environment these three variables: `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`.\n",
"\n",
"> For example, you can set these variables using os.environ and getpass as follows:\n",
"\n",
"```python\n",
"import os\n",
"import getpass\n",
"\n",
"os.environ[\"VECTARA_CUSTOMER_ID\"] = getpass.getpass(\"Vectara Customer ID:\")\n",
"os.environ[\"VECTARA_CORPUS_ID\"] = getpass.getpass(\"Vectara Corpus ID:\")\n",
"os.environ[\"VECTARA_API_KEY\"] = getpass.getpass(\"Vectara API Key:\")\n",
"```\n",
"\n",
"1. Provide them as arguments when creating the Vectara vectorstore object:\n",
"\n",
"```python\n",
"vectorstore = Vectara(\n",
" vectara_customer_id=vectara_customer_id,\n",
" vectara_corpus_id=vectara_corpus_id,\n",
" vectara_api_key=vectara_api_key\n",
" )\n",
"```\n",
"\n",
"**Note:** The self-query retriever requires you to have `lark` installed (`pip install lark`). "
]
},
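{
"cell_type": "markdown",
"id": "6c0e8d24",
"metadata": {},
"source": [
"If you're running this as a notebook, you can install it inline (a convenience cell; skip if `lark` is already installed):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3b7f5e19",
"metadata": {},
"outputs": [],
"source": [
"%pip install lark"
]
},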
{
"cell_type": "markdown",
"id": "742ac16d",
"metadata": {},
"source": [
"## Connecting to Vectara from LangChain\n",
"\n",
"In this example, we assume that you've created an account and a corpus, and added your VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and VECTARA_API_KEY (created with permissions for both indexing and query) as environment variables.\n",
"\n",
"The corpus has 4 fields defined as metadata for filtering: year, director, rating, and genre\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "cb4a5787",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.embeddings import FakeEmbeddings\n",
"from langchain.schema import Document\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.vectorstores import Vectara\n",
"from langchain.document_loaders import TextLoader\n",
"\n",
"from langchain.llms import OpenAI\n",
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
"from langchain.chains.query_constructor.base import AttributeInfo\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "bcbe04d9",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"docs = [\n",
" Document(\n",
" page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n",
" metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": \"science fiction\"},\n",
" ),\n",
" Document(\n",
" page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\",\n",
" metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2},\n",
" ),\n",
" Document(\n",
" page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n",
" metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6},\n",
" ),\n",
" Document(\n",
" page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n",
" metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3},\n",
" ),\n",
" Document(\n",
" page_content=\"Toys come alive and have a blast doing so\",\n",
" metadata={\"year\": 1995, \"genre\": \"animated\"},\n",
" ),\n",
" Document(\n",
" page_content=\"Three men walk into the Zone, three men walk out of the Zone\",\n",
" metadata={\n",
" \"year\": 1979,\n",
" \"rating\": 9.9,\n",
" \"director\": \"Andrei Tarkovsky\",\n",
" \"genre\": \"science fiction\",\n",
" },\n",
" ),\n",
"]\n",
"\n",
"vectara = Vectara()\n",
"for doc in docs:\n",
" vectara.add_texts([doc.page_content], embedding=FakeEmbeddings(size=768), doc_metadata=doc.metadata)"
]
},
{
"cell_type": "markdown",
"id": "5ecaab6d",
"metadata": {},
"source": [
"## Creating our self-querying retriever\n",
"Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "86e34dbf",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.retrievers.self_query.base import SelfQueryRetriever\n",
"from langchain.chains.query_constructor.base import AttributeInfo\n",
"\n",
"metadata_field_info = [\n",
" AttributeInfo(\n",
" name=\"genre\",\n",
" description=\"The genre of the movie\",\n",
" type=\"string or list[string]\",\n",
" ),\n",
" AttributeInfo(\n",
" name=\"year\",\n",
" description=\"The year the movie was released\",\n",
" type=\"integer\",\n",
" ),\n",
" AttributeInfo(\n",
" name=\"director\",\n",
" description=\"The name of the movie director\",\n",
" type=\"string\",\n",
" ),\n",
" AttributeInfo(\n",
" name=\"rating\", description=\"A 1-10 rating for the movie\", type=\"float\"\n",
" ),\n",
"]\n",
"document_content_description = \"Brief summary of a movie\"\n",
"llm = OpenAI(temperature=0)\n",
"retriever = SelfQueryRetriever.from_llm(\n",
" llm, vectara, document_content_description, metadata_field_info, verbose=True\n",
")"
]
},
{
"cell_type": "markdown",
"id": "ea9df8d4",
"metadata": {},
"source": [
"## Testing it out\n",
"And now we can try actually using our retriever!"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "38a126e9",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/ofer/dev/langchain/libs/langchain/langchain/chains/llm.py:278: UserWarning: The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"query='dinosaur' filter=None limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'lang': 'eng', 'offset': '0', 'len': '66', 'year': '1993', 'rating': '7.7', 'genre': 'science fiction', 'source': 'langchain'}),\n",
" Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'}),\n",
" Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'}),\n",
" Document(page_content='Leo DiCaprio gets lost in a dream within a dream within a dream within a ...', metadata={'lang': 'eng', 'offset': '0', 'len': '76', 'year': '2010', 'director': 'Christopher Nolan', 'rating': '8.2', 'source': 'langchain'}),\n",
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'})]"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example only specifies a relevant query\n",
"retriever.get_relevant_documents(\"What are some movies about dinosaurs\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "fc3f1e6e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query=' ' filter=Comparison(comparator=<Comparator.GT: 'gt'>, attribute='rating', value=8.5) limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'}),\n",
" Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'lang': 'eng', 'offset': '0', 'len': '116', 'year': '2006', 'director': 'Satoshi Kon', 'rating': '8.6', 'source': 'langchain'})]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example only specifies a filter\n",
"retriever.get_relevant_documents(\"I want to watch a movie rated higher than 8.5\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "b19d4da0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query='women' filter=Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='director', value='Greta Gerwig') limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'lang': 'eng', 'offset': '0', 'len': '82', 'year': '2019', 'director': 'Greta Gerwig', 'rating': '8.3', 'source': 'langchain'})]"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example specifies a query and a filter\n",
"retriever.get_relevant_documents(\"Has Greta Gerwig directed any movies about women\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "f900e40e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query=' ' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GTE: 'gte'>, attribute='rating', value=8.5), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='science fiction')]) limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'lang': 'eng', 'offset': '0', 'len': '60', 'year': '1979', 'rating': '9.9', 'director': 'Andrei Tarkovsky', 'genre': 'science fiction', 'source': 'langchain'})]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example specifies a composite filter\n",
"retriever.get_relevant_documents(\n",
" \"What's a highly rated (above 8.5) science fiction film?\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "12a51522",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query='toys' filter=Operation(operator=<Operator.AND: 'and'>, arguments=[Comparison(comparator=<Comparator.GT: 'gt'>, attribute='year', value=1990), Comparison(comparator=<Comparator.LT: 'lt'>, attribute='year', value=2005), Comparison(comparator=<Comparator.EQ: 'eq'>, attribute='genre', value='animated')]) limit=None\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'})]"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example specifies a query and composite filter\n",
"retriever.get_relevant_documents(\n",
" \"What's a movie after 1990 but before 2005 that's all about toys, and preferably is animated\"\n",
")"
]
},
{
"cell_type": "markdown",
"id": "39bd1de1-b9fe-4a98-89da-58d8a7a6ae51",
"metadata": {},
"source": [
"## Filter k\n",
"\n",
"We can also use the self query retriever to specify `k`: the number of documents to fetch.\n",
"\n",
"We can do this by passing `enable_limit=True` to the constructor."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "bff36b88-b506-4877-9c63-e5a1a8d78e64",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"retriever = SelfQueryRetriever.from_llm(\n",
" llm,\n",
" vectara,\n",
" document_content_description,\n",
" metadata_field_info,\n",
" enable_limit=True,\n",
" verbose=True,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "2758d229-4f97-499c-819f-888acaf8ee10",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"query='dinosaur' filter=None limit=2\n"
]
},
{
"data": {
"text/plain": [
"[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'lang': 'eng', 'offset': '0', 'len': '66', 'year': '1993', 'rating': '7.7', 'genre': 'science fiction', 'source': 'langchain'}),\n",
" Document(page_content='Toys come alive and have a blast doing so', metadata={'lang': 'eng', 'offset': '0', 'len': '41', 'year': '1995', 'genre': 'animated', 'source': 'langchain'})]"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# This example only specifies a relevant query\n",
"retriever.get_relevant_documents(\"what are two movies about dinosaurs\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -1,12 +1,21 @@
{ {
"cells": [ "cells": [
{
"cell_type": "raw",
"id": "ea5c61b2-8b52-4270-bdb0-c4df88608f15",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: Interacting with APIs\n",
"---"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "a15e6a18", "id": "a15e6a18",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Interacting with APIs\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/apis.ipynb)\n", "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/apis.ipynb)\n",
"\n", "\n",
"## Use case \n", "## Use case \n",
@ -69,9 +78,7 @@
"cell_type": "code", "cell_type": "code",
"execution_count": 2, "execution_count": 2,
"id": "30b780e3", "id": "30b780e3",
"metadata": { "metadata": {},
"scrolled": false
},
"outputs": [ "outputs": [
{ {
"name": "stderr", "name": "stderr",
@ -415,7 +422,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.16" "version": "3.9.1"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@ -1,12 +1,21 @@
{ {
"cells": [ "cells": [
{
"cell_type": "raw",
"id": "22fd28c9-9b48-476c-bca8-20efef7fdb14",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: Chatbots\n",
"---"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "ee7f95e4", "id": "ee7f95e4",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Chatbots\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/chatbots.ipynb)\n", "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/chatbots.ipynb)\n",
"\n", "\n",
"## Use case\n", "## Use case\n",

View File

@ -1,11 +1,19 @@
{ {
"cells": [ "cells": [
{
"cell_type": "raw",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: Code understanding\n",
"---"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Code Understanding\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/code_understanding.ipynb)\n", "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/code_understanding.ipynb)\n",
"\n", "\n",
"## Use case\n", "## Use case\n",
@ -1047,7 +1055,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.16" "version": "3.9.1"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@ -1,12 +1,21 @@
{ {
"cells": [ "cells": [
{
"cell_type": "raw",
"id": "df29b30a-fd27-4e08-8269-870df5631f9e",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: Extraction\n",
"---"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "b84edb4e", "id": "b84edb4e",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Extraction\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/extraction.ipynb)\n", "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/extraction.ipynb)\n",
"\n", "\n",
"## Use case\n", "## Use case\n",
@ -589,7 +598,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.16" "version": "3.9.1"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@ -1 +1,2 @@
label: 'More' label: 'More'
position: 2

View File

@ -584,7 +584,7 @@
"\n", "\n",
"Collectivly, this tells us: carefully inspect Agent traces and tool outputs. \n", "Collectivly, this tells us: carefully inspect Agent traces and tool outputs. \n",
"\n", "\n",
"As we saw with the [SQL use case](/docs/use_cases/sql), `ReAct agents` can be work very well for specific problems. \n", "As we saw with the [SQL use case](/docs/use_cases/qa_structured/sql), `ReAct agents` can be work very well for specific problems. \n",
"\n", "\n",
"But, as shown here, the result is degraded relative to what we see with the OpenAI agent." "But, as shown here, the result is degraded relative to what we see with the OpenAI agent."
] ]

View File

@ -0,0 +1,307 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "7f0b0c06-ee70-468c-8bf5-b023f9e5e0a2",
"metadata": {},
"source": [
"# Diffbot Graph Transformer\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/more/graph/diffbot_graphtransformer.ipynb)\n",
"\n",
"## Use case\n",
"\n",
"Text data often contain rich relationships and insights that can be useful for various analytics, recommendation engines, or knowledge management applications.\n",
"\n",
"Diffbot's NLP API allows for the extraction of entities, relationships, and semantic meaning from unstructured text data.\n",
"\n",
"By coupling Diffbot's NLP API with Neo4j, a graph database, you can create powerful, dynamic graph structures based on the information extracted from text. These graph structures are fully queryable and can be integrated into various applications.\n",
"\n",
"This combination allows for use cases such as:\n",
"\n",
"* Building knowledge graphs from textual documents, websites, or social media feeds.\n",
"* Generating recommendations based on semantic relationships in the data.\n",
"* Creating advanced search features that understand the relationships between entities.\n",
"* Building analytics dashboards that allow users to explore the hidden relationships in data.\n",
"\n",
"## Overview\n",
"\n",
"LangChain provides tools to interact with Graph Databases:\n",
"\n",
"1. `Construct knowledge graphs from text` using graph transformer and store integrations \n",
"2. `Query a graph database` using chains for query creation and execution\n",
"3. `Interact with a graph database` using agents for robust and flexible querying \n",
"\n",
"## Quickstart\n",
"\n",
"First, get required packages and set environment variables:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "975648da-b24f-4164-a671-6772179e12df",
"metadata": {},
"outputs": [],
"source": [
"!pip install langchain langchain-experimental openai neo4j wikipedia"
]
},
{
"cell_type": "markdown",
"id": "77718977-629e-46c2-b091-f9191b9ec569",
"metadata": {},
"source": [
"## Diffbot NLP Service\n",
"\n",
"Diffbot's NLP service is a tool for extracting entities, relationships, and semantic context from unstructured text data.\n",
"This extracted information can be used to construct a knowledge graph.\n",
"To use their service, you'll need to obtain an API key from [Diffbot](https://www.diffbot.com/products/natural-language/)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "2cbf97d0-3682-439b-8750-b695ff726789",
"metadata": {},
"outputs": [],
"source": [
"from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer\n",
"\n",
"diffbot_api_key = \"DIFFBOT_API_KEY\"\n",
"diffbot_nlp = DiffbotGraphTransformer(diffbot_api_key=diffbot_api_key)"
]
},
{
"cell_type": "markdown",
"id": "5e3b894a-e3ee-46c7-8116-f8377f8f0159",
"metadata": {},
"source": [
"This code fetches Wikipedia articles about \"Warren Buffett\" and then uses `DiffbotGraphTransformer` to extract entities and relationships.\n",
"The `DiffbotGraphTransformer` outputs a structured data `GraphDocument`, which can be used to populate a graph database.\n",
"Note that text chunking is avoided due to Diffbot's [character limit per API request](https://docs.diffbot.com/reference/introduction-to-natural-language-api)."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "53f8df86-47a1-44a1-9a0f-6725b90703bc",
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import WikipediaLoader\n",
"\n",
"query = \"Warren Buffett\"\n",
"raw_documents = WikipediaLoader(query=query).load()\n",
"graph_documents = diffbot_nlp.convert_to_graph_documents(raw_documents)"
]
},
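{
"cell_type": "markdown",
"id": "7d3e2f10",
"metadata": {},
"source": [
"Optionally, you can peek at what was extracted before loading it anywhere. This is a minimal inspection sketch assuming the `GraphDocument` fields `nodes` and `relationships` from `langchain_experimental`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5a1b9c42",
"metadata": {},
"outputs": [],
"source": [
"# Show a few extracted nodes and relationships from the first document.\n",
"print(graph_documents[0].nodes[:5])\n",
"print(graph_documents[0].relationships[:5])"
]
},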
{
"cell_type": "markdown",
"id": "31bb851a-aab4-4b97-a6b7-fce397d32b47",
"metadata": {},
"source": [
"## Loading the data into a knowledge graph\n",
"\n",
"You will need to have a running Neo4j instance. One option is to create a [free Neo4j database instance in their Aura cloud service](https://neo4j.com/cloud/platform/aura-graph-database/). You can also run the database locally using the [Neo4j Desktop application](https://neo4j.com/download/), or running a docker container. You can run a local docker container by running the executing the following script:\n",
"```\n",
"docker run \\\n",
" --name neo4j \\\n",
" -p 7474:7474 -p 7687:7687 \\\n",
" -d \\\n",
" -e NEO4J_AUTH=neo4j/pleaseletmein \\\n",
" -e NEO4J_PLUGINS=\\[\\\"apoc\\\"\\] \\\n",
" neo4j:latest\n",
"``` \n",
"If you are using the docker container, you need to wait a couple of second for the database to start."
]
},
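{
"cell_type": "markdown",
"id": "2f8c0d91",
"metadata": {},
"source": [
"If you'd rather script that wait, below is a minimal polling sketch. It assumes the container above (user `neo4j`, password `pleaseletmein`) and uses the official `neo4j` Python driver's `verify_connectivity` check:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8e4a6b53",
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"\n",
"from neo4j import GraphDatabase\n",
"\n",
"# Poll until the database accepts Bolt connections (give up after ~30s).\n",
"driver = GraphDatabase.driver(\n",
"    \"bolt://localhost:7687\", auth=(\"neo4j\", \"pleaseletmein\")\n",
")\n",
"for _ in range(30):\n",
"    try:\n",
"        driver.verify_connectivity()\n",
"        break\n",
"    except Exception:\n",
"        time.sleep(1)\n",
"driver.close()"
]
},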
{
"cell_type": "code",
"execution_count": 4,
"id": "0b2b6641-5a5d-467c-b148-e6aad5e4baa7",
"metadata": {},
"outputs": [],
"source": [
"from langchain.graphs import Neo4jGraph\n",
"\n",
"url=\"bolt://localhost:7687\"\n",
"username=\"neo4j\"\n",
"password=\"pleaseletmein\"\n",
"\n",
"graph = Neo4jGraph(\n",
" url=url,\n",
" username=username, \n",
" password=password\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0b15e840-fe6f-45db-9193-1b4e2df5c12c",
"metadata": {},
"source": [
"The `GraphDocuments` can be loaded into a knowledge graph using the `add_graph_documents` method."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "1a67c4a8-955c-42a2-9c5d-de3ac0e640ec",
"metadata": {},
"outputs": [],
"source": [
"graph.add_graph_documents(graph_documents)"
]
},
{
"cell_type": "markdown",
"id": "ed411e05-2b03-460d-997e-938482774f40",
"metadata": {},
"source": [
"## Refresh graph schema information\n",
"If the schema of database changes, you can refresh the schema information needed to generate Cypher statements"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "904c9ee3-787c-403f-857d-459ce5ad5a1b",
"metadata": {},
"outputs": [],
"source": [
"graph.refresh_schema()"
]
},
{
"cell_type": "markdown",
"id": "f19d1387-5899-4258-8c94-8ef5fa7db464",
"metadata": {},
"source": [
"## Querying the graph\n",
"We can now use the graph cypher QA chain to ask question of the graph. It is advisable to use **gpt-4** to construct Cypher queries to get the best experience."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "9393b732-67c8-45c1-9ec2-089f49c62448",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import GraphCypherQAChain\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"chain = GraphCypherQAChain.from_llm(\n",
" cypher_llm=ChatOpenAI(temperature=0, model_name=\"gpt-4\"),\n",
" qa_llm=ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo\"),\n",
" graph=graph, verbose=True,\n",
" \n",
")\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "1a9b3652-b436-404d-aa25-5fb576f23dc0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
"Generated Cypher:\n",
"\u001b[32;1m\u001b[1;3mMATCH (p:Person {name: \"Warren Buffett\"})-[:EDUCATED_AT]->(o:Organization)\n",
"RETURN o.name\u001b[0m\n",
"Full Context:\n",
"\u001b[32;1m\u001b[1;3m[{'o.name': 'New York Institute of Finance'}, {'o.name': 'Alice Deal Junior High School'}, {'o.name': 'Woodrow Wilson High School'}, {'o.name': 'University of Nebraska'}]\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Warren Buffett attended the University of Nebraska.'"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.run(\"Which university did Warren Buffett attend?\")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "adc0ba0f-a62c-4875-89ce-da717f3ab148",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
"Generated Cypher:\n",
"\u001b[32;1m\u001b[1;3mMATCH (p:Person)-[r:EMPLOYEE_OR_MEMBER_OF]->(o:Organization) WHERE o.name = 'Berkshire Hathaway' RETURN p.name\u001b[0m\n",
"Full Context:\n",
"\u001b[32;1m\u001b[1;3m[{'p.name': 'Charlie Munger'}, {'p.name': 'Oliver Chace'}, {'p.name': 'Howard Buffett'}, {'p.name': 'Howard'}, {'p.name': 'Susan Buffett'}, {'p.name': 'Warren Buffett'}]\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Charlie Munger, Oliver Chace, Howard Buffett, Susan Buffett, and Warren Buffett are or were working at Berkshire Hathaway.'"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.run(\"Who is or was working at Berkshire Hathaway?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d636954b-d967-4e96-9489-92e11c74af35",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -0,0 +1,3 @@
label: 'QA over structured data'
collapsed: false
position: 0.5

View File

@ -0,0 +1 @@
label: 'Integration-specific'

View File

@ -0,0 +1,158 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Elasticsearch\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/qa_structured/integrations/elasticsearch.ipynb)\n",
"\n",
"We can use LLMs to interact with Elasticsearch analytics databases in natural language.\n",
"\n",
"This chain builds search queries via the Elasticsearch DSL API (filters and aggregations).\n",
"\n",
"The Elasticsearch client must have permissions for index listing, mapping description and search queries.\n",
"\n",
"See [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html) for instructions on how to run Elasticsearch locally."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain langchain-experimental openai elasticsearch\n",
"\n",
"# Set env var OPENAI_API_KEY or load from a .env file\n",
"# import dotenv\n",
"\n",
"# dotenv.load_dotenv()"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"from elasticsearch import Elasticsearch\n",
"\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Initialize Elasticsearch python client.\n",
"# See https://elasticsearch-py.readthedocs.io/en/v8.8.2/api.html#elasticsearch.Elasticsearch\n",
"ELASTIC_SEARCH_SERVER = \"https://elastic:pass@localhost:9200\"\n",
"db = Elasticsearch(ELASTIC_SEARCH_SERVER)"
]
},
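{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, verify the connection before building the chain. `Elasticsearch.info()` is the client's standard cluster-info call (this assumes the credentials above are valid):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Returns cluster metadata if the client can reach the server.\n",
"db.info()"
]
},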
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Uncomment the next cell to initially populate your db."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# customers = [\n",
"# {\"firstname\": \"Jennifer\", \"lastname\": \"Walters\"},\n",
"# {\"firstname\": \"Monica\",\"lastname\":\"Rambeau\"},\n",
"# {\"firstname\": \"Carol\",\"lastname\":\"Danvers\"},\n",
"# {\"firstname\": \"Wanda\",\"lastname\":\"Maximoff\"},\n",
"# {\"firstname\": \"Jennifer\",\"lastname\":\"Takeda\"},\n",
"# ]\n",
"# for i, customer in enumerate(customers):\n",
"# db.create(index=\"customers\", document=customer, id=i)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
"chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"question = \"What are the first names of all the customers?\"\n",
"chain.run(question)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can customize the prompt."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains.elasticsearch_database.prompts import DEFAULT_DSL_TEMPLATE\n",
"from langchain.prompts.prompt import PromptTemplate\n",
"\n",
"PROMPT_TEMPLATE = \"\"\"Given an input question, create a syntactically correct Elasticsearch query to run. Unless the user specifies in their question a specific number of examples they wish to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.\n",
"\n",
"Unless told to do not query for all the columns from a specific index, only ask for a the few relevant columns given the question.\n",
"\n",
"Pay attention to use only the column names that you can see in the mapping description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which index. Return the query as valid json.\n",
"\n",
"Use the following format:\n",
"\n",
"Question: Question here\n",
"ESQuery: Elasticsearch Query formatted as json\n",
"\"\"\"\n",
"\n",
"PROMPT = PromptTemplate.from_template(\n",
" PROMPT_TEMPLATE,\n",
")\n",
"chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, query_prompt=PROMPT)"
]
}
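,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"To see the customized prompt in action, you can re-ask the earlier question against the new chain (illustrative; the output depends on your index contents):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"chain.run(question)"
]
}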
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

View File

@ -0,0 +1,200 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "245065c6",
"metadata": {},
"source": [
"# Vector SQL Retriever with MyScale\n",
"\n",
">[MyScale](https://docs.myscale.com/en/) is an integrated vector database. You can access your database in SQL and also from here, LangChain. MyScale can make a use of [various data types and functions for filters](https://blog.myscale.com/2023/06/06/why-integrated-database-solution-can-boost-your-llm-apps/#filter-on-anything-without-constraints). It will boost up your LLM app no matter if you are scaling up your data or expand your system to broader application."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0246c5bf",
"metadata": {},
"outputs": [],
"source": [
"!pip3 install clickhouse-sqlalchemy InstructorEmbedding sentence_transformers openai langchain-experimental"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7585d2c3",
"metadata": {},
"outputs": [],
"source": [
"\n",
"from os import environ\n",
"import getpass\n",
"from typing import Dict, Any\n",
"from langchain import OpenAI, SQLDatabase, LLMChain\n",
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
"from sqlalchemy import create_engine, Column, MetaData\n",
"from langchain import PromptTemplate\n",
"\n",
"\n",
"from sqlalchemy import create_engine\n",
"\n",
"MYSCALE_HOST = \"msc-1decbcc9.us-east-1.aws.staging.myscale.cloud\"\n",
"MYSCALE_PORT = 443\n",
"MYSCALE_USER = \"chatdata\"\n",
"MYSCALE_PASSWORD = \"myscale_rocks\"\n",
"OPENAI_API_KEY = getpass.getpass(\"OpenAI API Key:\")\n",
"\n",
"engine = create_engine(\n",
" f\"clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/default?protocol=https\"\n",
")\n",
"metadata = MetaData(bind=engine)\n",
"environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e08d9ddc",
"metadata": {},
"outputs": [],
"source": [
"from langchain.embeddings import HuggingFaceInstructEmbeddings\n",
"from langchain_experimental.sql.vector_sql import VectorSQLOutputParser\n",
"\n",
"output_parser = VectorSQLOutputParser.from_embeddings(\n",
" model=HuggingFaceInstructEmbeddings(\n",
" model_name=\"hkunlp/instructor-xl\", model_kwargs={\"device\": \"cpu\"}\n",
" )\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "84b705b2",
"metadata": {},
"outputs": [],
"source": [
"\n",
"from langchain.llms import OpenAI\n",
"from langchain.callbacks import StdOutCallbackHandler\n",
"\n",
"from langchain.utilities.sql_database import SQLDatabase\n",
"from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n",
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
"\n",
"chain = VectorSQLDatabaseChain(\n",
" llm_chain=LLMChain(\n",
" llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),\n",
" prompt=MYSCALE_PROMPT,\n",
" ),\n",
" top_k=10,\n",
" return_direct=True,\n",
" sql_cmd_parser=output_parser,\n",
" database=SQLDatabase(engine, None, metadata),\n",
")\n",
"\n",
"import pandas as pd\n",
"\n",
"pd.DataFrame(\n",
" chain.run(\n",
" \"Please give me 10 papers to ask what is PageRank?\",\n",
" callbacks=[StdOutCallbackHandler()],\n",
" )\n",
")"
]
},
{
"cell_type": "markdown",
"id": "6c09cda0",
"metadata": {},
"source": [
"## SQL Database as Retriever"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "734d7ff5",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain\n",
"\n",
"from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n",
"from langchain_experimental.retrievers.vector_sql_database \\\n",
" import VectorSQLDatabaseChainRetriever\n",
"from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n",
"from langchain_experimental.sql.vector_sql import VectorSQLRetrieveAllOutputParser\n",
"\n",
"output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(\n",
" output_parser.model\n",
")\n",
"\n",
"chain = VectorSQLDatabaseChain.from_llm(\n",
" llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),\n",
" prompt=MYSCALE_PROMPT,\n",
" top_k=10,\n",
" return_direct=True,\n",
" db=SQLDatabase(engine, None, metadata),\n",
" sql_cmd_parser=output_parser_retrieve_all,\n",
" native_format=True,\n",
")\n",
"\n",
"# You need all those keys to get docs\n",
"retriever = VectorSQLDatabaseChainRetriever(sql_db_chain=chain, page_content_key=\"abstract\")\n",
"\n",
"document_with_metadata_prompt = PromptTemplate(\n",
" input_variables=[\"page_content\", \"id\", \"title\", \"authors\", \"pubdate\", \"categories\"],\n",
" template=\"Content:\\n\\tTitle: {title}\\n\\tAbstract: {page_content}\\n\\tAuthors: {authors}\\n\\tDate of Publication: {pubdate}\\n\\tCategories: {categories}\\nSOURCE: {id}\",\n",
")\n",
"\n",
"chain = RetrievalQAWithSourcesChain.from_chain_type(\n",
" ChatOpenAI(\n",
" model_name=\"gpt-3.5-turbo-16k\", openai_api_key=OPENAI_API_KEY, temperature=0.6\n",
" ),\n",
" retriever=retriever,\n",
" chain_type=\"stuff\",\n",
" chain_type_kwargs={\n",
" \"document_prompt\": document_with_metadata_prompt,\n",
" },\n",
" return_source_documents=True,\n",
")\n",
"ans = chain(\"Please give me 10 papers to ask what is PageRank?\",\n",
" callbacks=[StdOutCallbackHandler()])\n",
"print(ans[\"answer\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4948ff25",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
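The retriever built above can also be used on its own, outside the QA chain; a hedged sketch, assuming the `retriever` from the cells above:

```python
# Sketch only: get_relevant_documents runs the vector SQL chain and wraps each
# returned row as a Document whose page_content comes from the "abstract" column.
docs = retriever.get_relevant_documents("What is PageRank?")
for doc in docs[:3]:
    print(doc.metadata.get("title"), "->", doc.page_content[:80])
```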

File diff suppressed because it is too large

View File

@ -1,12 +1,21 @@
{ {
"cells": [ "cells": [
{
"cell_type": "raw",
"id": "2aca8168-62ec-4bba-93f0-73da08cd1920",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: Summarization\n",
"---"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "cf13f702", "id": "cf13f702",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Summarization\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/summarization.ipynb)\n", "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/summarization.ipynb)\n",
"\n", "\n",
"## Use case\n", "## Use case\n",
@ -548,7 +557,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.16" "version": "3.9.1"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@ -1,12 +1,21 @@
{ {
"cells": [ "cells": [
{
"cell_type": "raw",
"id": "cb6f552e-775f-4d84-bc7c-dca94c06a33c",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: Tagging\n",
"---"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "a0507a4b", "id": "a0507a4b",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Tagging\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/tagging.ipynb)\n", "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/tagging.ipynb)\n",
"\n", "\n",
"## Use case\n", "## Use case\n",
@ -408,7 +417,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.16" "version": "3.9.1"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@ -1,12 +1,21 @@
{ {
"cells": [ "cells": [
{
"cell_type": "raw",
"id": "e254cf03-49fc-4051-a4df-3a8e4e7d2688",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: Web scraping\n",
"---"
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "6605e7f7", "id": "6605e7f7",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Web Scraping\n",
"\n",
"[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/web_scraping.ipynb)\n", "[![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/extras/use_cases/web_scraping.ipynb)\n",
"\n", "\n",
"## Use case\n", "## Use case\n",
@ -306,9 +315,7 @@
"cell_type": "code", "cell_type": "code",
"execution_count": 7, "execution_count": 7,
"id": "977560ba", "id": "977560ba",
"metadata": { "metadata": {},
"scrolled": false
},
"outputs": [ "outputs": [
{ {
"name": "stdout", "name": "stdout",
@ -591,7 +598,7 @@
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.9.16" "version": "3.9.1"
} }
}, },
"nbformat": 4, "nbformat": 4,

View File

@ -5,10 +5,12 @@ pip install openai google-search-results
``` ```
```python ```python
from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, SQLDatabase, SQLDatabaseChain from langchain.agents import initialize_agent, AgentType, Tool
from langchain.agents import initialize_agent, Tool from langchain.chains import LLMMathChain
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.utilities import SerpAPIWrapper, SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
``` ```

View File

@ -1,4 +1,7 @@
"""Data anonymizer package""" """Data anonymizer package"""
from langchain_experimental.data_anonymizer.presidio import PresidioAnonymizer from langchain_experimental.data_anonymizer.presidio import (
PresidioAnonymizer,
PresidioReversibleAnonymizer,
)
__all__ = ["PresidioAnonymizer"] __all__ = ["PresidioAnonymizer", "PresidioReversibleAnonymizer"]

View File

@ -1,4 +1,5 @@
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from typing import Optional
class AnonymizerBase(ABC): class AnonymizerBase(ABC):
@ -8,10 +9,24 @@ class AnonymizerBase(ABC):
wrapping the behavior for all methods in a base class. wrapping the behavior for all methods in a base class.
""" """
def anonymize(self, text: str) -> str: def anonymize(self, text: str, language: Optional[str] = None) -> str:
"""Anonymize text""" """Anonymize text"""
return self._anonymize(text) return self._anonymize(text, language)
@abstractmethod @abstractmethod
def _anonymize(self, text: str) -> str: def _anonymize(self, text: str, language: Optional[str]) -> str:
"""Abstract method to anonymize text""" """Abstract method to anonymize text"""
class ReversibleAnonymizerBase(AnonymizerBase):
"""
Base abstract class for reversible anonymizers.
"""
def deanonymize(self, text: str) -> str:
"""Deanonymize text"""
return self._deanonymize(text)
@abstractmethod
def _deanonymize(self, text: str) -> str:
"""Abstract method to deanonymize text"""

View File

@ -0,0 +1,21 @@
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict
MappingDataType = Dict[str, Dict[str, str]]
@dataclass
class DeanonymizerMapping:
mapping: MappingDataType = field(
default_factory=lambda: defaultdict(lambda: defaultdict(str))
)
@property
def data(self) -> MappingDataType:
"""Return the deanonymizer mapping"""
return {k: dict(v) for k, v in self.mapping.items()}
def update(self, new_mapping: MappingDataType) -> None:
for entity_type, values in new_mapping.items():
self.mapping[entity_type].update(values)

View File

@ -0,0 +1,17 @@
from langchain_experimental.data_anonymizer.presidio import MappingDataType
def default_matching_strategy(text: str, deanonymizer_mapping: MappingDataType) -> str:
"""
Default matching strategy for deanonymization.
It replaces all the anonymized entities with the original ones.
Args:
text: text to deanonymize
deanonymizer_mapping: mapping between anonymized entities and original ones
"""
# Iterate over all the entities (PERSON, EMAIL_ADDRESS, etc.)
for entity_type in deanonymizer_mapping:
for anonymized, original in deanonymizer_mapping[entity_type].items():
text = text.replace(anonymized, original)
return text
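A short sketch of the default strategy with a hand-built mapping (the values are illustrative, not from the commit):

```python
from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
    default_matching_strategy,
)

# Keys are the anonymized values appearing in the text; values are the originals.
mapping = {
    "PERSON": {"Slim Shady": "John Doe"},
    "PHONE_NUMBER": {"555-555-5555": "111-111-1111"},
}
text = "Slim Shady called from 555-555-5555."
print(default_matching_strategy(text, mapping))
# -> "John Doe called from 111-111-1111."
```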

View File

@ -1,8 +1,8 @@
import string import string
from typing import Callable, Dict from typing import Callable, Dict, Optional
def get_pseudoanonymizer_mapping() -> Dict[str, Callable]: def get_pseudoanonymizer_mapping(seed: Optional[int] = None) -> Dict[str, Callable]:
try: try:
from faker import Faker from faker import Faker
except ImportError as e: except ImportError as e:
@ -11,6 +11,7 @@ def get_pseudoanonymizer_mapping() -> Dict[str, Callable]:
) from e ) from e
fake = Faker() fake = Faker()
fake.seed_instance(seed)
# Listed entities supported by Microsoft Presidio (for now, global and US only) # Listed entities supported by Microsoft Presidio (for now, global and US only)
# Source: https://microsoft.github.io/presidio/supported_entities/ # Source: https://microsoft.github.io/presidio/supported_entities/
@ -26,8 +27,8 @@ def get_pseudoanonymizer_mapping() -> Dict[str, Callable]:
fake.random_choices(string.ascii_lowercase + string.digits, length=26) fake.random_choices(string.ascii_lowercase + string.digits, length=26)
), ),
"IP_ADDRESS": lambda _: fake.ipv4_public(), "IP_ADDRESS": lambda _: fake.ipv4_public(),
"LOCATION": lambda _: fake.address(), "LOCATION": lambda _: fake.city(),
"DATE_TIME": lambda _: fake.iso8601(), "DATE_TIME": lambda _: fake.date(),
"NRP": lambda _: str(fake.random_number(digits=8, fix_len=True)), "NRP": lambda _: str(fake.random_number(digits=8, fix_len=True)),
"MEDICAL_LICENSE": lambda _: fake.bothify(text="??######").upper(), "MEDICAL_LICENSE": lambda _: fake.bothify(text="??######").upper(),
"URL": lambda _: fake.url(), "URL": lambda _: fake.url(),

View File

@ -1,24 +1,75 @@
from __future__ import annotations from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Optional import json
from collections import defaultdict
from pathlib import Path
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union
from langchain_experimental.data_anonymizer.base import AnonymizerBase import yaml
from langchain_experimental.data_anonymizer.base import (
AnonymizerBase,
ReversibleAnonymizerBase,
)
from langchain_experimental.data_anonymizer.deanonymizer_mapping import (
DeanonymizerMapping,
MappingDataType,
)
from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import (
default_matching_strategy,
)
from langchain_experimental.data_anonymizer.faker_presidio_mapping import ( from langchain_experimental.data_anonymizer.faker_presidio_mapping import (
get_pseudoanonymizer_mapping, get_pseudoanonymizer_mapping,
) )
if TYPE_CHECKING: try:
from presidio_analyzer import EntityRecognizer from presidio_analyzer import AnalyzerEngine
from presidio_analyzer.nlp_engine import NlpEngineProvider
except ImportError as e:
raise ImportError(
"Could not import presidio_analyzer, please install with "
"`pip install presidio-analyzer`. You will also need to download a "
"spaCy model to use the analyzer, e.g. "
"`python -m spacy download en_core_web_lg`."
) from e
try:
from presidio_anonymizer import AnonymizerEngine
from presidio_anonymizer.entities import OperatorConfig from presidio_anonymizer.entities import OperatorConfig
except ImportError as e:
raise ImportError(
"Could not import presidio_anonymizer, please install with "
"`pip install presidio-anonymizer`."
) from e
if TYPE_CHECKING:
from presidio_analyzer import EntityRecognizer, RecognizerResult
from presidio_anonymizer.entities import EngineResult
# Configuring Anonymizer for multiple languages
# Detailed description and examples can be found here:
# langchain/docs/extras/guides/privacy/multi_language_anonymization.ipynb
DEFAULT_LANGUAGES_CONFIG = {
# You can also use Stanza or transformers library.
# See https://microsoft.github.io/presidio/analyzer/customizing_nlp_models/
"nlp_engine_name": "spacy",
"models": [
{"lang_code": "en", "model_name": "en_core_web_lg"},
# {"lang_code": "de", "model_name": "de_core_news_md"},
# {"lang_code": "es", "model_name": "es_core_news_md"},
# ...
# List of available models: https://spacy.io/usage/models
],
}
class PresidioAnonymizer(AnonymizerBase): class PresidioAnonymizerBase(AnonymizerBase):
"""Anonymizer using Microsoft Presidio."""
def __init__( def __init__(
self, self,
analyzed_fields: Optional[List[str]] = None, analyzed_fields: Optional[List[str]] = None,
operators: Optional[Dict[str, OperatorConfig]] = None, operators: Optional[Dict[str, OperatorConfig]] = None,
languages_config: Dict = DEFAULT_LANGUAGES_CONFIG,
faker_seed: Optional[int] = None,
): ):
""" """
Args: Args:
@ -28,25 +79,15 @@ class PresidioAnonymizer(AnonymizerBase):
Operators allow for custom anonymization of detected PII. Operators allow for custom anonymization of detected PII.
Learn more: Learn more:
https://microsoft.github.io/presidio/tutorial/10_simple_anonymization/ https://microsoft.github.io/presidio/tutorial/10_simple_anonymization/
languages_config: Configuration for the NLP engine.
First language in the list will be used as the main language
in self.anonymize(...) when no language is specified.
Learn more:
https://microsoft.github.io/presidio/analyzer/customizing_nlp_models/
faker_seed: Seed used to initialize faker.
Defaults to None, in which case faker will be seeded randomly
and provide random values.
""" """
try:
from presidio_analyzer import AnalyzerEngine
except ImportError as e:
raise ImportError(
"Could not import presidio_analyzer, please install with "
"`pip install presidio-analyzer`. You will also need to download a "
"spaCy model to use the analyzer, e.g. "
"`python -m spacy download en_core_web_lg`."
) from e
try:
from presidio_anonymizer import AnonymizerEngine
from presidio_anonymizer.entities import OperatorConfig
except ImportError as e:
raise ImportError(
"Could not import presidio_anonymizer, please install with "
"`pip install presidio-anonymizer`."
) from e
self.analyzed_fields = ( self.analyzed_fields = (
analyzed_fields analyzed_fields
if analyzed_fields is not None if analyzed_fields is not None
@ -59,17 +100,66 @@ class PresidioAnonymizer(AnonymizerBase):
field: OperatorConfig( field: OperatorConfig(
operator_name="custom", params={"lambda": faker_function} operator_name="custom", params={"lambda": faker_function}
) )
for field, faker_function in get_pseudoanonymizer_mapping().items() for field, faker_function in get_pseudoanonymizer_mapping(
faker_seed
).items()
} }
) )
self._analyzer = AnalyzerEngine()
provider = NlpEngineProvider(nlp_configuration=languages_config)
nlp_engine = provider.create_engine()
self.supported_languages = list(nlp_engine.nlp.keys())
self._analyzer = AnalyzerEngine(
supported_languages=self.supported_languages, nlp_engine=nlp_engine
)
self._anonymizer = AnonymizerEngine() self._anonymizer = AnonymizerEngine()
def _anonymize(self, text: str) -> str: def add_recognizer(self, recognizer: EntityRecognizer) -> None:
"""Add a recognizer to the analyzer
Args:
recognizer: Recognizer to add to the analyzer.
"""
self._analyzer.registry.add_recognizer(recognizer)
self.analyzed_fields.extend(recognizer.supported_entities)
def add_operators(self, operators: Dict[str, OperatorConfig]) -> None:
"""Add operators to the anonymizer
Args:
operators: Operators to add to the anonymizer.
"""
self.operators.update(operators)
class PresidioAnonymizer(PresidioAnonymizerBase):
def _anonymize(self, text: str, language: Optional[str] = None) -> str:
"""Anonymize text.
Each PII entity is replaced with a fake value.
Each time fake values will be different, as they are generated randomly.
Args:
text: text to anonymize
language: language to use for analysis of PII
If None, the first (main) language in the list
of languages specified in the configuration will be used.
"""
if language is None:
language = self.supported_languages[0]
if language not in self.supported_languages:
raise ValueError(
f"Language '{language}' is not supported. "
f"Supported languages are: {self.supported_languages}. "
"Change your language configuration file to add more languages."
)
results = self._analyzer.analyze( results = self._analyzer.analyze(
text, text,
entities=self.analyzed_fields, entities=self.analyzed_fields,
language="en", language=language,
) )
return self._anonymizer.anonymize( return self._anonymizer.anonymize(
@ -78,11 +168,199 @@ class PresidioAnonymizer(AnonymizerBase):
operators=self.operators, operators=self.operators,
).text ).text
def add_recognizer(self, recognizer: EntityRecognizer) -> None:
"""Add a recognizer to the analyzer"""
self._analyzer.registry.add_recognizer(recognizer)
self.analyzed_fields.extend(recognizer.supported_entities)
def add_operators(self, operators: Dict[str, OperatorConfig]) -> None: class PresidioReversibleAnonymizer(PresidioAnonymizerBase, ReversibleAnonymizerBase):
"""Add operators to the anonymizer""" def __init__(
self.operators.update(operators) self,
analyzed_fields: Optional[List[str]] = None,
operators: Optional[Dict[str, OperatorConfig]] = None,
languages_config: Dict = DEFAULT_LANGUAGES_CONFIG,
faker_seed: Optional[int] = None,
):
super().__init__(analyzed_fields, operators, languages_config, faker_seed)
self._deanonymizer_mapping = DeanonymizerMapping()
@property
def deanonymizer_mapping(self) -> MappingDataType:
"""Return the deanonymizer mapping"""
return self._deanonymizer_mapping.data
def _update_deanonymizer_mapping(
self,
original_text: str,
analyzer_results: List[RecognizerResult],
anonymizer_results: EngineResult,
) -> None:
"""Creates or updates the mapping used to de-anonymize text.
This method exploits the results returned by the
analysis and anonymization processes.
It constructs a mapping from each anonymized entity
back to its original text value.
Mapping will be stored as "deanonymizer_mapping" property.
Example of "deanonymizer_mapping":
{
"PERSON": {
"<anonymized>": "<original>",
"John Doe": "Slim Shady"
},
"PHONE_NUMBER": {
"111-111-1111": "555-555-5555"
}
...
}
"""
# We are able to zip and loop through both lists because we expect
# them to return corresponding entities for each identified piece
# of analyzable data from our input.
# We sort them by their 'start' attribute because it allows us to
# match corresponding entities by their position in the input text.
analyzer_results = sorted(analyzer_results, key=lambda d: d.start)
anonymizer_results.items = sorted(
anonymizer_results.items, key=lambda d: d.start
)
new_deanonymizer_mapping: MappingDataType = defaultdict(dict)
for analyzed_entity, anonymized_entity in zip(
analyzer_results, anonymizer_results.items
):
original_value = original_text[analyzed_entity.start : analyzed_entity.end]
new_deanonymizer_mapping[anonymized_entity.entity_type][
anonymized_entity.text
] = original_value
self._deanonymizer_mapping.update(new_deanonymizer_mapping)
def _anonymize(self, text: str, language: Optional[str] = None) -> str:
"""Anonymize text.
Each PII entity is replaced with a fake value.
Each time fake values will be different, as they are generated randomly.
At the same time, we will create a mapping from each anonymized entity
back to its original text value.
Args:
text: text to anonymize
language: language to use for analysis of PII
If None, the first (main) language in the list
of languages specified in the configuration will be used.
"""
if language is None:
language = self.supported_languages[0]
if language not in self.supported_languages:
raise ValueError(
f"Language '{language}' is not supported. "
f"Supported languages are: {self.supported_languages}. "
"Change your language configuration file to add more languages."
)
analyzer_results = self._analyzer.analyze(
text,
entities=self.analyzed_fields,
language=language,
)
filtered_analyzer_results = (
self._anonymizer._remove_conflicts_and_get_text_manipulation_data(
analyzer_results
)
)
anonymizer_results = self._anonymizer.anonymize(
text,
analyzer_results=analyzer_results,
operators=self.operators,
)
self._update_deanonymizer_mapping(
text, filtered_analyzer_results, anonymizer_results
)
return anonymizer_results.text
def _deanonymize(
self,
text_to_deanonymize: str,
deanonymizer_matching_strategy: Callable[
[str, MappingDataType], str
] = default_matching_strategy,
) -> str:
"""Deanonymize text.
Each anonymized entity is replaced with its original value.
This method exploits the mapping created during the anonymization process.
Args:
text_to_deanonymize: text to deanonymize
deanonymizer_matching_strategy: function to use to match
anonymized entities with their original values and replace them.
"""
if not self._deanonymizer_mapping:
raise ValueError(
"Deanonymizer mapping is empty.",
"Please call anonymize() and anonymize some text first.",
)
text_to_deanonymize = deanonymizer_matching_strategy(
text_to_deanonymize, self.deanonymizer_mapping
)
return text_to_deanonymize
def save_deanonymizer_mapping(self, file_path: Union[Path, str]) -> None:
"""Save the deanonymizer mapping to a JSON or YAML file.
Args:
file_path: Path to file to save the mapping to.
Example:
.. code-block:: python
anonymizer.save_deanonymizer_mapping(file_path="path/mapping.json")
"""
save_path = Path(file_path)
if save_path.suffix not in [".json", ".yaml"]:
raise ValueError(f"{save_path} must have an extension of .json or .yaml")
# Make sure parent directories exist
save_path.parent.mkdir(parents=True, exist_ok=True)
if save_path.suffix == ".json":
with open(save_path, "w") as f:
json.dump(self.deanonymizer_mapping, f, indent=2)
elif save_path.suffix == ".yaml":
with open(save_path, "w") as f:
yaml.dump(self.deanonymizer_mapping, f, default_flow_style=False)
def load_deanonymizer_mapping(self, file_path: Union[Path, str]) -> None:
"""Load the deanonymizer mapping from a JSON or YAML file.
Args:
file_path: Path to file to load the mapping from.
Example:
.. code-block:: python
anonymizer.load_deanonymizer_mapping(file_path="path/mapping.json")
"""
load_path = Path(file_path)
if load_path.suffix not in [".json", ".yaml"]:
raise ValueError(f"{load_path} must have an extension of .json or .yaml")
if load_path.suffix == ".json":
with open(load_path, "r") as f:
loaded_mapping = json.load(f)
elif load_path.suffix == ".yaml":
with open(load_path, "r") as f:
loaded_mapping = yaml.load(f, Loader=yaml.FullLoader)
self._deanonymizer_mapping.update(loaded_mapping)
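Taken together, the new reversible anonymizer supports a full round trip. A minimal sketch of the API added above, assuming presidio-analyzer, presidio-anonymizer, faker, and the spaCy model en_core_web_lg are installed:

```python
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"], faker_seed=42)

masked = anonymizer.anonymize("Hello, my name is John Doe.")
print(masked)                           # name replaced with a seeded fake value
print(anonymizer.deanonymizer_mapping)  # e.g. {"PERSON": {"<fake name>": "John Doe"}}
print(anonymizer.deanonymize(masked))   # original text restored

# The mapping can be persisted and reloaded later:
anonymizer.save_deanonymizer_mapping("mapping.json")
```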

View File

@ -0,0 +1,5 @@
from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer
__all__ = [
"DiffbotGraphTransformer",
]

View File

@ -0,0 +1,316 @@
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import requests
from langchain.graphs.graph_document import GraphDocument, Node, Relationship
from langchain.schema import Document
from langchain.utils import get_from_env
def format_property_key(s: str) -> str:
words = s.split()
if not words:
return s
first_word = words[0].lower()
capitalized_words = [word.capitalize() for word in words[1:]]
return "".join([first_word] + capitalized_words)
class NodesList:
"""
Manages a list of nodes with associated properties.
Attributes:
nodes (Dict[Tuple, Any]): Stores nodes as keys and their properties as values.
Each key is a tuple where the first element is the
node ID and the second is the node type.
"""
def __init__(self) -> None:
self.nodes: Dict[Tuple[Union[str, int], str], Any] = dict()
def add_node_property(
self, node: Tuple[Union[str, int], str], properties: Dict[str, Any]
) -> None:
"""
Adds or updates node properties.
If the node does not exist in the list, it's added along with its properties.
If the node already exists, its properties are updated with the new values.
Args:
node (Tuple): A tuple containing the node ID and node type.
properties (Dict): A dictionary of properties to add or update for the node.
"""
if node not in self.nodes:
self.nodes[node] = properties
else:
self.nodes[node].update(properties)
def return_node_list(self) -> List[Node]:
"""
Returns the nodes as a list of Node objects.
Each Node object will have its ID, type, and properties populated.
Returns:
List[Node]: A list of Node objects.
"""
nodes = [
Node(id=key[0], type=key[1], properties=self.nodes[key])
for key in self.nodes
]
return nodes
# Properties that should be treated as node properties instead of relationships
FACT_TO_PROPERTY_TYPE = [
"Date",
"Number",
"Job title",
"Cause of death",
"Organization type",
"Academic title",
]
schema_mapping = [
("HEADQUARTERS", "ORGANIZATION_LOCATIONS"),
("RESIDENCE", "PERSON_LOCATION"),
("ALL_PERSON_LOCATIONS", "PERSON_LOCATION"),
("CHILD", "HAS_CHILD"),
("PARENT", "HAS_PARENT"),
("CUSTOMERS", "HAS_CUSTOMER"),
("SKILLED_AT", "INTERESTED_IN"),
]
class SimplifiedSchema:
"""
Provides functionality for working with a simplified schema mapping.
Attributes:
schema (Dict): A dictionary containing the mapping to simplified schema types.
"""
def __init__(self) -> None:
"""Initializes the schema dictionary based on the predefined list."""
self.schema = dict()
for row in schema_mapping:
self.schema[row[0]] = row[1]
def get_type(self, type: str) -> str:
"""
Retrieves the simplified schema type for a given original type.
Args:
type (str): The original schema type to find the simplified type for.
Returns:
str: The simplified schema type if it exists;
otherwise, returns the original type.
"""
try:
return self.schema[type]
except KeyError:
return type
class DiffbotGraphTransformer:
"""Transforms documents into graph documents using Diffbot's NLP API.
A graph document transformation system takes a sequence of Documents and returns a
sequence of Graph Documents.
Example:
.. code-block:: python
class DiffbotGraphTransformer(BaseGraphDocumentTransformer):
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[GraphDocument]:
results = []
for document in documents:
raw_results = self.nlp_request(document.page_content)
graph_document = self.process_response(raw_results, document)
results.append(graph_document)
return results
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
"""
def __init__(
self,
diffbot_api_key: Optional[str] = None,
fact_confidence_threshold: float = 0.7,
include_qualifiers: bool = True,
include_evidence: bool = True,
simplified_schema: bool = True,
) -> None:
"""
Initialize the graph transformer with various options.
Args:
diffbot_api_key (str):
The API key for Diffbot's NLP services.
fact_confidence_threshold (float):
Minimum confidence level for facts to be included.
include_qualifiers (bool):
Whether to include qualifiers in the relationships.
include_evidence (bool):
Whether to include evidence for the relationships.
simplified_schema (bool):
Whether to use a simplified schema for relationships.
"""
self.diffbot_api_key = diffbot_api_key or get_from_env(
"diffbot_api_key", "DIFFBOT_API_KEY"
)
self.fact_threshold_confidence = fact_confidence_threshold
self.include_qualifiers = include_qualifiers
self.include_evidence = include_evidence
self.simplified_schema = None
if simplified_schema:
self.simplified_schema = SimplifiedSchema()
def nlp_request(self, text: str) -> Dict[str, Any]:
"""
Make an API request to the Diffbot NLP endpoint.
Args:
text (str): The text to be processed.
Returns:
Dict[str, Any]: The JSON response from the API.
"""
# Relationship extraction only works for English
payload = {
"content": text,
"lang": "en",
}
FIELDS = "facts"
HOST = "nl.diffbot.com"
url = (
f"https://{HOST}/v1/?fields={FIELDS}&"
f"token={self.diffbot_api_key}&language=en"
)
result = requests.post(url, data=payload)
return result.json()
def process_response(
self, payload: Dict[str, Any], document: Document
) -> GraphDocument:
"""
Transform the Diffbot NLP response into a GraphDocument.
Args:
payload (Dict[str, Any]): The JSON response from Diffbot's NLP API.
document (Document): The original document.
Returns:
GraphDocument: The transformed document as a graph.
"""
# Return empty result if there are no facts
if "facts" not in payload or not payload["facts"]:
return GraphDocument(nodes=[], relationships=[], source=document)
# Nodes are a custom class because we need to deduplicate
nodes_list = NodesList()
# Relationships are a list because we don't deduplicate nor anything else
relationships = list()
for record in payload["facts"]:
# Skip if the fact is below the threshold confidence
if record["confidence"] < self.fact_threshold_confidence:
continue
# TODO: It should probably be treated as a node property
if not record["value"]["allTypes"]:
continue
# Define source node
source_id = (
record["entity"]["allUris"][0]
if record["entity"]["allUris"]
else record["entity"]["name"]
)
source_label = record["entity"]["allTypes"][0]["name"].capitalize()
source_name = record["entity"]["name"]
source_node = Node(id=source_id, type=source_label)
nodes_list.add_node_property(
(source_id, source_label), {"name": source_name}
)
# Define target node
target_id = (
record["value"]["allUris"][0]
if record["value"]["allUris"]
else record["value"]["name"]
)
target_label = record["value"]["allTypes"][0]["name"].capitalize()
target_name = record["value"]["name"]
# Some facts are better suited as node properties
if target_label in FACT_TO_PROPERTY_TYPE:
nodes_list.add_node_property(
(source_id, source_label),
{format_property_key(record["property"]["name"]): target_name},
)
else: # Define relationship
# Define target node object
target_node = Node(id=target_id, type=target_label)
nodes_list.add_node_property(
(target_id, target_label), {"name": target_name}
)
# Define relationship type
rel_type = record["property"]["name"].replace(" ", "_").upper()
if self.simplified_schema:
rel_type = self.simplified_schema.get_type(rel_type)
# Relationship qualifiers/properties
rel_properties = dict()
relationship_evidence = [el["passage"] for el in record["evidence"]][0]
if self.include_evidence:
rel_properties.update({"evidence": relationship_evidence})
if self.include_qualifiers and record.get("qualifiers"):
for property in record["qualifiers"]:
prop_key = format_property_key(property["property"]["name"])
rel_properties[prop_key] = property["value"]["name"]
relationship = Relationship(
source=source_node,
target=target_node,
type=rel_type,
properties=rel_properties,
)
relationships.append(relationship)
return GraphDocument(
nodes=nodes_list.return_node_list(),
relationships=relationships,
source=document,
)
def convert_to_graph_documents(
self, documents: Sequence[Document]
) -> List[GraphDocument]:
"""Convert a sequence of documents into graph documents.
Args:
documents (Sequence[Document]): The original documents.
Returns:
List[GraphDocument]: The transformed documents as graphs.
"""
results = []
for document in documents:
raw_results = self.nlp_request(document.page_content)
graph_document = self.process_response(raw_results, document)
results.append(graph_document)
return results
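A hedged usage sketch for the new transformer; it requires a real Diffbot token and network access, and the document text is illustrative:

```python
from langchain.schema import Document
from langchain_experimental.graph_transformers import DiffbotGraphTransformer

transformer = DiffbotGraphTransformer(diffbot_api_key="<your-token>")
docs = [Document(page_content="Marie Curie was born in Warsaw.")]
graph_docs = transformer.convert_to_graph_documents(docs)
print(graph_docs[0].nodes, graph_docs[0].relationships)
```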

View File

@ -0,0 +1,38 @@
"""Vector SQL Database Chain Retriever"""
from typing import Any, Dict, List
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document
from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain
class VectorSQLDatabaseChainRetriever(BaseRetriever):
"""Retriever that uses SQLDatabase as Retriever"""
sql_db_chain: VectorSQLDatabaseChain
"""SQL Database Chain"""
page_content_key: str = "content"
"""column name for page content of documents"""
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
ret: List[Dict[str, Any]] = self.sql_db_chain(
query, callbacks=run_manager.get_child(), **kwargs
)["result"]
return [
Document(page_content=r[self.page_content_key], metadata=r) for r in ret
]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
raise NotImplementedError
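For intuition, the core of `_get_relevant_documents` is a plain row-to-Document mapping; an illustration (not part of the commit) with hand-made rows:

```python
from langchain.schema import Document

# Each row dict from the chain's return_direct result becomes one Document:
# the configured column is the page_content, the full row stays as metadata.
rows = [{"content": "PageRank is a link-analysis algorithm...", "title": "The PageRank paper"}]
docs = [Document(page_content=r["content"], metadata=r) for r in rows]
print(docs[0].page_content, "|", docs[0].metadata["title"])
```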

View File

@ -0,0 +1,85 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
PROMPT_SUFFIX = """Only use the following tables:
{table_info}
Question: {input}"""
_VECTOR_SQL_DEFAULT_TEMPLATE = """You are a {dialect} expert. Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer to the input question.
{dialect} queries have a vector distance function called `DISTANCE(column, array)` to compute relevance to the user's question and sort the feature array column by that relevance.
When the query is asking for the {top_k} closest rows, you have to use this distance function to calculate the distance to the entity's array on the vector column and order by that distance to retrieve the relevant rows.
*NOTICE*: `DISTANCE(column, array)` only accepts an array column as its first argument and a `NeuralArray(entity)` as its second argument. You also need a user-defined function called `NeuralArray(entity)` to retrieve the entity's array.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per {dialect}. You should only order according to the distance function.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Use the today() function to get the current date if the question involves "today". The `ORDER BY` clause should always come after the `WHERE` clause. DO NOT add a semicolon to the end of the SQL. Pay attention to the comments in the table schema.
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
"""
VECTOR_SQL_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "dialect", "top_k"],
template=_VECTOR_SQL_DEFAULT_TEMPLATE + PROMPT_SUFFIX,
)
_myscale_prompt = """You are a MyScale expert. Given an input question, first create a syntactically correct MyScale query to run, then look at the results of the query and return the answer to the input question.
MyScale queries have a vector distance function called `DISTANCE(column, array)` to compute relevance to the user's question and sort the feature array column by that relevance.
When the query is asking for the {top_k} closest rows, you have to use this distance function to calculate the distance to the entity's array on the vector column and order by that distance to retrieve the relevant rows.
*NOTICE*: `DISTANCE(column, array)` only accepts an array column as its first argument and a `NeuralArray(entity)` as its second argument. You also need a user-defined function called `NeuralArray(entity)` to retrieve the entity's array.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per MyScale. You should only order according to the distance function.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Use the today() function to get the current date if the question involves "today". The `ORDER BY` clause should always come after the `WHERE` clause. DO NOT add a semicolon to the end of the SQL. Pay attention to the comments in the table schema.
Use the following format:
======== table info ========
<some table infos>
Question: "Question here"
SQLQuery: "SQL Query to run"
Here are some examples:
======== table info ========
CREATE TABLE "ChatPaper" (
abstract String,
id String,
vector Array(Float32),
) ENGINE = ReplicatedReplacingMergeTree()
ORDER BY id
PRIMARY KEY id
Question: What is Feature Pyramid Network?
SQLQuery: SELECT ChatPaper.abstract, ChatPaper.id FROM ChatPaper ORDER BY DISTANCE(vector, NeuralArray(Feature Pyramid Network)) LIMIT {top_k}
Let's begin:
======== table info ========
{table_info}
Question: {input}
SQLQuery: """
MYSCALE_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_myscale_prompt + PROMPT_SUFFIX,
)
VECTOR_SQL_PROMPTS = {
"myscale": MYSCALE_PROMPT,
}
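To inspect exactly what the model will see, the template can be rendered with toy values (a sketch, not part of the commit):

```python
from langchain_experimental.sql.prompt import MYSCALE_PROMPT

print(
    MYSCALE_PROMPT.format(
        input="What is PageRank?",
        table_info='CREATE TABLE "ChatPaper" (abstract String, id String, vector Array(Float32)) ...',
        top_k=10,
    )
)
```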

View File

@ -0,0 +1,237 @@
"""Vector SQL Database Chain Retriever"""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS
from langchain.embeddings.base import Embeddings
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser, BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain.utilities.sql_database import SQLDatabase
from langchain_experimental.sql.base import INTERMEDIATE_STEPS_KEY, SQLDatabaseChain
class VectorSQLOutputParser(BaseOutputParser[str]):
"""Output Parser for Vector SQL
1. finds for `NeuralArray()` and replace it with the embedding
2. finds for `DISTANCE()` and replace it with the distance name in backend SQL
"""
model: Embeddings
"""Embedding model to extract embedding for entity"""
distance_func_name: str = "distance"
"""Distance name for Vector SQL"""
class Config:
arbitrary_types_allowed = True
@property
def _type(self) -> str:
return "vector_sql_parser"
@classmethod
def from_embeddings(
cls, model: Embeddings, distance_func_name: str = "distance", **kwargs: Any
) -> BaseOutputParser:
return cls(model=model, distance_func_name=distance_func_name, **kwargs)
def parse(self, text: str) -> str:
text = text.strip()
start = text.find("NeuralArray(")
_sql_str_compl = text
if start >= 0:  # find() returns -1 when NeuralArray() is absent
_matched = text[text.find("NeuralArray(") + len("NeuralArray(") :]
end = _matched.find(")") + start + len("NeuralArray(") + 1
entity = _matched[: _matched.find(")")]
vecs = self.model.embed_query(entity)
vecs_str = "[" + ",".join(map(str, vecs)) + "]"
_sql_str_compl = text.replace("DISTANCE", self.distance_func_name).replace(
text[start:end], vecs_str
)
if _sql_str_compl[-1] == ";":
_sql_str_compl = _sql_str_compl[:-1]
return _sql_str_compl
class VectorSQLRetrieveAllOutputParser(VectorSQLOutputParser):
"""Based on VectorSQLOutputParser
It also modify the SQL to get all columns
"""
@property
def _type(self) -> str:
return "vector_sql_retrieve_all_parser"
def parse(self, text: str) -> str:
text = text.strip()
start = text.upper().find("SELECT")
if start >= 0:
end = text.upper().find("FROM")
text = text.replace(text[start + len("SELECT") + 1 : end - 1], "*")
return super().parse(text)
def _try_eval(x: Any) -> Any:
try:
return eval(x)
except Exception:
return x
def get_result_from_sqldb(
db: SQLDatabase, cmd: str
) -> Union[str, List[Dict[str, Any]], Dict[str, Any]]:
result = db._execute(cmd, fetch="all") # type: ignore
if isinstance(result, list):
return [{k: _try_eval(v) for k, v in dict(d._asdict()).items()} for d in result]
else:
return {
k: _try_eval(v) for k, v in dict(result._asdict()).items() # type: ignore
}
class VectorSQLDatabaseChain(SQLDatabaseChain):
"""Chain for interacting with Vector SQL Database.
Example:
.. code-block:: python
from langchain_experimental.sql import SQLDatabaseChain
from langchain import OpenAI, SQLDatabase, OpenAIEmbeddings
db = SQLDatabase(...)
db_chain = VectorSQLDatabaseChain.from_llm(OpenAI(), db, OpenAIEmbeddings())
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include the permissions this chain needs.
Failure to do so may result in data corruption or loss, since this chain may
attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this chain.
This issue shows an example negative outcome if these steps are not taken:
https://github.com/langchain-ai/langchain/issues/5923
"""
sql_cmd_parser: VectorSQLOutputParser
"""Parser for Vector SQL"""
native_format: bool = False
"""If return_direct, controls whether to return in python native format"""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
input_text = f"{inputs[self.input_key]}\nSQLQuery:"
_run_manager.on_text(input_text, verbose=self.verbose)
# If not present, then defaults to None which is all tables.
table_names_to_use = inputs.get("table_names_to_use")
table_info = self.database.get_table_info(table_names=table_names_to_use)
llm_inputs = {
"input": input_text,
"top_k": str(self.top_k),
"dialect": self.database.dialect,
"table_info": table_info,
"stop": ["\nSQLResult:"],
}
intermediate_steps: List = []
try:
intermediate_steps.append(llm_inputs) # input: sql generation
llm_out = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
)
sql_cmd = self.sql_cmd_parser.parse(llm_out)
if self.return_sql:
return {self.output_key: sql_cmd}
if not self.use_query_checker:
_run_manager.on_text(llm_out, color="green", verbose=self.verbose)
intermediate_steps.append(
llm_out
) # output: sql generation (no checker)
intermediate_steps.append({"sql_cmd": llm_out}) # input: sql exec
result = get_result_from_sqldb(self.database, sql_cmd)
intermediate_steps.append(str(result)) # output: sql exec
else:
query_checker_prompt = self.query_checker_prompt or PromptTemplate(
template=QUERY_CHECKER, input_variables=["query", "dialect"]
)
query_checker_chain = LLMChain(
llm=self.llm_chain.llm,
prompt=query_checker_prompt,
output_parser=self.llm_chain.output_parser,
)
query_checker_inputs = {
"query": llm_out,
"dialect": self.database.dialect,
}
checked_llm_out = query_checker_chain.predict(
callbacks=_run_manager.get_child(), **query_checker_inputs
)
checked_sql_command = self.sql_cmd_parser.parse(checked_llm_out)
intermediate_steps.append(
checked_llm_out
) # output: sql generation (checker)
_run_manager.on_text(
checked_llm_out, color="green", verbose=self.verbose
)
intermediate_steps.append(
{"sql_cmd": checked_llm_out}
) # input: sql exec
result = get_result_from_sqldb(self.database, checked_sql_command)
intermediate_steps.append(str(result)) # output: sql exec
llm_out = checked_llm_out
sql_cmd = checked_sql_command
_run_manager.on_text("\nSQLResult: ", verbose=self.verbose)
_run_manager.on_text(str(result), color="yellow", verbose=self.verbose)
# If return direct, we just set the final result equal to
# the result of the sql query result, otherwise try to get a human readable
# final answer
if self.return_direct:
final_result = result
else:
_run_manager.on_text("\nAnswer:", verbose=self.verbose)
input_text += f"{llm_out}\nSQLResult: {result}\nAnswer:"
llm_inputs["input"] = input_text
intermediate_steps.append(llm_inputs) # input: final answer
final_result = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
intermediate_steps.append(final_result) # output: final answer
_run_manager.on_text(final_result, color="green", verbose=self.verbose)
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
except Exception as exc:
# Append intermediate steps to exception, to aid in logging and later
# improvement of few shot prompt seeds
exc.intermediate_steps = intermediate_steps # type: ignore
raise exc
@property
def _chain_type(self) -> str:
return "vector_sql_database_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
db: SQLDatabase,
prompt: Optional[BasePromptTemplate] = None,
sql_cmd_parser: Optional[VectorSQLOutputParser] = None,
**kwargs: Any,
) -> VectorSQLDatabaseChain:
assert sql_cmd_parser, "`sql_cmd_parser` must be set in VectorSQLDatabaseChain."
prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(
llm_chain=llm_chain, database=db, sql_cmd_parser=sql_cmd_parser, **kwargs
)
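A toy illustration (not part of the commit) of how `VectorSQLOutputParser` rewrites the model's pseudo-SQL, using a fake embedding model so it runs without network access:

```python
from typing import List

from langchain.embeddings.base import Embeddings
from langchain_experimental.sql.vector_sql import VectorSQLOutputParser


class FakeEmbeddings(Embeddings):
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [[0.1, 0.2, 0.3] for _ in texts]

    def embed_query(self, text: str) -> List[float]:
        return [0.1, 0.2, 0.3]


parser = VectorSQLOutputParser.from_embeddings(model=FakeEmbeddings())
sql = 'SELECT "title" FROM ChatPaper ORDER BY DISTANCE(vector, NeuralArray(PageRank)) LIMIT 10;'
print(parser.parse(sql))
# -> SELECT "title" FROM ChatPaper ORDER BY distance(vector, [0.1,0.2,0.3]) LIMIT 10
```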

View File

@ -1245,6 +1245,7 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [ files = [
{file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"},
{file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"},
] ]
[[package]] [[package]]
@ -3752,6 +3753,31 @@ files = [
{file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"}, {file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"},
] ]
[[package]]
name = "types-requests"
version = "2.31.0.2"
description = "Typing stubs for requests"
optional = false
python-versions = "*"
files = [
{file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"},
{file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"},
]
[package.dependencies]
types-urllib3 = "*"
[[package]]
name = "types-urllib3"
version = "1.26.25.14"
description = "Typing stubs for urllib3"
optional = false
python-versions = "*"
files = [
{file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"},
{file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"},
]
[[package]] [[package]]
name = "typing-extensions" name = "typing-extensions"
version = "4.7.1" version = "4.7.1"
@ -3995,4 +4021,4 @@ extended-testing = ["faker", "presidio-analyzer", "presidio-anonymizer"]
[metadata] [metadata]
lock-version = "2.0" lock-version = "2.0"
python-versions = ">=3.8.1,<4.0" python-versions = ">=3.8.1,<4.0"
content-hash = "66ac482bd05eb74414210ac28fc1e8dae1a9928a4a1314e1326fada3551aa8ad" content-hash = "443e88f690572715cf58671e4480a006574c7141a1258dff0a0818b954184901"

View File

@ -1,6 +1,6 @@
[tool.poetry] [tool.poetry]
name = "langchain-experimental" name = "langchain-experimental"
version = "0.0.14" version = "0.0.16"
description = "Building applications with LLMs through composability" description = "Building applications with LLMs through composability"
authors = [] authors = []
license = "MIT" license = "MIT"
@ -23,6 +23,7 @@ black = "^23.1.0"
[tool.poetry.group.typing.dependencies] [tool.poetry.group.typing.dependencies]
mypy = "^0.991" mypy = "^0.991"
types-pyyaml = "^6.0.12.2" types-pyyaml = "^6.0.12.2"
types-requests = "^2.28.11.5"
[tool.poetry.group.dev.dependencies] [tool.poetry.group.dev.dependencies]
jupyter = "^1.0.0" jupyter = "^1.0.0"

View File

@ -0,0 +1,154 @@
import os
from typing import Iterator, List
import pytest
@pytest.fixture(scope="module", autouse=True)
def check_spacy_model() -> Iterator[None]:
import spacy
if not spacy.util.is_package("en_core_web_lg"):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)],
)
def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text)
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_multiple() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "John Smith's phone number is 313-666-7440 and email is johnsmith@gmail.com"
anonymizer = PresidioReversibleAnonymizer()
anonymized_text = anonymizer.anonymize(text)
for phrase in ["John Smith", "313-666-7440", "johnsmith@gmail.com"]:
assert phrase not in anonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_with_custom_operator() -> None:
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "<name>"})}
anonymizer = PresidioReversibleAnonymizer(operators=custom_operator)
text = "Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "<name> was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_add_recognizer_operator() -> None:
"""
Test add recognizer and anonymize a new type of entity and with a custom operator
"""
    from presidio_analyzer import PatternRecognizer
    from presidio_anonymizer.entities import OperatorConfig

    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

    anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])

    titles_list = ["Sir", "Madam", "Professor"]
    custom_recognizer = PatternRecognizer(
        supported_entity="TITLE", deny_list=titles_list
    )
    anonymizer.add_recognizer(custom_recognizer)

    # anonymizing with custom recognizer
    text = "Madam Jane Doe was here."
    anonymized_text = anonymizer.anonymize(text)
    assert anonymized_text == "<TITLE> Jane Doe was here."

    # anonymizing with custom recognizer and operator
    custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
    anonymizer.add_operators(custom_operator)

    anonymized_text = anonymizer.anonymize(text)
    assert anonymized_text == "Dear Jane Doe was here."


@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymizer_mapping() -> None:
    """Test if deanonymizer mapping is correctly populated."""
    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

    anonymizer = PresidioReversibleAnonymizer(
        analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
    )

    anonymizer.anonymize("Hello, my name is John Doe and my number is 444 555 6666.")

    # ["PERSON", "PHONE_NUMBER"]
    assert len(anonymizer.deanonymizer_mapping.keys()) == 2
    assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
    assert (
        "444 555 6666"
        in anonymizer.deanonymizer_mapping.get("PHONE_NUMBER", {}).values()
    )

    text_to_anonymize = (
        "And my name is Jane Doe, my email is jane@gmail.com and "
        "my credit card is 4929 5319 6292 5362."
    )
    anonymizer.anonymize(text_to_anonymize)

    # ["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
    assert len(anonymizer.deanonymizer_mapping.keys()) == 4
    assert "Jane Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
    assert (
        "jane@gmail.com"
        in anonymizer.deanonymizer_mapping.get("EMAIL_ADDRESS", {}).values()
    )
    assert (
        "4929 5319 6292 5362"
        in anonymizer.deanonymizer_mapping.get("CREDIT_CARD", {}).values()
    )


@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymize() -> None:
    """Test deanonymizing a name in a simple sentence."""
    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

    text = "Hello, my name is John Doe."
    anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
    anonymized_text = anonymizer.anonymize(text)
    deanonymized_text = anonymizer.deanonymize(anonymized_text)
    assert deanonymized_text == text


@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_save_load_deanonymizer_mapping() -> None:
    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

    anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
    anonymizer.anonymize("Hello, my name is John Doe.")

    try:
        anonymizer.save_deanonymizer_mapping("test_file.json")
        assert os.path.isfile("test_file.json")

        anonymizer = PresidioReversibleAnonymizer()
        anonymizer.load_deanonymizer_mapping("test_file.json")

        assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
    finally:
        os.remove("test_file.json")
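
Taken together, these tests exercise the reversible anonymization round trip end to end. A minimal usage sketch distilled from them (assuming the presidio_analyzer, presidio_anonymizer, and faker extras are installed; the exact replacement string is generated by faker and will vary):

    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

    anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
    anonymized = anonymizer.anonymize("Hello, my name is John Doe.")
    # anonymized now contains a faker-generated stand-in for "John Doe";
    # the original value is kept in anonymizer.deanonymizer_mapping.
    assert anonymizer.deanonymize(anonymized) == "Hello, my name is John Doe."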

View File

@@ -2,29 +2,20 @@
 from __future__ import annotations

 import logging
-from concurrent.futures import Future, ThreadPoolExecutor, wait
+from concurrent.futures import Future, ThreadPoolExecutor
 from typing import Any, Dict, List, Optional, Sequence, Set, Union
 from uuid import UUID

 import langsmith
 from langsmith import schemas as langsmith_schemas

-from langchain.callbacks.manager import tracing_v2_enabled
+from langchain.callbacks import manager
+from langchain.callbacks.tracers import langchain as langchain_tracer
 from langchain.callbacks.tracers.base import BaseTracer
-from langchain.callbacks.tracers.langchain import _get_client
 from langchain.callbacks.tracers.schemas import Run

 logger = logging.getLogger(__name__)

-_TRACERS: List[EvaluatorCallbackHandler] = []
-
-
-def wait_for_all_evaluators() -> None:
-    """Wait for all tracers to finish."""
-    global _TRACERS
-    for tracer in _TRACERS:
-        tracer.wait_for_futures()
-
-
 class EvaluatorCallbackHandler(BaseTracer):
     """A tracer that runs a run evaluator whenever a run is persisted.

@@ -79,17 +70,13 @@ class EvaluatorCallbackHandler(BaseTracer):
         self.example_id = (
             UUID(example_id) if isinstance(example_id, str) else example_id
         )
-        self.client = client or _get_client()
+        self.client = client or langchain_tracer.get_client()
         self.evaluators = evaluators
-        self.executor = ThreadPoolExecutor(
-            max_workers=max(max_workers or len(evaluators), 1)
-        )
+        self.max_workers = max_workers or len(evaluators)
         self.futures: Set[Future] = set()
         self.skip_unfinished = skip_unfinished
         self.project_name = project_name
         self.logged_feedback: Dict[str, List[langsmith_schemas.Feedback]] = {}
-        global _TRACERS
-        _TRACERS.append(self)

     def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> None:
         """Evaluate the run in the project.

@@ -105,7 +92,7 @@ class EvaluatorCallbackHandler(BaseTracer):
         try:
             if self.project_name is None:
                 feedback = self.client.evaluate_run(run, evaluator)
-            with tracing_v2_enabled(
+            with manager.tracing_v2_enabled(
                 project_name=self.project_name, tags=["eval"], client=self.client
             ):
                 feedback = self.client.evaluate_run(run, evaluator)

@@ -133,14 +120,15 @@ class EvaluatorCallbackHandler(BaseTracer):
             return
         run_ = run.copy()
         run_.reference_example_id = self.example_id
-        for evaluator in self.evaluators:
-            self.futures.add(
-                self.executor.submit(self._evaluate_in_project, run_, evaluator)
-            )
-
-    def wait_for_futures(self) -> None:
-        """Wait for all futures to complete."""
-        futures = list(self.futures)
-        wait(futures)
-        for future in futures:
-            self.futures.remove(future)
+        if self.max_workers > 0:
+            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+                list(
+                    executor.map(
+                        self._evaluate_in_project,
+                        [run_ for _ in range(len(self.evaluators))],
+                        self.evaluators,
+                    )
+                )
+        else:
+            for evaluator in self.evaluators:
+                self._evaluate_in_project(run_, evaluator)
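
The last hunk trades the handler-lifetime ThreadPoolExecutor (plus the module-level _TRACERS registry and the wait_for_all_evaluators/wait_for_futures machinery) for a pool scoped to each persisted run, with a synchronous fallback when max_workers is 0. A sketch of the same fan-out pattern in isolation; fan_out and evaluate are hypothetical names standing in for the handler's _persist_run and _evaluate_in_project:

    from concurrent.futures import ThreadPoolExecutor
    from typing import Any, Callable, Sequence

    def fan_out(
        run: Any,
        evaluators: Sequence[Any],
        evaluate: Callable[[Any, Any], None],
        max_workers: int,
    ) -> None:
        if max_workers > 0:
            # The with-block joins all workers before returning, and list()
            # drains executor.map so worker exceptions are re-raised here.
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                list(executor.map(evaluate, [run] * len(evaluators), evaluators))
        else:
            # max_workers == 0 runs the evaluators inline, one at a time.
            for evaluator in evaluators:
                evaluate(run, evaluator)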

View File

@@ -42,7 +42,7 @@ def wait_for_all_tracers() -> None:
         tracer.wait_for_futures()


-def _get_client() -> Client:
+def get_client() -> Client:
     """Get the client."""
     global _CLIENT
     if _CLIENT is None:

@@ -83,7 +83,7 @@ class LangChainTracer(BaseTracer):
             _EXECUTORS.append(self.executor)
         else:
             self.executor = None
-        self.client = client or _get_client()
+        self.client = client or get_client()
         self._futures: Set[Future] = set()
         self.tags = tags or []
         global _TRACERS
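
Dropping the leading underscore makes the cached-client accessor part of the module's public surface, which is what lets evaluation.py above call it through the module alias instead of importing a private helper. The call-site pattern, for reference:

    from langchain.callbacks.tracers import langchain as langchain_tracer

    # Returns the lazily created, module-cached LangSmith Client.
    client = langchain_tracer.get_client()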

View File

@@ -1,6 +1,19 @@
-"""Load chat messages from common communications platforms for finetuning.
-
-This module provides functions to load chat messages from various
-communications platforms such as Facebook Messenger, Telegram, and
-WhatsApp. The loaded chat messages can be used for finetuning models.
-"""
+"""**Chat Loaders** load chat messages from common communications platforms.
+
+Load chat messages from various
+communications platforms such as Facebook Messenger, Telegram, and
+WhatsApp. The loaded chat messages can be used for fine-tuning models.
+
+**Class hierarchy:**
+
+.. code-block::
+
+    BaseChatLoader --> <name>ChatLoader  # Examples: WhatsAppChatLoader, IMessageChatLoader
+
+**Main helpers:**
+
+.. code-block::
+
+    ChatSession
+"""  # noqa: E501

View File

@@ -1,10 +1,3 @@
-"""Base definitions for chat loaders.
-
-A chat loader is a class that loads chat messages from an external
-source such as a file or a database. The chat messages can then be
-used for finetuning.
-"""
 from abc import ABC, abstractmethod
 from typing import Iterator, List, Sequence, TypedDict

@@ -12,7 +5,7 @@ from langchain.schema.messages import BaseMessage
 class ChatSession(TypedDict):
-    """A chat session represents a single
+    """Chat Session represents a single
     conversation, channel, or other group of messages."""

     messages: Sequence[BaseMessage]
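
Because ChatSession is a TypedDict rather than a class, a session is just a dict with a messages sequence. A small illustrative sketch (HumanMessage and AIMessage are assumed to live alongside BaseMessage in langchain.schema.messages):

    from langchain.chat_loaders.base import ChatSession
    from langchain.schema.messages import AIMessage, HumanMessage

    session: ChatSession = {
        "messages": [
            HumanMessage(content="Hi there!"),
            AIMessage(content="Hello! How can I help?"),
        ]
    }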

View File

@@ -10,7 +10,7 @@ logger = logging.getLogger(__file__)
 class SingleFileFacebookMessengerChatLoader(BaseChatLoader):
-    """A chat loader for loading Facebook Messenger chat data from a single file.
+    """Load `Facebook Messenger` chat data from a single file.

     Args:
         path (Union[Path, str]): The path to the chat file.

@@ -45,7 +45,7 @@ class SingleFileFacebookMessengerChatLoader(BaseChatLoader):
 class FolderFacebookMessengerChatLoader(BaseChatLoader):
-    """A chat loader for loading Facebook Messenger chat data from a folder.
+    """Load `Facebook Messenger` chat data from a folder.

     Args:
         path (Union[str, Path]): The path to the directory

View File

@@ -62,7 +62,7 @@ def _get_message_data(service: Any, message: Any) -> ChatSession:
 class GMailLoader(BaseChatLoader):
-    """This loader goes over how to load data from GMail.
+    """Load data from `GMail`.

     There are many ways you could want to load data from GMail.
     This loader is currently fairly opinionated in how to do so.

View File

@@ -1,14 +1,3 @@
-"""IMessage Chat Loader.
-
-This class is used to load chat sessions from the iMessage chat.db SQLite file.
-It only works on macOS when you have iMessage enabled and have the chat.db file.
-
-The chat.db file is likely located at ~/Library/Messages/chat.db. However, your
-terminal may not have permission to access this file. To resolve this, you can
-copy the file to a different location, change the permissions of the file, or
-grant full disk access for your terminal emulator in System Settings > Security
-and Privacy > Full Disk Access.
-"""
 from __future__ import annotations

 from pathlib import Path

@@ -22,6 +11,17 @@ if TYPE_CHECKING:
 class IMessageChatLoader(chat_loaders.BaseChatLoader):
+    """Load chat sessions from the `iMessage` chat.db SQLite file.
+
+    It only works on macOS when you have iMessage enabled and have the chat.db file.
+
+    The chat.db file is likely located at ~/Library/Messages/chat.db. However, your
+    terminal may not have permission to access this file. To resolve this, you can
+    copy the file to a different location, change the permissions of the file, or
+    grant full disk access for your terminal emulator in System Settings > Security
+    and Privacy > Full Disk Access.
+    """
+
     def __init__(self, path: Optional[Union[str, Path]] = None):
         """
         Initialize the IMessageChatLoader.
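
The docstring's workaround translates directly to code. A sketch, assuming the module lives at langchain.chat_loaders.imessage and exposes the usual lazy_load; the paths and copy destination are illustrative:

    import shutil
    from pathlib import Path

    from langchain.chat_loaders.imessage import IMessageChatLoader

    # Copy chat.db to a location the terminal can read, per the docstring.
    src = Path.home() / "Library" / "Messages" / "chat.db"
    dst = Path("/tmp/chat.db")  # illustrative destination
    shutil.copy2(src, dst)

    loader = IMessageChatLoader(path=dst)
    sessions = list(loader.lazy_load())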

View File

@@ -12,6 +12,8 @@ logger = logging.getLogger(__name__)
 class SlackChatLoader(chat_loaders.BaseChatLoader):
+    """Load `Slack` conversations from a dump zip file."""
+
     def __init__(
         self,
         path: Union[str, Path],

View File

@@ -1,6 +1,7 @@
 import json
 import logging
 import os
+import tempfile
 import zipfile
 from pathlib import Path
 from typing import Iterator, List, Union

@@ -12,8 +13,7 @@ logger = logging.getLogger(__name__)
 class TelegramChatLoader(chat_loaders.BaseChatLoader):
-    """A loading utility for converting telegram conversations
-    to LangChain chat messages.
+    """Load `telegram` conversations to LangChain chat messages.

     To export, use the Telegram Desktop app from
     https://desktop.telegram.org/, select a conversation, click the three dots

@@ -136,7 +136,8 @@ class TelegramChatLoader(chat_loaders.BaseChatLoader):
         with zipfile.ZipFile(path) as zip_file:
             for file in zip_file.namelist():
                 if file.endswith((".html", ".json")):
-                    yield zip_file.extract(file)
+                    with tempfile.TemporaryDirectory() as temp_dir:
+                        yield zip_file.extract(file, path=temp_dir)

     def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:
         """Lazy load the messages from the chat file and yield them

View File

@@ -12,6 +12,8 @@ logger = logging.getLogger(__name__)
 class WhatsAppChatLoader(chat_loaders.BaseChatLoader):
+    """Load `WhatsApp` conversations from a dump zip file or directory."""
+
     def __init__(self, path: str):
         """Initialize the WhatsAppChatLoader.

View File

@@ -8,7 +8,9 @@ from langchain.docstore.document import Document
 from langchain.document_loaders.base import BaseLoader

 if TYPE_CHECKING:
-    from playwright.async_api import AsyncBrowser, AsyncPage, AsyncResponse
+    from playwright.async_api import Browser as AsyncBrowser
+    from playwright.async_api import Page as AsyncPage
+    from playwright.async_api import Response as AsyncResponse
     from playwright.sync_api import Browser, Page, Response

@@ -155,6 +157,9 @@ class PlaywrightURLLoader(BaseLoader):
                 try:
                     page = browser.new_page()
                     response = page.goto(url)
+                    if response is None:
+                        raise ValueError(f"page.goto() returned None for url {url}")
+
                     text = self.evaluator.evaluate(page, browser, response)
                     metadata = {"source": url}
                     docs.append(Document(page_content=text, metadata=metadata))

@@ -185,6 +190,9 @@ class PlaywrightURLLoader(BaseLoader):
                 try:
                     page = await browser.new_page()
                     response = await page.goto(url)
+                    if response is None:
+                        raise ValueError(f"page.goto() returned None for url {url}")
+
                     text = await self.evaluator.evaluate_async(page, browser, response)
                     metadata = {"source": url}
                     docs.append(Document(page_content=text, metadata=metadata))
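
Both hunks guard the same edge case: Playwright types page.goto() as returning Optional[Response], and it can legitimately return None (for example, on navigation to the same URL with a different hash), which previously let a None response flow into the evaluator. A standalone sketch of the guard using the public sync API; the URL is illustrative:

    from playwright.sync_api import sync_playwright

    url = "https://example.com"  # illustrative URL
    with sync_playwright() as p:
        browser = p.chromium.launch()
        page = browser.new_page()
        response = page.goto(url)
        # goto() is typed Optional[Response]; guard before dereferencing.
        if response is None:
            raise ValueError(f"page.goto() returned None for url {url}")
        print(response.status)
        browser.close()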

Some files were not shown because too many files have changed in this diff.