docs: integration package pip installs (#15762)

More than 300 files - will fail check_diff. Will merge after Vercel
deploy succeeds

There are still occurrences that need changing - will update more later
pull/15773/head
Erick Friis 6 months ago committed by GitHub
parent 1b0db82dbe
commit 7bc100fd43
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -10,6 +10,16 @@
"Example of how to use LCEL to write Python code."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0653c7c7",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-core langchain-experimental langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@ -17,10 +27,10 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import (\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import (\n",
" ChatPromptTemplate,\n",
")\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_experimental.utilities import PythonREPL\n",
"from langchain_openai import ChatOpenAI"
]

@ -12,6 +12,16 @@
"One especially useful technique is to use embeddings to route a query to the most relevant prompt. Here's a very simple example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b793a0aa",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-core langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@ -19,9 +29,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain.utils.math import cosine_similarity\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"\n",

@ -10,6 +10,16 @@
"This shows how to add memory to an arbitrary chain. Right now, you can use the memory classes but need to hook it up manually"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18753dee",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -10,6 +10,16 @@
"This shows how to add in moderation (or other safeguards) around your LLM application."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6acf3505",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 20,

@ -19,6 +19,14 @@
"Runnables can easily be used to string together multiple Chains"
]
},
{
"cell_type": "raw",
"id": "0f316b5c",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 4,

@ -35,6 +35,14 @@
"Note, you can mix and match PromptTemplate/ChatPromptTemplates and LLMs/ChatModels as you like here."
]
},
{
"cell_type": "raw",
"id": "ef79a54b",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -12,6 +12,16 @@
"With LCEL, it's easy to add custom functionality for managing the size of prompts within your chain or agent. Let's look at a simple agent example that can search Wikipedia for information."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1846587d",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai wikipedia"
]
},
{
"cell_type": "code",
"execution_count": 1,
@ -19,8 +29,6 @@
"metadata": {},
"outputs": [],
"source": [
"# !pip install langchain wikipedia\n",
"\n",
"from operator import itemgetter\n",
"\n",
"from langchain.agents import AgentExecutor, load_tools\n",

@ -26,7 +26,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install langchain openai faiss-cpu tiktoken"
"%pip install --upgrade --quiet langchain langchain-openai faiss-cpu tiktoken"
]
},
{

@ -19,6 +19,14 @@
"We can replicate our SQLDatabaseChain with Runnables."
]
},
{
"cell_type": "raw",
"id": "b3121aa8",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -17,7 +17,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install duckduckgo-search"
"%pip install --upgrade --quiet langchain langchain-openai duckduckgo-search"
]
},
{

@ -30,6 +30,14 @@
"The most basic and common use case is chaining a prompt template and a model together. To see how this works, let's create a chain that takes a topic and generates a joke:"
]
},
{
"cell_type": "raw",
"id": "278b0027",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain-core langchain-community langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@ -486,7 +494,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -12,6 +12,16 @@
"Suppose we have a simple prompt + model sequence:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c5dad8b5",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -34,6 +34,16 @@
"With LLMs we can configure things like temperature"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "40ed76a2",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 35,

@ -16,6 +16,16 @@
"Let's take a look at this in action!"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "23b2b564",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 16,

@ -24,6 +24,16 @@
"IMPORTANT: By default, a lot of the LLM wrappers catch errors and retry. You will most likely want to turn those off when working with fallbacks. Otherwise the first wrapper will keep on retrying and not failing."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ebb61b1f",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -24,6 +24,14 @@
"Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single input and unpacks it into multiple arguments."
]
},
{
"cell_type": "raw",
"id": "9a5fe916",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -24,6 +24,15 @@
"## Sync version"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -15,11 +15,11 @@
{
"cell_type": "code",
"execution_count": null,
"id": "8bc5d235",
"id": "d816e954",
"metadata": {},
"outputs": [],
"source": [
"!pip install langchain openai faiss-cpu tiktoken"
"%pip install --upgrade --quiet langchain langchain-openai faiss-cpu tiktoken"
]
},
{
@ -29,8 +29,6 @@
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",

@ -1,7 +1,7 @@
{
"cells": [
{
"cell_type": "markdown",
"cell_type": "raw",
"id": "e2596041-9b76-4e74-836f-e6235086bbf0",
"metadata": {},
"source": [
@ -26,6 +26,16 @@
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2627ffd7",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 3,

@ -41,7 +41,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install -U langchain redis anthropic"
"%pip install --upgrade --quiet langchain redis anthropic"
]
},
{

@ -28,6 +28,16 @@
"See the example below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e169b952",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 11,

@ -50,6 +50,14 @@
"Let's take a look at these methods. To do so, we'll create a super simple PromptTemplate + ChatModel chain."
]
},
{
"cell_type": "raw",
"id": "57768739",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain-core langchain-community langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -35,6 +35,14 @@
"To better understand the value of LCEL, it's helpful to see it in action and think about how we might recreate similar functionality without it. In this walkthrough we'll do just that with our [basic example](/docs/expression_language/get_started#basic_example) from the get started section. We'll take our simple prompt + model chain, which under the hood already defines a lot of functionality, and see what it would take to recreate all of it."
]
},
{
"cell_type": "raw",
"id": "b99b47ec",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain-core langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": null,

@ -73,7 +73,7 @@ For this getting started guide, we will provide two options: using OpenAI (a pop
First we'll need to import the LangChain x OpenAI integration package.
```shell
pip install langchain_openai
pip install langchain-openai
```
Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running:

@ -104,7 +104,7 @@
},
"outputs": [],
"source": [
"# %pip install anthropic\n",
"%pip install --upgrade --quiet anthropic\n",
"# %env ANTHROPIC_API_KEY=YOUR_API_KEY"
]
},

@ -23,6 +23,15 @@
"In this example, you will use gpt-4 to select which output is preferred."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -318,7 +318,7 @@
},
"outputs": [],
"source": [
"# %pip install ChatAnthropic\n",
"%pip install --upgrade --quiet anthropic\n",
"# %env ANTHROPIC_API_KEY=<API_KEY>"
]
},
@ -464,4 +464,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@ -23,7 +23,7 @@
},
"outputs": [],
"source": [
"# %pip install evaluate > /dev/null"
"%pip install --upgrade --quiet evaluate > /dev/null"
]
},
{

@ -18,6 +18,15 @@
"Below is an example demonstrating the usage of `LabeledScoreStringEvalChain` using the default prompt:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 10,

@ -29,7 +29,7 @@
},
"outputs": [],
"source": [
"# %pip install rapidfuzz"
"%pip install --upgrade --quiet rapidfuzz"
]
},
{

@ -14,6 +14,16 @@
"In this example, you will make a simple trajectory evaluator that uses an LLM to determine if any actions were unnecessary."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3c96b340",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@ -140,4 +150,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@ -17,6 +17,16 @@
"For more information, check out the reference docs for the [TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain) for more info."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4d22262",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@ -177,7 +187,7 @@
},
"outputs": [],
"source": [
"# %pip install anthropic\n",
"%pip install --upgrade --quiet anthropic\n",
"# ANTHROPIC_API_KEY=<YOUR ANTHROPIC API KEY>"
]
},
@ -300,4 +310,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@ -26,6 +26,16 @@
"IMPORTANT: By default, a lot of the LLM wrappers catch errors and retry. You will most likely want to turn those off when working with fallbacks. Otherwise the first wrapper will keep on retrying and not failing."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3a449a2e",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -277,7 +277,7 @@
"source": [
"%env CMAKE_ARGS=\"-DLLAMA_METAL=on\"\n",
"%env FORCE_CMAKE=1\n",
"%pip install -U llama-cpp-python --no-cache-dirclear"
"%pip install --upgrade --quiet llama-cpp-python --no-cache-dir"
]
},
{

@ -12,6 +12,16 @@
"LangChain provides the concept of a ModelLaboratory to test out and try different models."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "12ebae56",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 1,

@ -28,15 +28,23 @@
"Below you will find the use case on how to leverage anonymization in LangChain."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai langchain-experimental presidio-analyzer presidio-anonymizer spacy Faker"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# Install necessary packages\n",
"# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker\n",
"# ! python -m spacy download en_core_web_lg"
"# Download model\n",
"!python -m spacy download en_core_web_lg"
]
},
{

@ -41,15 +41,21 @@
"\n"
]
},
{
"cell_type": "raw",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-openai langchain-experimental presidio-analyzer presidio-anonymizer spacy Faker"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# Install necessary packages\n",
"# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker\n",
"# ! python -m spacy download en_core_web_lg"
"# Download model\n",
"!python -m spacy download en_core_web_lg"
]
},
{
@ -239,7 +245,7 @@
"outputs": [],
"source": [
"# Install necessary packages\n",
"# ! pip install fasttext langdetect"
"%pip install --upgrade --quiet fasttext langdetect"
]
},
{

@ -31,15 +31,21 @@
"### Iterative process of upgrading the anonymizer"
]
},
{
"cell_type": "raw",
"metadata": {},
"source": [
"%pip install --upgrade --quiet langchain langchain-experimental langchain-openai presidio-analyzer presidio-anonymizer spacy Faker faiss-cpu tiktoken"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# Install necessary packages\n",
"# !pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker faiss-cpu tiktoken\n",
"# ! python -m spacy download en_core_web_lg"
"# Download model\n",
"! python -m spacy download en_core_web_lg"
]
},
{

@ -56,7 +56,7 @@
"outputs": [],
"source": [
"# Install necessary packages\n",
"# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker\n",
"%pip install --upgrade --quiet langchain langchain-experimental langchain-openai presidio-analyzer presidio-anonymizer spacy Faker\n",
"# ! python -m spacy download en_core_web_lg"
]
},

@ -24,7 +24,7 @@
},
"outputs": [],
"source": [
"%pip install boto3 nltk"
"%pip install --upgrade --quiet boto3 nltk"
]
},
{
@ -37,7 +37,7 @@
},
"outputs": [],
"source": [
"%pip install -U langchain_experimental"
"%pip install --upgrade --quiet langchain-experimental"
]
},
{
@ -50,7 +50,7 @@
},
"outputs": [],
"source": [
"%pip install -U langchain pydantic"
"%pip install --upgrade --quiet langchain pydantic"
]
},
{
@ -527,7 +527,7 @@
},
"outputs": [],
"source": [
"%pip install huggingface_hub"
"%pip install --upgrade --quiet huggingface_hub"
]
},
{

@ -31,7 +31,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install \"optimum[onnxruntime]\""
"%pip install --upgrade --quiet \"optimum[onnxruntime]\" langchain transformers langchain-experimental langchain-openai"
]
},
{

@ -43,8 +43,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install argilla --upgrade\n",
"!pip install openai"
"%pip install --upgrade --quiet langchain langchain-openai argilla"
]
},
{

@ -42,7 +42,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install deepeval --upgrade"
"%pip install --upgrade --quiet langchain langchain-openai deepeval"
]
},
{

@ -36,7 +36,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install context-python --upgrade"
"%pip install --upgrade --quiet langchain langchain-openai context-python"
]
},
{

@ -34,9 +34,9 @@
"outputs": [],
"source": [
"# Install necessary dependencies.\n",
"!pip install -q infinopy\n",
"!pip install -q matplotlib\n",
"!pip install -q tiktoken"
"%pip install --upgrade --quiet infinopy\n",
"%pip install --upgrade --quiet matplotlib\n",
"%pip install --upgrade --quiet tiktoken"
]
},
{

@ -56,7 +56,7 @@
},
"outputs": [],
"source": [
"!pip install -U label-studio label-studio-sdk openai"
"%pip install --upgrade --quiet langchain label-studio label-studio-sdk langchain-openai"
]
},
{

@ -32,7 +32,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install promptlayer --upgrade"
"%pip install --upgrade --quiet promptlayer"
]
},
{

@ -38,9 +38,9 @@
},
"outputs": [],
"source": [
"!pip install sagemaker\n",
"!pip install openai\n",
"!pip install google-search-results"
"%pip install --upgrade --quiet sagemaker\n",
"%pip install --upgrade --quiet langchain-openai\n",
"%pip install --upgrade --quiet google-search-results"
]
},
{

@ -35,7 +35,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install trubrics"
"%pip install --upgrade --quiet trubrics"
]
},
{

@ -129,7 +129,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install langchain-anthropic"
"%pip install --upgrade --quiet langchain-anthropic"
]
},
{

@ -33,7 +33,7 @@
},
"outputs": [],
"source": [
"# !pip install openai"
"%pip install --upgrade --quiet langchain-openai"
]
},
{

@ -35,7 +35,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install boto3"
"%pip install --upgrade --quiet boto3"
]
},
{

@ -34,7 +34,7 @@
},
"outputs": [],
"source": [
"# !pip install openai"
"%pip install --upgrade --quiet langchain-openai"
]
},
{

@ -19,7 +19,7 @@
},
"outputs": [],
"source": [
"# !pip install gigachat"
"%pip install --upgrade --quiet gigachat"
]
},
{

@ -28,7 +28,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -U --quiet langchain-google-genai pillow"
"%pip install --upgrade --quiet langchain-google-genai pillow"
]
},
{

@ -21,7 +21,7 @@
"\n",
"By default, Google Cloud [does not use](https://cloud.google.com/vertex-ai/docs/generative-ai/data-governance#foundation_model_development) customer data to train its foundation models as part of Google Cloud's AI/ML Privacy Commitment. More details about how Google processes data can also be found in [Google's Customer Data Processing Addendum (CDPA)](https://cloud.google.com/terms/data-processing-addendum).\n",
"\n",
"To use `Google Cloud Vertex AI` PaLM you must have the `google-cloud-aiplatform` Python package installed and either:\n",
"To use `Google Cloud Vertex AI` PaLM you must have the `langchain-google-vertexai` Python package installed and either:\n",
"- Have credentials configured for your environment (gcloud, workload identity, etc...)\n",
"- Store the path to a service account JSON file as the GOOGLE_APPLICATION_CREDENTIALS environment variable\n",
"\n",
@ -35,13 +35,24 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"metadata": {
"tags": []
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.3.2\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"!pip install -U google-cloud-aiplatform"
"%pip install --upgrade --quiet langchain-google-vertexai"
]
},
{
@ -50,8 +61,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatVertexAI\n",
"from langchain_core.prompts import ChatPromptTemplate"
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_google_vertexai import ChatVertexAI"
]
},
{

@ -58,7 +58,7 @@
}
],
"source": [
"%pip install GPTRouter"
"%pip install --upgrade --quiet GPTRouter"
]
},
{

@ -33,7 +33,7 @@
}
],
"source": [
"%pip install -q text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2"
"%pip install --upgrade --quiet text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2"
]
},
{

@ -25,7 +25,7 @@
"id": "f5b652cf",
"metadata": {},
"source": [
"!pip install -U llamaapi"
"%pip install --upgrade --quiet llamaapi"
]
},
{

@ -44,7 +44,7 @@
}
],
"source": [
"%pip install -U --quiet langchain-nvidia-ai-endpoints"
"%pip install --upgrade --quiet langchain-nvidia-ai-endpoints"
]
},
{
@ -795,7 +795,7 @@
}
],
"source": [
"%pip install -U --quiet langchain"
"%pip install --upgrade --quiet langchain"
]
},
{

@ -264,7 +264,7 @@
},
"outputs": [],
"source": [
"%pip install pillow"
"%pip install --upgrade --quiet pillow"
]
},
{

@ -36,7 +36,7 @@
"outputs": [],
"source": [
"# Install the package\n",
"!pip install dashscope"
"%pip install --upgrade --quiet dashscope"
]
},
{

@ -32,7 +32,7 @@
"outputs": [],
"source": [
"# Install the package\n",
"!pip install volcengine"
"%pip install --upgrade --quiet volcengine"
]
},
{

@ -29,7 +29,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install yandexcloud"
"%pip install --upgrade --quiet yandexcloud"
]
},
{

@ -32,7 +32,7 @@
"metadata": {},
"outputs": [],
"source": [
"# !pip install zhipuai"
"%pip install --upgrade --quiet zhipuai"
]
},
{

@ -369,7 +369,7 @@
"metadata": {},
"outputs": [],
"source": [
"# %pip install -U openai --quiet"
"%pip install --upgrade --quiet langchain-openai"
]
},
{

@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install --upgrade google-auth google-auth-oauthlib google-auth-httplib2 google-api-python-client"
"%pip install --upgrade --quiet google-auth google-auth-oauthlib google-auth-httplib2 google-api-python-client"
]
},
{

@ -213,7 +213,7 @@
"metadata": {},
"outputs": [],
"source": [
"# %pip install -U openai --quiet"
"%pip install --upgrade --quiet langchain-openai"
]
},
{

@ -30,7 +30,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -U langchain openai"
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{

@ -30,7 +30,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install -U langchain openai"
"%pip install --upgrade --quiet langchain langchain-openai"
]
},
{

@ -41,7 +41,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install airbyte-cdk"
"%pip install --upgrade --quiet airbyte-cdk"
]
},
{
@ -61,7 +61,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install \"source_github@git+https://github.com/airbytehq/airbyte.git@master#subdirectory=airbyte-integrations/connectors/source-github\""
"%pip install --upgrade --quiet \"source_github@git+https://github.com/airbytehq/airbyte.git@master#subdirectory=airbyte-integrations/connectors/source-github\""
]
},
{

@ -47,7 +47,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install airbyte-source-gong"
"%pip install --upgrade --quiet airbyte-source-gong"
]
},
{

@ -47,7 +47,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install airbyte-source-hubspot"
"%pip install --upgrade --quiet airbyte-source-hubspot"
]
},
{

@ -47,7 +47,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install airbyte-source-salesforce"
"%pip install --upgrade --quiet airbyte-source-salesforce"
]
},
{

@ -47,7 +47,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install airbyte-source-shopify"
"%pip install --upgrade --quiet airbyte-source-shopify"
]
},
{

@ -47,7 +47,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install airbyte-source-stripe"
"%pip install --upgrade --quiet airbyte-source-stripe"
]
},
{

@ -47,7 +47,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install airbyte-source-typeform"
"%pip install --upgrade --quiet airbyte-source-typeform"
]
},
{

@ -47,7 +47,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install airbyte-source-zendesk-support"
"%pip install --upgrade --quiet airbyte-source-zendesk-support"
]
},
{

@ -15,7 +15,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install pyairtable"
"%pip install --upgrade --quiet pyairtable"
]
},
{

@ -37,7 +37,7 @@
}
],
"source": [
"!pip install pyodps"
"%pip install --upgrade --quiet pyodps"
]
},
{

@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install boto3 openai tiktoken python-dotenv"
"%pip install --upgrade --quiet boto3 langchain-openai tiktoken python-dotenv"
]
},
{
@ -35,7 +35,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install \"amazon-textract-caller>=0.2.0\""
"%pip install --upgrade --quiet \"amazon-textract-caller>=0.2.0\""
]
},
{

@ -24,7 +24,7 @@
},
"outputs": [],
"source": [
"#!pip install apify-client"
"%pip install --upgrade --quiet apify-client"
]
},
{

@ -37,7 +37,7 @@
},
"outputs": [],
"source": [
"#!pip install arxiv"
"%pip install --upgrade --quiet arxiv"
]
},
{
@ -59,7 +59,7 @@
},
"outputs": [],
"source": [
"#!pip install pymupdf"
"%pip install --upgrade --quiet pymupdf"
]
},
{

@ -35,7 +35,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install assemblyai"
"%pip install --upgrade --quiet assemblyai"
]
},
{

@ -23,7 +23,7 @@
"metadata": {},
"outputs": [],
"source": [
"! pip install -q playwright beautifulsoup4\n",
"%pip install --upgrade --quiet playwright beautifulsoup4\n",
"! playwright install"
]
},

@ -23,7 +23,7 @@
},
"outputs": [],
"source": [
"#!pip install boto3"
"%pip install --upgrade --quiet boto3"
]
},
{

@ -31,7 +31,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install boto3"
"%pip install --upgrade --quiet boto3"
]
},
{

@ -25,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install azureml-fsspec, azure-ai-generative"
"%pip install --upgrade --quiet azureml-fsspec azure-ai-generative"
]
},
{

@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install azure-storage-blob"
"%pip install --upgrade --quiet azure-storage-blob"
]
},
{

@ -21,7 +21,7 @@
},
"outputs": [],
"source": [
"#!pip install azure-storage-blob"
"%pip install --upgrade --quiet azure-storage-blob"
]
},
{

@ -39,7 +39,7 @@
}
],
"source": [
"%pip install langchain langchain-community azure-ai-documentintelligence -q"
"%pip install --upgrade --quiet langchain langchain-community azure-ai-documentintelligence"
]
},
{

@ -32,7 +32,7 @@
},
"outputs": [],
"source": [
"#!pip install bibtexparser pymupdf"
"%pip install --upgrade --quiet bibtexparser pymupdf"
]
},
{

@ -23,7 +23,7 @@
},
"outputs": [],
"source": [
"#!pip install bilibili-api-python"
"%pip install --upgrade --quiet bilibili-api-python"
]
},
{

@ -37,7 +37,7 @@
},
"outputs": [],
"source": [
"#!pip install atlassian-python-api"
"%pip install --upgrade --quiet atlassian-python-api"
]
},
{

@ -24,7 +24,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install couchbase"
"%pip install --upgrade --quiet couchbase"
]
},
{

@ -27,7 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install datadog-api-client"
"%pip install --upgrade --quiet datadog-api-client"
]
},
{

@ -162,7 +162,7 @@
"metadata": {},
"outputs": [],
"source": [
"!poetry run pip install --upgrade openai tiktoken chromadb hnswlib --quiet"
"!poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib"
]
},
{

@ -32,7 +32,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install -U beautifulsoup4 lxml"
"%pip install --upgrade --quiet beautifulsoup4 lxml"
]
},
{
@ -241,7 +241,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -19,7 +19,7 @@
},
"outputs": [],
"source": [
"#!pip install duckdb"
"%pip install --upgrade --quiet duckdb"
]
},
{

@ -27,7 +27,7 @@
},
"outputs": [],
"source": [
"#!pip install unstructured"
"%pip install --upgrade --quiet unstructured"
]
},
{
@ -210,7 +210,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install extract_msg"
"%pip install --upgrade --quiet extract_msg"
]
},
{

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save