Merge branch 'master' into fix_supabase

pull/21762/head
缨缨 committed by GitHub
commit 478a738504

@@ -69,9 +69,9 @@ md-sync:
 generate-references:
 	$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)
-build: install-py-deps generate-files copy-infra render md-sync generate-references
+build: install-py-deps generate-files copy-infra render md-sync
-vercel-build: install-vercel-deps build
+vercel-build: install-vercel-deps build generate-references
 	rm -rf docs
 	mv $(OUTPUT_NEW_DOCS_DIR) docs
 	rm -rf build

@@ -0,0 +1,519 @@
# arXiv
LangChain implements the latest research in the field of Natural Language Processing.
This page contains `arXiv` papers referenced in the LangChain Documentation and API Reference.
## Summary
| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation and API Reference |
|------------------|---------|-------------------|-------------------------|
| `2307.03172v3` [Lost in the Middle: How Language Models Use Long Contexts](http://arxiv.org/abs/2307.03172v3) | Nelson F. Liu, Kevin Lin, John Hewitt, et al. | 2023-07-06 | `Docs:` [docs/modules/data_connection/retrievers/long_context_reorder](https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder)
| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)
| `2305.06983v2` [Active Retrieval Augmented Generation](http://arxiv.org/abs/2305.06983v2) | Zhengbao Jiang, Frank F. Xu, Luyu Gao, et al. | 2023-05-11 | `Docs:` [docs/modules/chains](https://python.langchain.com/docs/modules/chains)
| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)
| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `Docs:` [docs/use_cases/query_analysis/techniques/hyde](https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde), `API:` [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
| `2212.08073v1` [Constitutional AI: Harmlessness from AI Feedback](http://arxiv.org/abs/2212.08073v1) | Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al. | 2022-12-15 | `Docs:` [docs/guides/productionization/evaluation/string/criteria_eval_chain](https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain)
| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core.example_selectors...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)
| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community.embeddings...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `Docs:` [docs/use_cases/sql/quickstart](https://python.langchain.com/docs/use_cases/sql/quickstart), `API:` [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
## Lost in the Middle: How Language Models Use Long Contexts
- **arXiv id:** 2307.03172v3
- **Title:** Lost in the Middle: How Language Models Use Long Contexts
- **Authors:** Nelson F. Liu, Kevin Lin, John Hewitt, et al.
- **Published Date:** 2023-07-06
- **URL:** http://arxiv.org/abs/2307.03172v3
- **LangChain Documentation:** [docs/modules/data_connection/retrievers/long_context_reorder](https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder)
**Abstract:** While recent language models have the ability to take long contexts as input,
relatively little is known about how well they use longer context. We analyze
the performance of language models on two tasks that require identifying
relevant information in their input contexts: multi-document question answering
and key-value retrieval. We find that performance can degrade significantly
when changing the position of relevant information, indicating that current
language models do not robustly make use of information in long input contexts.
In particular, we observe that performance is often highest when relevant
information occurs at the beginning or end of the input context, and
significantly degrades when models must access relevant information in the
middle of long contexts, even for explicitly long-context models. Our analysis
provides a better understanding of how language models use their input context
and provides new evaluation protocols for future long-context language models.
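The linked guide counters this "lost in the middle" effect with the `LongContextReorder` document transformer, which places the most relevant documents at the start and end of the context. A minimal sketch, assuming `docs` is an existing relevance-sorted list of `Document` objects:

```python
from langchain_community.document_transformers import LongContextReorder

# Reorder so the most relevant documents sit at the edges of the context,
# where models use them most reliably; the least relevant land in the middle.
reordering = LongContextReorder()
reordered_docs = reordering.transform_documents(docs)
```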
## Large Language Model Guided Tree-of-Thought
- **arXiv id:** 2305.08291v1
- **Title:** Large Language Model Guided Tree-of-Thought
- **Authors:** Jieyi Long
- **Published Date:** 2023-05-15
- **URL:** http://arxiv.org/abs/2305.08291v1
- **LangChain API Reference:** [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)
**Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel
approach aimed at improving the problem-solving capabilities of auto-regressive
large language models (LLMs). The ToT technique is inspired by the human mind's
approach for solving complex reasoning tasks through trial and error. In this
process, the human mind explores the solution space through a tree-like thought
process, allowing for backtracking when necessary. To implement ToT as a
software system, we augment an LLM with additional modules including a prompter
agent, a checker module, a memory module, and a ToT controller. In order to
solve a given problem, these modules engage in a multi-round conversation with
the LLM. The memory module records the conversation and state history of the
problem solving process, which allows the system to backtrack to the previous
steps of the thought-process and explore other directions from there. To verify
the effectiveness of the proposed technique, we implemented a ToT-based solver
for the Sudoku Puzzle. Experimental results show that the ToT framework can
significantly increase the success rate of Sudoku puzzle solving. Our
implementation of the ToT-based Sudoku solver is available on GitHub:
https://github.com/jieyilong/tree-of-thought-puzzle-solver.
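A minimal sketch of the referenced `langchain_experimental.tot` module, assuming `llm` is an existing chat model; the checker shown here is a hypothetical toy, not a real Sudoku validator:

```python
from typing import Tuple

from langchain_experimental.tot.base import ToTChain
from langchain_experimental.tot.checker import ToTChecker
from langchain_experimental.tot.thought import ThoughtValidity


class SudokuChecker(ToTChecker):
    def evaluate(
        self, problem_description: str, thoughts: Tuple[str, ...] = ()
    ) -> ThoughtValidity:
        # Toy rule: call a thought final once no '*' placeholders remain.
        # A real checker would verify the row/column/box constraints.
        last = thoughts[-1] if thoughts else ""
        if "*" not in last:
            return ThoughtValidity.VALID_FINAL
        return ThoughtValidity.VALID_INTERMEDIATE


tot_chain = ToTChain(llm=llm, checker=SudokuChecker(), k=30, c=5)
tot_chain.run(problem_description="3,*,*,2|1,*,3,*|*,1,*,3|4,*,*,1")
```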
## Active Retrieval Augmented Generation
- **arXiv id:** 2305.06983v2
- **Title:** Active Retrieval Augmented Generation
- **Authors:** Zhengbao Jiang, Frank F. Xu, Luyu Gao, et al.
- **Published Date:** 2023-05-11
- **URL:** http://arxiv.org/abs/2305.06983v2
- **LangChain Documentation:** [docs/modules/chains](https://python.langchain.com/docs/modules/chains)
**Abstract:** Despite the remarkable ability of large language models (LMs) to comprehend
and generate language, they have a tendency to hallucinate and create factually
inaccurate output. Augmenting LMs by retrieving information from external
knowledge resources is one promising solution. Most existing retrieval
augmented LMs employ a retrieve-and-generate setup that only retrieves
information once based on the input. This is limiting, however, in more general
scenarios involving generation of long texts, where continually gathering
information throughout generation is essential. In this work, we provide a
generalized view of active retrieval augmented generation, methods that
actively decide when and what to retrieve across the course of the generation.
We propose Forward-Looking Active REtrieval augmented generation (FLARE), a
generic method which iteratively uses a prediction of the upcoming sentence to
anticipate future content, which is then utilized as a query to retrieve
relevant documents to regenerate the sentence if it contains low-confidence
tokens. We test FLARE along with baselines comprehensively over 4 long-form
knowledge-intensive generation tasks/datasets. FLARE achieves superior or
competitive performance on all tasks, demonstrating the effectiveness of our
method. Code and datasets are available at https://github.com/jzbjyb/FLARE.
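The FLARE idea is exposed in LangChain as `FlareChain`. A minimal sketch, assuming `llm` and `retriever` already exist:

```python
from langchain.chains import FlareChain

flare = FlareChain.from_llm(
    llm,
    retriever=retriever,
    max_generation_len=164,  # tokens to look ahead per generation step
    min_prob=0.3,  # tokens below this probability trigger a retrieval round
)
flare.run("Explain how forward-looking retrieval differs from retrieve-then-generate.")
```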
## HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
- **arXiv id:** 2303.17580v4
- **Title:** HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
- **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.
- **Published Date:** 2023-03-30
- **URL:** http://arxiv.org/abs/2303.17580v4
- **LangChain API Reference:** [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)
**Abstract:** Solving complicated AI tasks with different domains and modalities is a key
step toward artificial general intelligence. While there are numerous AI models
available for various domains and modalities, they cannot handle complicated AI
tasks autonomously. Considering large language models (LLMs) have exhibited
exceptional abilities in language understanding, generation, interaction, and
reasoning, we advocate that LLMs could act as a controller to manage existing
AI models to solve complicated AI tasks, with language serving as a generic
interface to empower this. Based on this philosophy, we present HuggingGPT, an
LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI
models in machine learning communities (e.g., Hugging Face) to solve AI tasks.
Specifically, we use ChatGPT to conduct task planning when receiving a user
request, select models according to their function descriptions available in
Hugging Face, execute each subtask with the selected AI model, and summarize
the response according to the execution results. By leveraging the strong
language capability of ChatGPT and abundant AI models in Hugging Face,
HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different
modalities and domains and achieve impressive results in language, vision,
speech, and other challenging tasks, which paves a new way towards the
realization of artificial general intelligence.
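A minimal sketch of the referenced `langchain_experimental.autonomous_agents` integration, assuming `llm` and `tools` (LangChain tools wrapping Hugging Face models) already exist:

```python
from langchain_experimental.autonomous_agents import HuggingGPT

# ChatGPT-style planner that routes subtasks to the provided HF model tools
agent = HuggingGPT(llm, tools)
agent.run("please show me a video and an image of 'a boy is running'")
```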
## GPT-4 Technical Report
- **arXiv id:** 2303.08774v6
- **Title:** GPT-4 Technical Report
- **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.
- **Published Date:** 2023-03-15
- **URL:** http://arxiv.org/abs/2303.08774v6
- **LangChain Documentation:** [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can
accept image and text inputs and produce text outputs. While less capable than
humans in many real-world scenarios, GPT-4 exhibits human-level performance on
various professional and academic benchmarks, including passing a simulated bar
exam with a score around the top 10% of test takers. GPT-4 is a
Transformer-based model pre-trained to predict the next token in a document.
The post-training alignment process results in improved performance on measures
of factuality and adherence to desired behavior. A core component of this
project was developing infrastructure and optimization methods that behave
predictably across a wide range of scales. This allowed us to accurately
predict some aspects of GPT-4's performance based on models trained with no
more than 1/1,000th the compute of GPT-4.
## A Watermark for Large Language Models
- **arXiv id:** 2301.10226v4
- **Title:** A Watermark for Large Language Models
- **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.
- **Published Date:** 2023-01-24
- **URL:** http://arxiv.org/abs/2301.10226v4
- **LangChain API Reference:** [langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)
**Abstract:** Potential harms of large language models can be mitigated by watermarking
model output, i.e., embedding signals into generated text that are invisible to
humans but algorithmically detectable from a short span of tokens. We propose a
watermarking framework for proprietary language models. The watermark can be
embedded with negligible impact on text quality, and can be detected using an
efficient open-source algorithm without access to the language model API or
parameters. The watermark works by selecting a randomized set of "green" tokens
before a word is generated, and then softly promoting use of green tokens
during sampling. We propose a statistical test for detecting the watermark with
interpretable p-values, and derive an information-theoretic framework for
analyzing the sensitivity of the watermark. We test the watermark using a
multi-billion parameter model from the Open Pretrained Transformer (OPT)
family, and discuss robustness and security.
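Text-generation-inference servers implement this watermark, and the referenced LangChain wrappers pass the flag through. A minimal sketch, assuming a TGI server is already running at the given URL:

```python
from langchain_community.llms import HuggingFaceTextGenInference

llm = HuggingFaceTextGenInference(
    inference_server_url="http://localhost:8010/",  # assumed local TGI endpoint
    max_new_tokens=512,
    watermark=True,  # embed the Kirchenbauer et al. green-token watermark
)
llm.invoke("Write a short paragraph about language model watermarking.")
```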
## Precise Zero-Shot Dense Retrieval without Relevance Labels
- **arXiv id:** 2212.10496v1
- **Title:** Precise Zero-Shot Dense Retrieval without Relevance Labels
- **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.
- **Published Date:** 2022-12-20
- **URL:** http://arxiv.org/abs/2212.10496v1
- **LangChain Documentation:** [docs/use_cases/query_analysis/techniques/hyde](https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde)
- **LangChain API Reference:** [langchain.chains.hyde.base.HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
**Abstract:** While dense retrieval has been shown effective and efficient across tasks and
languages, it remains difficult to create effective fully zero-shot dense
retrieval systems when no relevance label is available. In this paper, we
recognize the difficulty of zero-shot learning and encoding relevance. Instead,
we propose to pivot through Hypothetical Document Embeddings (HyDE). Given a
query, HyDE first zero-shot instructs an instruction-following language model
(e.g. InstructGPT) to generate a hypothetical document. The document captures
relevance patterns but is unreal and may contain false details. Then, an
unsupervised contrastively learned encoder (e.g. Contriever) encodes the
document into an embedding vector. This vector identifies a neighborhood in the
corpus embedding space, where similar real documents are retrieved based on
vector similarity. This second step grounds the generated document to the actual
corpus, with the encoder's dense bottleneck filtering out the incorrect
details. Our experiments show that HyDE significantly outperforms the
state-of-the-art unsupervised dense retriever Contriever and shows strong
performance comparable to fine-tuned retrievers, across various tasks (e.g. web
search, QA, fact verification) and languages (e.g. sw, ko, ja).
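HyDE is available as `HypotheticalDocumentEmbedder`. A minimal sketch, assuming `llm` and `base_embeddings` are an existing chat model and embedding model:

```python
from langchain.chains import HypotheticalDocumentEmbedder

# Generate a hypothetical answer document, then embed it with the base encoder
embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")
query_vector = embeddings.embed_query("Where is the Taj Mahal?")
```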
## Constitutional AI: Harmlessness from AI Feedback
- **arXiv id:** 2212.08073v1
- **Title:** Constitutional AI: Harmlessness from AI Feedback
- **Authors:** Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al.
- **Published Date:** 2022-12-15
- **URL:** http://arxiv.org/abs/2212.08073v1
- **LangChain Documentation:** [docs/guides/productionization/evaluation/string/criteria_eval_chain](https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain)
**Abstract:** As AI systems become more capable, we would like to enlist their help to
supervise other AIs. We experiment with methods for training a harmless AI
assistant through self-improvement, without any human labels identifying
harmful outputs. The only human oversight is provided through a list of rules
or principles, and so we refer to the method as 'Constitutional AI'. The
process involves both a supervised learning and a reinforcement learning phase.
In the supervised phase we sample from an initial model, then generate
self-critiques and revisions, and then finetune the original model on revised
responses. In the RL phase, we sample from the finetuned model, use a model to
evaluate which of the two samples is better, and then train a preference model
from this dataset of AI preferences. We then train with RL using the preference
model as the reward signal, i.e. we use 'RL from AI Feedback' (RLAIF). As a
result we are able to train a harmless but non-evasive AI assistant that
engages with harmful queries by explaining its objections to them. Both the SL
and RL methods can leverage chain-of-thought style reasoning to improve the
human-judged performance and transparency of AI decision making. These methods
make it possible to control AI behavior more precisely and with far fewer human
labels.
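The linked criteria evaluation guide lets you grade model outputs against constitutional-style principles. A minimal sketch, assuming `llm` is an existing chat model:

```python
from langchain.evaluation import load_evaluator

evaluator = load_evaluator("criteria", criteria="harmfulness", llm=llm)
result = evaluator.evaluate_strings(
    prediction="I cannot help with that request.",
    input="How do I pick a lock?",
)
print(result["score"], result["reasoning"])
```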
## Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
- **arXiv id:** 2212.07425v3
- **Title:** Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
- **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.
- **Published Date:** 2022-12-12
- **URL:** http://arxiv.org/abs/2212.07425v3
- **LangChain API Reference:** [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
**Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been
amplified in the Internet era. Given the volume of data and the subtlety of
identifying violations of argumentation norms, supporting information analytics
tasks, like content moderation, with trustworthy methods that can identify
logical fallacies is essential. In this paper, we formalize prior theoretical
work on logical fallacies into a comprehensive three-stage evaluation framework
of detection, coarse-grained, and fine-grained classification. We adapt
existing evaluation datasets for each stage of the evaluation. We employ three
families of robust and explainable methods based on prototype reasoning,
instance-based reasoning, and knowledge injection. The methods combine language
models with background knowledge and explainable mechanisms. Moreover, we
address data sparsity with strategies for data augmentation and curriculum
learning. Our three-stage framework natively consolidates prior datasets and
methods from existing tasks, like propaganda detection, serving as an
overarching evaluation testbed. We extensively evaluate these methods on our
datasets, focusing on their robustness and explainability. Our results provide
insight into the strengths and weaknesses of the methods on different
components and fallacy classes, indicating that fallacy identification is a
challenging task that may require specialized forms of reasoning to capture
various classes. We share our open-source code and data on GitHub to support
further work on logical fallacy identification.
## Complementary Explanations for Effective In-Context Learning
- **arXiv id:** 2211.13892v2
- **Title:** Complementary Explanations for Effective In-Context Learning
- **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.
- **Published Date:** 2022-11-25
- **URL:** http://arxiv.org/abs/2211.13892v2
- **LangChain API Reference:** [langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in
learning from explanations in prompts, but there has been limited understanding
of exactly how these explanations function or why they are effective. This work
aims to better understand the mechanisms by which explanations are used for
in-context learning. We first study the impact of two different factors on the
performance of prompts with explanations: the computation trace (the way the
solution is decomposed) and the natural language used to express the prompt. By
perturbing explanations on three controlled tasks, we show that both factors
contribute to the effectiveness of explanations. We further study how to form
maximally effective sets of explanations for solving a given test query. We
find that LLMs can benefit from the complementarity of the explanation set:
diverse reasoning skills shown by different exemplars can lead to better
performance. Therefore, we propose a maximal marginal relevance-based exemplar
selection approach for constructing exemplar sets that are both relevant as
well as complementary, which successfully improves the in-context learning
performance across three real-world tasks on multiple LLMs.
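The MMR-based exemplar selection described above is available as `MaxMarginalRelevanceExampleSelector`. A minimal sketch, assuming `examples` (a list of input/output dicts) and `embeddings` already exist:

```python
from langchain_community.vectorstores import FAISS
from langchain_core.example_selectors import MaxMarginalRelevanceExampleSelector

example_selector = MaxMarginalRelevanceExampleSelector.from_examples(
    examples,
    embeddings,
    FAISS,  # vector store used to index the candidate exemplars
    k=2,  # exemplars chosen to balance relevance against diversity
)
selected = example_selector.select_examples({"input": "worried"})
```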
## PAL: Program-aided Language Models
- **arXiv id:** 2211.10435v2
- **Title:** PAL: Program-aided Language Models
- **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.
- **Published Date:** 2022-11-18
- **URL:** http://arxiv.org/abs/2211.10435v2
- **LangChain API Reference:** [langchain_experimental.pal_chain.base.PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)
**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability
to perform arithmetic and symbolic reasoning tasks, when provided with a few
examples at test time ("few-shot prompting"). Much of this success can be
attributed to prompting methods such as "chain-of-thought", which employ LLMs
for both understanding the problem description by decomposing it into steps, as
well as solving each step of the problem. While LLMs seem to be adept at this
sort of step-by-step decomposition, LLMs often make logical and arithmetic
mistakes in the solution part, even when the problem is decomposed correctly.
In this paper, we present Program-Aided Language models (PAL): a novel approach
that uses the LLM to read natural language problems and generate programs as
the intermediate reasoning steps, but offloads the solution step to a runtime
such as a Python interpreter. With PAL, decomposing the natural language
problem into runnable steps remains the only learning task for the LLM, while
solving is delegated to the interpreter. We demonstrate this synergy between a
neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and
algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all
these natural language reasoning tasks, generating code using an LLM and
reasoning using a Python interpreter leads to more accurate results than much
larger models. For example, PAL using Codex achieves state-of-the-art few-shot
accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B
which uses chain-of-thought by absolute 15% top-1. Our code and data are
publicly available at http://reasonwithpal.com/.
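A minimal sketch of the referenced `PALChain`, assuming `llm` already exists; note that PAL executes model-generated Python, so run it only in a trusted environment:

```python
from langchain_experimental.pal_chain import PALChain

pal_chain = PALChain.from_math_prompt(llm, verbose=True)
question = (
    "Jan has three times the number of pets as Marcia. Marcia has two more "
    "pets than Cindy. If Cindy has four pets, how many pets do the three have?"
)
pal_chain.run(question)  # the LLM writes a program; the interpreter computes the answer
```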
## Deep Lake: a Lakehouse for Deep Learning
- **arXiv id:** 2209.10785v2
- **Title:** Deep Lake: a Lakehouse for Deep Learning
- **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.
- **Published Date:** 2022-09-22
- **URL:** http://arxiv.org/abs/2209.10785v2
- **LangChain Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
**Abstract:** Traditional data lakes provide critical data infrastructure for analytical
workloads by enabling time travel, running SQL queries, ingesting data with
ACID transactions, and visualizing petabyte-scale datasets on cloud storage.
They allow organizations to break down data silos, unlock data-driven
decision-making, improve operational efficiency, and reduce costs. However, as
deep learning usage increases, traditional data lakes are not well-designed for
applications such as natural language processing (NLP), audio processing,
computer vision, and applications involving non-tabular datasets. This paper
presents Deep Lake, an open-source lakehouse for deep learning applications
developed at Activeloop. Deep Lake maintains the benefits of a vanilla data
lake with one key difference: it stores complex data, such as images, videos,
annotations, as well as tabular data, in the form of tensors and rapidly
streams the data over the network to (a) Tensor Query Language, (b) in-browser
visualization engine, or (c) deep learning frameworks without sacrificing GPU
utilization. Datasets stored in Deep Lake can be accessed from PyTorch,
TensorFlow, JAX, and integrate with numerous MLOps tools.
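A minimal sketch of the linked Deep Lake vector store integration, assuming `docs` and `embeddings` already exist:

```python
from langchain_community.vectorstores import DeepLake

db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings)
db.add_documents(docs)
results = db.similarity_search("What does Deep Lake store as tensors?")
```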
## Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
- **arXiv id:** 2205.12654v1
- **Title:** Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
- **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk
- **Published Date:** 2022-05-25
- **URL:** http://arxiv.org/abs/2205.12654v1
- **LangChain API Reference:** [langchain_community.embeddings.laser.LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent
languages is challenging, in particular to cover the long tail of low-resource
languages. A promising approach has been to train one-for-all multilingual
models capable of cross-lingual transfer, but these models often suffer from
insufficient capacity and interference between unrelated languages. Instead, we
move away from this approach and focus on training multiple language (family)
specific representations, but most prominently enable all languages to still be
encoded in the same representational space. To achieve this, we focus on
teacher-student training, allowing all encoders to be mutually compatible for
bitext mining, and enabling fast learning of new languages. We introduce a new
teacher-student training scheme which combines supervised and self-supervised
training, allowing encoders to take advantage of monolingual training data,
which is valuable in the low-resource setting.
Our approach significantly outperforms the original LASER encoder. We study
very low-resource languages and handle 50 African languages, many of which are
not covered by any other model. For these languages, we train sentence
encoders, mine bitexts, and validate the bitexts by training NMT systems.
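A minimal sketch of the referenced `LaserEmbeddings` wrapper; the `lang` code shown is illustrative, and the underlying LASER models are downloaded on first use:

```python
from langchain_community.embeddings.laser import LaserEmbeddings

embeddings = LaserEmbeddings(lang="eng_Latn")  # language-specific encoder
vector = embeddings.embed_query("Hello, world!")
```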
## Evaluating the Text-to-SQL Capabilities of Large Language Models
- **arXiv id:** 2204.00498v1
- **Title:** Evaluating the Text-to-SQL Capabilities of Large Language Models
- **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau
- **Published Date:** 2022-03-15
- **URL:** http://arxiv.org/abs/2204.00498v1
- **LangChain Documentation:** [docs/use_cases/sql/quickstart](https://python.langchain.com/docs/use_cases/sql/quickstart)
- **LangChain API Reference:** [langchain_community.utilities.sql_database.SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community.utilities.spark_sql.SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
language model. We find that, without any finetuning, Codex is a strong
baseline on the Spider benchmark; we also analyze the failure modes of Codex in
this setting. Furthermore, we demonstrate on the GeoQuery and Scholar
benchmarks that a small number of in-domain examples provided in the prompt
enables Codex to perform better than state-of-the-art models finetuned on such
few-shot examples.
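A minimal sketch of the referenced `SQLDatabase` utility, following the linked quickstart's Chinook example (the database file is assumed to exist locally):

```python
from langchain_community.utilities import SQLDatabase

db = SQLDatabase.from_uri("sqlite:///Chinook.db")
print(db.dialect)
print(db.get_usable_table_names())
print(db.run("SELECT COUNT(*) AS employee_count FROM Employee;"))
```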
## Locally Typical Sampling
- **arXiv id:** 2202.00666v5
- **Title:** Locally Typical Sampling
- **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.
- **Published Date:** 2022-02-01
- **URL:** http://arxiv.org/abs/2202.00666v5
- **LangChain API Reference:** [langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
**Abstract:** Today's probabilistic language generators fall short when it comes to
producing coherent and fluent text despite the fact that the underlying models
perform well under standard metrics, e.g., perplexity. This discrepancy has
puzzled the language generation community for the last few years. In this work,
we posit that the abstraction of natural language generation as a discrete
stochastic process--which allows for an information-theoretic analysis--can
provide new insights into the behavior of probabilistic language generators,
e.g., why high-probability texts can be dull or repetitive. Humans use language
as a means of communicating information, aiming to do so in a simultaneously
efficient and error-minimizing manner; in fact, psycholinguistics research
suggests humans choose each word in a string with this subconscious goal in
mind. We formally define the set of strings that meet this criterion: those for
which each word has an information content close to the expected information
content, i.e., the conditional entropy of our model. We then propose a simple
and efficient procedure for enforcing this criterion when generating from
probabilistic models, which we call locally typical sampling. Automatic and
human evaluations show that, in comparison to nucleus and top-k sampling,
locally typical sampling offers competitive performance (in both abstractive
summarization and story generation) in terms of quality while consistently
reducing degenerate repetitions.
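The referenced Hugging Face wrappers expose this as the `typical_p` sampling parameter. A minimal sketch; the `repo_id` is illustrative and an API token is assumed to be configured:

```python
from langchain_community.llms import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    typical_p=0.95,  # locally typical sampling threshold
    max_new_tokens=128,
)
llm.invoke("Once upon a time")
```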
## Learning Transferable Visual Models From Natural Language Supervision
- **arXiv id:** 2103.00020v1
- **Title:** Learning Transferable Visual Models From Natural Language Supervision
- **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.
- **Published Date:** 2021-02-26
- **URL:** http://arxiv.org/abs/2103.00020v1
- **LangChain API Reference:** [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
**Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set
of predetermined object categories. This restricted form of supervision limits
their generality and usability since additional labeled data is needed to
specify any other visual concept. Learning directly from raw text about images
is a promising alternative which leverages a much broader source of
supervision. We demonstrate that the simple pre-training task of predicting
which caption goes with which image is an efficient and scalable way to learn
SOTA image representations from scratch on a dataset of 400 million (image,
text) pairs collected from the internet. After pre-training, natural language
is used to reference learned visual concepts (or describe new ones) enabling
zero-shot transfer of the model to downstream tasks. We study the performance
of this approach by benchmarking on over 30 different existing computer vision
datasets, spanning tasks such as OCR, action recognition in videos,
geo-localization, and many types of fine-grained object classification. The
model transfers non-trivially to most tasks and is often competitive with a
fully supervised baseline without the need for any dataset specific training.
For instance, we match the accuracy of the original ResNet-50 on ImageNet
zero-shot without needing to use any of the 1.28 million training examples it
was trained on. We release our code and pre-trained model weights at
https://github.com/OpenAI/CLIP.
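A minimal sketch of the referenced `langchain_experimental.open_clip` integration; the model and checkpoint names follow the open_clip naming scheme, and the image path is a placeholder:

```python
from langchain_experimental.open_clip import OpenCLIPEmbeddings

clip_embd = OpenCLIPEmbeddings(model_name="ViT-B-32", checkpoint="laion2b_s34b_b79k")
text_vectors = clip_embd.embed_documents(["a photo of a cat", "a photo of a dog"])
image_vectors = clip_embd.embed_image(["path/to/image.jpg"])
```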
## CTRL: A Conditional Transformer Language Model for Controllable Generation
- **arXiv id:** 1909.05858v2
- **Title:** CTRL: A Conditional Transformer Language Model for Controllable Generation
- **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al.
- **Published Date:** 2019-09-11
- **URL:** http://arxiv.org/abs/1909.05858v2
- **LangChain API Reference:** [langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
**Abstract:** Large-scale language models show promising text generation capabilities, but
users cannot easily control particular aspects of the generated text. We
release CTRL, a 1.63 billion-parameter conditional transformer language model,
trained to condition on control codes that govern style, content, and
task-specific behavior. Control codes were derived from structure that
naturally co-occurs with raw text, preserving the advantages of unsupervised
learning while providing more explicit control over text generation. These
codes also allow CTRL to predict which parts of the training data are most
likely given a sequence. This provides a potential method for analyzing large
amounts of data via model-based source attribution. We have released multiple
full-sized, pretrained versions of CTRL at https://github.com/salesforce/ctrl.
## Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks
- **arXiv id:** 1908.10084v1
- **Title:** Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks
- **Authors:** Nils Reimers, Iryna Gurevych
- **Published Date:** 2019-08-27
- **URL:** http://arxiv.org/abs/1908.10084v1
- **LangChain Documentation:** [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new
state-of-the-art performance on sentence-pair regression tasks like semantic
textual similarity (STS). However, it requires that both sentences are fed into
the network, which causes a massive computational overhead: Finding the most
similar pair in a collection of 10,000 sentences requires about 50 million
inference computations (~65 hours) with BERT. The construction of BERT makes it
unsuitable for semantic similarity search as well as for unsupervised tasks
like clustering.
In this publication, we present Sentence-BERT (SBERT), a modification of the
pretrained BERT network that uses siamese and triplet network structures to
derive semantically meaningful sentence embeddings that can be compared using
cosine-similarity. This reduces the effort for finding the most similar pair
from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while
maintaining the accuracy from BERT.
We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning
tasks, where it outperforms other state-of-the-art sentence embeddings methods.
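A minimal sketch of the linked sentence-transformers integration; any sentence-transformers model name can be substituted:

```python
from langchain_community.embeddings import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector = embeddings.embed_query("SBERT-style sentence embeddings")
```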

@@ -476,6 +476,87 @@ If you are still using AgentExecutor, do not fear: we still have a guide on [how
It is recommended, however, that you start to transition to LangGraph.
To assist with this, we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
### Callbacks
LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described below in more detail.
#### Callback handlers
`CallbackHandlers` are objects that implement the [`CallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) interface, which has a method for each event that can be subscribed to.
The `CallbackManager` will call the appropriate method on each handler when the event is triggered.
```python
class BaseCallbackHandler:
    """Base callback handler that can be used to handle callbacks from langchain."""

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> Any:
        """Run when LLM starts running."""

    def on_chat_model_start(
        self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any
    ) -> Any:
        """Run when Chat Model starts running."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
        """Run on new LLM token. Only available when streaming is enabled."""

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
        """Run when LLM ends running."""

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
        """Run when LLM errors."""

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> Any:
        """Run when chain starts running."""

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
        """Run when chain ends running."""

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
        """Run when chain errors."""

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> Any:
        """Run when tool starts running."""

    def on_tool_end(self, output: Any, **kwargs: Any) -> Any:
        """Run when tool ends running."""

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
        """Run when tool errors."""

    def on_text(self, text: str, **kwargs: Any) -> Any:
        """Run on arbitrary text."""

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run on agent action."""

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run on agent end."""
```
#### Passing callbacks
The `callbacks` argument is available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places:
- **Constructor callbacks**: defined in the constructor, e.g. `ChatAnthropic(callbacks=[handler], tags=['a-tag'])`. In this case, the callbacks will be used for all calls made on that object, and will be scoped to that object only.
For example, if you initialize a chat model with constructor callbacks, then use it within a chain, the callbacks will only be invoked for calls to that model.
- **Request callbacks**: passed into the `invoke` method used for issuing a request. In this case, the callbacks will be used for that specific request only, and all sub-requests that it contains (e.g. a call to a sequence that triggers a call to a model, which uses the same handler passed in the `invoke()` method).
In the `invoke()` method, callbacks are passed through the `config` parameter.
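For example (a minimal sketch, assuming the Anthropic integration is installed and an API key is configured), the two scopes look like this:

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_core.prompts import ChatPromptTemplate

handler = StdOutCallbackHandler()

# Constructor callbacks: scoped to this model instance only
llm = ChatAnthropic(model="claude-3-sonnet-20240229", callbacks=[handler])
prompt = ChatPromptTemplate.from_template("What is 1 + {number}?")
chain = prompt | llm

# Request callbacks: passed through `config`, applied to this invocation
# and every sub-run it triggers (the prompt and the model)
chain.invoke({"number": "2"}, config={"callbacks": [handler]})
```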
## Techniques
### Function/tool calling

@@ -16,7 +16,7 @@
"id": "711752cb-4f15-42a3-9838-a0c67f397771",
"metadata": {},
"source": [
"# How to attach runtime arguments to a Runnable\n",
"# How to add default invocation args to a Runnable\n",
"\n",
":::info Prerequisites\n",
"\n",

@@ -0,0 +1,171 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to use callbacks in async environments\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"\n",
":::\n",
"\n",
"If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the runloop.\n",
"\n",
"**Note**: if you use a sync `CallbackHandler` while using an async method to run your LLM / Chain / Tool / Agent, it will still work. However, under the hood, it will be called with [`run_in_executor`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) which can cause issues if your `CallbackHandler` is not thread-safe."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"zzzz....\n",
"Hi! I just woke up. Your llm is starting\n",
"Sync handler being called in a `thread_pool_executor`: token: Here\n",
"Sync handler being called in a `thread_pool_executor`: token: 's\n",
"Sync handler being called in a `thread_pool_executor`: token: a\n",
"Sync handler being called in a `thread_pool_executor`: token: little\n",
"Sync handler being called in a `thread_pool_executor`: token: joke\n",
"Sync handler being called in a `thread_pool_executor`: token: for\n",
"Sync handler being called in a `thread_pool_executor`: token: you\n",
"Sync handler being called in a `thread_pool_executor`: token: :\n",
"Sync handler being called in a `thread_pool_executor`: token: \n",
"\n",
"Why\n",
"Sync handler being called in a `thread_pool_executor`: token: can\n",
"Sync handler being called in a `thread_pool_executor`: token: 't\n",
"Sync handler being called in a `thread_pool_executor`: token: a\n",
"Sync handler being called in a `thread_pool_executor`: token: bicycle\n",
"Sync handler being called in a `thread_pool_executor`: token: stan\n",
"Sync handler being called in a `thread_pool_executor`: token: d up\n",
"Sync handler being called in a `thread_pool_executor`: token: by\n",
"Sync handler being called in a `thread_pool_executor`: token: itself\n",
"Sync handler being called in a `thread_pool_executor`: token: ?\n",
"Sync handler being called in a `thread_pool_executor`: token: Because\n",
"Sync handler being called in a `thread_pool_executor`: token: it\n",
"Sync handler being called in a `thread_pool_executor`: token: 's\n",
"Sync handler being called in a `thread_pool_executor`: token: two\n",
"Sync handler being called in a `thread_pool_executor`: token: -\n",
"Sync handler being called in a `thread_pool_executor`: token: tire\n",
"zzzz....\n",
"Hi! I just woke up. Your llm is ending\n"
]
},
{
"data": {
"text/plain": [
"LLMResult(generations=[[ChatGeneration(text=\"Here's a little joke for you:\\n\\nWhy can't a bicycle stand up by itself? Because it's two-tire\", message=AIMessage(content=\"Here's a little joke for you:\\n\\nWhy can't a bicycle stand up by itself? Because it's two-tire\", id='run-8afc89e8-02c0-4522-8480-d96977240bd4-0'))]], llm_output={}, run=[RunInfo(run_id=UUID('8afc89e8-02c0-4522-8480-d96977240bd4'))])"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import asyncio\n",
"from typing import Any, Dict, List\n",
"\n",
"from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.outputs import LLMResult\n",
"\n",
"\n",
"class MyCustomSyncHandler(BaseCallbackHandler):\n",
" def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
" print(f\"Sync handler being called in a `thread_pool_executor`: token: {token}\")\n",
"\n",
"\n",
"class MyCustomAsyncHandler(AsyncCallbackHandler):\n",
" \"\"\"Async callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
"\n",
" async def on_llm_start(\n",
" self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
" ) -> None:\n",
" \"\"\"Run when chain starts running.\"\"\"\n",
" print(\"zzzz....\")\n",
" await asyncio.sleep(0.3)\n",
" class_name = serialized[\"name\"]\n",
" print(\"Hi! I just woke up. Your llm is starting\")\n",
"\n",
" async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:\n",
" \"\"\"Run when chain ends running.\"\"\"\n",
" print(\"zzzz....\")\n",
" await asyncio.sleep(0.3)\n",
" print(\"Hi! I just woke up. Your llm is ending\")\n",
"\n",
"\n",
"# To enable streaming, we pass in `streaming=True` to the ChatModel constructor\n",
"# Additionally, we pass in a list with our custom handler\n",
"chat = ChatAnthropic(\n",
" model=\"claude-3-sonnet-20240229\",\n",
" max_tokens=25,\n",
" streaming=True,\n",
" callbacks=[MyCustomSyncHandler(), MyCustomAsyncHandler()],\n",
")\n",
"\n",
"await chat.agenerate([[HumanMessage(content=\"Tell me a joke\")]])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've now learned how to create your own custom callback handlers.\n",
"\n",
"Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/docs/how_to/callbacks_attach)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@@ -0,0 +1,144 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to attach callbacks to a module\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"- [Chaining runnables](/docs/how_to/sequence)\n",
"- [Attach runtime arguments to a Runnable](/docs/how_to/binding)\n",
"\n",
":::\n",
"\n",
"If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain.\n",
"\n",
"Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Chain RunnableSequence started\n",
"Chain ChatPromptTemplate started\n",
"Chain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]\n",
"Chat model started\n",
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-393950f9-79b9-4fd6-ac6e-50d93d75b906-0'))]] llm_output={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n",
"Chain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-393950f9-79b9-4fd6-ac6e-50d93d75b906-0'\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-393950f9-79b9-4fd6-ac6e-50d93d75b906-0')"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.messages import BaseMessage\n",
"from langchain_core.outputs import LLMResult\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class LoggingHandler(BaseCallbackHandler):\n",
" def on_chat_model_start(\n",
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
" ) -> None:\n",
" print(\"Chat model started\")\n",
"\n",
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
" print(f\"Chat model ended, response: {response}\")\n",
"\n",
" def on_chain_start(\n",
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
" ) -> None:\n",
" print(f\"Chain {serialized.get('name')} started\")\n",
"\n",
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
" print(f\"Chain ended, outputs: {outputs}\")\n",
"\n",
"\n",
"callbacks = [LoggingHandler()]\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n",
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
"\n",
"chain = prompt | llm\n",
"\n",
"chain_with_callbacks = chain.with_config(callbacks=callbacks)\n",
"\n",
"chain_with_callbacks.invoke({\"number\": \"2\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The bound callbacks will run for all nested module runs.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to attach callbacks to a chain.\n",
"\n",
"Next, check out the other how-to guides in this section, such as how to [pass callbacks in at runtime](/docs/how_to/callbacks_runtime)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,136 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass callbacks into a module constructor\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"\n",
":::\n",
"\n",
"Most LangChain modules allow you to pass `callbacks` directly into the constructor. In this case, the callbacks will only be called for that instance (and any nested runs).\n",
"\n",
"Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Chat model started\n",
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0'))]] llm_output={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0')"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.messages import BaseMessage\n",
"from langchain_core.outputs import LLMResult\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class LoggingHandler(BaseCallbackHandler):\n",
" def on_chat_model_start(\n",
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
" ) -> None:\n",
" print(\"Chat model started\")\n",
"\n",
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
" print(f\"Chat model ended, response: {response}\")\n",
"\n",
" def on_chain_start(\n",
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
" ) -> None:\n",
" print(f\"Chain {serialized.get('name')} started\")\n",
"\n",
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
" print(f\"Chain ended, outputs: {outputs}\")\n",
"\n",
"\n",
"callbacks = [LoggingHandler()]\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", callbacks=callbacks)\n",
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
"\n",
"chain = prompt | llm\n",
"\n",
"chain.invoke({\"number\": \"2\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can see that we only see events from the chat model run - no chain events from the prompt or broader chain.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to pass callbacks into a constructor.\n",
"\n",
"Next, check out the other how-to guides in this section, such as how to [pass callbacks at runtime](/docs/how_to/callbacks_runtime)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,140 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass callbacks in at runtime\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"\n",
":::\n",
"\n",
"In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n",
"\n",
"This prevents us from having to manually attach the handlers to each individual nested object. Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Chain RunnableSequence started\n",
"Chain ChatPromptTemplate started\n",
"Chain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]\n",
"Chat model started\n",
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'))]] llm_output={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n",
"Chain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0')"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.messages import BaseMessage\n",
"from langchain_core.outputs import LLMResult\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class LoggingHandler(BaseCallbackHandler):\n",
" def on_chat_model_start(\n",
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
" ) -> None:\n",
" print(\"Chat model started\")\n",
"\n",
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
" print(f\"Chat model ended, response: {response}\")\n",
"\n",
" def on_chain_start(\n",
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
" ) -> None:\n",
" print(f\"Chain {serialized.get('name')} started\")\n",
"\n",
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
" print(f\"Chain ended, outputs: {outputs}\")\n",
"\n",
"\n",
"callbacks = [LoggingHandler()]\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n",
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
"\n",
"chain = prompt | llm\n",
"\n",
"chain.invoke({\"number\": \"2\"}, config={\"callbacks\": callbacks})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If there are already existing callbacks associated with a module, these will run in addition to any passed in at runtime.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to pass callbacks at runtime.\n",
"\n",
"Next, check out the other how-to guides in this section, such as how to [pass callbacks into a module constructor](/docs/how_to/custom_callbacks)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,141 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to create custom callback handlers\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"\n",
":::\n",
"\n",
"LangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic.\n",
"\n",
"To create a custom callback handler, we need to determine the [event(s)](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n",
"\n",
"In the example below, we'll implement streaming with a custom handler.\n",
"\n",
"In our custom callback handler `MyCustomHandler`, we implement the `on_llm_new_token` handler to print the token we have just received. We then attach our custom handler to the model object as a constructor callback."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"My custom handler, token: Here\n",
"My custom handler, token: 's\n",
"My custom handler, token: a\n",
"My custom handler, token: bear\n",
"My custom handler, token: joke\n",
"My custom handler, token: for\n",
"My custom handler, token: you\n",
"My custom handler, token: :\n",
"My custom handler, token: \n",
"\n",
"Why\n",
"My custom handler, token: di\n",
"My custom handler, token: d the\n",
"My custom handler, token: bear\n",
"My custom handler, token: dissol\n",
"My custom handler, token: ve\n",
"My custom handler, token: in\n",
"My custom handler, token: water\n",
"My custom handler, token: ?\n",
"My custom handler, token: \n",
"Because\n",
"My custom handler, token: it\n",
"My custom handler, token: was\n",
"My custom handler, token: a\n",
"My custom handler, token: polar\n",
"My custom handler, token: bear\n",
"My custom handler, token: !\n"
]
}
],
"source": [
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class MyCustomHandler(BaseCallbackHandler):\n",
" def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
" print(f\"My custom handler, token: {token}\")\n",
"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages([\"Tell me a joke about {animal}\"])\n",
"\n",
"# To enable streaming, we pass in `streaming=True` to the ChatModel constructor\n",
"# Additionally, we pass in our custom handler as a list to the callbacks parameter\n",
"model = ChatAnthropic(\n",
" model=\"claude-3-sonnet-20240229\", streaming=True, callbacks=[MyCustomHandler()]\n",
")\n",
"\n",
"chain = prompt | model\n",
"\n",
"response = chain.invoke({\"animal\": \"bears\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can see [this reference page](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) for a list of events you can handle. Note that the `handle_chain_*` events run for most LCEL runnables.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to create your own custom callback handlers.\n",
"\n",
"Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/docs/how_to/callbacks_attach)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,200 @@
{
"cells": [
{
"cell_type": "raw",
"id": "77bf57fb-e990-45f2-8b5f-c76388b05966",
"metadata": {},
"source": [
"---\n",
"keywords: [LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "50d57bf2-7104-4570-b3e5-90fd71e1bea1",
"metadata": {},
"source": [
"# How to create a dynamic (self-constructing) chain\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following:\n",
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
"- [How to turn any function into a runnable](/docs/how_to/functions)\n",
"\n",
":::\n",
"\n",
"Sometimes we want to construct parts of a chain at runtime, depending on the chain inputs ([routing](/docs/how_to/routing/) is the most common example of this). We can create dynamic chains like this using a very useful property of RunnableLambda's, which is that if a RunnableLambda returns a Runnable, that Runnable is itself invoked. Let's see an example.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs\n",
" customVarName=\"llm\"\n",
"/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "406bffc2-86d0-4cb9-9262-5c1e3442397a",
"metadata": {},
"outputs": [],
"source": [
"# | echo: false\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "0ae6692b-983e-40b8-aa2a-6c078d945b9e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"According to the context provided, Egypt's population in 2024 is estimated to be about 111 million.\""
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import Runnable, RunnablePassthrough, chain\n",
"\n",
"contextualize_instructions = \"\"\"Convert the latest user question into a standalone question given the chat history. Don't answer the question, return the question and nothing else (no descriptive text).\"\"\"\n",
"contextualize_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", contextualize_instructions),\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"contextualize_question = contextualize_prompt | llm | StrOutputParser()\n",
"\n",
"qa_instructions = (\n",
" \"\"\"Answer the user question given the following context:\\n\\n{context}.\"\"\"\n",
")\n",
"qa_prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", qa_instructions), (\"human\", \"{question}\")]\n",
")\n",
"\n",
"\n",
"@chain\n",
"def contextualize_if_needed(input_: dict) -> Runnable:\n",
" if input_.get(\"chat_history\"):\n",
" # NOTE: This is returning another Runnable, not an actual output.\n",
" return contextualize_question\n",
" else:\n",
" return RunnablePassthrough()\n",
"\n",
"\n",
"@chain\n",
"def fake_retriever(input_: dict) -> str:\n",
" return \"egypt's population in 2024 is about 111 million\"\n",
"\n",
"\n",
"full_chain = (\n",
" RunnablePassthrough.assign(question=contextualize_if_needed).assign(\n",
" context=fake_retriever\n",
" )\n",
" | qa_prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"\n",
"full_chain.invoke(\n",
" {\n",
" \"question\": \"what about egypt\",\n",
" \"chat_history\": [\n",
" (\"human\", \"what's the population of indonesia\"),\n",
" (\"ai\", \"about 276 million\"),\n",
" ],\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "5076ddb4-4a99-47ad-b549-8ac27ca3e2c6",
"metadata": {},
"source": [
"The key here is that `contextualize_if_needed` returns another Runnable and not an actual output. This returned Runnable is itself run when the full chain is executed.\n",
"\n",
"Looking at the trace we can see that, since we passed in chat_history, we executed the contextualize_question chain as part of the full chain: https://smith.langchain.com/public/9e0ae34c-4082-4f3f-beed-34a2a2f4c991/r"
]
},
{
"cell_type": "markdown",
"id": "4fe6ca44-a643-4859-a290-be68403f51f0",
"metadata": {},
"source": [
"Note that the streaming, batching, etc. capabilities of the returned Runnable are all preserved"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "6def37fa-5105-4090-9b07-77cb488ecd9c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"What\n",
" is\n",
" the\n",
" population\n",
" of\n",
" Egypt\n",
"?\n"
]
}
],
"source": [
"for chunk in contextualize_if_needed.stream(\n",
" {\n",
" \"question\": \"what about egypt\",\n",
" \"chat_history\": [\n",
" (\"human\", \"what's the population of indonesia\"),\n",
" (\"ai\", \"about 276 million\"),\n",
" ],\n",
" }\n",
"):\n",
" print(chunk)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-2",
"language": "python",
"name": "poetry-venv-2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -1,11 +1,21 @@
{
"cells": [
{
"cell_type": "raw",
"id": "018f3868-e60d-4db6-a1c6-c6633c66b1f4",
"metadata": {},
"source": [
"---\n",
"keywords: [LCEL, fallbacks]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "19c9cbd6",
"metadata": {},
"source": [
"# Fallbacks\n",
"# How to add fallbacks to a runnable\n",
"\n",
"When working with language models, you may often encounter issues from the underlying APIs, whether these be rate limiting or downtime. Therefore, as you go to move your LLM applications into production it becomes more and more important to safeguard against these. That's why we've introduced the concept of fallbacks. \n",
"\n",
@ -447,7 +457,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.9.1"
}
},
"nbformat": 4,

@ -19,27 +19,29 @@ For comprehensive descriptions of every class and function see the [API Referenc
This highlights functionality that is core to using LangChain.
- [How to: return structured data from an LLM](/docs/how_to/structured_output/)
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
- [How to: return structured data from a model](/docs/how_to/structured_output/)
- [How to: use a model to call tools](/docs/how_to/tool_calling/)
- [How to: stream runnables](/docs/how_to/streaming)
- [How to: debug your LLM apps](/docs/how_to/debugging/)
## LangChain Expression Language (LCEL)
LangChain Expression Language is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol.
[LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol.
[**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives.
- [How to: chain runnables](/docs/how_to/sequence)
- [How to: stream runnables](/docs/how_to/streaming)
- [How to: invoke runnables in parallel](/docs/how_to/parallel/)
- [How to: attach runtime arguments to a runnable](/docs/how_to/binding/)
- [How to: run custom functions](/docs/how_to/functions)
- [How to: pass through arguments from one step to the next](/docs/how_to/passthrough)
- [How to: add values to a chain's state](/docs/how_to/assign)
- [How to: configure a chain at runtime](/docs/how_to/configure)
- [How to: add message history](/docs/how_to/message_history)
- [How to: route execution within a chain](/docs/how_to/routing)
- [How to: add default invocation args to runnables](/docs/how_to/binding/)
- [How to: turn any function into a runnable](/docs/how_to/functions)
- [How to: pass through inputs from one chain step to the next](/docs/how_to/passthrough)
- [How to: configure runnable behavior at runtime](/docs/how_to/configure)
- [How to: add message history (memory) to a chain](/docs/how_to/message_history)
- [How to: route between sub-chains](/docs/how_to/routing)
- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/)
- [How to: inspect runnables](/docs/how_to/inspect)
- [How to: add fallbacks](/docs/how_to/fallbacks)
- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
## Components
@ -166,15 +168,11 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying
LangChain Tools contain a description of the tool (to pass to the language model) as well as the implementation of the function to call.
- [How to: use LangChain tools](/docs/how_to/tools)
- [How to: create custom tools](/docs/how_to/custom_tools)
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
- [How to: use LangChain toolkits](/docs/how_to/toolkits)
- [How to: define a custom tool](/docs/how_to/custom_tools)
- [How to: convert LangChain tools to OpenAI functions](/docs/how_to/tools_as_openai_functions)
- [How to: use tools without function calling](/docs/how_to/tools_prompting)
- [How to: let the LLM choose between multiple tools](/docs/how_to/tools_multiple)
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
- [How to: do parallel tool use](/docs/how_to/tools_parallel)
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
- [How to: call tools using multi-modal data](/docs/how_to/tool_calls_multi_modal)
@ -189,6 +187,14 @@ For in depth how-to guides for agents, please check out [LangGraph](https://gith
- [How to: use legacy LangChain Agents (AgentExecutor)](/docs/how_to/agent_executor)
- [How to: migrate from legacy LangChain agents to LangGraph](/docs/how_to/migrate_agent)
### Callbacks
- [How to: pass in callbacks at runtime](/docs/how_to/callbacks_runtime)
- [How to: attach callbacks to a module](/docs/how_to/callbacks_attach)
- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
- [How to: use callbacks in async environments](/docs/how_to/callbacks_async)
### Custom
All of LangChain components can easily be extended to support your own versions.
@ -198,6 +204,7 @@ All of LangChain components can easily be extended to support your own versions.
- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
- [How to: write a custom document loader](/docs/how_to/document_loader_custom)
- [How to: write a custom output parser class](/docs/how_to/output_parser_custom)
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
- [How to: define a custom tool](/docs/how_to/custom_tools)

File diff suppressed because it is too large

@ -941,7 +941,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.9.1"
}
},
"nbformat": 4,

@ -16,7 +16,7 @@
"id": "4b47436a",
"metadata": {},
"source": [
"# How to route execution within a chain\n",
"# How to route between sub-chains\n",
"\n",
":::info Prerequisites\n",
"\n",

@ -30,7 +30,7 @@
"\n",
"The resulting [`RunnableSequence`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSequence.html) is itself a runnable, which means it can be invoked, streamed, or further chained just like any other runnable. Advantages of chaining runnables in this way are efficient streaming (the sequence will stream output as soon as it is available), and debugging and tracing with tools like [LangSmith](/docs/how_to/debugging).\n",
"\n",
"## The pipe operator\n",
"## The pipe operator: `|`\n",
"\n",
"To show off how this works, let's go through an example. We'll walk through a common pattern in LangChain: using a [prompt template](/docs/how_to#prompt-templates) to format input into a [chat model](/docs/how_to#chat-models), and finally converting the chat message output into a string with an [output parser](/docs/how_to#output-parsers).\n",
"\n",
@ -230,11 +230,28 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You now know some ways to chain two runnables together.\n",
"Or the abbreviated:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"composed_chain_with_pipe = RunnableParallel({\"joke\": chain}).pipe(\n",
" analysis_prompt, model, StrOutputParser()\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Related\n",
"\n",
"To learn more, see the other how-to guides on runnables in this section."
"- [Streaming](/docs/how_to/streaming/): Check out the streaming guide to understand the streaming behavior of a chain\n",
"- "
]
}
],

@ -1524,7 +1524,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.9.1"
}
},
"nbformat": 4,

@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to use a chat model to call tools\n",
"# How to use a model to call tools\n",
"\n",
":::info Prerequisites\n",
"\n",
@ -705,7 +705,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -1,450 +0,0 @@
{
"cells": [
{
"cell_type": "raw",
"id": "7f219241",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 4\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "15780a65",
"metadata": {},
"source": [
"# How to use LangChain tools\n",
"\n",
"Tools are interfaces that an agent, chain, or LLM can use to interact with the world.\n",
"They combine a few things:\n",
"\n",
"1. The name of the tool\n",
"2. A description of what the tool is\n",
"3. JSON schema of what the inputs to the tool are\n",
"4. The function to call \n",
"5. Whether the result of a tool should be returned directly to the user\n",
"\n",
"It is useful to have all this information because this information can be used to build action-taking systems! The name, description, and JSON schema can be used to prompt the LLM so it knows how to specify what action to take, and then the function to call is equivalent to taking that action.\n",
"\n",
"The simpler the input to a tool is, the easier it is for an LLM to be able to use it.\n",
"Many agents will only work with tools that have a single string input.\n",
"For a list of agent types and which ones work with more complicated inputs, please see [this documentation](https://python.langchain.com/v0.1/docs/modules/agents/agent_types/)\n",
"\n",
"Importantly, the name, description, and JSON schema (if used) are all used in the prompt. Therefore, it is really important that they are clear and describe exactly how the tool should be used. You may need to change the default name, description, or JSON schema if the LLM is not understanding how to use the tool.\n",
"\n",
"## Default Tools\n",
"\n",
"Let's take a look at how to work with tools. To do this, we'll work with a built in tool."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "19297004",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.tools import WikipediaQueryRun\n",
"from langchain_community.utilities import WikipediaAPIWrapper"
]
},
{
"cell_type": "markdown",
"id": "1098e51a",
"metadata": {},
"source": [
"Now we initialize the tool. This is where we can configure it as we please"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "27a48655",
"metadata": {},
"outputs": [],
"source": [
"api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)\n",
"tool = WikipediaQueryRun(api_wrapper=api_wrapper)"
]
},
{
"cell_type": "markdown",
"id": "7db48439",
"metadata": {},
"source": [
"This is the default name"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "50f1ece1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Wikipedia'"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.name"
]
},
{
"cell_type": "markdown",
"id": "075499b1",
"metadata": {},
"source": [
"This is the default description"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "e9be09e2",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.description"
]
},
{
"cell_type": "markdown",
"id": "89c86b00",
"metadata": {},
"source": [
"This is the default JSON schema of the inputs"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "963a2e8c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'query': {'title': 'Query', 'type': 'string'}}"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.args"
]
},
{
"cell_type": "markdown",
"id": "5c467a35",
"metadata": {},
"source": [
"We can see if the tool should return directly to the user"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "039334b3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"False"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.return_direct"
]
},
{
"cell_type": "markdown",
"id": "fc421b02",
"metadata": {},
"source": [
"We can call this tool with a dictionary input"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "6669a13c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Page: LangChain\\nSummary: LangChain is a framework designed to simplify the creation of applications '"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.run({\"query\": \"langchain\"})"
]
},
{
"cell_type": "markdown",
"id": "587d6a58",
"metadata": {},
"source": [
"We can also call this tool with a single string input. \n",
"We can do this because this tool expects only a single input.\n",
"If it required multiple inputs, we would not be able to do that."
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "8cb23935",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Page: LangChain\\nSummary: LangChain is a framework designed to simplify the creation of applications '"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.run(\"langchain\")"
]
},
{
"cell_type": "markdown",
"id": "19eee1d5",
"metadata": {},
"source": [
"## Customizing Default Tools\n",
"We can also modify the built in name, description, and JSON schema of the arguments.\n",
"\n",
"When defining the JSON schema of the arguments, it is important that the inputs remain the same as the function, so you shouldn't change that. But you can define custom descriptions for each input easily."
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "599c4da7",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class WikiInputs(BaseModel):\n",
" \"\"\"Inputs to the wikipedia tool.\"\"\"\n",
"\n",
" query: str = Field(\n",
" description=\"query to look up in Wikipedia, should be 3 or less words\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "6bde63e1",
"metadata": {},
"outputs": [],
"source": [
"tool = WikipediaQueryRun(\n",
" name=\"wiki-tool\",\n",
" description=\"look up things in wikipedia\",\n",
" args_schema=WikiInputs,\n",
" api_wrapper=api_wrapper,\n",
" return_direct=True,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "eeaa1d9a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'wiki-tool'"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.name"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "7599d88c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'look up things in wikipedia'"
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.description"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "80042cb1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'query': {'title': 'Query',\n",
" 'description': 'query to look up in Wikipedia, should be 3 or less words',\n",
" 'type': 'string'}}"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.args"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "8455fb9e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.return_direct"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "86f731a8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Page: LangChain\\nSummary: LangChain is a framework designed to simplify the creation of applications '"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.run(\"langchain\")"
]
},
{
"cell_type": "markdown",
"id": "c5b8b6bc",
"metadata": {},
"source": [
"## More Topics\n",
"\n",
"This was a quick introduction to tools in LangChain, but there is a lot more to learn\n",
"\n",
"**[Built-In Tools](/docs/integrations/tools/)**: For a list of all built-in tools, see [this page](/docs/integrations/tools/)\n",
" \n",
"**[Custom Tools](/docs/how_to/custom_tools)**: Although built-in tools are useful, it's highly likely that you'll have to define your own tools. See [this guide](/docs/how_to/custom_tools) for instructions on how to do so.\n",
" \n",
"**[Toolkits](/docs/how_to/toolkits)**: Toolkits are collections of tools that work well together. For a more in depth description as well as a list of all built-in toolkits, see [this page](/docs/how_to/toolkits)\n",
"\n",
"**[Tools as OpenAI Functions](/docs/how_to/tools_as_openai_functions/)**: Tools are very similar to OpenAI Functions, and can easily be converted to that format. See [this notebook](/docs/how_to/tools_as_openai_functions) for instructions on how to do that.\n",
"\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -0,0 +1,236 @@
{
"cells": [
{
"cell_type": "raw",
"id": "7f219241",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 4\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "e8f68de0-7df7-4bfd-9207-3258431426ef",
"metadata": {},
"source": [
"# How to use built-in tools and toolkits\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [LangChain Toolkits](/docs/concepts/#tools)\n",
"\n",
":::\n",
"\n",
"## Tools\n",
"\n",
"LangChain has a large collection of 3rd party tools. Please visit [Tool Integrations](/docs/integrations/tools/) for a list of the available tools.\n",
"\n",
":::{.callout-important}\n",
"\n",
"When using 3rd party tools, make sure that you understand how the tool works, what permissions\n",
"it has. Read over its documentation and check if anything is required from you\n",
"from a security point of view. Please see our [security](https://python.langchain.com/v0.1/docs/security/) \n",
"guidelines for more information.\n",
"\n",
":::\n",
"\n",
"Let's try out the [Wikipedia integration](/docs/integrations/tools/wikipedia/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "84f70856-b865-4658-9930-7577fb4712ce",
"metadata": {},
"outputs": [],
"source": [
"!pip install -qU wikipedia"
]
},
{
"cell_type": "code",
"execution_count": 51,
"id": "b4eaed85-c5a6-4ba9-b401-40258b0131c2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Page: LangChain\n",
"Summary: LangChain is a framework designed to simplify the creation of applications \n"
]
}
],
"source": [
"from langchain_community.tools import WikipediaQueryRun\n",
"from langchain_community.utilities import WikipediaAPIWrapper\n",
"\n",
"api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)\n",
"tool = WikipediaQueryRun(api_wrapper=api_wrapper)\n",
"\n",
"print(tool.invoke({\"query\": \"langchain\"}))"
]
},
{
"cell_type": "markdown",
"id": "cb870984-52d5-4453-be35-7072a08c6c14",
"metadata": {},
"source": [
"The tool has the following defaults associated with it:"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "7f094f01-2e98-4947-acc4-0846963a96e0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Name: wiki-tool\n",
"Description: look up things in wikipedia\n",
"args schema: {'query': {'title': 'Query', 'description': 'query to look up in Wikipedia, should be 3 or less words', 'type': 'string'}}\n",
"returns directly?: True\n"
]
}
],
"source": [
"print(f\"Name: {tool.name}\")\n",
"print(f\"Description: {tool.description}\")\n",
"print(f\"args schema: {tool.args}\")\n",
"print(f\"returns directly?: {tool.return_direct}\")"
]
},
{
"cell_type": "markdown",
"id": "19eee1d5",
"metadata": {},
"source": [
"## Customizing Default Tools\n",
"We can also modify the built in name, description, and JSON schema of the arguments.\n",
"\n",
"When defining the JSON schema of the arguments, it is important that the inputs remain the same as the function, so you shouldn't change that. But you can define custom descriptions for each input easily."
]
},
{
"cell_type": "code",
"execution_count": 56,
"id": "1365784c-e666-41c8-a1bb-e50f822b5936",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Page: LangChain\n",
"Summary: LangChain is a framework designed to simplify the creation of applications \n"
]
}
],
"source": [
"from langchain_community.tools import WikipediaQueryRun\n",
"from langchain_community.utilities import WikipediaAPIWrapper\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class WikiInputs(BaseModel):\n",
" \"\"\"Inputs to the wikipedia tool.\"\"\"\n",
"\n",
" query: str = Field(\n",
" description=\"query to look up in Wikipedia, should be 3 or less words\"\n",
" )\n",
"\n",
"\n",
"tool = WikipediaQueryRun(\n",
" name=\"wiki-tool\",\n",
" description=\"look up things in wikipedia\",\n",
" args_schema=WikiInputs,\n",
" api_wrapper=api_wrapper,\n",
" return_direct=True,\n",
")\n",
"\n",
"print(tool.run(\"langchain\"))"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "6e8850d6-6840-443e-a2be-adf64b30975c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Name: wiki-tool\n",
"Description: look up things in wikipedia\n",
"args schema: {'query': {'title': 'Query', 'description': 'query to look up in Wikipedia, should be 3 or less words', 'type': 'string'}}\n",
"returns directly?: True\n"
]
}
],
"source": [
"print(f\"Name: {tool.name}\")\n",
"print(f\"Description: {tool.description}\")\n",
"print(f\"args schema: {tool.args}\")\n",
"print(f\"returns directly?: {tool.return_direct}\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "acf0c2f7-ddc6-4633-8cef-59f234321e5c",
"metadata": {},
"source": [
"## How to use built-in toolkits\n",
"\n",
"Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.\n",
"\n",
"For a complete list of available ready-made toolkits, visit [Integrations](/docs/integrations/toolkits/).\n",
"\n",
"All Toolkits expose a `get_tools` method which returns a list of tools.\n",
"\n",
"You're usually meant to use them this way:\n",
"\n",
"```python\n",
"# Initialize a toolkit\n",
"toolkit = ExampleTookit(...)\n",
"\n",
"# Get list of tools\n",
"tools = toolkit.get_tools()\n",
"```"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -335,7 +335,7 @@
"id": "616f9714-5b18-4eed-b88a-d38e4cb1de99",
"metadata": {},
"source": [
"Agents are also great because they make it easy to use multiple tools. To learn how to build Chains that use multiple tools, check out the [Chains with multiple tools](/docs/how_to/tools_multiple) page."
"Agents are also great because they make it easy to use multiple tools."
]
},
{
@ -457,21 +457,6 @@
"source": [
"Check out the [LangSmith trace here](https://smith.langchain.com/public/eeeb27a4-a2f8-4f06-a3af-9c983f76146c/r)."
]
},
{
"cell_type": "markdown",
"id": "b0e4b7f4-58ce-4ca0-a986-d05a436a7ccf",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"Here we've gone over the basic ways to use Tools with Chains and Agents. We recommend the following sections to explore next:\n",
"\n",
"- [Agents](/docs/tutorials/agents): Everything related to Agents.\n",
"- [Choosing between multiple tools](/docs/how_to/tools_multiple): How to make tool chains that select from multiple tools.\n",
"- [Prompting for tool use](/docs/how_to/tools_prompting): How to make tool chains that prompt models directly, without using function-calling APIs.\n",
"- [Parallel tool use](/docs/how_to/tools_parallel): How to make tool chains that invoke multiple tools at once."
]
}
],
"metadata": {
@ -490,7 +475,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -7,7 +7,16 @@
"source": [
"# How to add a human-in-the-loop for tools\n",
"\n",
"There are certain tools that we don't trust a model to execute on its own. One thing we can do in such situations is require human approval before the tool is invoked."
"There are certain tools that we don't trust a model to execute on its own. One thing we can do in such situations is require human approval before the tool is invoked.\n",
"\n",
":::{.callout-info}\n",
"\n",
"This how-to guide shows a simple way to add human-in-the-loop for code running in a jupyter notebook or in a terminal.\n",
"\n",
"To build a production application, you will need to do more work to keep track of application state appropriately.\n",
"\n",
"We recommend using `langgraph` for powering such a capability. For more details, please see this [guide](https://langchain-ai.github.io/langgraph/how-tos/human-in-the-loop/).\n",
":::\n"
]
},
{
@ -40,7 +49,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "2bed0ccf-20cc-4fd3-9947-55471dd8c4da",
"metadata": {},
"outputs": [],
@ -55,13 +64,19 @@
},
{
"cell_type": "markdown",
"id": "43721981-4595-4721-bea0-5c67696426d3",
"id": "7ecd5d7e-7c3c-4180-8958-7db2c1e43564",
"metadata": {},
"source": [
"## Chain\n",
"\n",
"Suppose we have the following (dummy) tools and tool-calling chain:\n",
"\n",
"Let's create a few simple (dummy) tools and a tool-calling chain:"
]
},
{
"cell_type": "markdown",
"id": "43721981-4595-4721-bea0-5c67696426d3",
"metadata": {},
"source": [
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
@ -71,13 +86,13 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "e0ff02ac-e750-493b-9b09-4578711a6726",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"# | outout: false\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
@ -86,7 +101,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "0221fdfd-2a18-4449-a123-e6b0b15bb3d9",
"metadata": {},
"outputs": [
@ -95,17 +110,16 @@
"text/plain": [
"[{'name': 'count_emails',\n",
" 'args': {'last_n_days': 5},\n",
" 'id': 'toolu_012VHuh7vk5dVNct5SgZj3gh',\n",
" 'id': 'toolu_01QYZdJ4yPiqsdeENWHqioFW',\n",
" 'output': 10}]"
]
},
"execution_count": 4,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from operator import itemgetter\n",
"from typing import Dict, List\n",
"\n",
"from langchain_core.messages import AIMessage\n",
@ -149,12 +163,14 @@
"source": [
"## Adding human approval\n",
"\n",
"We can add a simple human approval step to our tool_chain function:"
"Let's add a step in the chain that will ask a person to approve or reject the tall call request.\n",
"\n",
"On rejection, the step will raise an exception which will stop execution of the rest of the chain."
]
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 12,
"id": "341fb055-0315-47bc-8f72-ed6103d2981f",
"metadata": {},
"outputs": [],
@ -162,23 +178,35 @@
"import json\n",
"\n",
"\n",
"def human_approval(msg: AIMessage) -> Runnable:\n",
"class NotApproved(Exception):\n",
" \"\"\"Custom exception.\"\"\"\n",
"\n",
"\n",
"def human_approval(msg: AIMessage) -> AIMessage:\n",
" \"\"\"Responsible for passing through its input or raising an exception.\n",
"\n",
" Args:\n",
" msg: output from the chat model\n",
"\n",
" Returns:\n",
" msg: original output from the msg\n",
" \"\"\"\n",
" tool_strs = \"\\n\\n\".join(\n",
" json.dumps(tool_call, indent=2) for tool_call in msg.tool_calls\n",
" )\n",
" input_msg = (\n",
" f\"Do you approve of the following tool invocations\\n\\n{tool_strs}\\n\\n\"\n",
" \"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\"\n",
" \"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\\n >>>\"\n",
" )\n",
" resp = input(input_msg)\n",
" if resp.lower() not in (\"yes\", \"y\"):\n",
" raise ValueError(f\"Tool invocations not approved:\\n\\n{tool_strs}\")\n",
" raise NotApproved(f\"Tool invocations not approved:\\n\\n{tool_strs}\")\n",
" return msg"
]
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 13,
"id": "25dca07b-56ca-4b94-9955-d4f3e9895e03",
"metadata": {},
"outputs": [
@ -193,10 +221,11 @@
" \"args\": {\n",
" \"last_n_days\": 5\n",
" },\n",
" \"id\": \"toolu_01LCpjpFxrRspygDscnHYyPm\"\n",
" \"id\": \"toolu_01WbD8XeMoQaRFtsZezfsHor\"\n",
"}\n",
"\n",
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. yes\n"
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n",
" >>> yes\n"
]
},
{
@ -204,11 +233,11 @@
"text/plain": [
"[{'name': 'count_emails',\n",
" 'args': {'last_n_days': 5},\n",
" 'id': 'toolu_01LCpjpFxrRspygDscnHYyPm',\n",
" 'id': 'toolu_01WbD8XeMoQaRFtsZezfsHor',\n",
" 'output': 10}]"
]
},
"execution_count": 10,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@ -220,7 +249,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 14,
"id": "f558f2cd-847b-4ef9-a770-3961082b540c",
"metadata": {},
"outputs": [
@ -233,45 +262,41 @@
"{\n",
" \"name\": \"send_email\",\n",
" \"args\": {\n",
" \"message\": \"What's up homie\",\n",
" \"recipient\": \"sally@gmail.com\"\n",
" \"recipient\": \"sally@gmail.com\",\n",
" \"message\": \"What's up homie\"\n",
" },\n",
" \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n",
" \"id\": \"toolu_014XccHFzBiVcc9GV1harV9U\"\n",
"}\n",
"\n",
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. no\n"
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n",
" >>> no\n"
]
},
{
"ename": "ValueError",
"evalue": "Tool invocations not approved:\n\n{\n \"name\": \"send_email\",\n \"args\": {\n \"message\": \"What's up homie\",\n \"recipient\": \"sally@gmail.com\"\n },\n \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n}",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[11], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mSend sally@gmail.com an email saying \u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms up homie\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:2499\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 2497\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2498\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 2499\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2500\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2501\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 2502\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2503\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2504\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2505\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2506\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2507\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:3961\u001b[0m, in \u001b[0;36mRunnableLambda.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 3959\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Invoke this runnable synchronously.\"\"\"\u001b[39;00m\n\u001b[1;32m 3960\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfunc\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m-> 3961\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_with_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3962\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_invoke\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3963\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3964\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_config\u001b[49m\u001b[43m(\u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3965\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3966\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3967\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 3968\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[1;32m 3969\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot invoke a coroutine function synchronously.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3970\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUse `ainvoke` instead.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3971\u001b[0m )\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:1625\u001b[0m, in \u001b[0;36mRunnable._call_with_config\u001b[0;34m(self, func, input, config, run_type, **kwargs)\u001b[0m\n\u001b[1;32m 1621\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 1622\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(var_child_runnable_config\u001b[38;5;241m.\u001b[39mset, child_config)\n\u001b[1;32m 1623\u001b[0m output \u001b[38;5;241m=\u001b[39m cast(\n\u001b[1;32m 1624\u001b[0m Output,\n\u001b[0;32m-> 1625\u001b[0m \u001b[43mcontext\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1626\u001b[0m \u001b[43m \u001b[49m\u001b[43mcall_func_with_variable_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1627\u001b[0m \u001b[43m \u001b[49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1628\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1629\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1630\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1631\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1632\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m,\n\u001b[1;32m 1633\u001b[0m )\n\u001b[1;32m 1634\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 1635\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/config.py:347\u001b[0m, in \u001b[0;36mcall_func_with_variable_args\u001b[0;34m(func, input, config, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m accepts_run_manager(func):\n\u001b[1;32m 346\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n\u001b[0;32m--> 347\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:3835\u001b[0m, in \u001b[0;36mRunnableLambda._invoke\u001b[0;34m(self, input, run_manager, config, **kwargs)\u001b[0m\n\u001b[1;32m 3833\u001b[0m output \u001b[38;5;241m=\u001b[39m chunk\n\u001b[1;32m 3834\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 3835\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43mcall_func_with_variable_args\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3836\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 3837\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3838\u001b[0m \u001b[38;5;66;03m# If the output is a runnable, invoke it\u001b[39;00m\n\u001b[1;32m 3839\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(output, Runnable):\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/config.py:347\u001b[0m, in \u001b[0;36mcall_func_with_variable_args\u001b[0;34m(func, input, config, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m accepts_run_manager(func):\n\u001b[1;32m 346\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n\u001b[0;32m--> 347\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"Cell \u001b[0;32mIn[9], line 14\u001b[0m, in \u001b[0;36mhuman_approval\u001b[0;34m(msg)\u001b[0m\n\u001b[1;32m 12\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28minput\u001b[39m(input_msg)\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m resp\u001b[38;5;241m.\u001b[39mlower() \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m (\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myes\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124my\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m---> 14\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool invocations not approved:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mtool_strs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m msg\n",
"\u001b[0;31mValueError\u001b[0m: Tool invocations not approved:\n\n{\n \"name\": \"send_email\",\n \"args\": {\n \"message\": \"What's up homie\",\n \"recipient\": \"sally@gmail.com\"\n },\n \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n}"
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Tool invocations not approved:\n",
"\n",
"{\n",
" \"name\": \"send_email\",\n",
" \"args\": {\n",
" \"recipient\": \"sally@gmail.com\",\n",
" \"message\": \"What's up homie\"\n",
" },\n",
" \"id\": \"toolu_014XccHFzBiVcc9GV1harV9U\"\n",
"}\n"
]
}
],
"source": [
"chain.invoke(\"Send sally@gmail.com an email saying 'What's up homie'\")"
"try:\n",
" chain.invoke(\"Send sally@gmail.com an email saying 'What's up homie'\")\n",
"except NotApproved as e:\n",
" print()\n",
" print(e)"
]
},
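{
"cell_type": "markdown",
"id": "c8a1f7e2-5b3d-4a9e-8c2f-7d6e1b4a9f05",
"metadata": {},
"source": [
"For reference, here is a minimal sketch of the `NotApproved` exception and the `human_approval` step that the `try`/`except` above assumes (an illustration consistent with the traceback shown earlier, not necessarily the exact definitions):\n",
"\n",
"```python\n",
"import json\n",
"\n",
"from langchain_core.messages import AIMessage\n",
"\n",
"\n",
"class NotApproved(Exception):\n",
"    \"\"\"Raised when a human rejects the requested tool invocations.\"\"\"\n",
"\n",
"\n",
"def human_approval(msg: AIMessage) -> AIMessage:\n",
"    \"\"\"Pause the chain and ask a human to approve the pending tool calls.\"\"\"\n",
"    tool_strs = \"\\n\\n\".join(\n",
"        json.dumps(tool_call, indent=2) for tool_call in msg.tool_calls\n",
"    )\n",
"    input_msg = f\"Do you approve of the following tool invocations?\\n\\n{tool_strs}\\n\\n(Y/n): \"\n",
"    resp = input(input_msg)\n",
"    if resp.lower() not in (\"yes\", \"y\"):\n",
"        raise NotApproved(f\"Tool invocations not approved:\\n\\n{tool_strs}\")\n",
"    return msg\n",
"```"
]
},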
{
"cell_type": "code",
"execution_count": null,
"id": "e938d8f1-df93-4726-a465-78e596312246",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@ -290,7 +315,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -1,273 +0,0 @@
{
"cells": [
{
"cell_type": "raw",
"id": "1ea1fe24-fe1e-463b-a52c-79f0ef02328e",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 2\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "95982bf1-7d9d-4dd6-a4ad-9de0719fe17f",
"metadata": {},
"source": [
"# How to use an LLM to choose between multiple tools\n",
"\n",
"In our [Quickstart](/docs/how_to/tool_calling) we went over how to build a Chain that calls a single `multiply` tool. Now let's take a look at how we might augment this chain so that it can pick from a number of tools to call. We'll focus on Chains since [Agents](/docs/tutorials/agents) can route between multiple tools by default."
]
},
{
"cell_type": "markdown",
"id": "3fafec38-443a-42ad-a913-5be7667e3734",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"We'll need to install the following packages for this guide:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78411bf1-0117-4f33-a3d7-f3d77a97bb78",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-core"
]
},
{
"cell_type": "markdown",
"id": "59d08fd0-ddd9-4c74-bcea-a5ca3a86e542",
"metadata": {},
"source": [
"If you'd like to trace your runs in [LangSmith](/docs/langsmith/) uncomment and set the following environment variables:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "4185e74b-0500-4cad-ace0-bac37de466ac",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "d28159f5-b7d0-4385-aa44-4cd1b64507bb",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"Recall we already had a `multiply` tool:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e13ec98c-8521-4d63-b521-caf92da87b70",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(first_int: int, second_int: int) -> int:\n",
" \"\"\"Multiply two integers together.\"\"\"\n",
" return first_int * second_int"
]
},
{
"cell_type": "markdown",
"id": "3de233af-b3bd-4f0c-8b1a-83527143a8db",
"metadata": {},
"source": [
"And now we can add to it an `exponentiate` and `add` tool:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "e93661cd-a2ba-4ada-91ad-baf1b60879ec",
"metadata": {},
"outputs": [],
"source": [
"@tool\n",
"def add(first_int: int, second_int: int) -> int:\n",
" \"Add two integers.\"\n",
" return first_int + second_int\n",
"\n",
"\n",
"@tool\n",
"def exponentiate(base: int, exponent: int) -> int:\n",
" \"Exponentiate the base to the exponent power.\"\n",
" return base**exponent"
]
},
{
"cell_type": "markdown",
"id": "bbea4555-ed10-4a18-b802-e9a3071f132b",
"metadata": {},
"source": [
"The main difference between using one Tool and many is that we can't be sure which Tool the model will invoke upfront, so we cannot hardcode, like we did in the [Quickstart](/docs/how_to/tool_calling), a specific tool into our chain. Instead we'll add `call_tools`, a `RunnableLambda` that takes the output AI message with tools calls and routes to the correct tools.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\"/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "f00f0f3f-8530-4c1d-a26c-d20824e31faf",
"metadata": {},
"outputs": [],
"source": [
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c35359ae-a740-48c5-b5e7-1a377fb25aa2",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"from typing import Dict, List, Union\n",
"\n",
"from langchain_core.messages import AIMessage\n",
"from langchain_core.runnables import (\n",
" Runnable,\n",
" RunnableLambda,\n",
" RunnableMap,\n",
" RunnablePassthrough,\n",
")\n",
"\n",
"tools = [multiply, exponentiate, add]\n",
"llm_with_tools = llm.bind_tools(tools)\n",
"tool_map = {tool.name: tool for tool in tools}\n",
"\n",
"\n",
"def call_tools(msg: AIMessage) -> Runnable:\n",
" \"\"\"Simple sequential tool calling helper.\"\"\"\n",
" tool_map = {tool.name: tool for tool in tools}\n",
" tool_calls = msg.tool_calls.copy()\n",
" for tool_call in tool_calls:\n",
" tool_call[\"output\"] = tool_map[tool_call[\"name\"]].invoke(tool_call[\"args\"])\n",
" return tool_calls\n",
"\n",
"\n",
"chain = llm_with_tools | call_tools"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "ea6dbb32-ec9b-4c70-a90f-a2db93978cf1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'multiply',\n",
" 'args': {'first_int': 23, 'second_int': 7},\n",
" 'id': 'toolu_01Wf8kUs36kxRKLDL8vs7G8q',\n",
" 'output': 161}]"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"What's 23 times 7\")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "b1c6c0f8-6d04-40d4-a40e-8719ca7b27c2",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'add',\n",
" 'args': {'first_int': 1000000, 'second_int': 1000000000},\n",
" 'id': 'toolu_012aK4xZBQg2sXARsFZnqxHh',\n",
" 'output': 1001000000}]"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"add a million plus a billion\")"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "ce76f299-1a4d-421c-afa4-a6346e34285c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'exponentiate',\n",
" 'args': {'base': 37, 'exponent': 3},\n",
" 'id': 'toolu_01VDU6X3ugDb9cpnnmCZFPbC',\n",
" 'output': 50653}]"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"cube thirty-seven\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -1,215 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "95982bf1-7d9d-4dd6-a4ad-9de0719fe17f",
"metadata": {},
"source": [
"# How to call tools in parallel\n",
"\n",
"In the [Chains with multiple tools](/docs/how_to/tools_multiple) guide we saw how to build function-calling chains that select between multiple tools. Some models, like the OpenAI models released in Fall 2023, also support parallel function calling, which allows you to invoke multiple functions (or the same function multiple times) in a single model call. Our previous chain from the multiple tools guides actually already supports this."
]
},
{
"cell_type": "markdown",
"id": "3fafec38-443a-42ad-a913-5be7667e3734",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"We'll need to install the following packages for this guide:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78411bf1-0117-4f33-a3d7-f3d77a97bb78",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-core"
]
},
{
"cell_type": "markdown",
"id": "59d08fd0-ddd9-4c74-bcea-a5ca3a86e542",
"metadata": {},
"source": [
"If you'd like to trace your runs in [LangSmith](/docs/langsmith/) uncomment and set the following environment variables:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4185e74b-0500-4cad-ace0-bac37de466ac",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "d28159f5-b7d0-4385-aa44-4cd1b64507bb",
"metadata": {},
"source": [
"## Tools"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "e13ec98c-8521-4d63-b521-caf92da87b70",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(first_int: int, second_int: int) -> int:\n",
" \"\"\"Multiply two integers together.\"\"\"\n",
" return first_int * second_int\n",
"\n",
"\n",
"@tool\n",
"def add(first_int: int, second_int: int) -> int:\n",
" \"Add two integers.\"\n",
" return first_int + second_int\n",
"\n",
"\n",
"@tool\n",
"def exponentiate(base: int, exponent: int) -> int:\n",
" \"Exponentiate the base to the exponent power.\"\n",
" return base**exponent"
]
},
{
"cell_type": "markdown",
"id": "119d419c-1c61-4e0d-834a-5dabb72f5514",
"metadata": {},
"source": [
"# Chain\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" hideGoogle=\"true\"/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "f67d91d8-cc38-4065-8f80-901e079954dd",
"metadata": {},
"outputs": [],
"source": [
"# | echo: false\n",
"# | output: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "c35359ae-a740-48c5-b5e7-1a377fb25aa2",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"from typing import Dict, List, Union\n",
"\n",
"from langchain_core.messages import AIMessage\n",
"from langchain_core.runnables import (\n",
" Runnable,\n",
" RunnableLambda,\n",
" RunnableMap,\n",
" RunnablePassthrough,\n",
")\n",
"\n",
"tools = [multiply, exponentiate, add]\n",
"llm_with_tools = llm.bind_tools(tools)\n",
"tool_map = {tool.name: tool for tool in tools}\n",
"\n",
"\n",
"def call_tools(msg: AIMessage) -> Runnable:\n",
" \"\"\"Simple sequential tool calling helper.\"\"\"\n",
" tool_map = {tool.name: tool for tool in tools}\n",
" tool_calls = msg.tool_calls.copy()\n",
" for tool_call in tool_calls:\n",
" tool_call[\"output\"] = tool_map[tool_call[\"name\"]].invoke(tool_call[\"args\"])\n",
" return tool_calls\n",
"\n",
"\n",
"chain = llm_with_tools | call_tools"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "ea6dbb32-ec9b-4c70-a90f-a2db93978cf1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'multiply',\n",
" 'args': {'first_int': 23, 'second_int': 7},\n",
" 'id': 'call_22tgOrsVLyLMsl2RLbUhtycw',\n",
" 'output': 161},\n",
" {'name': 'multiply',\n",
" 'args': {'first_int': 5, 'second_int': 18},\n",
" 'id': 'call_EbKHEG3TjqBhEwb7aoxUtgzf',\n",
" 'output': 90},\n",
" {'name': 'add',\n",
" 'args': {'first_int': 1000000, 'second_int': 1000000000},\n",
" 'id': 'call_LUhu2IT3vINxlTc5fCVY6Nhi',\n",
" 'output': 1001000000},\n",
" {'name': 'exponentiate',\n",
" 'args': {'base': 37, 'exponent': 3},\n",
" 'id': 'call_bnCZIXelOKkmcyd4uGXId9Ct',\n",
" 'output': 50653}]"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\n",
" \"What's 23 times 7, and what's five times 18 and add a million plus a billion and cube thirty-seven\"\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -15,9 +15,30 @@
"id": "14b94240",
"metadata": {},
"source": [
"# How to use tools without function calling\n",
"# How to add ad-hoc tool calling capability to LLMs and Chat Models\n",
"\n",
"In this guide we'll build a Chain that does not rely on any special model APIs (like tool calling, which we showed in the [Quickstart](/docs/how_to/tool_calling)) and instead just prompts the model directly to invoke tools."
":::{.callout-caution}\n",
"\n",
"Some models have been fine-tuned for tool calling and provide a dedicated API for tool calling. Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling. Please see the [how to use a chat model to call tools](/docs/how_to/tool_calling/) guide for more information.\n",
"\n",
":::\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [Function/tool calling](https://python.langchain.com/v0.2/docs/concepts/#functiontool-calling)\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LLMs](/docs/concepts/#llms)\n",
"\n",
":::\n",
"\n",
"In this guide, we'll see how to add **ad-hoc** tool calling support to a chat model. This is an alternative method to invoke tools if you're using a model that does not natively support [tool calling](/docs/how_to/tool_calling/).\n",
"\n",
"We'll do this by simply writing a prompt that will get the model to invoke the appropriate tools. Here's a diagram of the logic:\n",
"\n",
"![chain](../../static/img/tool_chain.svg)"
]
},
{
@ -37,101 +58,134 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
"%pip install --upgrade --quiet langchain langchain-community"
]
},
{
"cell_type": "markdown",
"id": "5e727d22-f861-4eee-882a-688f8efc885e",
"id": "897bc01e-cc2b-4400-8a64-db4aa56085d3",
"metadata": {},
"source": [
"And set these environment variables:"
"If you'd like to use LangSmith, uncomment the below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "527ef906-0104-4872-b4e5-f371cf73feba",
"execution_count": 26,
"id": "5efb4170-b95b-4d29-8f57-09509f3ba6df",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# If you'd like to use LangSmith, uncomment the below:\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "68946881",
"id": "7ec6409b-21e5-4d0a-8a46-c4ef0b055dd3",
"metadata": {},
"source": [
"## Create a tool\n",
"You can select any of the given models for this how-to guide. Keep in mind that most of these models already [support native tool calling](/docs/integrations/chat/), so using the prompting strategy shown here doesn't make sense for these models, and instead you should follow the [how to use a chat model to call tools](/docs/how_to/tool_calling/) guide.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"First, we need to create a tool to call. For this example, we will create a custom tool from a function. For more information on all details related to creating custom tools, please see [this guide](/docs/how_to/custom_tools)."
"<ChatModelTabs openaiParams={`model=\"gpt-4\"`} />\n",
"```\n",
"\n",
"To illustrate the idea, we'll use `phi3` via Ollama, which does **NOT** have native support for tool calling. If you'd like to use `Ollama` as well follow [these instructions](/docs/integrations/chat/ollama/)."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "90187d07",
"execution_count": 24,
"id": "424be968-2806-4d1a-a6aa-5499ae20fac5",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"from langchain_community.llms import Ollama\n",
"\n",
"model = Ollama(model=\"phi3\")"
]
},
{
"cell_type": "markdown",
"id": "68946881",
"metadata": {},
"source": [
"## Create a tool\n",
"\n",
"@tool\n",
"def multiply(first_int: int, second_int: int) -> int:\n",
" \"\"\"Multiply two integers together.\"\"\"\n",
" return first_int * second_int"
"First, let's create an `add` and `multiply` tools. For more information on creating custom tools, please see [this guide](/docs/how_to/custom_tools)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d7009e1a",
"execution_count": 4,
"id": "4548e6fa-0f9b-4d7a-8fa5-66cec0350e5f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--\n",
"multiply\n",
"multiply(first_int: int, second_int: int) -> int - Multiply two integers together.\n",
"{'first_int': {'title': 'First Int', 'type': 'integer'}, 'second_int': {'title': 'Second Int', 'type': 'integer'}}\n"
"Multiply two numbers together.\n",
"{'x': {'title': 'X', 'type': 'number'}, 'y': {'title': 'Y', 'type': 'number'}}\n",
"--\n",
"add\n",
"Add two numbers.\n",
"{'x': {'title': 'X', 'type': 'integer'}, 'y': {'title': 'Y', 'type': 'integer'}}\n"
]
}
],
"source": [
"print(multiply.name)\n",
"print(multiply.description)\n",
"print(multiply.args)"
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(x: float, y: float) -> float:\n",
" \"\"\"Multiply two numbers together.\"\"\"\n",
" return x * y\n",
"\n",
"\n",
"@tool\n",
"def add(x: int, y: int) -> int:\n",
" \"Add two numbers.\"\n",
" return x + y\n",
"\n",
"\n",
"tools = [multiply, add]\n",
"\n",
"# Let's inspect the tools\n",
"for t in tools:\n",
" print(\"--\")\n",
" print(t.name)\n",
" print(t.description)\n",
" print(t.args)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "be77e780",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"20"
"20.0"
]
},
"execution_count": 3,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"multiply.invoke({\"first_int\": 4, \"second_int\": 5})"
"multiply.invoke({\"x\": 4, \"y\": 5})"
]
},
{
@ -146,48 +200,85 @@
},
{
"cell_type": "code",
"execution_count": 4,
"id": "c64818f0-9364-423c-922e-bdfb8f01e726",
"execution_count": 6,
"id": "2063b564-25ca-4729-a45f-ba4633175b04",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'multiply: multiply(first_int: int, second_int: int) -> int - Multiply two integers together.'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"multiply(x: float, y: float) -> float - Multiply two numbers together.\n",
"add(x: int, y: int) -> int - Add two numbers.\n"
]
}
],
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain_core.output_parsers import JsonOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.tools import render_text_description\n",
"\n",
"rendered_tools = render_text_description([multiply])\n",
"rendered_tools"
"rendered_tools = render_text_description(tools)\n",
"print(rendered_tools)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "63552d4d-8bd6-4aca-8805-56e236f6552d",
"execution_count": 17,
"id": "f02f1dce-76e7-4ca9-9bac-5af496131fe1",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"system_prompt = f\"\"\"You are an assistant that has access to the following set of tools. Here are the names and descriptions for each tool:\n",
"system_prompt = f\"\"\"\\\n",
"You are an assistant that has access to the following set of tools. \n",
"Here are the names and descriptions for each tool:\n",
"\n",
"{rendered_tools}\n",
"\n",
"Given the user input, return the name and input of the tool to use. Return your response as a JSON blob with 'name' and 'arguments' keys.\"\"\"\n",
"Given the user input, return the name and input of the tool to use. \n",
"Return your response as a JSON blob with 'name' and 'arguments' keys.\n",
"\n",
"The `arguments` should be a dictionary, with keys corresponding \n",
"to the argument names and the values corresponding to the requested values.\n",
"\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", system_prompt), (\"user\", \"{input}\")]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "f8623e03-60eb-4439-b57b-ecbcebc61b58",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"name\": \"add\",\n",
" \"arguments\": {\n",
" \"x\": 3,\n",
" \"y\": 1132\n",
" }\n",
"}\n"
]
}
],
"source": [
"chain = prompt | model\n",
"message = chain.invoke({\"input\": \"what's 3 plus 1132\"})\n",
"\n",
"# Let's take a look at the output from the model\n",
"# if the model is an LLM (not a chat model), the output will be a string.\n",
"if isinstance(message, str):\n",
" print(message)\n",
"else: # Otherwise it's a chat model\n",
" print(message.content)"
]
},
{
"cell_type": "markdown",
"id": "14df2cd5-b6fa-4b10-892d-e8692c7931e5",
@ -200,156 +291,153 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 19,
"id": "f129f5bd-127c-4c95-8f34-8f437da7ca8f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'name': 'multiply', 'arguments': {'first_int': 13, 'second_int': 4}}"
"{'name': 'multiply', 'arguments': {'x': 13.0, 'y': 4.0}}"
]
},
"execution_count": 7,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.output_parsers import JsonOutputParser\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
"chain = prompt | model | JsonOutputParser()\n",
"chain.invoke({\"input\": \"what's thirteen times 4\"})"
]
},
{
"cell_type": "markdown",
"id": "8e29dd4c-8eb5-457f-92d1-8add076404dc",
"id": "e1f08255-f146-4f4a-be43-5c21c1d3ae83",
"metadata": {},
"source": [
"## Invoking the tool\n",
":::{.callout-important}\n",
"\n",
"We can invoke the tool as part of the chain by passing along the model-generated \"arguments\" to it:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "0555b384-fde6-4404-86e0-7ea199003d58",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"52"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from operator import itemgetter\n",
"🎉 Amazing! 🎉 We now instructed our model on how to **request** that a tool be invoked.\n",
"\n",
"chain = prompt | model | JsonOutputParser() | itemgetter(\"arguments\") | multiply\n",
"chain.invoke({\"input\": \"what's thirteen times 4\"})"
"Now, let's create some logic to actually run the tool!\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "8d60b2cb-6ce0-48fc-8d18-d2337161a53d",
"id": "8e29dd4c-8eb5-457f-92d1-8add076404dc",
"metadata": {},
"source": [
"## Choosing from multiple tools\n",
"## Invoking the tool 🏃\n",
"\n",
"Now that the model can request that a tool be invoked, we need to write a function that can actually invoke \n",
"the tool.\n",
"\n",
"Suppose we have multiple tools we want the chain to be able to choose from:"
"The function will select the appropriate tool by name, and pass to it the arguments chosen by the model."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "95c86d32-ee45-4c87-a28c-14eff19b49e9",
"execution_count": 20,
"id": "faee95e0-4095-4310-991f-9e9465c6738e",
"metadata": {},
"outputs": [],
"source": [
"@tool\n",
"def add(first_int: int, second_int: int) -> int:\n",
" \"Add two integers.\"\n",
" return first_int + second_int\n",
"from typing import Any, Dict, Optional, TypedDict\n",
"\n",
"from langchain_core.runnables import RunnableConfig\n",
"\n",
"@tool\n",
"def exponentiate(base: int, exponent: int) -> int:\n",
" \"Exponentiate the base to the exponent power.\"\n",
" return base**exponent"
"\n",
"class ToolCallRequest(TypedDict):\n",
" \"\"\"A typed dict that shows the inputs into the invoke_tool function.\"\"\"\n",
"\n",
" name: str\n",
" arguments: Dict[str, Any]\n",
"\n",
"\n",
"def invoke_tool(\n",
" tool_call_request: ToolCallRequest, config: Optional[RunnableConfig] = None\n",
"):\n",
" \"\"\"A function that we can use the perform a tool invocation.\n",
"\n",
" Args:\n",
" tool_call_request: a dict that contains the keys name and arguments.\n",
" The name must match the name of a tool that exists.\n",
" The arguments are the arguments to that tool.\n",
" config: This is configuration information that LangChain uses that contains\n",
" things like callbacks, metadata, etc.See LCEL documentation about RunnableConfig.\n",
"\n",
" Returns:\n",
" output from the requested tool\n",
" \"\"\"\n",
" tool_name_to_tool = {tool.name: tool for tool in tools}\n",
" name = tool_call_request[\"name\"]\n",
" requested_tool = tool_name_to_tool[name]\n",
" return requested_tool.invoke(tool_call_request[\"arguments\"], config=config)"
]
},
{
"cell_type": "markdown",
"id": "748405ff-4c85-4bd7-82e1-30458b5a4106",
"id": "f4957532-9e0c-47f6-bb62-0fd789ac1d3e",
"metadata": {},
"source": [
"With function calling, we can do this like so:"
"Let's test this out 🧪!"
]
},
{
"cell_type": "markdown",
"id": "eb3aa89e-40e1-45ec-b1f3-ab28cfc8e42d",
"cell_type": "code",
"execution_count": 21,
"id": "d0ea3b2a-8fb2-4016-83c8-a5d3e78fedbc",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"15.0"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"If we want to run the model selected tool, we can do so using a function that returns the tool based on the model output. Specifically, our function will action return it's own subchain that gets the \"arguments\" part of the model output and passes it to the chosen tool:"
"invoke_tool({\"name\": \"multiply\", \"arguments\": {\"x\": 3, \"y\": 5}})"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "db254773-5b8e-43d0-aabe-c21566c154cd",
"cell_type": "markdown",
"id": "715af6e1-935d-4bc0-a3d2-646ecf8a329b",
"metadata": {},
"outputs": [],
"source": [
"tools = [add, exponentiate, multiply]\n",
"## Let's put it together\n",
"\n",
"\n",
"def tool_chain(model_output):\n",
" tool_map = {tool.name: tool for tool in tools}\n",
" chosen_tool = tool_map[model_output[\"name\"]]\n",
" return itemgetter(\"arguments\") | chosen_tool"
"Let's put it together into a chain that creates a calculator with add and multiplication capabilities."
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "ad9f5cff-b86a-45fc-9ce4-b0aa9025a378",
"execution_count": 22,
"id": "0555b384-fde6-4404-86e0-7ea199003d58",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1135"
"53.83784653"
]
},
"execution_count": 14,
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rendered_tools = render_text_description(tools)\n",
"system_prompt = f\"\"\"You are an assistant that has access to the following set of tools. Here are the names and descriptions for each tool:\n",
"\n",
"{rendered_tools}\n",
"\n",
"Given the user input, return the name and input of the tool to use. Return your response as a JSON blob with 'name' and 'arguments' keys.\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", system_prompt), (\"user\", \"{input}\")]\n",
")\n",
"\n",
"chain = prompt | model | JsonOutputParser() | tool_chain\n",
"chain.invoke({\"input\": \"what's 3 plus 1132\"})"
"chain = prompt | model | JsonOutputParser() | invoke_tool\n",
"chain.invoke({\"input\": \"what's thirteen times 4.14137281\"})"
]
},
{
@ -364,19 +452,19 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 23,
"id": "45404406-859d-4caa-8b9d-5838162c80a0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'name': 'add',\n",
" 'arguments': {'first_int': 3, 'second_int': 1132},\n",
" 'output': 1135}"
"{'name': 'multiply',\n",
" 'arguments': {'x': 13, 'y': 4.14137281},\n",
" 'output': 53.83784653}"
]
},
"execution_count": 15,
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
@ -385,9 +473,26 @@
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"chain = (\n",
" prompt | model | JsonOutputParser() | RunnablePassthrough.assign(output=tool_chain)\n",
" prompt | model | JsonOutputParser() | RunnablePassthrough.assign(output=invoke_tool)\n",
")\n",
"chain.invoke({\"input\": \"what's 3 plus 1132\"})"
"chain.invoke({\"input\": \"what's thirteen times 4.14137281\"})"
]
},
{
"cell_type": "markdown",
"id": "1797fe82-ea35-4cba-834a-1caf9740d184",
"metadata": {},
"source": [
"## What's next?\n",
"\n",
"This how-to guide shows the \"happy path\" when the model correctly outputs all the required tool information.\n",
"\n",
"In reality, if you're using more complex tools, you will start encountering errors from the model, especially for models that have not been fine tuned for tool calling and for less capable models.\n",
"\n",
"You will need to be prepared to add strategies to improve the output from the model; e.g.,\n",
"\n",
"1. Provide few shot examples.\n",
"2. Add error handling (e.g., catch the exception and feed it back to the LLM to ask it to correct its previous output)."
]
}
],
@ -407,7 +512,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -22,7 +22,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"**Sambaverse** allows you to interact with multiple open-source models. You can view the list of available models and interact with them in the [playground](https://sambaverse.sambanova.ai/playground).\n **Please note that Sambaverse's free offering is performance-limited.** Companies that are ready to evaluate the production tokens-per-second performance, volume throughput, and 10x lower total cost of ownership (TCO) of SambaNova should [contact us](https://sambaverse.sambanova.ai/contact-us) for a non-limited evaluation instance."
"**Sambaverse** allows you to interact with multiple open-source models. You can view the list of available models and interact with them in the [playground](https://sambaverse.sambanova.ai/playground).\n",
" **Please note that Sambaverse's free offering is performance-limited.** Companies that are ready to evaluate the production tokens-per-second performance, volume throughput, and 10x lower total cost of ownership (TCO) of SambaNova should [contact us](https://sambaverse.sambanova.ai/contact-us) for a non-limited evaluation instance."
]
},
{
@ -88,9 +89,10 @@
" \"temperature\": 0.01,\n",
" \"process_prompt\": True,\n",
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
" # \"repetition_penalty\": {\"type\": \"float\", \"value\": \"1\"},\n",
" # \"top_k\": {\"type\": \"int\", \"value\": \"50\"},\n",
" # \"top_p\": {\"type\": \"float\", \"value\": \"1\"}\n",
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
" # \"repetition_penalty\": 1.0,\n",
" # \"top_k\": 50,\n",
" # \"top_p\": 1.0\n",
" },\n",
")\n",
"\n",
@ -177,10 +179,10 @@
" \"do_sample\": True,\n",
" \"max_tokens_to_generate\": 1000,\n",
" \"temperature\": 0.01,\n",
" # \"repetition_penalty\": {\"type\": \"float\", \"value\": \"1\"},\n",
" # \"top_k\": {\"type\": \"int\", \"value\": \"50\"},\n",
" # \"top_logprobs\": {\"type\": \"int\", \"value\": \"0\"},\n",
" # \"top_p\": {\"type\": \"float\", \"value\": \"1\"}\n",
" # \"repetition_penalty\": 1.0,\n",
" # \"top_k\": 50,\n",
" # \"top_logprobs\": 0,\n",
" # \"top_p\": 1.0\n",
" },\n",
")\n",
"\n",

@ -61,4 +61,4 @@ FROM
{'input': 'Return the sql for this question: How many employees are in the company?', 'output': "SELECT \n COUNT(*)\nFROM \n employees"}
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -31,4 +31,4 @@ from langchain.agents import load_tools
tools = load_tools(["golden-query"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -71,4 +71,4 @@ from langchain.agents import load_tools
tools = load_tools(["google-serper"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -41,4 +41,4 @@ from langchain.agents import load_tools
tools = load_tools(["openweathermap-api"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -77,4 +77,4 @@ from langchain.agents import load_tools
tools = load_tools(["searchapi"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -87,4 +87,4 @@ arxiv_tool = SearxSearchResults(name="Arxiv", wrapper=wrapper,
})
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -28,4 +28,4 @@ from langchain.agents import load_tools
tools = load_tools(["serpapi"])
```
For more information on this, see [this page](/docs/how_to/tools)
For more information on this, see [this page](/docs/how_to/tools_builtin)

@ -33,4 +33,4 @@ from langchain.agents import load_tools
tools = load_tools(["stackexchange"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -36,4 +36,4 @@ from langchain.agents import load_tools
tools = load_tools(["wolfram-alpha"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -15,8 +15,6 @@
"source": [
"This notebook goes over how to use the bing search component.\n",
"\n",
"First, you need to set up the proper API keys and environment variables. To set it up, follow the instructions found [here](https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e).\n",
"\n",
"Then we will need to set some environment variables."
]
},

@ -161,7 +161,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
"metadata": {
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
@ -194,7 +194,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "aac9563e",
"metadata": {
"id": "aac9563e",
@ -208,7 +208,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "a3c3999a",
"metadata": {
"id": "a3c3999a",
@ -229,7 +229,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 6,
"id": "12eb86d8",
"metadata": {
"id": "12eb86d8",
@ -271,7 +271,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 7,
"id": "5d076412",
"metadata": {},
"outputs": [
@ -313,7 +313,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 8,
"id": "b2a4bd1b",
"metadata": {},
"outputs": [
@ -345,7 +345,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "f3d294ff",
"metadata": {},
"outputs": [
@ -375,7 +375,7 @@
},
{
"cell_type": "code",
"execution_count": 59,
"execution_count": 10,
"id": "55b63a61",
"metadata": {},
"outputs": [
@ -405,7 +405,7 @@
},
{
"cell_type": "code",
"execution_count": 60,
"execution_count": 11,
"id": "9b831b3d",
"metadata": {},
"outputs": [
@ -435,7 +435,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "fb1482e7",
"metadata": {},
"outputs": [],
@ -504,27 +504,29 @@
"metadata": {},
"source": [
"# Retrieval Strategies\n",
"Elasticsearch has big advantages over other vector only databases from its ability to support a wide range of retrieval strategies. In this notebook we will configure `ElasticsearchStore` to support some of the most common retrieval strategies. \n",
"Elasticsearch has big advantages over other vector only databases from its ability to support a wide range of retrieval strategies. In this notebook we will configure `ElasticsearchStore` to support some of the most common retrieval strategies. \n",
"\n",
"By default, `ElasticsearchStore` uses the `ApproxRetrievalStrategy`.\n",
"By default, `ElasticsearchStore` uses the `DenseVectorStrategy` (was called `ApproxRetrievalStrategy` prior to version 0.2.0).\n",
"\n",
"## ApproxRetrievalStrategy\n",
"This will return the top `k` most similar vectors to the query vector. The `k` parameter is set when the `ElasticsearchStore` is initialized. The default value is `10`."
"## DenseVectorStrategy\n",
"This will return the top `k` most similar vectors to the query vector. The `k` parameter is set when the `ElasticsearchStore` is initialized. The default value is `10`."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 13,
"id": "999b5ef5",
"metadata": {},
"outputs": [],
"source": [
"from langchain_elasticsearch import DenseVectorStrategy\n",
"\n",
"db = ElasticsearchStore.from_documents(\n",
" docs,\n",
" embeddings,\n",
" es_url=\"http://localhost:9200\",\n",
" index_name=\"test\",\n",
" strategy=ElasticsearchStore.ApproxRetrievalStrategy(),\n",
" strategy=DenseVectorStrategy(),\n",
")\n",
"\n",
"docs = db.similarity_search(\n",
@ -537,12 +539,12 @@
"id": "9b651be5",
"metadata": {},
"source": [
"### Example: Approx with hybrid\n",
"### Example: Hybrid retrieval with dense vector and keyword search\n",
"This example will show how to configure `ElasticsearchStore` to perform a hybrid retrieval, using a combination of approximate semantic search and keyword based search. \n",
"\n",
"We use RRF to balance the two scores from different retrieval methods.\n",
"\n",
"To enable hybrid retrieval, we need to set `hybrid=True` in `ElasticsearchStore` `ApproxRetrievalStrategy` constructor.\n",
"To enable hybrid retrieval, we need to set `hybrid=True` in the `DenseVectorStrategy` constructor.\n",
"\n",
"```python\n",
"\n",
@ -551,9 +553,7 @@
" embeddings, \n",
" es_url=\"http://localhost:9200\", \n",
" index_name=\"test\",\n",
" strategy=ElasticsearchStore.ApproxRetrievalStrategy(\n",
" hybrid=True,\n",
" )\n",
" strategy=DenseVectorStrategy(hybrid=True)\n",
")\n",
"```\n",
"\n",
@ -582,22 +582,22 @@
"}\n",
"```\n",
"\n",
"### Example: Approx with Embedding Model in Elasticsearch\n",
"This example will show how to configure `ElasticsearchStore` to use the embedding model deployed in Elasticsearch for approximate retrieval. \n",
"### Example: Dense vector search with Embedding Model in Elasticsearch\n",
"This example will show how to configure `ElasticsearchStore` to use the embedding model deployed in Elasticsearch for dense vector retrieval.\n",
"\n",
"To use this, specify the model_id in `ElasticsearchStore` `ApproxRetrievalStrategy` constructor via the `query_model_id` argument.\n",
"To use this, specify the model_id in `DenseVectorStrategy` constructor via the `query_model_id` argument.\n",
"\n",
"**NOTE** This requires the model to be deployed and running in Elasticsearch ml node. See [notebook example](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations/hugging-face/loading-model-from-hugging-face.ipynb) on how to deploy the model with eland.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "0a0c85e7",
"metadata": {},
"outputs": [],
"source": [
"APPROX_SELF_DEPLOYED_INDEX_NAME = \"test-approx-self-deployed\"\n",
"DENSE_SELF_DEPLOYED_INDEX_NAME = \"test-dense-self-deployed\"\n",
"\n",
"# Note: This does not have an embedding function specified\n",
"# Instead, we will use the embedding model deployed in Elasticsearch\n",
@ -605,12 +605,10 @@
" es_cloud_id=\"<your cloud id>\",\n",
" es_user=\"elastic\",\n",
" es_password=\"<your password>\",\n",
" index_name=APPROX_SELF_DEPLOYED_INDEX_NAME,\n",
" index_name=DENSE_SELF_DEPLOYED_INDEX_NAME,\n",
" query_field=\"text_field\",\n",
" vector_query_field=\"vector_query_field.predicted_value\",\n",
" strategy=ElasticsearchStore.ApproxRetrievalStrategy(\n",
" query_model_id=\"sentence-transformers__all-minilm-l6-v2\"\n",
" ),\n",
" strategy=DenseVectorStrategy(model_id=\"sentence-transformers__all-minilm-l6-v2\"),\n",
")\n",
"\n",
"# Setup a Ingest Pipeline to perform the embedding\n",
@ -631,7 +629,7 @@
"# creating a new index with the pipeline,\n",
"# not relying on langchain to create the index\n",
"db.client.indices.create(\n",
" index=APPROX_SELF_DEPLOYED_INDEX_NAME,\n",
" index=DENSE_SELF_DEPLOYED_INDEX_NAME,\n",
" mappings={\n",
" \"properties\": {\n",
" \"text_field\": {\"type\": \"text\"},\n",
@ -655,12 +653,10 @@
" es_cloud_id=\"<cloud id>\",\n",
" es_user=\"elastic\",\n",
" es_password=\"<cloud password>\",\n",
" index_name=APPROX_SELF_DEPLOYED_INDEX_NAME,\n",
" index_name=DENSE_SELF_DEPLOYED_INDEX_NAME,\n",
" query_field=\"text_field\",\n",
" vector_query_field=\"vector_query_field.predicted_value\",\n",
" strategy=ElasticsearchStore.ApproxRetrievalStrategy(\n",
" query_model_id=\"sentence-transformers__all-minilm-l6-v2\"\n",
" ),\n",
" strategy=DenseVectorStrategy(model_id=\"sentence-transformers__all-minilm-l6-v2\"),\n",
")\n",
"\n",
"# Perform search\n",
@ -672,12 +668,12 @@
"id": "53959de6",
"metadata": {},
"source": [
"## SparseVectorRetrievalStrategy (ELSER)\n",
"## SparseVectorStrategy (ELSER)\n",
"This strategy uses Elasticsearch's sparse vector retrieval to retrieve the top-k results. We only support our own \"ELSER\" embedding model for now.\n",
"\n",
"**NOTE** This requires the ELSER model to be deployed and running in Elasticsearch ml node. \n",
"\n",
"To use this, specify `SparseVectorRetrievalStrategy` in `ElasticsearchStore` constructor."
"To use this, specify `SparseVectorStrategy` (was called `SparseVectorRetrievalStrategy` prior to version 0.2.0) in the `ElasticsearchStore` constructor. You will need to provide a model ID."
]
},
{
@ -695,15 +691,17 @@
}
],
"source": [
"from langchain_elasticsearch import SparseVectorStrategy\n",
"\n",
"# Note that this example doesn't have an embedding function. This is because we infer the tokens at index time and at query time within Elasticsearch.\n",
"# This requires the ELSER model to be loaded and running in Elasticsearch.\n",
"db = ElasticsearchStore.from_documents(\n",
" docs,\n",
" es_cloud_id=\"My_deployment:dXMtY2VudHJhbDEuZ2NwLmNsb3VkLmVzLmlvOjQ0MyQ2OGJhMjhmNDc1M2Y0MWVjYTk2NzI2ZWNkMmE5YzRkNyQ3NWI4ODRjNWQ2OTU0MTYzODFjOTkxNmQ1YzYxMGI1Mw==\",\n",
" es_cloud_id=\"<cloud id>\",\n",
" es_user=\"elastic\",\n",
" es_password=\"GgUPiWKwEzgHIYdHdgPk1Lwi\",\n",
" es_password=\"<cloud password>\",\n",
" index_name=\"test-elser\",\n",
" strategy=ElasticsearchStore.SparseVectorRetrievalStrategy(),\n",
" strategy=SparseVectorStrategy(model_id=\".elser_model_2\"),\n",
")\n",
"\n",
"db.client.indices.refresh(index=\"test-elser\")\n",
@ -719,19 +717,42 @@
"id": "edf3a093",
"metadata": {},
"source": [
"## ExactRetrievalStrategy\n",
"This strategy uses Elasticsearch's exact retrieval (also known as brute force) to retrieve the top-k results.\n",
"## DenseVectorScriptScoreStrategy\n",
"This strategy uses Elasticsearch's script score query to perform exact vector retrieval (also known as brute force) to retrieve the top-k results. (This strategy was called `ExactRetrievalStrategy` prior to version 0.2.0.)\n",
"\n",
"To use this, specify `ExactRetrievalStrategy` in `ElasticsearchStore` constructor.\n",
"To use this, specify `DenseVectorScriptScoreStrategy` in `ElasticsearchStore` constructor.\n",
"\n",
"```python\n",
"from langchain_elasticsearch import SparseVectorStrategy\n",
"\n",
"db = ElasticsearchStore.from_documents(\n",
" docs, \n",
" embeddings, \n",
" es_url=\"http://localhost:9200\", \n",
" index_name=\"test\",\n",
" strategy=ElasticsearchStore.ExactRetrievalStrategy()\n",
" strategy=DenseVectorScriptScoreStrategy(),\n",
")\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "11b51c47",
"metadata": {},
"source": [
"## BM25Strategy\n",
"Finally, you can use full-text keyword search.\n",
"\n",
"To use this, specify `BM25Strategy` in `ElasticsearchStore` constructor.\n",
"\n",
"```python\n",
"from langchain_elasticsearch import BM25Strategy\n",
"\n",
"db = ElasticsearchStore.from_documents(\n",
" docs, \n",
" es_url=\"http://localhost:9200\", \n",
" index_name=\"test\",\n",
" strategy=BM25Strategy(),\n",
")\n",
"```"
]
@ -924,9 +945,9 @@
"\n",
"## What's new?\n",
"\n",
"The new implementation is now one class called `ElasticsearchStore` which can be used for approx, exact, and ELSER search retrieval, via strategies.\n",
"The new implementation is now one class called `ElasticsearchStore` which can be used for approximate dense vector, exact dense vector, sparse vector (ELSER), BM25 retrieval and hybrid retrieval, via strategies.\n",
"\n",
"## Im using ElasticKNNSearch\n",
"## I am using ElasticKNNSearch\n",
"\n",
"Old implementation:\n",
"\n",
@ -946,21 +967,21 @@
"\n",
"```python\n",
"\n",
"from langchain_elasticsearch import ElasticsearchStore\n",
"from langchain_elasticsearch import ElasticsearchStore, DenseVectorStrategy\n",
"\n",
"db = ElasticsearchStore(\n",
" es_url=\"http://localhost:9200\",\n",
" index_name=\"test_index\",\n",
" embedding=embedding,\n",
" # if you use the model_id\n",
" # strategy=ElasticsearchStore.ApproxRetrievalStrategy( query_model_id=\"test_model\" )\n",
" # strategy=DenseVectorStrategy(model_id=\"test_model\")\n",
" # if you use hybrid search\n",
" # strategy=ElasticsearchStore.ApproxRetrievalStrategy( hybrid=True )\n",
" # strategy=DenseVectorStrategy(hybrid=True)\n",
")\n",
"\n",
"```\n",
"\n",
"## Im using ElasticVectorSearch\n",
"## I am using ElasticVectorSearch\n",
"\n",
"Old implementation:\n",
"\n",
@ -980,13 +1001,13 @@
"\n",
"```python\n",
"\n",
"from langchain_elasticsearch import ElasticsearchStore\n",
"from langchain_elasticsearch import ElasticsearchStore, DenseVectorScriptScoreStrategy\n",
"\n",
"db = ElasticsearchStore(\n",
" es_url=\"http://localhost:9200\",\n",
" index_name=\"test_index\",\n",
" embedding=embedding,\n",
" strategy=ElasticsearchStore.ExactRetrievalStrategy()\n",
" strategy=DenseVectorScriptScoreStrategy()\n",
")\n",
"\n",
"```"

@ -9,7 +9,7 @@ sidebar_label: Overview
The following features have been added during the development of 0.1.x:
- Better streaming support via the [Event Streaming API](https://python.langchain.com/docs/expression_language/streaming/#using-stream-events)
- Better streaming support via the [Event Streaming API](https://python.langchain.com/docs/expression_language/streaming/#using-stream-events).
- [Standardized tool calling support](https://blog.langchain.dev/tool-calling-with-langchain/)
- A standardized interface for [structuring output](https://github.com/langchain-ai/langchain/discussions/18154)
- [@chain decorator](https://python.langchain.com/docs/expression_language/how_to/decorator/) to more easily create **RunnableLambdas**
@ -20,6 +20,7 @@ The following features have been added during the development of 0.1.x:
- Interoperability of chat message histories across most providers
- [Over 20+ partner packages in python](https://python.langchain.com/docs/integrations/platforms/) for popular integrations
## What's coming to LangChain?
- We've been working hard on [langgraph](https://python.langchain.com/docs/langgraph/). We will be building more capabilities on top of it and focusing on making it the go-to framework for agent architectures.

@ -1,175 +0,0 @@
---
sidebar_position: 1
sidebar_label: v0.2
---
# LangChain v0.2
LangChain v0.2 was released in May 2024. This release includes a number of breaking changes and deprecations. This document contains a guide on upgrading to 0.2.x, as well as a list of deprecations and breaking changes.
## Migration
This documentation will help you upgrade your code to LangChain `0.2.x`. To prepare for migration, we first recommend you take the following steps:
1. Install the 0.2.x versions of `langchain-core` and `langchain`, and upgrade to recent versions of the other packages that you may be using (e.g., `langgraph`, `langchain-community`, `langchain-openai`, etc.)
2. Verify that your code runs properly with the new packages (e.g., unit tests pass)
3. Install a recent version of `langchain-cli`, and use the tool to replace old imports used by your code with the new imports. (See instructions below.)
4. Manually resolve any remaining deprecation warnings
5. Re-run unit tests
### Upgrade to new imports
We created a tool to help migrate your code. This tool is still in **beta** and may not cover all cases, but
we hope that it will help you migrate your code more quickly.
The migration script has the following limitations:
1. It's limited to helping users move from old imports to new imports. It doesn't help address other deprecations.
2. It can't handle imports that involve `as`.
3. New imports are always placed in global scope, even if the old import that was replaced was located inside some local scope (e.g., a function body).
4. It will likely miss some deprecated imports.
Here is an example of the import changes that the migration script can help apply automatically:
| From Package | To Package | Deprecated Import | New Import |
|---------------------|--------------------------|--------------------------------------------------------------------|---------------------------------------------------------------------|
| langchain | langchain-community | from langchain.vectorstores import InMemoryVectorStore | from langchain_community.vectorstores import InMemoryVectorStore |
| langchain-community | langchain_openai | from langchain_community.chat_models import ChatOpenAI | from langchain_openai import ChatOpenAI |
| langchain-community | langchain-core | from langchain_community.document_loaders import Blob | from langchain_core.document_loaders import Blob |
| langchain | langchain-core | from langchain.schema.document import Document | from langchain_core.documents import Document |
| langchain | langchain-text-splitters | from langchain.text_splitter import RecursiveCharacterTextSplitter | from langchain_text_splitters import RecursiveCharacterTextSplitter |
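Concretely, the first row of the table corresponds to a rewrite like the following (a sketch of the script's effect, with both forms shown side by side for illustration):

```python
# Before running the migration script
from langchain.vectorstores import InMemoryVectorStore

# After running the migration script
from langchain_community.vectorstores import InMemoryVectorStore
```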
#### Deprecation timeline
We have two main types of deprecations:
1. Code that was moved from `langchain` into another package (e.g., `langchain-community`)
If you try to import it from `langchain`, the import will keep on working, but will raise a deprecation warning. The warning will provide a replacement import statement.
```bash
python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"
```
```python
LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated. Please replace deprecated imports:
>> from langchain.document_loaders import UnstructuredMarkdownLoader
with new imports of:
>> from langchain_community.document_loaders import UnstructuredMarkdownLoader
```
We will continue supporting the imports in `langchain` until release 0.4 as long as the relevant package where the code lives is installed. (e.g., as long as `langchain_community` is installed.)
However, we advise users not to rely on these imports and instead migrate to the new imports. To help with this process, we're releasing a migration script via the LangChain CLI. See further instructions in the migration guide.
2. Code that has better alternatives available and will eventually be removed, so there's only a single way to do things. (e.g., `predict_messages` method in ChatModels has been deprecated in favor of `invoke`).
Many of these were marked for removal in 0.2. We have bumped the removal to 0.3.
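For example, a minimal sketch of the second kind of deprecation, assuming `langchain-openai` is installed (the prompt text is illustrative):

```python
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

model = ChatOpenAI()

# Deprecated: emits a LangChainDeprecationWarning before eventual removal
result = model.predict_messages([HumanMessage(content="Hello")])

# Preferred: the standard Runnable interface
result = model.invoke([HumanMessage(content="Hello")])
```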
#### Installation
```bash
pip install langchain-cli
langchain-cli --version # <-- Make sure the version is at least 0.0.22
```
#### Usage
Given that the migration script is not perfect, you should make sure you have a backup of your code first (e.g., using version control like `git`).
You will need to run the migration script **twice** as it only applies one import replacement per run.
For example, say your code still uses `from langchain.chat_models import ChatOpenAI`:
After the first run, you'll get: `from langchain_community.chat_models import ChatOpenAI`
After the second run, you'll get: `from langchain_openai import ChatOpenAI`
```bash
# Run a first time
# Will replace from langchain.chat_models import ChatOpenAI
langchain-cli migrate [path to code] --diff # Preview
langchain-cli migrate [path to code] # Apply
# Run a second time to apply more import replacements
langchain-cli migrate [path to code] --diff # Preview
langchain-cli migrate [path to code] # Apply
```
#### Other options
```bash
# See help menu
langchain-cli migrate --help
# Preview Changes without applying
langchain-cli migrate --diff [path to code]
# Run on code including ipython notebooks
# Apply all import updates except for updates from langchain to langchain-core
langchain-cli migrate --disable langchain_to_core --include-ipynb [path to code]
```
## Deprecations and breaking changes
This page contains a list of deprecations and removals in the `langchain` and `langchain-core` packages.
### Breaking changes in 0.2.0
As of release 0.2.0, `langchain` is required to be integration-agnostic. This means that code in `langchain` should not, by default, instantiate any specific chat models, LLMs, embedding models, vector stores, etc.; instead, the user is required to specify those explicitly.
The following functions and classes require an explicit LLM to be passed as an argument:
- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit`
- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit`
- `langchain.chains.openai_functions.get_openapi_chain`
- `langchain.chains.router.MultiRetrievalQAChain.from_retrievers`
- `langchain.indexes.VectorStoreIndexWrapper.query`
- `langchain.indexes.VectorStoreIndexWrapper.query_with_sources`
- `langchain.indexes.VectorStoreIndexWrapper.aquery_with_sources`
- `langchain.chains.flare.FlareChain`
The following classes now require passing an explicit Embedding model as an argument:
- `langchain.indexes.VectorstoreIndexCreator`
The following code has been removed:
- `langchain.natbot.NatBotChain.from_default` removed in favor of the `from_llm` class method.
### Deprecations
We have two main types of deprecations:
1. Code that was moved from `langchain` into another package (e.g., `langchain-community`)
If you try to import it from `langchain`, the import will keep working, but will raise a deprecation warning. The warning will provide a replacement import statement.
```shell
python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"
```
```shell
LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated. Please replace deprecated imports:
>> from langchain.document_loaders import UnstructuredMarkdownLoader
with new imports of:
>> from langchain_community.document_loaders import UnstructuredMarkdownLoader
```
We will continue supporting the imports in `langchain` until release 0.4, as long as the relevant package where the code lives is installed (e.g., as long as `langchain_community` is installed).
However, we advise users not to rely on these imports and to migrate to the new imports instead. To help with this process, we're releasing a migration script via the LangChain CLI. See further instructions in the migration guide.
2. Code that has better alternatives available and will eventually be removed, so there's only a single way to do things (e.g., the `predict_messages` method on chat models has been deprecated in favor of `invoke`).
Many of these were marked for removal in 0.2. We have bumped the removal to 0.3.
@ -0,0 +1,897 @@
---
sidebar_position: 3
sidebar_label: Changes
---
# Deprecations and Breaking Changes
This page contains a list of deprecations and removals in the `langchain` and `langchain-core` packages.
New features and improvements are not listed here. See the [overview](/docs/versions/overview/) for a summary of what's new in this release.
## Breaking changes
As of release 0.2.0, `langchain` is required to be integration-agnostic. This means that code in `langchain` should not, by default, instantiate any specific chat models, LLMs, embedding models, vector stores, etc.; instead, the user is required to specify those explicitly.
The following functions and classes require an explicit LLM to be passed as an argument:
- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit`
- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit`
- `langchain.chains.openai_functions.get_openapi_chain`
- `langchain.chains.router.MultiRetrievalQAChain.from_retrievers`
- `langchain.indexes.VectorStoreIndexWrapper.query`
- `langchain.indexes.VectorStoreIndexWrapper.query_with_sources`
- `langchain.indexes.VectorStoreIndexWrapper.aquery_with_sources`
- `langchain.chains.flare.FlareChain`
The following classes now require passing an explicit Embedding model as an argument:
- `langchain.indexes.VectorstoreIndexCreator`
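For example, `VectorstoreIndexCreator` no longer falls back to a default embedding model; a sketch of the new usage (assuming `langchain-openai` is installed and that the parameter is passed as `embedding`):
```python
from langchain.indexes import VectorstoreIndexCreator
from langchain_openai import OpenAIEmbeddings

# The embedding model must now be provided explicitly.
index_creator = VectorstoreIndexCreator(embedding=OpenAIEmbeddings())
```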
The following code has been removed:
- `langchain.natbot.NatBotChain.from_default` removed in favor of the `from_llm` class method.
Behavior was changed for the following code:
### @tool decorator
The `@tool` decorator now assigns the function docstring as the tool description. Previously, the `@tool` decorator
used to prepend the function signature.
Before 0.2.0:
```python
@tool
def my_tool(x: str) -> str:
    """Some description."""
    return "something"

print(my_tool.description)
```
Would result in: `my_tool: (x: str) -> str - Some description.`
As of 0.2.0:
It will result in: `Some description.`
## Code that moved to another package
Code that was moved from `langchain` into another package (e.g., `langchain-community`).
If you try to import it from `langchain`, the import will keep working, but will raise a deprecation warning. The warning will provide a replacement import statement.
```shell
python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"
```
```shell
LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated. Please replace deprecated imports:
>> from langchain.document_loaders import UnstructuredMarkdownLoader
with new imports of:
>> from langchain_community.document_loaders import UnstructuredMarkdownLoader
```
We will continue supporting the imports in `langchain` until release 0.4, as long as the relevant package where the code lives is installed (e.g., as long as `langchain_community` is installed).
However, we advise users not to rely on these imports and to migrate to the new imports instead. To help with this process, we're releasing a migration script via the LangChain CLI. See further instructions in the migration guide.
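If you would rather have these warnings surface in CI than scroll by in logs, one option (a suggestion on our part, not an official requirement) is to escalate them to errors with the standard `warnings` module:
```python
import warnings

from langchain_core._api import LangChainDeprecationWarning

# Fail fast whenever a deprecated LangChain import or API is used.
warnings.simplefilter("error", LangChainDeprecationWarning)
```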
## Code targeted for removal
Code that has better alternatives available and will eventually be removed, so there's only a single way to do things (e.g., the `predict_messages` method on chat models has been deprecated in favor of `invoke`).
### astream events V1
If you are using `astream_events`, please review how to [migrate to astream events v2](/docs/versions/v0_2/migrating_astream_events).
### langchain_core
#### try_load_from_hub
In module: `utils.loading`
Deprecated: 0.1.30
Removal: 0.3.0
Alternative: Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead.
#### BaseLanguageModel.predict
In module: `language_models.base`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseLanguageModel.predict_messages
In module: `language_models.base`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseLanguageModel.apredict
In module: `language_models.base`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### BaseLanguageModel.apredict_messages
In module: `language_models.base`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### RunTypeEnum
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Use string instead.
#### TracerSessionV1Base
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### TracerSessionV1Create
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### TracerSessionV1
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### TracerSessionBase
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### TracerSession
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### BaseRun
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Run
#### LLMRun
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Run
#### ChainRun
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Run
#### ToolRun
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Run
#### BaseChatModel.__call__
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseChatModel.call_as_llm
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseChatModel.predict
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseChatModel.predict_messages
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseChatModel.apredict
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### BaseChatModel.apredict_messages
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### BaseLLM.__call__
In module: `language_models.llms`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseLLM.predict
In module: `language_models.llms`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseLLM.predict_messages
In module: `language_models.llms`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseLLM.apredict
In module: `language_models.llms`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### BaseLLM.apredict_messages
In module: `language_models.llms`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### BaseRetriever.get_relevant_documents
In module: `retrievers`
Deprecated: 0.1.46
Removal: 0.3.0
Alternative: invoke
#### BaseRetriever.aget_relevant_documents
In module: `retrievers`
Deprecated: 0.1.46
Removal: 0.3.0
Alternative: ainvoke
#### ChatPromptTemplate.from_role_strings
In module: `prompts.chat`
Deprecated: 0.0.1
Removal:
Alternative: from_messages classmethod
#### ChatPromptTemplate.from_strings
In module: `prompts.chat`
Deprecated: 0.0.1
Removal:
Alternative: from_messages classmethod
#### BaseTool.__call__
In module: `tools`
Deprecated: 0.1.47
Removal: 0.3.0
Alternative: invoke
#### convert_pydantic_to_openai_function
In module: `utils.function_calling`
Deprecated: 0.1.16
Removal: 0.3.0
Alternative: langchain_core.utils.function_calling.convert_to_openai_function()
#### convert_pydantic_to_openai_tool
In module: `utils.function_calling`
Deprecated: 0.1.16
Removal: 0.3.0
Alternative: langchain_core.utils.function_calling.convert_to_openai_tool()
#### convert_python_function_to_openai_function
In module: `utils.function_calling`
Deprecated: 0.1.16
Removal: 0.3.0
Alternative: langchain_core.utils.function_calling.convert_to_openai_function()
#### format_tool_to_openai_function
In module: `utils.function_calling`
Deprecated: 0.1.16
Removal: 0.3.0
Alternative: langchain_core.utils.function_calling.convert_to_openai_function()
#### format_tool_to_openai_tool
In module: `utils.function_calling`
Deprecated: 0.1.16
Removal: 0.3.0
Alternative: langchain_core.utils.function_calling.convert_to_openai_tool()
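All of the converters above collapse into the two `convert_to_openai_*` helpers; a minimal sketch:
```python
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.utils.function_calling import convert_to_openai_function

class GetWeather(BaseModel):
    """Get the current weather for a city."""

    city: str = Field(description="Name of the city")

# Accepts pydantic models, Python functions, and LangChain tools alike.
openai_function = convert_to_openai_function(GetWeather)
```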
### langchain
#### AgentType
In module: `agents.agent_types`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
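For example, instead of `initialize_agent(..., agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)`, you construct the agent directly (a sketch assuming `langchain-openai` and a community ReAct prompt pulled from the LangChain Hub):
```python
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

@tool
def word_count(text: str) -> int:
    """Count the words in a text."""
    return len(text.split())

tools = [word_count]
prompt = hub.pull("hwchase17/react")  # community-maintained ReAct prompt
agent = create_react_agent(ChatOpenAI(), tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "How many words are in 'hello world'?"})
```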
#### Chain.__call__
In module: `chains.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: invoke
#### Chain.acall
In module: `chains.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: ainvoke
#### Chain.run
In module: `chains.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: invoke
#### Chain.arun
In module: `chains.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: ainvoke
#### Chain.apply
In module: `chains.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: batch
#### LLMChain
In module: `chains.llm`
Deprecated: 0.1.17
Removal: 0.3.0
Alternative: RunnableSequence, e.g., `prompt | llm`
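A sketch of the replacement (assuming `langchain-openai`):
```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")

# Deprecated: LLMChain(llm=ChatOpenAI(), prompt=prompt)
chain = prompt | ChatOpenAI()
chain.invoke({"topic": "bears"})
```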
#### LLMSingleActionAgent
In module: `agents.agent`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
#### Agent
In module: `agents.agent`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
#### OpenAIFunctionsAgent
In module: `agents.openai_functions_agent.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_openai_functions_agent
#### ZeroShotAgent
In module: `agents.mrkl.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_react_agent
#### MRKLChain
In module: `agents.mrkl.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### ConversationalAgent
In module: `agents.conversational.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_react_agent
#### ConversationalChatAgent
In module: `agents.conversational_chat.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_json_chat_agent
#### ChatAgent
In module: `agents.chat.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_react_agent
#### OpenAIMultiFunctionsAgent
In module: `agents.openai_functions_multi_agent.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_openai_tools_agent
#### ReActDocstoreAgent
In module: `agents.react.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### DocstoreExplorer
In module: `agents.react.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### ReActTextWorldAgent
In module: `agents.react.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### ReActChain
In module: `agents.react.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### SelfAskWithSearchAgent
In module: `agents.self_ask_with_search.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_self_ask_with_search
#### SelfAskWithSearchChain
In module: `agents.self_ask_with_search.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### StructuredChatAgent
In module: `agents.structured_chat.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_structured_chat_agent
#### RetrievalQA
In module: `chains.retrieval_qa.base`
Deprecated: 0.1.17
Removal: 0.3.0
Alternative: create_retrieval_chain
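A sketch of the replacement pattern, assuming you already have a `retriever` (e.g., from a vector store) and `langchain-openai` installed:
```python
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_messages([
    ("system", "Answer the question using only this context:\n\n{context}"),
    ("human", "{input}"),
])
combine_docs_chain = create_stuff_documents_chain(ChatOpenAI(), prompt)
rag_chain = create_retrieval_chain(retriever, combine_docs_chain)  # `retriever` assumed to exist
rag_chain.invoke({"input": "What changed in 0.2.0?"})
```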
#### load_agent_from_config
In module: `agents.loading`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### load_agent
In module: `agents.loading`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### initialize_agent
In module: `agents.initialize`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
#### XMLAgent
In module: `agents.xml.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_xml_agent
#### CohereRerank
In module: `retrievers.document_compressors.cohere_rerank`
Deprecated: 0.0.30
Removal: 0.3.0
Alternative: langchain_cohere.CohereRerank
#### ConversationalRetrievalChain
In module: `chains.conversational_retrieval.base`
Deprecated: 0.1.17
Removal: 0.3.0
Alternative: create_history_aware_retriever together with create_retrieval_chain (see example in docstring)
#### create_extraction_chain_pydantic
In module: `chains.openai_tools.extraction`
Deprecated: 0.1.14
Removal: 0.3.0
Alternative: with_structured_output method on chat models that support tool calling.
#### create_openai_fn_runnable
In module: `chains.structured_output.base`
Deprecated: 0.1.14
Removal: 0.3.0
Alternative: with_structured_output method on chat models that support tool calling.
#### create_structured_output_runnable
In module: `chains.structured_output.base`
Deprecated: 0.1.17
Removal: 0.3.0
Alternative: with_structured_output method on chat models that support tool calling.
#### create_openai_fn_chain
In module: `chains.openai_functions.base`
Deprecated: 0.1.1
Removal: 0.3.0
Alternative: create_openai_fn_runnable
#### create_structured_output_chain
In module: `chains.openai_functions.base`
Deprecated: 0.1.1
Removal: 0.3.0
Alternative: ChatOpenAI.with_structured_output
#### create_extraction_chain
In module: `chains.openai_functions.extraction`
Deprecated: 0.1.14
Removal: 0.3.0
Alternative: with_structured_output method on chat models that support tool calling.
#### create_extraction_chain_pydantic
In module: `chains.openai_functions.extraction`
Deprecated: 0.1.14
Removal: 0.3.0
Alternative: with_structured_output method on chat models that support tool calling.
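The last several entries all point at the same replacement; a minimal sketch of `with_structured_output` (assuming a tool-calling model such as `ChatOpenAI`):
```python
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI

class Person(BaseModel):
    """Information about a person."""

    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age")

structured_model = ChatOpenAI().with_structured_output(Person)
structured_model.invoke("Anna is 23 years old.")  # -> Person(name='Anna', age=23)
```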
@ -0,0 +1,90 @@
---
sidebar_position: 1
---
# LangChain v0.2
LangChain v0.2 was released in May 2024. This release includes a number of [breaking changes and deprecations](/docs/versions/v0_2/deprecations). This document contains a guide on upgrading to 0.2.x.
:::note Reference
- [Breaking Changes & Deprecations](/docs/versions/v0_2/deprecations)
:::
# Migration
This documentation will help you upgrade your code to LangChain `0.2.x`. To prepare for migration, we first recommend you take the following steps:
1. Install the 0.2.x versions of `langchain-core` and `langchain`, and upgrade to recent versions of the other packages that you may be using (e.g., `langgraph`, `langchain-community`, `langchain-openai`, etc.).
2. Verify that your code runs properly with the new packages (e.g., unit tests pass).
3. Install a recent version of `langchain-cli`, and use the tool to replace old imports used by your code with the new imports. (See instructions below.)
4. Manually resolve any remaining deprecation warnings.
5. Re-run unit tests.
## Upgrade to new imports
We created a tool to help migrate your code. This tool is still in **beta** and may not cover all cases, but
we hope that it will help you migrate your code more quickly.
The migration script has the following limitations:
1. It's limited to helping users move from old imports to new imports. It does not help address other deprecations.
2. It can't handle imports that involve `as`.
3. New imports are always placed in global scope, even if the old import that was replaced was located inside some local scope (e.g., a function body).
4. It will likely miss some deprecated imports.
Here is an example of the import changes that the migration script can help apply automatically:
| From Package | To Package | Deprecated Import | New Import |
|---------------------|--------------------------|--------------------------------------------------------------------|---------------------------------------------------------------------|
| langchain | langchain-community | from langchain.vectorstores import InMemoryVectorStore | from langchain_community.vectorstores import InMemoryVectorStore |
| langchain-community | langchain_openai | from langchain_community.chat_models import ChatOpenAI | from langchain_openai import ChatOpenAI |
| langchain-community | langchain-core | from langchain_community.document_loaders import Blob | from langchain_core.document_loaders import Blob |
| langchain | langchain-core | from langchain.schema.document import Document | from langchain_core.documents import Document |
| langchain | langchain-text-splitters | from langchain.text_splitter import RecursiveCharacterTextSplitter | from langchain_text_splitters import RecursiveCharacterTextSplitter |
## Installation
```bash
pip install langchain-cli
langchain-cli --version # <-- Make sure the version is at least 0.0.22
```
## Usage
Given that the migration script is not perfect, you should make sure you have a backup of your code first (e.g., using version control like `git`).
You will need to run the migration script **twice** as it only applies one import replacement per run.
For example, say your code still uses `from langchain.chat_models import ChatOpenAI`:
After the first run, you'll get: `from langchain_community.chat_models import ChatOpenAI`
After the second run, you'll get: `from langchain_openai import ChatOpenAI`
```bash
# Run a first time
# Will replace from langchain.chat_models import ChatOpenAI
langchain-cli migrate [path to code] --diff # Preview
langchain-cli migrate [path to code] # Apply
# Run a second time to apply more import replacements
langchain-cli migrate [path to code] --diff # Preview
langchain-cli migrate [path to code] # Apply
```
### Other options
```bash
# See help menu
langchain-cli migrate --help
# Preview Changes without applying
langchain-cli migrate --diff [path to code]
# Run on code including ipython notebooks
# Apply all import updates except for updates from langchain to langchain-core
langchain-cli migrate --disable langchain_to_core --include-ipynb [path to code]
```
@ -0,0 +1,124 @@
---
sidebar_position: 2
sidebar_label: astream_events v2
---
# Migrating to Astream Events v2
:::danger
This migration guide is a work in progress and is not complete. Please wait to migrate astream_events.
:::
We've added a `v2` of the astream_events API with the release of `0.2.0`. You can see this [PR](https://github.com/langchain-ai/langchain/pull/21638) for more details.
The `v2` version is a re-write of the `v1` version, and should be more efficient, with more consistent output for the events. The `v1` version of the API will be deprecated in favor of the `v2` version and will be removed in `0.4.0`.
Below is a list of changes between the `v1` and `v2` versions of the API.
### output for `on_chat_model_end`
In `v1`, the outputs associated with `on_chat_model_end` changed depending on whether the
chat model was run as a root level runnable or as part of a chain.
As a root level runnable the output was:
```python
"data": {"output": AIMessageChunk(content="hello world!", id='some id')}
```
As part of a chain the output was:
```python
"data": {
    "output": {
        "generations": [
            [
                {
                    "generation_info": None,
                    "message": AIMessageChunk(
                        content="hello world!", id=AnyStr()
                    ),
                    "text": "hello world!",
                    "type": "ChatGenerationChunk",
                }
            ]
        ],
        "llm_output": None,
    }
},
```
As of `v2`, the output will always be the simpler representation:
```python
"data": {"output": AIMessageChunk(content="hello world!", id='some id')}
```
:::note
Non-chat models (i.e., regular LLMs) will remain consistently associated with the more verbose format for now.
:::
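This means a `v2` consumer can rely on the simpler shape regardless of nesting; a minimal sketch (assuming `langchain-openai` is installed):
```python
import asyncio

from langchain_openai import ChatOpenAI

async def main() -> None:
    model = ChatOpenAI()
    async for event in model.astream_events("hello", version="v2"):
        if event["event"] == "on_chat_model_end":
            # Always the simple AIMessageChunk shape in v2.
            print(event["data"]["output"])

asyncio.run(main())
```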
### output for `on_retriever_end`
`on_retriever_end` output will always return a list of `Documents`.
Before:
```python
{
    "data": {
        "output": [
            Document(...),
            Document(...),
            ...
        ]
    }
}
```
### Removed `on_retriever_stream`
The `on_retriever_stream` event was an artifact of the implementation and has been removed.
Full information associated with the event is already available in the `on_retriever_end` event.
Please use `on_retriever_end` instead.
### Removed `on_tool_stream`
The `on_tool_stream` event was an artifact of the implementation and has been removed.
Full information associated with the event is already available in the `on_tool_end` event.
Please use `on_tool_end` instead.
### Propagating Names
Names of runnables have been updated to be more consistent.
```python
model = GenericFakeChatModel(messages=infinite_cycle).configurable_fields(
    messages=ConfigurableField(
        id="messages",
        name="Messages",
        description="Messages returned by the LLM",
    )
)
```
In `v1`, the event name was `RunnableConfigurableFields`.
In `v2`, the event name is `GenericFakeChatModel`.
If you're filtering by event names, check if you need to update your filters.
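For example, a filter keyed on the `v1` wrapper name would be updated to the underlying model's name (a sketch reusing the configurable `model` above):
```python
# v1 filtered on the wrapper's class name:
#   model.astream_events(..., version="v1", include_names=["RunnableConfigurableFields"])
# v2 filters on the underlying runnable's name:
async for event in model.astream_events("hi", version="v2", include_names=["GenericFakeChatModel"]):
    print(event["event"], event["name"])
```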
### RunnableRetry
Using [RunnableRetry](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.retry.RunnableRetry.html)
within a streamed LCEL chain generated an incorrect `on_chain_end` event in `v1`, corresponding
to the failed runnable invocation that was being retried. This event has been removed in `v2`.
No action is required for this change.
@ -124,7 +124,7 @@ const config = {
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
({
announcementBar: {
content: 'You are viewing the <strong>preview</strong> v0.2 docs. View the <strong>stable</strong> v0.1 docs <a href="/v0.1/docs/get_started/introduction/">here</a>. Leave feedback on the v0.2 docs <a href="https://github.com/langchain-ai/langchain/discussions/21716">here</a>.',
content: 'LangChain 0.2 is out! Leave feedback on the v0.2 docs <a href="https://github.com/langchain-ai/langchain/discussions/21716">here</a>. You can view the v0.1 docs <a href="/v0.1/docs/get_started/introduction/">here</a>.',
isCloseable: true,
},
docs: {
@ -202,6 +202,10 @@ const config = {
docId: "additional_resources/youtube",
label: "YouTube"
},
{
to: "/docs/additional_resources/arxiv_references",
label: "arXiv"
},
]
},
{
@ -0,0 +1,465 @@
"""Parse arXiv references from the documentation.
Generate a page with a table of the arXiv references with links to the documentation pages.
"""
import logging
import os
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Set

from pydantic.v1 import BaseModel, root_validator

# TODO parse docstrings for arXiv references
# TODO Generate a page with a table of the references with correspondent modules/classes/functions.

logger = logging.getLogger(__name__)

_ROOT_DIR = Path(os.path.abspath(__file__)).parents[2]
DOCS_DIR = _ROOT_DIR / "docs" / "docs"
CODE_DIR = _ROOT_DIR / "libs"
ARXIV_ID_PATTERN = r"https://arxiv\.org/(abs|pdf)/(\d+\.\d+)"


@dataclass
class ArxivPaper:
    """ArXiv paper information."""

    arxiv_id: str
    referencing_docs: list[str]  # TODO: Add the referencing docs
    referencing_api_refs: list[str]  # TODO: Add the referencing docs
    title: str
    authors: list[str]
    abstract: str
    url: str
    published_date: str


def search_documentation_for_arxiv_references(docs_dir: Path) -> dict[str, set[str]]:
    """Search the documentation for arXiv references.

    Search for the arXiv references in the documentation pages.
    Note: It finds only the first arXiv reference in a line.

    Args:
        docs_dir: Path to the documentation root folder.
    Returns:
        dict: Dictionary with arxiv_id as key and set of file names as value.
    """
    arxiv_url_pattern = re.compile(ARXIV_ID_PATTERN)
    exclude_strings = {"file_path", "metadata", "link", "loader", "PyPDFLoader"}

    # loop all the files (ipynb, mdx, md) in the docs folder
    files = (
        p.resolve()
        for p in Path(docs_dir).glob("**/*")
        if p.suffix in {".ipynb", ".mdx", ".md"}
    )
    arxiv_id2file_names: dict[str, set[str]] = {}
    for file in files:
        if "-checkpoint.ipynb" in file.name:
            continue
        with open(file, "r", encoding="utf-8") as f:
            lines = f.readlines()
        for line in lines:
            if any(exclude_string in line for exclude_string in exclude_strings):
                continue
            matches = arxiv_url_pattern.search(line)
            if matches:
                arxiv_id = matches.group(2)
                file_name = _get_doc_path(file.parts, file.suffix)
                if arxiv_id not in arxiv_id2file_names:
                    arxiv_id2file_names[arxiv_id] = {file_name}
                else:
                    arxiv_id2file_names[arxiv_id].add(file_name)
    return arxiv_id2file_names


def convert_module_name_and_members_to_urls(
    arxiv_id2module_name_and_members: dict[str, set[str]],
) -> dict[str, set[str]]:
    arxiv_id2urls = {}
    for arxiv_id, module_name_and_members in arxiv_id2module_name_and_members.items():
        urls = set()
        for module_name_and_member in module_name_and_members:
            module_name, type_and_member = module_name_and_member.split(":")
            if "$" in type_and_member:
                type, member = type_and_member.split("$")
            else:
                type = type_and_member
                member = ""
            _namespace_parts = module_name.split(".")
            if type == "module":
                first_namespace_part = _namespace_parts[0]
                if first_namespace_part.startswith("langchain_"):
                    first_namespace_part = first_namespace_part.replace(
                        "langchain_", ""
                    )
                url = f"{first_namespace_part}_api_reference.html#module-{module_name}"
            elif type in ["class", "function"]:
                second_namespace_part = _namespace_parts[1]
                url = f"{second_namespace_part}/{module_name}.{member}.html#{module_name}.{member}"
            else:
                raise ValueError(
                    f"Unknown type: {type} in the {module_name_and_member}."
                )
            urls.add(url)
        arxiv_id2urls[arxiv_id] = urls
    return arxiv_id2urls


def search_code_for_arxiv_references(code_dir: Path) -> dict[str, set[str]]:
    """Search the code for arXiv references.

    Search for the arXiv references in the code.
    Note: It finds only the first arXiv reference in a line.

    Args:
        code_dir: Path to the code root folder.
    Returns:
        dict: Dictionary with arxiv_id as key and set of module names as value.
          module names encoded as:
            <module_name>:module
            <module_name>:class$<ClassName>
            <module_name>:function$<function_name>
    """
    arxiv_url_pattern = re.compile(ARXIV_ID_PATTERN)
    # exclude_strings = {"file_path", "metadata", "link", "loader"}
    class_pattern = re.compile(r"\s*class\s+(\w+).*:")
    function_pattern = re.compile(r"\s*def\s+(\w+)")

    # loop all the files (ipynb, mdx, md) in the docs folder
    files = (
        p.resolve()
        for p in Path(code_dir).glob("**/*")
        if p.suffix in {".py"} and "tests" not in p.parts and "scripts" not in p.parts
        # ".md" files are excluded
    )
    arxiv_id2module_name_and_members: dict[str, set[str]] = {}
    for file in files:
        try:
            with open(file, "r", encoding="utf-8") as f:
                module_name = _get_module_name(file.parts)
                class_or_function_started = "module"
                for line in f.readlines():
                    # class line:
                    matches = class_pattern.search(line)
                    if matches:
                        class_name = matches.group(1)
                        class_or_function_started = f"class${class_name}"

                    # function line:
                    #  not inside a class!
                    if "class" not in class_or_function_started:
                        matches = function_pattern.search(line)
                        if matches:
                            func_name = matches.group(1)
                            class_or_function_started = f"function${func_name}"

                    # arxiv line:
                    matches = arxiv_url_pattern.search(line)
                    if matches:
                        arxiv_id = matches.group(2)
                        module_name_and_member = (
                            f"{module_name}:{class_or_function_started}"
                        )
                        if arxiv_id not in arxiv_id2module_name_and_members:
                            arxiv_id2module_name_and_members[arxiv_id] = {
                                module_name_and_member
                            }
                        else:
                            arxiv_id2module_name_and_members[arxiv_id].add(
                                module_name_and_member
                            )
        except UnicodeDecodeError:
            # Skip files like this 'tests/integration_tests/examples/non-utf8-encoding.py'
            logger.warning(f"Could not read the file {file}.")

    # handle border cases:
    # 1. {'langchain_experimental.pal_chain.base:class$PALChain', 'langchain_experimental.pal_chain.base:module' - remove}
    for arxiv_id, module_name_and_members in arxiv_id2module_name_and_members.items():
        module_name_and_member_deduplicated = set()
        non_module_members = set()
        for module_name_and_member in module_name_and_members:
            if not module_name_and_member.endswith(":module"):
                module_name_and_member_deduplicated.add(module_name_and_member)
                non_module_members.add(module_name_and_member.split(":")[0])
        for module_name_and_member in module_name_and_members:
            if module_name_and_member.endswith(":module"):
                if module_name_and_member.split(":")[0] in non_module_members:
                    continue
                module_name_and_member_deduplicated.add(module_name_and_member)
        arxiv_id2module_name_and_members[arxiv_id] = module_name_and_member_deduplicated

    # 2. {'langchain.evaluation.scoring.prompt:module', 'langchain.evaluation.comparison.prompt:module'}
    #  only modules with 2-part namespaces are parsed into API Reference now! TODO fix this behavior
    #  leave only the modules with 2-part namespaces
    arxiv_id2module_name_and_members_reduced = {}
    for arxiv_id, module_name_and_members in arxiv_id2module_name_and_members.items():
        module_name_and_member_reduced = set()
        removed_modules = set()
        for module_name_and_member in module_name_and_members:
            if module_name_and_member.endswith(":module"):
                if module_name_and_member.split(":")[0].count(".") <= 1:
                    module_name_and_member_reduced.add(module_name_and_member)
                else:
                    removed_modules.add(module_name_and_member)
            else:
                module_name_and_member_reduced.add(module_name_and_member)
        if module_name_and_member_reduced:
            arxiv_id2module_name_and_members_reduced[arxiv_id] = (
                module_name_and_member_reduced
            )
        if removed_modules:
            logger.warning(
                f"{arxiv_id}: Removed the following modules with 2+ -part namespaces: {removed_modules}."
            )
    return arxiv_id2module_name_and_members_reduced


def _get_doc_path(file_parts: tuple[str, ...], file_extension) -> str:
    """Get the relative path to the documentation page
    from the absolute path of the file.
    Remove file_extension
    """
    res = []
    for el in file_parts[::-1]:
        res.append(el)
        if el == "docs":
            break
    ret = "/".join(reversed(res))
    return ret[: -len(file_extension)] if ret.endswith(file_extension) else ret


def _get_code_path(file_parts: tuple[str, ...]) -> str:
    """Get the relative path to the documentation page
    from the absolute path of the file.
    """
    res = []
    for el in file_parts[::-1]:
        res.append(el)
        if el == "libs":
            break
    return "/".join(reversed(res))


def _get_module_name(file_parts: tuple[str, ...]) -> str:
    """Get the module name from the absolute path of the file."""
    ns_parts = []
    for el in file_parts[::-1]:
        if str(el) == "__init__.py":
            continue
        ns_parts.insert(0, str(el).replace(".py", ""))
        if el.startswith("langchain"):
            break
    return ".".join(ns_parts)


def compound_urls(
    arxiv_id2file_names: dict[str, set[str]], arxiv_id2code_urls: dict[str, set[str]]
) -> dict[str, dict[str, set[str]]]:
    arxiv_id2urls = dict()
    for arxiv_id, code_urls in arxiv_id2code_urls.items():
        arxiv_id2urls[arxiv_id] = {"api": code_urls}
        # intersection of the two sets
        if arxiv_id in arxiv_id2file_names:
            arxiv_id2urls[arxiv_id]["docs"] = arxiv_id2file_names[arxiv_id]
    for arxiv_id, file_names in arxiv_id2file_names.items():
        if arxiv_id not in arxiv_id2code_urls:
            arxiv_id2urls[arxiv_id] = {"docs": file_names}
    # reverse sort by the arxiv_id (the newest papers first)
    ret = dict(sorted(arxiv_id2urls.items(), key=lambda item: item[0], reverse=True))
    return ret


def _format_doc_link(doc_paths: list[str]) -> list[str]:
    return [
        f"[{doc_path}](https://python.langchain.com/{doc_path})"
        for doc_path in doc_paths
    ]


def _format_api_ref_link(
    doc_paths: list[str], compact: bool = False
) -> list[str]:  # TODO
    # agents/langchain_core.agents.AgentAction.html#langchain_core.agents.AgentAction
    ret = []
    for doc_path in doc_paths:
        module = doc_path.split("#")[1].replace("module-", "")
        if compact and module.count(".") > 2:
            # langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI
            #  -> langchain_community.llms...OCIModelDeploymentTGI
            module_parts = module.split(".")
            module = f"{module_parts[0]}.{module_parts[1]}...{module_parts[-1]}"
        ret.append(
            f"[{module}](https://api.python.langchain.com/en/latest/{doc_path.split('langchain.com/')[-1]})"
        )
    return ret


def log_results(arxiv_id2urls):
    arxiv_ids = arxiv_id2urls.keys()
    doc_number, api_number = 0, 0
    for urls in arxiv_id2urls.values():
        if "docs" in urls:
            doc_number += len(urls["docs"])
        if "api" in urls:
            api_number += len(urls["api"])
    logger.info(
        f"Found {len(arxiv_ids)} arXiv references in the {doc_number} docs and in {api_number} API Refs."
    )


class ArxivAPIWrapper(BaseModel):
    arxiv_search: Any  #: :meta private:
    arxiv_exceptions: Any  # :meta private:

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the python package exists in environment."""
        try:
            import arxiv

            values["arxiv_search"] = arxiv.Search
            values["arxiv_exceptions"] = (
                arxiv.ArxivError,
                arxiv.UnexpectedEmptyPageError,
                arxiv.HTTPError,
            )
        except ImportError:
            raise ImportError(
                "Could not import arxiv python package. "
                "Please install it with `pip install arxiv`."
            )
        return values

    def get_papers(
        self, arxiv_id2urls: dict[str, dict[str, set[str]]]
    ) -> list[ArxivPaper]:
        """
        Performs an arxiv search and returns information about the papers found.

        If an error occurs or no documents found, error text
        is returned instead.

        Args:
            arxiv_id2urls: Dictionary with arxiv_id as key and dictionary
                           with sets of doc file names and API Ref urls.

        Returns:
            List of ArxivPaper objects.
        """  # noqa: E501

        def cut_authors(authors: list) -> list[str]:
            if len(authors) > 3:
                return [str(a) for a in authors[:3]] + [" et al."]
            else:
                return [str(a) for a in authors]

        if not arxiv_id2urls:
            return []
        try:
            arxiv_ids = list(arxiv_id2urls.keys())
            results = self.arxiv_search(
                id_list=arxiv_ids,
                max_results=len(arxiv_ids),
            ).results()
        except self.arxiv_exceptions as ex:
            raise ex
        papers = [
            ArxivPaper(
                arxiv_id=result.entry_id.split("/")[-1],
                title=result.title,
                authors=cut_authors(result.authors),
                abstract=result.summary,
                url=result.entry_id,
                published_date=str(result.published.date()),
                referencing_docs=urls["docs"] if "docs" in urls else [],
                referencing_api_refs=urls["api"] if "api" in urls else [],
            )
            for result, urls in zip(results, arxiv_id2urls.values())
        ]
        return papers


def generate_arxiv_references_page(file_name: str, papers: list[ArxivPaper]) -> None:
    with open(file_name, "w") as f:
        # Write the table headers
        f.write("""# arXiv

LangChain implements the latest research in the field of Natural Language Processing.
This page contains `arXiv` papers referenced in the LangChain Documentation and API Reference.

## Summary

| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation and API Reference |
|------------------|---------|-------------------|-------------------------|
""")
        for paper in papers:
            refs = []
            if paper.referencing_docs:
                refs += [
                    "`Docs:` " + ", ".join(_format_doc_link(paper.referencing_docs))
                ]
            if paper.referencing_api_refs:
                refs += [
                    "`API:` "
                    + ", ".join(
                        _format_api_ref_link(paper.referencing_api_refs, compact=True)
                    )
                ]
            refs_str = ", ".join(refs)

            title_link = f"[{paper.title}]({paper.url})"
            f.write(
                f"| {' | '.join([f'`{paper.arxiv_id}` {title_link}', ', '.join(paper.authors), paper.published_date, refs_str])}\n"
            )

        for paper in papers:
            docs_refs = (
                f"- **LangChain Documentation:** {', '.join(_format_doc_link(paper.referencing_docs))}"
                if paper.referencing_docs
                else ""
            )
            api_ref_refs = (
                f"- **LangChain API Reference:** {', '.join(_format_api_ref_link(paper.referencing_api_refs))}"
                if paper.referencing_api_refs
                else ""
            )
            f.write(f"""
## {paper.title}

- **arXiv id:** {paper.arxiv_id}
- **Title:** {paper.title}
- **Authors:** {', '.join(paper.authors)}
- **Published Date:** {paper.published_date}
- **URL:** {paper.url}
{docs_refs}
{api_ref_refs}

**Abstract:** {paper.abstract}
""")

    logger.info(f"Created the {file_name} file with {len(papers)} arXiv references.")


def main():
    # search the documentation and the API Reference for arXiv references:
    arxiv_id2module_name_and_members = search_code_for_arxiv_references(CODE_DIR)
    arxiv_id2code_urls = convert_module_name_and_members_to_urls(
        arxiv_id2module_name_and_members
    )
    arxiv_id2file_names = search_documentation_for_arxiv_references(DOCS_DIR)
    arxiv_id2urls = compound_urls(arxiv_id2file_names, arxiv_id2code_urls)
    log_results(arxiv_id2urls)

    # get the arXiv paper information
    papers = ArxivAPIWrapper().get_papers(arxiv_id2urls)

    # generate the arXiv references page
    output_file = str(DOCS_DIR / "additional_resources" / "arxiv_references.mdx")
    generate_arxiv_references_page(output_file, papers)


if __name__ == "__main__":
    main()
@ -21,7 +21,7 @@ if __name__ == "__main__":
with open(full_destination, "r") as f:
content = f.read()
# remove images
content = re.sub("\!\[.*?\]\((.*?)\)", "", content)
content = re.sub(r"\!\[.*?\]\((.*?)\)", "", content)
with open(full_destination, "w") as f:
f.write(content)
@ -39,7 +39,7 @@ sidebar_class_name: hidden
content = f.read()
# replace relative links
content = re.sub("\]\(\.\.\/", "](/docs/templates/", content)
content = re.sub(r"\]\(\.\.\/", "](/docs/templates/", content)
with open(templates_index_intermediate, "w") as f:
f.write(sidebar_hidden + content)
@ -11,7 +11,7 @@ def update_links(doc_path, docs_link):
content = f.read()
# replace relative links
content = re.sub("\]\(\.\/", f"]({docs_link}", content)
content = re.sub(r"\]\(\.\/", f"]({docs_link}", content)
with open(DOCS_DIR / doc_path, "w") as f:
f.write(content)
@ -69,13 +69,24 @@ module.exports = {
collapsed: false,
collapsible: false,
items: [
"versions/overview",
"versions/release_policy",
"versions/packages",
{
type: "autogenerated",
dirName: "versions",
}
type: "category",
label: "v0.2",
link: {type: 'doc', id: 'versions/v0_2/index'},
collapsible: false,
collapsed: false,
items: [{
type: 'autogenerated',
dirName: 'versions/v0_2',
className: 'hidden',
}],
},
],
},
"security",
"security"
],
integrations: [
{
@ -147,7 +147,7 @@ nav, h1, h2, h3, h4 {
.theme-doc-sidebar-menu > .theme-doc-sidebar-item-category:not(:first-of-type),
.theme-doc-sidebar-menu > .theme-doc-sidebar-item-link,
.theme-doc-sidebar-menu > .theme-doc-sidebar-item-link.theme-doc-sidebar-item-link-level-1 {
.theme-doc-sidebar-menu > .theme-doc-sidebar-item-link.theme-doc-sidebar-item-link-level-1:not(:first-of-type) {
margin-top: 1rem;
}
@ -20,6 +20,10 @@
{
"source": "/docs/:path(.*/?)*",
"destination": "/v0.1/docs/:path*"
},
{
"source": "/cookbook(/?)",
"destination": "/v0.1/docs/cookbook/"
}
]
}
@ -1,3 +1,4 @@
-e ../libs/core
-e ../libs/langchain
-e ../libs/community
-e ../libs/experimental
@ -6,7 +7,5 @@ langchain-cohere
langchain-astradb
langchain-nvidia-ai-endpoints
langchain-elasticsearch
langchain-postgres
urllib3==1.26.18
nbconvert==7.16.4
langchain-core==0.1.52
@ -80,8 +80,8 @@ class BaichuanTextEmbeddings(BaseModel, Embeddings):
else:
# Log error or handle unsuccessful response appropriately
print( # noqa: T201
f"""Error: Received status code {response.status_code} from
embedding API"""
f"Error: Received status code {response.status_code} from "
"embedding API"
)
return None
except Exception as e:
@ -47,8 +47,19 @@ class SVEndpointHandler:
"""
result: Dict[str, Any] = {}
try:
text_result = response.text.strip().split("\n")[-1]
result = {"data": json.loads("".join(text_result.split("data: ")[1:]))}
lines_result = response.text.strip().split("\n")
text_result = lines_result[-1]
if response.status_code == 200 and json.loads(text_result).get("error"):
completion = ""
for line in lines_result[:-1]:
completion += json.loads(line)["result"]["responses"][0][
"stream_token"
]
text_result = lines_result[-2]
result = json.loads(text_result)
result["result"]["responses"][0]["completion"] = completion
else:
result = json.loads(text_result)
except Exception as e:
result["detail"] = str(e)
if "status_code" not in result:
@ -58,25 +69,19 @@ class SVEndpointHandler:
@staticmethod
def _process_streaming_response(
response: requests.Response,
) -> Generator[GenerationChunk, None, None]:
) -> Generator[Dict, None, None]:
"""Process the streaming response"""
try:
import sseclient
except ImportError:
raise ImportError(
"could not import sseclient library"
"Please install it with `pip install sseclient-py`."
)
client = sseclient.SSEClient(response)
close_conn = False
for event in client.events():
if event.event == "error_event":
close_conn = True
text = json.dumps({"event": event.event, "data": event.data})
chunk = GenerationChunk(text=text)
yield chunk
if close_conn:
client.close()
for line in response.iter_lines():
chunk = json.loads(line)
if "status_code" not in chunk:
chunk["status_code"] = response.status_code
if chunk["status_code"] == 200 and chunk.get("error"):
chunk["result"] = {"responses": [{"stream_token": ""}]}
return chunk
yield chunk
except Exception as e:
raise RuntimeError(f"Error processing streaming response: {e}")
def _get_full_url(self) -> str:
"""
@ -105,25 +110,21 @@ class SVEndpointHandler:
:returns: Prediction results
:rtype: dict
"""
if isinstance(input, str):
input = [input]
parsed_input = []
for element in input:
parsed_element = {
"conversation_id": "sambaverse-conversation-id",
"messages": [
{
"message_id": 0,
"role": "user",
"content": element,
}
],
}
parsed_input.append(json.dumps(parsed_element))
parsed_element = {
"conversation_id": "sambaverse-conversation-id",
"messages": [
{
"message_id": 0,
"role": "user",
"content": input,
}
],
}
parsed_input = json.dumps(parsed_element)
if params:
data = {"inputs": parsed_input, "params": json.loads(params)}
data = {"instance": parsed_input, "params": json.loads(params)}
else:
data = {"inputs": parsed_input}
data = {"instance": parsed_input}
response = self.http_session.post(
self._get_full_url(),
headers={
@ -141,7 +142,7 @@ class SVEndpointHandler:
sambaverse_model_name: Optional[str],
input: Union[List[str], str],
params: Optional[str] = "",
) -> Iterator[GenerationChunk]:
) -> Iterator[Dict]:
"""
NLP predict using inline input string.
@ -153,25 +154,21 @@ class SVEndpointHandler:
:returns: Prediction results
:rtype: dict
"""
if isinstance(input, str):
input = [input]
parsed_input = []
for element in input:
parsed_element = {
"conversation_id": "sambaverse-conversation-id",
"messages": [
{
"message_id": 0,
"role": "user",
"content": element,
}
],
}
parsed_input.append(json.dumps(parsed_element))
parsed_element = {
"conversation_id": "sambaverse-conversation-id",
"messages": [
{
"message_id": 0,
"role": "user",
"content": input,
}
],
}
parsed_input = json.dumps(parsed_element)
if params:
data = {"inputs": parsed_input, "params": json.loads(params)}
data = {"instance": parsed_input, "params": json.loads(params)}
else:
data = {"inputs": parsed_input}
data = {"instance": parsed_input}
# Streaming output
response = self.http_session.post(
self._get_full_url(),
@ -213,7 +210,7 @@ class Sambaverse(LLM):
"max_tokens_to_generate": 100,
"temperature": 0.7,
"top_p": 1.0,
"repetition_penalty": 1,
"repetition_penalty": 1.0,
"top_k": 50,
},
)
@ -279,13 +276,17 @@ class Sambaverse(LLM):
The tuning parameters as a JSON string.
"""
_model_kwargs = self.model_kwargs or {}
_stop_sequences = _model_kwargs.get("stop_sequences", [])
_stop_sequences = stop or _stop_sequences
_model_kwargs["stop_sequences"] = ",".join(f'"{x}"' for x in _stop_sequences)
_kwarg_stop_sequences = _model_kwargs.get("stop_sequences", [])
_stop_sequences = stop or _kwarg_stop_sequences
if not _kwarg_stop_sequences:
_model_kwargs["stop_sequences"] = ",".join(
f'"{x}"' for x in _stop_sequences
)
tuning_params_dict = {
k: {"type": type(v).__name__, "value": str(v)}
for k, v in (_model_kwargs.items())
}
_model_kwargs["stop_sequences"] = _kwarg_stop_sequences
tuning_params = json.dumps(tuning_params_dict)
return tuning_params
@ -313,14 +314,17 @@ class Sambaverse(LLM):
self.sambaverse_api_key, self.sambaverse_model_name, prompt, tuning_params
)
if response["status_code"] != 200:
optional_details = response["details"]
optional_message = response["message"]
optional_code = response["error"].get("code")
optional_details = response["error"].get("details")
optional_message = response["error"].get("message")
raise ValueError(
f"Sambanova /complete call failed with status code "
f"{response['status_code']}. Details: {optional_details}"
f"{response['status_code']}. Message: {optional_message}"
f"{response['status_code']}."
f"Message: {optional_message}"
f"Details: {optional_details}"
f"Code: {optional_code}"
)
return response["data"]["completion"]
return response["result"]["responses"][0]["completion"]
def _handle_completion_requests(
self, prompt: Union[List[str], str], stop: Optional[List[str]]
@ -359,7 +363,20 @@ class Sambaverse(LLM):
for chunk in sdk.nlp_predict_stream(
self.sambaverse_api_key, self.sambaverse_model_name, prompt, tuning_params
):
yield chunk
if chunk["status_code"] != 200:
optional_code = chunk["error"].get("code")
optional_details = chunk["error"].get("details")
optional_message = chunk["error"].get("message")
raise ValueError(
f"Sambanova /complete call failed with status code "
f"{chunk['status_code']}."
f"Message: {optional_message}"
f"Details: {optional_details}"
f"Code: {optional_code}"
)
text = chunk["result"]["responses"][0]["stream_token"]
generated_chunk = GenerationChunk(text=text)
yield generated_chunk
def _stream(
self,
@ -1,8 +1,4 @@
"""Util that calls Bing Search.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
"""
"""Util that calls Bing Search."""
from typing import Dict, List
import requests
@ -11,11 +7,7 @@ from langchain_core.utils import get_from_dict_or_env
class BingSearchAPIWrapper(BaseModel):
"""Wrapper for Bing Search API.
In order to set this up, follow instructions at:
https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e
"""
"""Wrapper for Bing Search API."""
bing_subscription_key: str
bing_search_url: str
@ -446,10 +446,10 @@ class Annoy(VectorStore):
"You will need to set `allow_dangerous_deserialization` to `True` to "
"enable deserialization. If you do this, make sure that you "
"trust the source of the data. For example, if you are loading a "
"file that you created, and no that no one else has modified the file, "
"then this is safe to do. Do not set this to `True` if you are loading "
"a file from an untrusted source (e.g., some random site on the "
"internet.)."
"file that you created, and know that no one else has modified the "
"file, then this is safe to do. Do not set this to `True` if you are "
"loading a file from an untrusted source (e.g., some random site on "
"the internet.)."
)
path = Path(folder_path)
# load index separately since it is not picklable
@ -1082,10 +1082,10 @@ class FAISS(VectorStore):
"You will need to set `allow_dangerous_deserialization` to `True` to "
"enable deserialization. If you do this, make sure that you "
"trust the source of the data. For example, if you are loading a "
"file that you created, and no that no one else has modified the file, "
"then this is safe to do. Do not set this to `True` if you are loading "
"a file from an untrusted source (e.g., some random site on the "
"internet.)."
"file that you created, and know that no one else has modified the "
"file, then this is safe to do. Do not set this to `True` if you are "
"loading a file from an untrusted source (e.g., some random site on "
"the internet.)."
)
path = Path(folder_path)
# load index separately since it is not picklable
@ -463,7 +463,7 @@ class ScaNN(VectorStore):
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries
embedding: Embeddings to use when generating queries
index_name: for saving with a specific index file name
allow_dangerous_deserialization: whether to allow deserialization
of the data which involves loading a pickle file.
@ -479,10 +479,10 @@ class ScaNN(VectorStore):
"You will need to set `allow_dangerous_deserialization` to `True` to "
"enable deserialization. If you do this, make sure that you "
"trust the source of the data. For example, if you are loading a "
"file that you created, and no that no one else has modified the file, "
"then this is safe to do. Do not set this to `True` if you are loading "
"a file from an untrusted source (e.g., some random site on the "
"internet.)."
"file that you created, and know that no one else has modified the "
"file, then this is safe to do. Do not set this to `True` if you are "
"loading a file from an untrusted source (e.g., some random site on "
"the internet.)."
)
path = Path(folder_path)
scann_path = path / "{index_name}.scann".format(index_name=index_name)
@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
[[package]]
name = "aenum"
@ -3455,7 +3455,6 @@ files = [
{file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:227b178b22a7f91ae88525810441791b1ca1fc71c86f03190911793be15cec3d"},
{file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:780eb6383fbae12afa819ef676fc93e1548ae4b076c004a393af26a04b460742"},
{file = "jq-1.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08ded6467f4ef89fec35b2bf310f210f8cd13fbd9d80e521500889edf8d22441"},
{file = "jq-1.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:49e44ed677713f4115bd5bf2dbae23baa4cd503be350e12a1c1f506b0687848f"},
{file = "jq-1.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:984f33862af285ad3e41e23179ac4795f1701822473e1a26bf87ff023e5a89ea"},
{file = "jq-1.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42264fafc6166efb5611b5d4cb01058887d050a6c19334f6a3f8a13bb369df5"},
{file = "jq-1.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a67154f150aaf76cc1294032ed588436eb002097dd4fd1e283824bf753a05080"},
@ -3966,7 +3965,7 @@ files = [
[[package]]
name = "langchain"
version = "0.2.0rc2"
version = "0.2.0"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -3977,8 +3976,8 @@ develop = true
aiohttp = "^3.8.3"
async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""}
dataclasses-json = ">= 0.5.7, < 0.7"
langchain-core = ">=0.1.52,<0.3"
langchain-text-splitters = ">=0.0.1,<0.1"
langchain-core = "^0.2.0"
langchain-text-splitters = "^0.2.0"
langsmith = "^0.1.17"
numpy = "^1"
pydantic = ">=1,<3"
@ -3998,7 +3997,7 @@ embeddings = ["sentence-transformers (>=2,<3)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.1,<0.2)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
javascript = ["esprima (>=4.0.1,<5.0.0)"]
llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
openai = ["openai (<2)", "tiktoken (>=0.3.2,<0.6.0)"]
openai = ["openai (<2)", "tiktoken (>=0.7,<1.0)"]
qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"]
text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
@ -4008,7 +4007,7 @@ url = "../langchain"
[[package]]
name = "langchain-core"
version = "0.2.0rc1"
version = "0.2.0"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -4032,7 +4031,7 @@ url = "../core"
[[package]]
name = "langchain-text-splitters"
version = "0.0.2"
version = "0.2.0"
description = "LangChain text splitting utilities"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -4040,7 +4039,7 @@ files = []
develop = true
[package.dependencies]
langchain-core = ">=0.1.28,<0.3"
langchain-core = "^0.2.0"
[package.extras]
extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"]
@ -6105,6 +6104,8 @@ files = [
{file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"},
{file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"},
{file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"},
{file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"},
{file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"},
{file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"},
{file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"},
{file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"},
@ -6147,6 +6148,7 @@ files = [
{file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"},
@ -6155,6 +6157,8 @@ files = [
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"},
@ -6687,31 +6691,26 @@ python-versions = ">=3.8"
files = [
{file = "PyMuPDF-1.23.26-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:645a05321aecc8c45739f71f0eb574ce33138d19189582ffa5241fea3a8e2549"},
{file = "PyMuPDF-1.23.26-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:2dfc9e010669ae92fade6fb72aaea49ebe3b8dcd7ee4dcbbe50115abcaa4d3fe"},
{file = "PyMuPDF-1.23.26-cp310-none-manylinux2014_aarch64.whl", hash = "sha256:734ee380b3abd038602be79114194a3cb74ac102b7c943bcb333104575922c50"},
{file = "PyMuPDF-1.23.26-cp310-none-manylinux2014_x86_64.whl", hash = "sha256:b22f8d854f8196ad5b20308c1cebad3d5189ed9f0988acbafa043947ea7e6c55"},
{file = "PyMuPDF-1.23.26-cp310-none-win32.whl", hash = "sha256:cc0f794e3466bc96b5bf79d42fbc1551428751e3fef38ebc10ac70396b676144"},
{file = "PyMuPDF-1.23.26-cp310-none-win_amd64.whl", hash = "sha256:2eb701247d8e685a24e45899d1175f01a3ce5fc792a4431c91fbb68633b29298"},
{file = "PyMuPDF-1.23.26-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:e2804a64bb57da414781e312fb0561f6be67658ad57ed4a73dce008b23fc70a6"},
{file = "PyMuPDF-1.23.26-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:97b40bb22e3056874634617a90e0ed24a5172cf71791b9e25d1d91c6743bc567"},
{file = "PyMuPDF-1.23.26-cp311-none-manylinux2014_aarch64.whl", hash = "sha256:fab8833559bc47ab26ce736f915b8fc1dd37c108049b90396f7cd5e1004d7593"},
{file = "PyMuPDF-1.23.26-cp311-none-manylinux2014_x86_64.whl", hash = "sha256:f25aafd3e7fb9d7761a22acf2b67d704f04cc36d4dc33a3773f0eb3f4ec3606f"},
{file = "PyMuPDF-1.23.26-cp311-none-win32.whl", hash = "sha256:05e672ed3e82caca7ef02a88ace30130b1dd392a1190f03b2b58ffe7aa331400"},
{file = "PyMuPDF-1.23.26-cp311-none-win_amd64.whl", hash = "sha256:92b3c4dd4d0491d495f333be2d41f4e1c155a409bc9d04b5ff29655dccbf4655"},
{file = "PyMuPDF-1.23.26-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:a217689ede18cc6991b4e6a78afee8a440b3075d53b9dec4ba5ef7487d4547e9"},
{file = "PyMuPDF-1.23.26-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:42ad2b819b90ce1947e11b90ec5085889df0a2e3aa0207bc97ecacfc6157cabc"},
{file = "PyMuPDF-1.23.26-cp312-none-manylinux2014_aarch64.whl", hash = "sha256:99607649f89a02bba7d8ebe96e2410664316adc95e9337f7dfeff6a154f93049"},
{file = "PyMuPDF-1.23.26-cp312-none-manylinux2014_x86_64.whl", hash = "sha256:bb42d4b8407b4de7cb58c28f01449f16f32a6daed88afb41108f1aeb3552bdd4"},
{file = "PyMuPDF-1.23.26-cp312-none-win32.whl", hash = "sha256:c40d044411615e6f0baa7d3d933b3032cf97e168c7fa77d1be8a46008c109aee"},
{file = "PyMuPDF-1.23.26-cp312-none-win_amd64.whl", hash = "sha256:3f876533aa7f9a94bcd9a0225ce72571b7808260903fec1d95c120bc842fb52d"},
{file = "PyMuPDF-1.23.26-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:52df831d46beb9ff494f5fba3e5d069af6d81f49abf6b6e799ee01f4f8fa6799"},
{file = "PyMuPDF-1.23.26-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:0bbb0cf6593e53524f3fc26fb5e6ead17c02c64791caec7c4afe61b677dedf80"},
{file = "PyMuPDF-1.23.26-cp38-none-manylinux2014_aarch64.whl", hash = "sha256:5ef4360f20015673c20cf59b7e19afc97168795188c584254ed3778cde43ce77"},
{file = "PyMuPDF-1.23.26-cp38-none-manylinux2014_x86_64.whl", hash = "sha256:d7cd88842b2e7f4c71eef4d87c98c35646b80b60e6375392d7ce40e519261f59"},
{file = "PyMuPDF-1.23.26-cp38-none-win32.whl", hash = "sha256:6577e2f473625e2d0df5f5a3bf1e4519e94ae749733cc9937994d1b256687bfa"},
{file = "PyMuPDF-1.23.26-cp38-none-win_amd64.whl", hash = "sha256:fbe1a3255b2cd0d769b2da2c4efdd0c0f30d4961a1aac02c0f75cf951b337aa4"},
{file = "PyMuPDF-1.23.26-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:73fce034f2afea886a59ead2d0caedf27e2b2a8558b5da16d0286882e0b1eb82"},
{file = "PyMuPDF-1.23.26-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:b3de8618b7cb5b36db611083840b3bcf09b11a893e2d8262f4e042102c7e65de"},
{file = "PyMuPDF-1.23.26-cp39-none-manylinux2014_aarch64.whl", hash = "sha256:879e7f5ad35709d8760ab6103c3d5dac8ab8043a856ab3653fd324af7358ee87"},
{file = "PyMuPDF-1.23.26-cp39-none-manylinux2014_x86_64.whl", hash = "sha256:deee96c2fd415ded7b5070d8d5b2c60679aee6ed0e28ac0d2cb998060d835c2c"},
{file = "PyMuPDF-1.23.26-cp39-none-win32.whl", hash = "sha256:9f7f4ef99dd8ac97fb0b852efa3dcbee515798078b6c79a6a13c7b1e7c5d41a4"},
{file = "PyMuPDF-1.23.26-cp39-none-win_amd64.whl", hash = "sha256:ba9a54552c7afb9ec85432c765e2fa9a81413acfaa7d70db7c9b528297749e5b"},
@ -7152,6 +7151,7 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@ -10084,4 +10084,4 @@ extended-testing = ["aiosqlite", "aleph-alpha-client", "anthropic", "arxiv", "as
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "d1482d62a7b1b7701af1aafe760e69149abbaf66faab9f615d410e459fbb6829"
content-hash = "34179305bdc9ea3a20dd788263a3671c0917dace6513e75d0171a24d9e2cb77b"

@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-community"
version = "0.2.0rc1"
version = "0.2.0"
description = "Community contributed LangChain integrations."
authors = []
license = "MIT"
@ -9,8 +9,8 @@ repository = "https://github.com/langchain-ai/langchain"
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = ">=0.1.52,<0.3"
langchain = "^0.2.0rc2"
langchain-core = "^0.2.0"
langchain = "^0.2.0"
SQLAlchemy = ">=1.4,<3"
requests = "^2"
PyYAML = ">=5.3"

@ -13,6 +13,7 @@ from typing import (
Dict,
Iterator,
List,
Literal,
Optional,
Sequence,
Type,
@ -20,6 +21,8 @@ from typing import (
cast,
)
from typing_extensions import TypedDict
from langchain_core._api import deprecated
from langchain_core.caches import BaseCache
from langchain_core.callbacks import (
@ -60,6 +63,15 @@ if TYPE_CHECKING:
from langchain_core.tools import BaseTool
class LangSmithParams(TypedDict, total=False):
ls_provider: str
ls_model_name: str
ls_model_type: Literal["chat"]
ls_temperature: Optional[float]
ls_max_tokens: Optional[int]
ls_stop: Optional[List[str]]
def generate_from_stream(stream: Iterator[ChatGenerationChunk]) -> ChatResult:
"""Generate from a stream."""
@ -206,13 +218,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
messages = self._convert_input(input).to_messages()
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop, **kwargs}
inheritable_metadata = {
**(config.get("metadata") or {}),
**self._get_ls_params(stop=stop, **kwargs),
}
callback_manager = CallbackManager.configure(
config.get("callbacks"),
self.callbacks,
self.verbose,
config.get("tags"),
self.tags,
config.get("metadata"),
inheritable_metadata,
self.metadata,
)
(run_manager,) = callback_manager.on_chat_model_start(
@ -273,13 +289,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
messages = self._convert_input(input).to_messages()
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop, **kwargs}
inheritable_metadata = {
**(config.get("metadata") or {}),
**self._get_ls_params(stop=stop, **kwargs),
}
callback_manager = AsyncCallbackManager.configure(
config.get("callbacks"),
self.callbacks,
self.verbose,
config.get("tags"),
self.tags,
config.get("metadata"),
inheritable_metadata,
self.metadata,
)
(run_manager,) = await callback_manager.on_chat_model_start(
@ -336,6 +356,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
params["stop"] = stop
return {**params, **kwargs}
def _get_ls_params(
self,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> LangSmithParams:
"""Get standard params for tracing."""
ls_params = LangSmithParams(ls_model_type="chat")
if stop:
ls_params["ls_stop"] = stop
return ls_params
def _get_llm_string(self, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
if self.is_lc_serializable():
params = {**kwargs, **{"stop": stop}}
@ -385,6 +416,10 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
"""
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop}
inheritable_metadata = {
**(metadata or {}),
**self._get_ls_params(stop=stop, **kwargs),
}
callback_manager = CallbackManager.configure(
callbacks,
@ -392,7 +427,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
self.verbose,
tags,
self.tags,
metadata,
inheritable_metadata,
self.metadata,
)
run_managers = callback_manager.on_chat_model_start(
@ -472,6 +507,10 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
"""
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop}
inheritable_metadata = {
**(metadata or {}),
**self._get_ls_params(stop=stop, **kwargs),
}
callback_manager = AsyncCallbackManager.configure(
callbacks,
@ -479,7 +518,7 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
self.verbose,
tags,
self.tags,
metadata,
inheritable_metadata,
self.metadata,
)

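Taken together, the hunks above mean every traced chat-model run now carries standardized `ls_*` metadata: `_get_ls_params` supplies it, and both callback managers receive it merged into the inheritable metadata. A minimal sketch of how a concrete integration might extend the hook — the `EchoChatModel` class here is hypothetical, not a shipped model:

```python
from typing import Any, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel, LangSmithParams
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult


class EchoChatModel(BaseChatModel):
    """Hypothetical chat model that echoes the last message back."""

    model_name: str = "echo-1"
    temperature: Optional[float] = None

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        message = AIMessage(content=str(messages[-1].content))
        return ChatResult(generations=[ChatGeneration(message=message)])

    def _get_ls_params(
        self, stop: Optional[List[str]] = None, **kwargs: Any
    ) -> LangSmithParams:
        # start from the base params (ls_model_type, ls_stop) and add our own
        params = super()._get_ls_params(stop=stop, **kwargs)
        params["ls_provider"] = "echo"
        params["ls_model_name"] = self.model_name
        if self.temperature is not None:
            params["ls_temperature"] = self.temperature
        return params


print(EchoChatModel().invoke("hello").content)  # -> "hello"
```

This is why the test expectations later in this commit gain `ls_model_type` and `ls_stop` keys in their event metadata.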
@ -95,7 +95,7 @@ if TYPE_CHECKING:
RunLog,
RunLogPatch,
)
from langchain_core.tracers.root_listeners import Listener
from langchain_core.tracers.schemas import Run
Other = TypeVar("Other")
@ -1258,9 +1258,15 @@ class Runnable(Generic[Input, Output], ABC):
def with_listeners(
self,
*,
on_start: Optional[Listener] = None,
on_end: Optional[Listener] = None,
on_error: Optional[Listener] = None,
on_start: Optional[
Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
] = None,
on_end: Optional[
Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
] = None,
on_error: Optional[
Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
] = None,
) -> Runnable[Input, Output]:
"""
Bind lifecycle listeners to a Runnable, returning a new Runnable.
@ -1276,22 +1282,26 @@ class Runnable(Generic[Input, Output], ABC):
Example:
.. code-block:: python
from langchain_core.runnables import RunnableLambda
from langchain_core.tracers.schemas import Run
import time
def test_runnable(time_to_sleep: int):
time.sleep(time_to_sleep)
def fn_start(run_obj : Runnable):
def fn_start(run_obj: Run):
print("start_time:", run_obj.start_time)
def fn_end(run_obj : Runnable):
def fn_end(run_obj: Run):
print("end_time:", run_obj.end_time)
RunnableLambda(test_runnable).with_listeners(
chain = RunnableLambda(test_runnable).with_listeners(
on_start=fn_start,
on_end=fn_end
).invoke(2)
)
chain.invoke(2)
"""
from langchain_core.tracers.root_listeners import RootListenersTracer
@ -1339,6 +1349,7 @@ class Runnable(Generic[Input, Output], ABC):
Example:
.. code-block:: python
from langchain_core.runnables import RunnableLambda
count = 0
@ -1716,6 +1727,9 @@ class Runnable(Generic[Input, Output], ABC):
"""Helper method to transform an Iterator of Input values into an Iterator of
Output values, with callbacks.
Use this to implement `stream()` or `transform()` in Runnable subclasses."""
# Mixin that is used by both astream log and astream events implementation
from langchain_core.tracers._streaming import _StreamingCallbackHandler
# tee the input so we can iterate over it twice
input_for_tracing, input_for_transform = tee(input, 2)
# Start the input iterator to ensure the input runnable starts before this one
@ -1742,6 +1756,17 @@ class Runnable(Generic[Input, Output], ABC):
context = copy_context()
context.run(var_child_runnable_config.set, child_config)
iterator = context.run(transformer, input_for_transform, **kwargs) # type: ignore[arg-type]
if stream_handler := next(
(
cast(_StreamingCallbackHandler, h)
for h in run_manager.handlers
# instance check OK here, it's a mixin
if isinstance(h, _StreamingCallbackHandler) # type: ignore[misc]
),
None,
):
# populates streamed_output in astream_log() output if needed
iterator = stream_handler.tap_output_iter(run_manager.run_id, iterator)
try:
while True:
chunk: Output = context.run(next, iterator) # type: ignore
@ -1969,7 +1994,7 @@ class RunnableSerializable(Serializable, Runnable[Input, Output]):
# uses the default model ChatAnthropic
print(model.invoke("which organization created you?").content)
# uses ChatOpenaAI
# uses ChatOpenAI
print(
model.with_config(
configurable={"llm": "openai"}
@ -4225,9 +4250,15 @@ class RunnableEach(RunnableEachBase[Input, Output]):
def with_listeners(
self,
*,
on_start: Optional[Listener] = None,
on_end: Optional[Listener] = None,
on_error: Optional[Listener] = None,
on_start: Optional[
Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
] = None,
on_end: Optional[
Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
] = None,
on_error: Optional[
Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
] = None,
) -> RunnableEach[Input, Output]:
"""
Bind lifecycle listeners to a Runnable, returning a new Runnable.
@ -4715,9 +4746,15 @@ class RunnableBinding(RunnableBindingBase[Input, Output]):
def with_listeners(
self,
*,
on_start: Optional[Listener] = None,
on_end: Optional[Listener] = None,
on_error: Optional[Listener] = None,
on_start: Optional[
Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
] = None,
on_end: Optional[
Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
] = None,
on_error: Optional[
Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
] = None,
) -> Runnable[Input, Output]:
"""Bind lifecycle listeners to a Runnable, returning a new Runnable.

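A quick sketch of the widened `with_listeners` contract above: each listener may now accept either the `Run` alone or the `Run` plus the active `RunnableConfig`. This uses only the public API shown in the hunks:

```python
from langchain_core.runnables import RunnableConfig, RunnableLambda
from langchain_core.tracers.schemas import Run


def fn_start(run: Run, config: RunnableConfig) -> None:
    # two-argument listeners also receive the RunnableConfig
    print("start:", run.name, "tags:", config.get("tags"))


def fn_end(run: Run) -> None:
    # one-argument listeners keep working unchanged
    print("end:", run.end_time)


chain = RunnableLambda(lambda x: x + 1).with_listeners(on_start=fn_start, on_end=fn_end)
chain.invoke(1)
```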
@ -1,6 +1,6 @@
"""Internal tracers used for stream_log and astream events implementations."""
import abc
from typing import AsyncIterator, TypeVar
from typing import AsyncIterator, Iterator, TypeVar
from uuid import UUID
T = TypeVar("T")
@ -22,6 +22,10 @@ class _StreamingCallbackHandler(abc.ABC):
) -> AsyncIterator[T]:
"""Used for internal astream_log and astream events implementations."""
@abc.abstractmethod
def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
"""Used for internal astream_log and astream events implementations."""
__all__ = [
"_StreamingCallbackHandler",

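For orientation, both concrete implementations of the new `tap_output_iter` below follow the same shape: a generator that yields the wrapped iterator's chunks unchanged while emitting one event per chunk as a side effect. A toy standalone version, with `print` standing in for the real event send:

```python
from typing import Iterator, TypeVar
from uuid import UUID, uuid4

T = TypeVar("T")


def tap(run_id: UUID, output: Iterator[T]) -> Iterator[T]:
    # mirror each chunk to a side channel, then pass it through untouched
    for chunk in output:
        print(f"on_stream event for run {run_id}: {chunk!r}")  # stand-in for _send(...)
        yield chunk


assert list(tap(uuid4(), iter(["a", "b"]))) == ["a", "b"]
```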
@ -9,6 +9,7 @@ from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Sequence,
@ -102,10 +103,10 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
self.send_stream = memory_stream.get_send_stream()
self.receive_stream = memory_stream.get_receive_stream()
async def _send(self, event: StreamEvent, event_type: str) -> None:
def _send(self, event: StreamEvent, event_type: str) -> None:
"""Send an event to the stream."""
if self.root_event_filter.include_event(event, event_type):
await self.send_stream.send(event)
self.send_stream.send_nowait(event)
def __aiter__(self) -> AsyncIterator[Any]:
"""Iterate over the receive stream."""
@ -119,7 +120,26 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
run_info = self.run_map.get(run_id)
if run_info is None:
raise AssertionError(f"Run ID {run_id} not found in run map.")
await self._send(
self._send(
{
"event": f"on_{run_info['run_type']}_stream",
"data": {"chunk": chunk},
"run_id": str(run_id),
"name": run_info["name"],
"tags": run_info["tags"],
"metadata": run_info["metadata"],
},
run_info["run_type"],
)
yield chunk
def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
"""Tap the output aiter."""
for chunk in output:
run_info = self.run_map.get(run_id)
if run_info is None:
raise AssertionError(f"Run ID {run_id} not found in run map.")
self._send(
{
"event": f"on_{run_info['run_type']}_stream",
"data": {"chunk": chunk},
@ -155,7 +175,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
"inputs": {"messages": messages},
}
await self._send(
self._send(
{
"event": "on_chat_model_start",
"data": {
@ -192,7 +212,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
"inputs": {"prompts": prompts},
}
await self._send(
self._send(
{
"event": "on_llm_start",
"data": {
@ -241,7 +261,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
else:
raise ValueError(f"Unexpected run type: {run_info['run_type']}")
await self._send(
self._send(
{
"event": event,
"data": {
@ -295,7 +315,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
else:
raise ValueError(f"Unexpected run type: {run_info['run_type']}")
await self._send(
self._send(
{
"event": event,
"data": {"output": output, "input": inputs_},
@ -340,7 +360,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
self.run_map[run_id] = run_info
await self._send(
self._send(
{
"event": f"on_{run_type_}_start",
"data": data,
@ -373,7 +393,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
"input": inputs,
}
await self._send(
self._send(
{
"event": event,
"data": data,
@ -408,7 +428,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
"inputs": inputs,
}
await self._send(
self._send(
{
"event": "on_tool_start",
"data": {
@ -432,7 +452,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
)
inputs = run_info["inputs"]
await self._send(
self._send(
{
"event": "on_tool_end",
"data": {
@ -470,7 +490,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
"inputs": {"query": query},
}
await self._send(
self._send(
{
"event": "on_retriever_start",
"data": {
@ -492,7 +512,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
"""Run when Retriever ends running."""
run_info = self.run_map.pop(run_id)
await self._send(
self._send(
{
"event": "on_retriever_end",
"data": {

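One note on the `_send` change threaded through this file: `tap_output_iter` is a plain generator, so it cannot `await` anything; making `_send` synchronous and routing it through `send_nowait` lets the sync and async taps share one path, because `call_soon_threadsafe` hands the write to the reader's event loop from any context. A rough standalone sketch of that hand-off (all names here are illustrative, not the library's):

```python
import asyncio
import threading


async def main() -> None:
    loop = asyncio.get_running_loop()
    queue: asyncio.Queue = asyncio.Queue()

    def sync_producer() -> None:
        # a sync context can still feed the async consumer:
        # call_soon_threadsafe schedules put_nowait on the reader's loop
        for i in range(3):
            loop.call_soon_threadsafe(queue.put_nowait, i)
        loop.call_soon_threadsafe(queue.put_nowait, None)

    threading.Thread(target=sync_producer).start()
    while (item := await queue.get()) is not None:
        print("received", item)


asyncio.run(main())
```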
@ -8,6 +8,7 @@ from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Literal,
Optional,
@ -252,6 +253,25 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
yield chunk
def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
"""Tap an output async iterator to stream its values to the log."""
for chunk in output:
# root run is handled in .astream_log()
if run_id != self.root_id:
# if we can't find the run silently ignore
# eg. because this run wasn't included in the log
if key := self._key_map_by_run_id.get(run_id):
if not self.send(
{
"op": "add",
"path": f"/logs/{key}/streamed_output/-",
"value": chunk,
}
):
break
yield chunk
def include_run(self, run: Run) -> bool:
if run.id == self.root_id:
return False

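Each entry the sync tap above emits is a JSON Patch `add` operation that appends the chunk to that run's `streamed_output` list in the run log. Roughly, applying such an op with the `jsonpatch` package (already a langchain-core dependency) looks like this — `my_step` is an illustrative key:

```python
import jsonpatch

log = {"logs": {"my_step": {"streamed_output": []}}}
op = [{"op": "add", "path": "/logs/my_step/streamed_output/-", "value": "chunk-1"}]
print(jsonpatch.JsonPatch(op).apply(log))
# {'logs': {'my_step': {'streamed_output': ['chunk-1']}}}
```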
@ -37,7 +37,11 @@ class _SendStream(Generic[T]):
def send_nowait(self, item: T) -> None:
"""Schedule the item to be written to the queue using the original loop."""
self._reader_loop.call_soon_threadsafe(self._queue.put_nowait, item)
try:
self._reader_loop.call_soon_threadsafe(self._queue.put_nowait, item)
except RuntimeError:
if not self._reader_loop.is_closed():
raise # Raise the exception if the loop is not closed
async def aclose(self) -> None:
"""Schedule the done object write the queue using the original loop."""
@ -45,7 +49,11 @@ class _SendStream(Generic[T]):
def close(self) -> None:
"""Schedule the done object write the queue using the original loop."""
self._reader_loop.call_soon_threadsafe(self._queue.put_nowait, self._done)
try:
self._reader_loop.call_soon_threadsafe(self._queue.put_nowait, self._done)
except RuntimeError:
if not self._reader_loop.is_closed():
raise # Raise the exception if the loop is not closed
class _ReceiveStream(Generic[T]):

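The guards added above exist because `loop.call_soon_threadsafe` raises `RuntimeError` once the loop is closed; the patch suppresses exactly that case (and re-raises otherwise), so late writes to a finished stream are dropped instead of crashing the caller. A quick way to see the underlying behavior:

```python
import asyncio

loop = asyncio.new_event_loop()
loop.call_soon_threadsafe(print, "scheduled while open")  # accepted
loop.close()

try:
    loop.call_soon_threadsafe(print, "scheduled after close")
except RuntimeError as err:
    print(err)  # "Event loop is closed" -- what the guarded methods now suppress
```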
@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
[[package]]
name = "annotated-types"
@ -1115,13 +1115,13 @@ test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (>
[[package]]
name = "jupyterlab"
version = "4.1.8"
version = "4.2.0"
description = "JupyterLab computational environment"
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyterlab-4.1.8-py3-none-any.whl", hash = "sha256:c3baf3a2f91f89d110ed5786cd18672b9a357129d4e389d2a0dead15e11a4d2c"},
{file = "jupyterlab-4.1.8.tar.gz", hash = "sha256:3384aded8680e7ce504fd63b8bb89a39df21c9c7694d9e7dc4a68742cdb30f9b"},
{file = "jupyterlab-4.2.0-py3-none-any.whl", hash = "sha256:0dfe9278e25a145362289c555d9beb505697d269c10e99909766af7c440ad3cc"},
{file = "jupyterlab-4.2.0.tar.gz", hash = "sha256:356e9205a6a2ab689c47c8fe4919dba6c076e376d03f26baadc05748c2435dd5"},
]
[package.dependencies]
@ -1142,11 +1142,11 @@ tornado = ">=6.2.0"
traitlets = "*"
[package.extras]
dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.2.0)"]
dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.3.5)"]
docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-jupyter", "sphinx (>=1.8,<7.3.0)", "sphinx-copybutton"]
docs-screenshots = ["altair (==5.2.0)", "ipython (==8.16.1)", "ipywidgets (==8.1.1)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.0.post6)", "matplotlib (==3.8.2)", "nbconvert (>=7.0.0)", "pandas (==2.2.0)", "scipy (==1.12.0)", "vega-datasets (==0.9.0)"]
docs-screenshots = ["altair (==5.3.0)", "ipython (==8.16.1)", "ipywidgets (==8.1.2)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.1.post2)", "matplotlib (==3.8.3)", "nbconvert (>=7.0.0)", "pandas (==2.2.1)", "scipy (==1.12.0)", "vega-datasets (==0.9.0)"]
test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "requests", "requests-cache", "virtualenv"]
upgrade-extension = ["copier (>=8.0,<9.0)", "jinja2-time (<0.3)", "pydantic (<2.0)", "pyyaml-include (<2.0)", "tomli-w (<2.0)"]
upgrade-extension = ["copier (>=8,<10)", "jinja2-time (<0.3)", "pydantic (<2.0)", "pyyaml-include (<2.0)", "tomli-w (<2.0)"]
[[package]]
name = "jupyterlab-pygments"
@ -1217,13 +1217,13 @@ url = "../text-splitters"
[[package]]
name = "langsmith"
version = "0.1.57"
version = "0.1.59"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langsmith-0.1.57-py3-none-any.whl", hash = "sha256:dbd83b0944a2fbea4151f0aa053530d93fcf6784a580621bc60633cb890b57dc"},
{file = "langsmith-0.1.57.tar.gz", hash = "sha256:4682204de19f0218029c2b8445ce2cc3485c8d0df9796b31e2ce4c9051fce365"},
{file = "langsmith-0.1.59-py3-none-any.whl", hash = "sha256:445e3bc1d3baa1e5340cd979907a19483b9763a2ed37b863a01113d406f69345"},
{file = "langsmith-0.1.59.tar.gz", hash = "sha256:e748a89f4dd6aa441349143e49e546c03b5dfb43376a25bfef6a5ca792fe1437"},
]
[package.dependencies]
@ -1477,26 +1477,26 @@ files = [
[[package]]
name = "notebook"
version = "7.1.3"
version = "7.2.0"
description = "Jupyter Notebook - A web-based notebook environment for interactive computing"
optional = false
python-versions = ">=3.8"
files = [
{file = "notebook-7.1.3-py3-none-any.whl", hash = "sha256:919b911e59f41f6e3857ce93c9d93535ba66bb090059712770e5968c07e1004d"},
{file = "notebook-7.1.3.tar.gz", hash = "sha256:41fcebff44cf7bb9377180808bcbae066629b55d8c7722f1ebbe75ca44f9cfc1"},
{file = "notebook-7.2.0-py3-none-any.whl", hash = "sha256:b4752d7407d6c8872fc505df0f00d3cae46e8efb033b822adacbaa3f1f3ce8f5"},
{file = "notebook-7.2.0.tar.gz", hash = "sha256:34a2ba4b08ad5d19ec930db7484fb79746a1784be9e1a5f8218f9af8656a141f"},
]
[package.dependencies]
jupyter-server = ">=2.4.0,<3"
jupyterlab = ">=4.1.1,<4.2"
jupyterlab-server = ">=2.22.1,<3"
jupyterlab = ">=4.2.0,<4.3"
jupyterlab-server = ">=2.27.1,<3"
notebook-shim = ">=0.2,<0.3"
tornado = ">=6.2.0"
[package.extras]
dev = ["hatch", "pre-commit"]
docs = ["myst-parser", "nbsphinx", "pydata-sphinx-theme", "sphinx (>=1.3.6)", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"]
test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.22.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"]
test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.27.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"]
[[package]]
name = "notebook-shim"
@ -1693,13 +1693,13 @@ files = [
[[package]]
name = "platformdirs"
version = "4.2.1"
version = "4.2.2"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
optional = false
python-versions = ">=3.8"
files = [
{file = "platformdirs-4.2.1-py3-none-any.whl", hash = "sha256:17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1"},
{file = "platformdirs-4.2.1.tar.gz", hash = "sha256:031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf"},
{file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"},
{file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"},
]
[package.extras]
@ -2142,6 +2142,7 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@ -2940,18 +2941,18 @@ files = [
[[package]]
name = "zipp"
version = "3.18.1"
version = "3.18.2"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.8"
files = [
{file = "zipp-3.18.1-py3-none-any.whl", hash = "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b"},
{file = "zipp-3.18.1.tar.gz", hash = "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"},
{file = "zipp-3.18.2-py3-none-any.whl", hash = "sha256:dce197b859eb796242b0622af1b8beb0a722d52aa2f57133ead08edd5bf5374e"},
{file = "zipp-3.18.2.tar.gz", hash = "sha256:6278d9ddbcfb1f1089a88fde84481528b07b0e10474e09dcfe53dad4069fa059"},
]
[package.extras]
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
[extras]
extended-testing = ["jinja2"]

@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-core"
version = "0.2.0rc1"
version = "0.2.0"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"

File diff suppressed because one or more lines are too long

@ -476,7 +476,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"input": {"messages": [[HumanMessage(content="hello")]]}},
"event": "on_chat_model_start",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -484,7 +484,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"chunk": AIMessageChunk(content="hello", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -492,7 +492,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"chunk": AIMessageChunk(content=" ", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -500,7 +500,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"chunk": AIMessageChunk(content="world!", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -526,7 +526,7 @@ async def test_astream_events_from_model() -> None:
},
},
"event": "on_chat_model_end",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -569,7 +569,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"input": {"messages": [[HumanMessage(content="hello")]]}},
"event": "on_chat_model_start",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -577,7 +577,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"chunk": AIMessageChunk(content="hello", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -585,7 +585,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"chunk": AIMessageChunk(content=" ", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -593,7 +593,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"chunk": AIMessageChunk(content="world!", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -619,7 +619,7 @@ async def test_astream_events_from_model() -> None:
},
},
"event": "on_chat_model_end",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -724,7 +724,12 @@ async def test_event_stream_with_simple_chain() -> None:
}
},
"event": "on_chat_model_start",
"metadata": {"a": "b", "foo": "bar"},
"metadata": {
"a": "b",
"foo": "bar",
"ls_model_type": "chat",
"ls_stop": "<stop_token>",
},
"name": "my_model",
"run_id": "",
"tags": ["my_chain", "my_model", "seq:step:2"],
@ -732,7 +737,12 @@ async def test_event_stream_with_simple_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content="hello", id="ai1")},
"event": "on_chat_model_stream",
"metadata": {"a": "b", "foo": "bar"},
"metadata": {
"a": "b",
"foo": "bar",
"ls_model_type": "chat",
"ls_stop": "<stop_token>",
},
"name": "my_model",
"run_id": "",
"tags": ["my_chain", "my_model", "seq:step:2"],
@ -748,7 +758,12 @@ async def test_event_stream_with_simple_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content=" ", id="ai1")},
"event": "on_chat_model_stream",
"metadata": {"a": "b", "foo": "bar"},
"metadata": {
"a": "b",
"foo": "bar",
"ls_model_type": "chat",
"ls_stop": "<stop_token>",
},
"name": "my_model",
"run_id": "",
"tags": ["my_chain", "my_model", "seq:step:2"],
@ -764,7 +779,12 @@ async def test_event_stream_with_simple_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content="world!", id="ai1")},
"event": "on_chat_model_stream",
"metadata": {"a": "b", "foo": "bar"},
"metadata": {
"a": "b",
"foo": "bar",
"ls_model_type": "chat",
"ls_stop": "<stop_token>",
},
"name": "my_model",
"run_id": "",
"tags": ["my_chain", "my_model", "seq:step:2"],
@ -805,7 +825,12 @@ async def test_event_stream_with_simple_chain() -> None:
},
},
"event": "on_chat_model_end",
"metadata": {"a": "b", "foo": "bar"},
"metadata": {
"a": "b",
"foo": "bar",
"ls_model_type": "chat",
"ls_stop": "<stop_token>",
},
"name": "my_model",
"run_id": "",
"tags": ["my_chain", "my_model", "seq:step:2"],

@ -417,7 +417,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"input": "hello"},
"event": "on_chat_model_start",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -425,7 +425,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"chunk": AIMessageChunk(content="hello", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -433,7 +433,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"chunk": AIMessageChunk(content=" ", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -441,7 +441,7 @@ async def test_astream_events_from_model() -> None:
{
"data": {"chunk": AIMessageChunk(content="world!", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -451,7 +451,7 @@ async def test_astream_events_from_model() -> None:
"output": AIMessageChunk(content="hello world!", id=AnyStr()),
},
"event": "on_chat_model_end",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -495,7 +495,7 @@ async def test_astream_with_model_in_chain() -> None:
{
"data": {"input": {"messages": [[HumanMessage(content="hello")]]}},
"event": "on_chat_model_start",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -503,7 +503,7 @@ async def test_astream_with_model_in_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content="hello", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -511,7 +511,7 @@ async def test_astream_with_model_in_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content=" ", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -519,7 +519,7 @@ async def test_astream_with_model_in_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content="world!", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -530,7 +530,7 @@ async def test_astream_with_model_in_chain() -> None:
"output": AIMessage(content="hello world!", id=AnyStr()),
},
"event": "on_chat_model_end",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -573,7 +573,7 @@ async def test_astream_with_model_in_chain() -> None:
{
"data": {"input": {"messages": [[HumanMessage(content="hello")]]}},
"event": "on_chat_model_start",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -581,7 +581,7 @@ async def test_astream_with_model_in_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content="hello", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -589,7 +589,7 @@ async def test_astream_with_model_in_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content=" ", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -597,7 +597,7 @@ async def test_astream_with_model_in_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content="world!", id=AnyStr())},
"event": "on_chat_model_stream",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -608,7 +608,7 @@ async def test_astream_with_model_in_chain() -> None:
"output": AIMessage(content="hello world!", id=AnyStr()),
},
"event": "on_chat_model_end",
"metadata": {"a": "b"},
"metadata": {"a": "b", "ls_model_type": "chat", "ls_stop": "<stop_token>"},
"name": "my_model",
"run_id": "",
"tags": ["my_model"],
@ -713,7 +713,12 @@ async def test_event_stream_with_simple_chain() -> None:
}
},
"event": "on_chat_model_start",
"metadata": {"a": "b", "foo": "bar"},
"metadata": {
"a": "b",
"foo": "bar",
"ls_model_type": "chat",
"ls_stop": "<stop_token>",
},
"name": "my_model",
"run_id": "",
"tags": ["my_chain", "my_model", "seq:step:2"],
@ -721,7 +726,12 @@ async def test_event_stream_with_simple_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content="hello", id="ai1")},
"event": "on_chat_model_stream",
"metadata": {"a": "b", "foo": "bar"},
"metadata": {
"a": "b",
"foo": "bar",
"ls_model_type": "chat",
"ls_stop": "<stop_token>",
},
"name": "my_model",
"run_id": "",
"tags": ["my_chain", "my_model", "seq:step:2"],
@ -737,7 +747,12 @@ async def test_event_stream_with_simple_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content=" ", id="ai1")},
"event": "on_chat_model_stream",
"metadata": {"a": "b", "foo": "bar"},
"metadata": {
"a": "b",
"foo": "bar",
"ls_model_type": "chat",
"ls_stop": "<stop_token>",
},
"name": "my_model",
"run_id": "",
"tags": ["my_chain", "my_model", "seq:step:2"],
@ -753,7 +768,12 @@ async def test_event_stream_with_simple_chain() -> None:
{
"data": {"chunk": AIMessageChunk(content="world!", id="ai1")},
"event": "on_chat_model_stream",
"metadata": {"a": "b", "foo": "bar"},
"metadata": {
"a": "b",
"foo": "bar",
"ls_model_type": "chat",
"ls_stop": "<stop_token>",
},
"name": "my_model",
"run_id": "",
"tags": ["my_chain", "my_model", "seq:step:2"],
@ -779,7 +799,12 @@ async def test_event_stream_with_simple_chain() -> None:
"output": AIMessageChunk(content="hello world!", id="ai1"),
},
"event": "on_chat_model_end",
"metadata": {"a": "b", "foo": "bar"},
"metadata": {
"a": "b",
"foo": "bar",
"ls_model_type": "chat",
"ls_stop": "<stop_token>",
},
"name": "my_model",
"run_id": "",
"tags": ["my_chain", "my_model", "seq:step:2"],
@ -1459,7 +1484,7 @@ async def test_events_astream_config() -> None:
{
"data": {"input": "hello"},
"event": "on_chat_model_start",
"metadata": {},
"metadata": {"ls_model_type": "chat"},
"name": "GenericFakeChatModel",
"run_id": "",
"tags": [],
@ -1467,7 +1492,7 @@ async def test_events_astream_config() -> None:
{
"data": {"chunk": AIMessageChunk(content="Goodbye", id="ai2")},
"event": "on_chat_model_stream",
"metadata": {},
"metadata": {"ls_model_type": "chat"},
"name": "GenericFakeChatModel",
"run_id": "",
"tags": [],
@ -1475,7 +1500,7 @@ async def test_events_astream_config() -> None:
{
"data": {"chunk": AIMessageChunk(content=" ", id="ai2")},
"event": "on_chat_model_stream",
"metadata": {},
"metadata": {"ls_model_type": "chat"},
"name": "GenericFakeChatModel",
"run_id": "",
"tags": [],
@ -1483,7 +1508,7 @@ async def test_events_astream_config() -> None:
{
"data": {"chunk": AIMessageChunk(content="world", id="ai2")},
"event": "on_chat_model_stream",
"metadata": {},
"metadata": {"ls_model_type": "chat"},
"name": "GenericFakeChatModel",
"run_id": "",
"tags": [],
@ -1493,7 +1518,7 @@ async def test_events_astream_config() -> None:
"output": AIMessageChunk(content="Goodbye world", id="ai2"),
},
"event": "on_chat_model_end",
"metadata": {},
"metadata": {"ls_model_type": "chat"},
"name": "GenericFakeChatModel",
"run_id": "",
"tags": [],
@ -1625,27 +1650,22 @@ EXPECTED_EVENTS = [
]
@pytest.mark.xfail(
reason="This test is failing due to missing functionality."
"Need to implement logic in _transform_stream_with_config that mimics the async "
"variant that uses tap_output_iter"
)
async def test_sync_in_async_stream_lambdas() -> None:
"""Test invoking nested runnable lambda."""
def add_one_(x: int) -> int:
def add_one(x: int) -> int:
return x + 1
add_one = RunnableLambda(add_one_)
add_one_ = RunnableLambda(add_one)
async def add_one_proxy_(x: int, config: RunnableConfig) -> int:
streaming = add_one.stream(x, config)
async def add_one_proxy(x: int, config: RunnableConfig) -> int:
streaming = add_one_.stream(x, config)
results = [result for result in streaming]
return results[0]
add_one_proxy = RunnableLambda(add_one_proxy_) # type: ignore
add_one_proxy_ = RunnableLambda(add_one_proxy) # type: ignore
events = await _collect_events(add_one_proxy.astream_events(1, version="v2"))
events = await _collect_events(add_one_proxy_.astream_events(1, version="v2"))
assert events == EXPECTED_EVENTS
@ -1669,11 +1689,6 @@ async def test_async_in_async_stream_lambdas() -> None:
assert events == EXPECTED_EVENTS
@pytest.mark.xfail(
reason="This test is failing due to missing functionality."
"Need to implement logic in _transform_stream_with_config that mimics the async "
"variant that uses tap_output_iter"
)
async def test_sync_in_sync_lambdas() -> None:
"""Test invoking nested runnable lambda."""

@ -112,6 +112,24 @@ async def test_queue_for_streaming_via_sync_call() -> None:
), f"delta_time: {delta_time}"
def test_send_to_closed_stream() -> None:
"""Test that sending to a closed stream doesn't raise an error.
We may want to handle this in a better way in the future.
"""
event_loop = asyncio.get_event_loop()
channel = _MemoryStream[str](event_loop)
writer = channel.get_send_stream()
# send with an open event loop
writer.send_nowait("hello")
# close the loop; sending to a closed loop should not raise
event_loop.close()
writer.send_nowait("hello")
# closing the writer on a closed loop should be safe too
writer.close()
writer.send_nowait("hello")
async def test_closed_stream() -> None:
reader_loop = asyncio.get_event_loop()
channel = _MemoryStream[str](reader_loop)

@ -1736,7 +1736,7 @@ files = [
[[package]]
name = "langchain"
version = "0.2.0rc2"
version = "0.2.0"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -1747,8 +1747,8 @@ develop = true
aiohttp = "^3.8.3"
async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""}
dataclasses-json = ">= 0.5.7, < 0.7"
langchain-core = ">=0.1.52,<0.3"
langchain-text-splitters = ">=0.0.1,<0.1"
langchain-core = "^0.2.0"
langchain-text-splitters = "^0.2.0"
langsmith = "^0.1.17"
numpy = "^1"
pydantic = ">=1,<3"
@ -1768,7 +1768,7 @@ embeddings = ["sentence-transformers (>=2,<3)"]
extended-testing = ["aiosqlite (>=0.19.0,<0.20.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "cohere (>=4,<6)", "couchbase (>=4.1.9,<5.0.0)", "dashvector (>=1.0.1,<2.0.0)", "databricks-vectorsearch (>=0.21,<0.22)", "datasets (>=2.15.0,<3.0.0)", "dgml-utils (>=0.3.0,<0.4.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "fireworks-ai (>=0.9.0,<0.10.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "google-cloud-documentai (>=2.20.1,<3.0.0)", "gql (>=3.4.1,<4.0.0)", "hologres-vector (>=0.0.6,<0.0.7)", "html2text (>=2020.1.16,<2021.0.0)", "javelin-sdk (>=0.1.8,<0.2.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "jsonschema (>1)", "langchain-openai (>=0.1,<0.2)", "lxml (>=4.9.3,<6.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "msal (>=1.25.0,<2.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "numexpr (>=2.8.6,<3.0.0)", "openai (<2)", "openai (<2)", "openapi-pydantic (>=0.3.2,<0.4.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "praw (>=7.7.1,<8.0.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "rapidocr-onnxruntime (>=1.3.2,<2.0.0)", "rdflib (==7.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "rspace_client (>=2.5.0,<3.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "upstash-redis (>=0.15.0,<0.16.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
javascript = ["esprima (>=4.0.1,<5.0.0)"]
llms = ["clarifai (>=9.1.0)", "cohere (>=4,<6)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (<2)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
openai = ["openai (<2)", "tiktoken (>=0.3.2,<0.6.0)"]
openai = ["openai (<2)", "tiktoken (>=0.7,<1.0)"]
qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"]
text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
@ -1778,7 +1778,7 @@ url = "../langchain"
[[package]]
name = "langchain-community"
version = "0.2.0rc1"
version = "0.2.0"
description = "Community contributed LangChain integrations."
optional = false
python-versions = ">=3.8.1,<4.0"
@ -1788,8 +1788,8 @@ develop = true
[package.dependencies]
aiohttp = "^3.8.3"
dataclasses-json = ">= 0.5.7, < 0.7"
langchain = "^0.2.0rc2"
langchain-core = ">=0.1.52,<0.3"
langchain = "^0.2.0"
langchain-core = "^0.2.0"
langsmith = "^0.1.0"
numpy = "^1"
PyYAML = ">=5.3"
@ -1807,7 +1807,7 @@ url = "../community"
[[package]]
name = "langchain-core"
version = "0.2.0rc1"
version = "0.2.0"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -1831,7 +1831,7 @@ url = "../core"
[[package]]
name = "langchain-openai"
version = "0.1.6"
version = "0.1.7"
description = "An integration package connecting OpenAI and LangChain"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -1841,7 +1841,7 @@ develop = true
[package.dependencies]
langchain-core = ">=0.1.46,<0.3"
openai = "^1.24.0"
tiktoken = ">=0.5.2,<1"
tiktoken = ">=0.7,<1"
[package.source]
type = "directory"
@ -1849,7 +1849,7 @@ url = "../partners/openai"
[[package]]
name = "langchain-text-splitters"
version = "0.0.2"
version = "0.2.0"
description = "LangChain text splitting utilities"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -1857,7 +1857,7 @@ files = []
develop = true
[package.dependencies]
langchain-core = ">=0.1.28,<0.3"
langchain-core = "^0.2.0"
[package.extras]
extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"]
@ -4765,47 +4765,47 @@ files = [
[[package]]
name = "tiktoken"
version = "0.6.0"
version = "0.7.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.8"
files = [
{file = "tiktoken-0.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:277de84ccd8fa12730a6b4067456e5cf72fef6300bea61d506c09e45658d41ac"},
{file = "tiktoken-0.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c44433f658064463650d61387623735641dcc4b6c999ca30bc0f8ba3fccaf5c"},
{file = "tiktoken-0.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afb9a2a866ae6eef1995ab656744287a5ac95acc7e0491c33fad54d053288ad3"},
{file = "tiktoken-0.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c62c05b3109fefca26fedb2820452a050074ad8e5ad9803f4652977778177d9f"},
{file = "tiktoken-0.6.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ef917fad0bccda07bfbad835525bbed5f3ab97a8a3e66526e48cdc3e7beacf7"},
{file = "tiktoken-0.6.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e095131ab6092d0769a2fda85aa260c7c383072daec599ba9d8b149d2a3f4d8b"},
{file = "tiktoken-0.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:05b344c61779f815038292a19a0c6eb7098b63c8f865ff205abb9ea1b656030e"},
{file = "tiktoken-0.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cefb9870fb55dca9e450e54dbf61f904aab9180ff6fe568b61f4db9564e78871"},
{file = "tiktoken-0.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:702950d33d8cabc039845674107d2e6dcabbbb0990ef350f640661368df481bb"},
{file = "tiktoken-0.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8d49d076058f23254f2aff9af603863c5c5f9ab095bc896bceed04f8f0b013a"},
{file = "tiktoken-0.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:430bc4e650a2d23a789dc2cdca3b9e5e7eb3cd3935168d97d43518cbb1f9a911"},
{file = "tiktoken-0.6.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:293cb8669757301a3019a12d6770bd55bec38a4d3ee9978ddbe599d68976aca7"},
{file = "tiktoken-0.6.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7bd1a288b7903aadc054b0e16ea78e3171f70b670e7372432298c686ebf9dd47"},
{file = "tiktoken-0.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac76e000183e3b749634968a45c7169b351e99936ef46f0d2353cd0d46c3118d"},
{file = "tiktoken-0.6.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17cc8a4a3245ab7d935c83a2db6bb71619099d7284b884f4b2aea4c74f2f83e3"},
{file = "tiktoken-0.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:284aebcccffe1bba0d6571651317df6a5b376ff6cfed5aeb800c55df44c78177"},
{file = "tiktoken-0.6.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c1a3a5d33846f8cd9dd3b7897c1d45722f48625a587f8e6f3d3e85080559be8"},
{file = "tiktoken-0.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6318b2bb2337f38ee954fd5efa82632c6e5ced1d52a671370fa4b2eff1355e91"},
{file = "tiktoken-0.6.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f5f0f2ed67ba16373f9a6013b68da298096b27cd4e1cf276d2d3868b5c7efd1"},
{file = "tiktoken-0.6.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:75af4c0b16609c2ad02581f3cdcd1fb698c7565091370bf6c0cf8624ffaba6dc"},
{file = "tiktoken-0.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:45577faf9a9d383b8fd683e313cf6df88b6076c034f0a16da243bb1c139340c3"},
{file = "tiktoken-0.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7c1492ab90c21ca4d11cef3a236ee31a3e279bb21b3fc5b0e2210588c4209e68"},
{file = "tiktoken-0.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e2b380c5b7751272015400b26144a2bab4066ebb8daae9c3cd2a92c3b508fe5a"},
{file = "tiktoken-0.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f497598b9f58c99cbc0eb764b4a92272c14d5203fc713dd650b896a03a50ad"},
{file = "tiktoken-0.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e65e8bd6f3f279d80f1e1fbd5f588f036b9a5fa27690b7f0cc07021f1dfa0839"},
{file = "tiktoken-0.6.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5f1495450a54e564d236769d25bfefbf77727e232d7a8a378f97acddee08c1ae"},
{file = "tiktoken-0.6.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6c4e4857d99f6fb4670e928250835b21b68c59250520a1941618b5b4194e20c3"},
{file = "tiktoken-0.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:168d718f07a39b013032741867e789971346df8e89983fe3c0ef3fbd5a0b1cb9"},
{file = "tiktoken-0.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:47fdcfe11bd55376785a6aea8ad1db967db7f66ea81aed5c43fad497521819a4"},
{file = "tiktoken-0.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fb7d2ccbf1a7784810aff6b80b4012fb42c6fc37eaa68cb3b553801a5cc2d1fc"},
{file = "tiktoken-0.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ccb7a111ee76af5d876a729a347f8747d5ad548e1487eeea90eaf58894b3138"},
{file = "tiktoken-0.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2048e1086b48e3c8c6e2ceeac866561374cd57a84622fa49a6b245ffecb7744"},
{file = "tiktoken-0.6.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:07f229a5eb250b6403a61200199cecf0aac4aa23c3ecc1c11c1ca002cbb8f159"},
{file = "tiktoken-0.6.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:432aa3be8436177b0db5a2b3e7cc28fd6c693f783b2f8722539ba16a867d0c6a"},
{file = "tiktoken-0.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:8bfe8a19c8b5c40d121ee7938cd9c6a278e5b97dc035fd61714b4f0399d2f7a1"},
{file = "tiktoken-0.6.0.tar.gz", hash = "sha256:ace62a4ede83c75b0374a2ddfa4b76903cf483e9cb06247f566be3bf14e6beed"},
{file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"},
{file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"},
{file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79383a6e2c654c6040e5f8506f3750db9ddd71b550c724e673203b4f6b4b4590"},
{file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c"},
{file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13c94efacdd3de9aff824a788353aa5749c0faee1fbe3816df365ea450b82311"},
{file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8e58c7eb29d2ab35a7a8929cbeea60216a4ccdf42efa8974d8e176d50c9a3df5"},
{file = "tiktoken-0.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:21a20c3bd1dd3e55b91c1331bf25f4af522c525e771691adbc9a69336fa7f702"},
{file = "tiktoken-0.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10c7674f81e6e350fcbed7c09a65bca9356eaab27fb2dac65a1e440f2bcfe30f"},
{file = "tiktoken-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:084cec29713bc9d4189a937f8a35dbdfa785bd1235a34c1124fe2323821ee93f"},
{file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811229fde1652fedcca7c6dfe76724d0908775b353556d8a71ed74d866f73f7b"},
{file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b6e7dc2e7ad1b3757e8a24597415bafcfb454cebf9a33a01f2e6ba2e663992"},
{file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1063c5748be36344c7e18c7913c53e2cca116764c2080177e57d62c7ad4576d1"},
{file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:20295d21419bfcca092644f7e2f2138ff947a6eb8cfc732c09cc7d76988d4a89"},
{file = "tiktoken-0.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:959d993749b083acc57a317cbc643fb85c014d055b2119b739487288f4e5d1cb"},
{file = "tiktoken-0.7.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:71c55d066388c55a9c00f61d2c456a6086673ab7dec22dd739c23f77195b1908"},
{file = "tiktoken-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:09ed925bccaa8043e34c519fbb2f99110bd07c6fd67714793c21ac298e449410"},
{file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03c6c40ff1db0f48a7b4d2dafeae73a5607aacb472fa11f125e7baf9dce73704"},
{file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20b5c6af30e621b4aca094ee61777a44118f52d886dbe4f02b70dfe05c15350"},
{file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d427614c3e074004efa2f2411e16c826f9df427d3c70a54725cae860f09e4bf4"},
{file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c46d7af7b8c6987fac9b9f61041b452afe92eb087d29c9ce54951280f899a97"},
{file = "tiktoken-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0bc603c30b9e371e7c4c7935aba02af5994a909fc3c0fe66e7004070858d3f8f"},
{file = "tiktoken-0.7.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2398fecd38c921bcd68418675a6d155fad5f5e14c2e92fcf5fe566fa5485a858"},
{file = "tiktoken-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f5f6afb52fb8a7ea1c811e435e4188f2bef81b5e0f7a8635cc79b0eef0193d6"},
{file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:861f9ee616766d736be4147abac500732b505bf7013cfaf019b85892637f235e"},
{file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54031f95c6939f6b78122c0aa03a93273a96365103793a22e1793ee86da31685"},
{file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fffdcb319b614cf14f04d02a52e26b1d1ae14a570f90e9b55461a72672f7b13d"},
{file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c72baaeaefa03ff9ba9688624143c858d1f6b755bb85d456d59e529e17234769"},
{file = "tiktoken-0.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:131b8aeb043a8f112aad9f46011dced25d62629091e51d9dc1adbf4a1cc6aa98"},
{file = "tiktoken-0.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cabc6dc77460df44ec5b879e68692c63551ae4fae7460dd4ff17181df75f1db7"},
{file = "tiktoken-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8d57f29171255f74c0aeacd0651e29aa47dff6f070cb9f35ebc14c82278f3b25"},
{file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ee92776fdbb3efa02a83f968c19d4997a55c8e9ce7be821ceee04a1d1ee149c"},
{file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e215292e99cb41fbc96988ef62ea63bb0ce1e15f2c147a61acc319f8b4cbe5bf"},
{file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a81bac94769cab437dd3ab0b8a4bc4e0f9cf6835bcaa88de71f39af1791727a"},
{file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6d73ea93e91d5ca771256dfc9d1d29f5a554b83821a1dc0891987636e0ae226"},
{file = "tiktoken-0.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:2bcb28ddf79ffa424f171dfeef9a4daff61a94c631ca6813f43967cb263b83b9"},
{file = "tiktoken-0.7.0.tar.gz", hash = "sha256:1077266e949c24e0291f6c350433c6f0971365ece2b173a23bc3b9f9defef6b6"},
]
[package.dependencies]
@ -5558,4 +5558,4 @@ extended-testing = ["faker", "jinja2", "pandas", "presidio-analyzer", "presidio-
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "adb7f5ac26790812320bca6824385bb05b0c62a9e4d837d4773cef4dc4c3fdc6"
content-hash = "35fdc3fbe5d26e1fda88f9c68bea639c4bfebf61bf44c96033d557cb1f9b8279"

@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-experimental"
version = "0.2.0rc1"
version = "0.0.59"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
@ -10,8 +10,8 @@ repository = "https://github.com/langchain-ai/langchain"
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = ">=0.1.52,<0.3"
langchain-community = ">=0.0.38rc1,<0.3"
langchain-core = "^0.2"
langchain-community = "^0.2"
presidio-anonymizer = {version = "^2.2.352", optional = true}
presidio-analyzer = {version = "^2.2.352", optional = true}
faker = {version = "^19.3.1", optional = true}

@ -3469,7 +3469,7 @@ files = [
[[package]]
name = "langchain-core"
version = "0.2.0rc1"
version = "0.2.0"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -3493,7 +3493,7 @@ url = "../core"
[[package]]
name = "langchain-openai"
version = "0.1.6"
version = "0.1.7"
description = "An integration package connecting OpenAI and LangChain"
optional = true
python-versions = ">=3.8.1,<4.0"
@ -3503,7 +3503,7 @@ develop = true
[package.dependencies]
langchain-core = ">=0.1.46,<0.3"
openai = "^1.24.0"
tiktoken = ">=0.5.2,<1"
tiktoken = ">=0.7,<1"
[package.source]
type = "directory"
@ -3511,7 +3511,7 @@ url = "../partners/openai"
[[package]]
name = "langchain-text-splitters"
version = "0.0.1"
version = "0.2.0"
description = "LangChain text splitting utilities"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -3519,7 +3519,7 @@ files = []
develop = true
[package.dependencies]
langchain-core = ">=0.1.28,<0.3"
langchain-core = "^0.2.0"
[package.extras]
extended-testing = ["beautifulsoup4 (>=4.12.3,<5.0.0)", "lxml (>=4.9.3,<6.0)"]
@ -8040,47 +8040,47 @@ files = [
[[package]]
name = "tiktoken"
version = "0.5.2"
version = "0.7.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.8"
files = [
{file = "tiktoken-0.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c4e654282ef05ec1bd06ead22141a9a1687991cef2c6a81bdd1284301abc71d"},
{file = "tiktoken-0.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7b3134aa24319f42c27718c6967f3c1916a38a715a0fa73d33717ba121231307"},
{file = "tiktoken-0.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6092e6e77730929c8c6a51bb0d7cfdf1b72b63c4d033d6258d1f2ee81052e9e5"},
{file = "tiktoken-0.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72ad8ae2a747622efae75837abba59be6c15a8f31b4ac3c6156bc56ec7a8e631"},
{file = "tiktoken-0.5.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51cba7c8711afa0b885445f0637f0fcc366740798c40b981f08c5f984e02c9d1"},
{file = "tiktoken-0.5.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3d8c7d2c9313f8e92e987d585ee2ba0f7c40a0de84f4805b093b634f792124f5"},
{file = "tiktoken-0.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:692eca18c5fd8d1e0dde767f895c17686faaa102f37640e884eecb6854e7cca7"},
{file = "tiktoken-0.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:138d173abbf1ec75863ad68ca289d4da30caa3245f3c8d4bfb274c4d629a2f77"},
{file = "tiktoken-0.5.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7388fdd684690973fdc450b47dfd24d7f0cbe658f58a576169baef5ae4658607"},
{file = "tiktoken-0.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a114391790113bcff670c70c24e166a841f7ea8f47ee2fe0e71e08b49d0bf2d4"},
{file = "tiktoken-0.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca96f001e69f6859dd52926d950cfcc610480e920e576183497ab954e645e6ac"},
{file = "tiktoken-0.5.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:15fed1dd88e30dfadcdd8e53a8927f04e1f6f81ad08a5ca824858a593ab476c7"},
{file = "tiktoken-0.5.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:93f8e692db5756f7ea8cb0cfca34638316dcf0841fb8469de8ed7f6a015ba0b0"},
{file = "tiktoken-0.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:bcae1c4c92df2ffc4fe9f475bf8148dbb0ee2404743168bbeb9dcc4b79dc1fdd"},
{file = "tiktoken-0.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b76a1e17d4eb4357d00f0622d9a48ffbb23401dcf36f9716d9bd9c8e79d421aa"},
{file = "tiktoken-0.5.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:01d8b171bb5df4035580bc26d4f5339a6fd58d06f069091899d4a798ea279d3e"},
{file = "tiktoken-0.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42adf7d4fb1ed8de6e0ff2e794a6a15005f056a0d83d22d1d6755a39bffd9e7f"},
{file = "tiktoken-0.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3f894dbe0adb44609f3d532b8ea10820d61fdcb288b325a458dfc60fefb7db"},
{file = "tiktoken-0.5.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:58ccfddb4e62f0df974e8f7e34a667981d9bb553a811256e617731bf1d007d19"},
{file = "tiktoken-0.5.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58902a8bad2de4268c2a701f1c844d22bfa3cbcc485b10e8e3e28a050179330b"},
{file = "tiktoken-0.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:5e39257826d0647fcac403d8fa0a474b30d02ec8ffc012cfaf13083e9b5e82c5"},
{file = "tiktoken-0.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8bde3b0fbf09a23072d39c1ede0e0821f759b4fa254a5f00078909158e90ae1f"},
{file = "tiktoken-0.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2ddee082dcf1231ccf3a591d234935e6acf3e82ee28521fe99af9630bc8d2a60"},
{file = "tiktoken-0.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35c057a6a4e777b5966a7540481a75a31429fc1cb4c9da87b71c8b75b5143037"},
{file = "tiktoken-0.5.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c4a049b87e28f1dc60509f8eb7790bc8d11f9a70d99b9dd18dfdd81a084ffe6"},
{file = "tiktoken-0.5.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5bf5ce759089f4f6521ea6ed89d8f988f7b396e9f4afb503b945f5c949c6bec2"},
{file = "tiktoken-0.5.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0c964f554af1a96884e01188f480dad3fc224c4bbcf7af75d4b74c4b74ae0125"},
{file = "tiktoken-0.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:368dd5726d2e8788e47ea04f32e20f72a2012a8a67af5b0b003d1e059f1d30a3"},
{file = "tiktoken-0.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2deef9115b8cd55536c0a02c0203512f8deb2447f41585e6d929a0b878a0dd2"},
{file = "tiktoken-0.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ed7d380195affbf886e2f8b92b14edfe13f4768ff5fc8de315adba5b773815e"},
{file = "tiktoken-0.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c76fce01309c8140ffe15eb34ded2bb94789614b7d1d09e206838fc173776a18"},
{file = "tiktoken-0.5.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60a5654d6a2e2d152637dd9a880b4482267dfc8a86ccf3ab1cec31a8c76bfae8"},
{file = "tiktoken-0.5.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:41d4d3228e051b779245a8ddd21d4336f8975563e92375662f42d05a19bdff41"},
{file = "tiktoken-0.5.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c1cdec2c92fcde8c17a50814b525ae6a88e8e5b02030dc120b76e11db93f13"},
{file = "tiktoken-0.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:84ddb36faedb448a50b246e13d1b6ee3437f60b7169b723a4b2abad75e914f3e"},
{file = "tiktoken-0.5.2.tar.gz", hash = "sha256:f54c581f134a8ea96ce2023ab221d4d4d81ab614efa0b2fbce926387deb56c80"},
{file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"},
{file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"},
{file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79383a6e2c654c6040e5f8506f3750db9ddd71b550c724e673203b4f6b4b4590"},
{file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c"},
{file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13c94efacdd3de9aff824a788353aa5749c0faee1fbe3816df365ea450b82311"},
{file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8e58c7eb29d2ab35a7a8929cbeea60216a4ccdf42efa8974d8e176d50c9a3df5"},
{file = "tiktoken-0.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:21a20c3bd1dd3e55b91c1331bf25f4af522c525e771691adbc9a69336fa7f702"},
{file = "tiktoken-0.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10c7674f81e6e350fcbed7c09a65bca9356eaab27fb2dac65a1e440f2bcfe30f"},
{file = "tiktoken-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:084cec29713bc9d4189a937f8a35dbdfa785bd1235a34c1124fe2323821ee93f"},
{file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811229fde1652fedcca7c6dfe76724d0908775b353556d8a71ed74d866f73f7b"},
{file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b6e7dc2e7ad1b3757e8a24597415bafcfb454cebf9a33a01f2e6ba2e663992"},
{file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1063c5748be36344c7e18c7913c53e2cca116764c2080177e57d62c7ad4576d1"},
{file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:20295d21419bfcca092644f7e2f2138ff947a6eb8cfc732c09cc7d76988d4a89"},
{file = "tiktoken-0.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:959d993749b083acc57a317cbc643fb85c014d055b2119b739487288f4e5d1cb"},
{file = "tiktoken-0.7.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:71c55d066388c55a9c00f61d2c456a6086673ab7dec22dd739c23f77195b1908"},
{file = "tiktoken-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:09ed925bccaa8043e34c519fbb2f99110bd07c6fd67714793c21ac298e449410"},
{file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03c6c40ff1db0f48a7b4d2dafeae73a5607aacb472fa11f125e7baf9dce73704"},
{file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20b5c6af30e621b4aca094ee61777a44118f52d886dbe4f02b70dfe05c15350"},
{file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d427614c3e074004efa2f2411e16c826f9df427d3c70a54725cae860f09e4bf4"},
{file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c46d7af7b8c6987fac9b9f61041b452afe92eb087d29c9ce54951280f899a97"},
{file = "tiktoken-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0bc603c30b9e371e7c4c7935aba02af5994a909fc3c0fe66e7004070858d3f8f"},
{file = "tiktoken-0.7.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2398fecd38c921bcd68418675a6d155fad5f5e14c2e92fcf5fe566fa5485a858"},
{file = "tiktoken-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f5f6afb52fb8a7ea1c811e435e4188f2bef81b5e0f7a8635cc79b0eef0193d6"},
{file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:861f9ee616766d736be4147abac500732b505bf7013cfaf019b85892637f235e"},
{file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54031f95c6939f6b78122c0aa03a93273a96365103793a22e1793ee86da31685"},
{file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fffdcb319b614cf14f04d02a52e26b1d1ae14a570f90e9b55461a72672f7b13d"},
{file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c72baaeaefa03ff9ba9688624143c858d1f6b755bb85d456d59e529e17234769"},
{file = "tiktoken-0.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:131b8aeb043a8f112aad9f46011dced25d62629091e51d9dc1adbf4a1cc6aa98"},
{file = "tiktoken-0.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cabc6dc77460df44ec5b879e68692c63551ae4fae7460dd4ff17181df75f1db7"},
{file = "tiktoken-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8d57f29171255f74c0aeacd0651e29aa47dff6f070cb9f35ebc14c82278f3b25"},
{file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ee92776fdbb3efa02a83f968c19d4997a55c8e9ce7be821ceee04a1d1ee149c"},
{file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e215292e99cb41fbc96988ef62ea63bb0ce1e15f2c147a61acc319f8b4cbe5bf"},
{file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a81bac94769cab437dd3ab0b8a4bc4e0f9cf6835bcaa88de71f39af1791727a"},
{file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6d73ea93e91d5ca771256dfc9d1d29f5a554b83821a1dc0891987636e0ae226"},
{file = "tiktoken-0.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:2bcb28ddf79ffa424f171dfeef9a4daff61a94c631ca6813f43967cb263b83b9"},
{file = "tiktoken-0.7.0.tar.gz", hash = "sha256:1077266e949c24e0291f6c350433c6f0971365ece2b173a23bc3b9f9defef6b6"},
]
[package.dependencies]
@ -9378,4 +9378,4 @@ text-helpers = ["chardet"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "b4c5e3b49f33d39457850835ae6cf7095067f7851ff8506a220b81f5d16340b9"
content-hash = "83762b3ce33babfb666f7f21e95a8c98397b38bbc4f4f93a1d4d3ae3be6cbd7b"

@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain"
version = "0.2.0rc2"
version = "0.2.0"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
@ -12,8 +12,8 @@ langchain-server = "langchain.server:main"
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = ">=0.1.52,<0.3"
langchain-text-splitters = ">=0.0.1,<0.1"
langchain-core = "^0.2.0"
langchain-text-splitters = "^0.2.0"
langsmith = "^0.1.17"
pydantic = ">=1,<3"
SQLAlchemy = ">=1.4,<3"
@ -31,7 +31,7 @@ transformers = {version = "^4", optional = true}
beautifulsoup4 = {version = "^4", optional = true}
torch = {version = ">=1,<3", optional = true}
jinja2 = {version = "^3", optional = true}
tiktoken = {version = ">=0.3.2,<0.6.0", optional = true, python=">=3.9"}
tiktoken = {version = ">=0.7,<1.0", optional = true, python=">=3.9"}
qdrant-client = {version = "^1.3.1", optional = true, python = ">=3.8.1,<3.12"}
dataclasses-json = ">= 0.5.7, < 0.7"
cohere = {version = ">=4,<6", optional = true}
@ -166,7 +166,7 @@ wrapt = "^1.15.0"
openai = "^1"
python-dotenv = "^1.0.0"
cassio = "^0.1.0"
tiktoken = ">=0.3.2,<0.6.0"
tiktoken = ">=0.7,<1"
anthropic = "^0.3.11"
langchain-core = {path = "../core", develop = true}
langchain-text-splitters = {path = "../text-splitters", develop = true}

@ -21,6 +21,17 @@ class TestAI21J2(ChatModelUnitTests):
"api_key": "test_api_key",
}
@pytest.mark.xfail(reason="Not implemented.")
def test_standard_params(
self,
chat_model_class: Type[BaseChatModel],
chat_model_params: dict,
) -> None:
super().test_standard_params(
chat_model_class,
chat_model_params,
)
class TestAI21Jamba(ChatModelUnitTests):
@pytest.fixture
@ -33,3 +44,14 @@ class TestAI21Jamba(ChatModelUnitTests):
"model": "jamba-instruct",
"api_key": "test_api_key",
}
@pytest.mark.xfail(reason="Not implemented.")
def test_standard_params(
self,
chat_model_class: Type[BaseChatModel],
chat_model_params: dict,
) -> None:
super().test_standard_params(
chat_model_class,
chat_model_params,
)
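
These AI21 suites mark the inherited `test_standard_params` as an expected failure because the underlying chat models do not implement `_get_ls_params` yet; the identical override recurs below for the Fireworks, Groq, and Mistral suites. A minimal sketch of the opt-out pattern for a hypothetical provider (the suite and model class names are illustrative, not part of this diff):

```python
from typing import Type

import pytest
from langchain_core.language_models import BaseChatModel


# Hypothetical suite for a provider whose chat model has not implemented
# _get_ls_params: the standard-params test is xfailed rather than deleted,
# so it starts passing automatically once the hook lands.
class TestMyProviderStandard(ChatModelUnitTests):  # base class from langchain-standard-tests
    @pytest.fixture
    def chat_model_class(self) -> Type[BaseChatModel]:
        return MyChatModel  # placeholder integration class

    @pytest.mark.xfail(reason="Not implemented.")
    def test_standard_params(
        self,
        chat_model_class: Type[BaseChatModel],
        chat_model_params: dict,
    ) -> None:
        super().test_standard_params(chat_model_class, chat_model_params)
```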

@ -30,6 +30,7 @@ from langchain_core.callbacks import (
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
LangSmithParams,
agenerate_from_stream,
generate_from_stream,
)
@ -326,6 +327,23 @@ class ChatAnthropic(BaseChatModel):
"default_request_timeout": self.default_request_timeout,
}
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get the parameters used to invoke the model."""
params = self._get_invocation_params(stop=stop, **kwargs)
ls_params = LangSmithParams(
ls_provider="anthropic",
ls_model_name=self.model,
ls_model_type="chat",
ls_temperature=params.get("temperature", self.temperature),
)
if ls_max_tokens := params.get("max_tokens", self.max_tokens):
ls_params["ls_max_tokens"] = ls_max_tokens
if ls_stop := stop or params.get("stop", None):
ls_params["ls_stop"] = ls_stop
return ls_params
@root_validator(pre=True)
def build_extra(cls, values: Dict) -> Dict:
extra = values.get("model_kwargs", {})
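
The new `_get_ls_params` hook on `ChatAnthropic` surfaces standardized metadata (`ls_provider`, `ls_model_name`, and so on) for LangSmith tracing. A quick shape check, assuming `ANTHROPIC_API_KEY` is set in the environment and using an illustrative model name:

```python
from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0.2)
ls_params = llm._get_ls_params(stop=["Human:"])
# Roughly:
# {"ls_provider": "anthropic", "ls_model_name": "claude-3-sonnet-20240229",
#  "ls_model_type": "chat", "ls_temperature": 0.2, "ls_stop": ["Human:"]}
```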

@ -19,3 +19,14 @@ class TestFireworksStandard(ChatModelUnitTests):
return {
"api_key": "test_api_key",
}
@pytest.mark.xfail(reason="Not implemented.")
def test_standard_params(
self,
chat_model_class: Type[BaseChatModel],
chat_model_params: dict,
) -> None:
super().test_standard_params(
chat_model_class,
chat_model_params,
)

@ -13,3 +13,14 @@ class TestGroqStandard(ChatModelUnitTests):
@pytest.fixture
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatGroq
@pytest.mark.xfail(reason="Not implemented.")
def test_standard_params(
self,
chat_model_class: Type[BaseChatModel],
chat_model_params: dict,
) -> None:
super().test_standard_params(
chat_model_class,
chat_model_params,
)

@ -13,3 +13,14 @@ class TestMistralStandard(ChatModelUnitTests):
@pytest.fixture
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatMistralAI
@pytest.mark.xfail(reason="Not implemented.")
def test_standard_params(
self,
chat_model_class: Type[BaseChatModel],
chat_model_params: dict,
) -> None:
super().test_standard_params(
chat_model_class,
chat_model_params,
)

@ -6,6 +6,7 @@ import os
from typing import Any, Callable, Dict, List, Optional, Union
import openai
from langchain_core.language_models.chat_models import LangSmithParams
from langchain_core.outputs import ChatResult
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
@ -228,6 +229,16 @@ class AzureChatOpenAI(BaseChatOpenAI):
"openai_api_version": self.openai_api_version,
}
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get the parameters used to invoke the model."""
params = super()._get_ls_params(stop=stop, **kwargs)
params["ls_provider"] = "azure"
if self.deployment_name:
params["ls_model_name"] = self.deployment_name
return params
def _create_chat_result(
self, response: Union[dict, openai.BaseModel]
) -> ChatResult:

@ -36,6 +36,7 @@ from langchain_core.callbacks import (
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import (
BaseChatModel,
LangSmithParams,
agenerate_from_stream,
generate_from_stream,
)
@ -639,6 +640,23 @@ class BaseChatOpenAI(BaseChatModel):
**kwargs,
}
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get standard params for tracing."""
params = self._get_invocation_params(stop=stop, **kwargs)
ls_params = LangSmithParams(
ls_provider="openai",
ls_model_name=self.model_name,
ls_model_type="chat",
ls_temperature=params.get("temperature", self.temperature),
)
if ls_max_tokens := params.get("max_tokens", self.max_tokens):
ls_params["ls_max_tokens"] = ls_max_tokens
if ls_stop := stop or params.get("stop", None):
ls_params["ls_stop"] = ls_stop
return ls_params
@property
def _llm_type(self) -> str:
"""Return type of chat model."""

@ -28,3 +28,7 @@ def test_initialize_more() -> None:
assert llm.deployment_name == "35-turbo-dev"
assert llm.openai_api_version == "2023-05-15"
assert llm.temperature == 0
ls_params = llm._get_ls_params()
assert ls_params["ls_provider"] == "azure"
assert ls_params["ls_model_name"] == "35-turbo-dev"

@ -103,6 +103,8 @@ class ActionServerToolkit(BaseModel):
"""Action Server URL"""
api_key: str = Field(exclude=True, default="")
"""Action Server request API key"""
additional_headers: dict = Field(exclude=True, default_factory=dict)
"""Additional headers to be passed to the Action Server"""
report_trace: bool = Field(exclude=True, default=False)
"""Enable reporting Langsmith trace to Action Server runs"""
_run_details: dict = PrivateAttr({})
@ -226,6 +228,7 @@ class ActionServerToolkit(BaseModel):
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
**self.additional_headers,
}
try:
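
`additional_headers` is spread into the request headers after the defaults, so callers can inject per-instance headers (or override the defaults) on every Action Server call. A sketch with placeholder URL, key, and header values:

```python
from langchain_robocorp import ActionServerToolkit

toolkit = ActionServerToolkit(
    url="http://localhost:8080",
    api_key="placeholder-key",
    additional_headers={"X-Request-Source": "docs-example"},
)
tools = toolkit.get_tools()
```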

@ -9,6 +9,7 @@ from typing import (
)
import openai
from langchain_core.language_models.chat_models import LangSmithParams
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import (
convert_to_secret_str,
@ -54,6 +55,14 @@ class ChatTogether(BaseChatOpenAI):
"""Return type of chat model."""
return "together-chat"
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get the parameters used to invoke the model."""
params = super()._get_ls_params(stop=stop, **kwargs)
params["ls_provider"] = "together"
return params
model_name: str = Field(default="meta-llama/Llama-3-8b-chat-hf", alias="model")
"""Model name to use."""
together_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")

@ -28,6 +28,8 @@ def test_together_model_param() -> None:
assert llm.model_name == "foo"
llm = ChatTogether(model_name="foo")
assert llm.model_name == "foo"
ls_params = llm._get_ls_params()
assert ls_params["ls_provider"] == "together"
def test_function_dict_to_message_function_message() -> None:

@ -7,6 +7,7 @@ from typing import (
)
import openai
from langchain_core.language_models.chat_models import LangSmithParams
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import (
convert_to_secret_str,
@ -52,6 +53,14 @@ class ChatUpstage(BaseChatOpenAI):
"""Return type of chat model."""
return "upstage-chat"
def _get_ls_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> LangSmithParams:
"""Get the parameters used to invoke the model."""
params = super()._get_ls_params(stop=stop, **kwargs)
params["ls_provider"] = "upstage"
return params
model_name: str = Field(default="solar-1-mini-chat", alias="model")
"""Model name to use."""
upstage_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")

@ -28,6 +28,8 @@ def test_upstage_model_param() -> None:
assert llm.model_name == "foo"
llm = ChatUpstage(model_name="foo")
assert llm.model_name == "foo"
ls_params = llm._get_ls_params()
assert ls_params["ls_provider"] == "upstage"
def test_function_dict_to_message_function_message() -> None:

@ -45,7 +45,6 @@ class VoyageAIEmbeddings(BaseModel, Embeddings):
model = values.get("model")
batch_size = values.get("batch_size")
if batch_size is None:
print("batch size", batch_size)
values["batch_size"] = 72 if model in ["voyage-2", "voyage-02"] else 7
return values

@ -1,9 +1,9 @@
from abc import ABC, abstractmethod
from typing import Type
from typing import List, Literal, Optional, Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.pydantic_v1 import BaseModel, Field, ValidationError
from langchain_core.tools import tool
@ -89,3 +89,29 @@ class ChatModelUnitTests(ABC):
model = chat_model_class(**chat_model_params)
assert model is not None
assert model.with_structured_output(Person) is not None
def test_standard_params(
self, chat_model_class: Type[BaseChatModel], chat_model_params: dict
) -> None:
class ExpectedParams(BaseModel):
ls_provider: str
ls_model_name: str
ls_model_type: Literal["chat"]
ls_temperature: Optional[float]
ls_max_tokens: Optional[int]
ls_stop: Optional[List[str]]
model = chat_model_class(**chat_model_params)
ls_params = model._get_ls_params()
try:
ExpectedParams(**ls_params)
except ValidationError as e:
pytest.fail(f"Validation error: {e}")
# Test optional params
model = chat_model_class(max_tokens=10, stop=["test"], **chat_model_params)
ls_params = model._get_ls_params()
try:
ExpectedParams(**ls_params)
except ValidationError as e:
pytest.fail(f"Validation error: {e}")
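
The suite thereby pins down a schema every integration must satisfy: `test_standard_params` validates the dict returned by `_get_ls_params` twice, once with the default constructor params and once with `max_tokens` and `stop` set. A conforming return value looks like this (values illustrative):

```python
{
    "ls_provider": "openai",           # str, lowercase provider tag
    "ls_model_name": "gpt-3.5-turbo",  # str
    "ls_model_type": "chat",           # Literal["chat"]
    "ls_temperature": 0.7,             # Optional[float]
    "ls_max_tokens": 10,               # Optional[int]
    "ls_stop": ["test"],               # Optional[List[str]]
}
```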

@ -1249,13 +1249,13 @@ test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (>
[[package]]
name = "jupyterlab"
version = "4.1.8"
version = "4.2.0"
description = "JupyterLab computational environment"
optional = false
python-versions = ">=3.8"
files = [
{file = "jupyterlab-4.1.8-py3-none-any.whl", hash = "sha256:c3baf3a2f91f89d110ed5786cd18672b9a357129d4e389d2a0dead15e11a4d2c"},
{file = "jupyterlab-4.1.8.tar.gz", hash = "sha256:3384aded8680e7ce504fd63b8bb89a39df21c9c7694d9e7dc4a68742cdb30f9b"},
{file = "jupyterlab-4.2.0-py3-none-any.whl", hash = "sha256:0dfe9278e25a145362289c555d9beb505697d269c10e99909766af7c440ad3cc"},
{file = "jupyterlab-4.2.0.tar.gz", hash = "sha256:356e9205a6a2ab689c47c8fe4919dba6c076e376d03f26baadc05748c2435dd5"},
]
[package.dependencies]
@ -1276,11 +1276,11 @@ tornado = ">=6.2.0"
traitlets = "*"
[package.extras]
dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.2.0)"]
dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.3.5)"]
docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-jupyter", "sphinx (>=1.8,<7.3.0)", "sphinx-copybutton"]
docs-screenshots = ["altair (==5.2.0)", "ipython (==8.16.1)", "ipywidgets (==8.1.1)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.0.post6)", "matplotlib (==3.8.2)", "nbconvert (>=7.0.0)", "pandas (==2.2.0)", "scipy (==1.12.0)", "vega-datasets (==0.9.0)"]
docs-screenshots = ["altair (==5.3.0)", "ipython (==8.16.1)", "ipywidgets (==8.1.2)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.1.post2)", "matplotlib (==3.8.3)", "nbconvert (>=7.0.0)", "pandas (==2.2.1)", "scipy (==1.12.0)", "vega-datasets (==0.9.0)"]
test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "requests", "requests-cache", "virtualenv"]
upgrade-extension = ["copier (>=8.0,<9.0)", "jinja2-time (<0.3)", "pydantic (<2.0)", "pyyaml-include (<2.0)", "tomli-w (<2.0)"]
upgrade-extension = ["copier (>=8,<10)", "jinja2-time (<0.3)", "pydantic (<2.0)", "pyyaml-include (<2.0)", "tomli-w (<2.0)"]
[[package]]
name = "jupyterlab-pygments"
@ -1332,7 +1332,7 @@ files = [
[[package]]
name = "langchain-core"
version = "0.2.0rc1"
version = "0.2.0"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -1374,13 +1374,13 @@ test = ["pytest", "pytest-cov"]
[[package]]
name = "langsmith"
version = "0.1.57"
version = "0.1.59"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = "<4.0,>=3.8.1"
files = [
{file = "langsmith-0.1.57-py3-none-any.whl", hash = "sha256:dbd83b0944a2fbea4151f0aa053530d93fcf6784a580621bc60633cb890b57dc"},
{file = "langsmith-0.1.57.tar.gz", hash = "sha256:4682204de19f0218029c2b8445ce2cc3485c8d0df9796b31e2ce4c9051fce365"},
{file = "langsmith-0.1.59-py3-none-any.whl", hash = "sha256:445e3bc1d3baa1e5340cd979907a19483b9763a2ed37b863a01113d406f69345"},
{file = "langsmith-0.1.59.tar.gz", hash = "sha256:e748a89f4dd6aa441349143e49e546c03b5dfb43376a25bfef6a5ca792fe1437"},
]
[package.dependencies]
@ -1965,26 +1965,26 @@ files = [
[[package]]
name = "notebook"
version = "7.1.3"
version = "7.2.0"
description = "Jupyter Notebook - A web-based notebook environment for interactive computing"
optional = false
python-versions = ">=3.8"
files = [
{file = "notebook-7.1.3-py3-none-any.whl", hash = "sha256:919b911e59f41f6e3857ce93c9d93535ba66bb090059712770e5968c07e1004d"},
{file = "notebook-7.1.3.tar.gz", hash = "sha256:41fcebff44cf7bb9377180808bcbae066629b55d8c7722f1ebbe75ca44f9cfc1"},
{file = "notebook-7.2.0-py3-none-any.whl", hash = "sha256:b4752d7407d6c8872fc505df0f00d3cae46e8efb033b822adacbaa3f1f3ce8f5"},
{file = "notebook-7.2.0.tar.gz", hash = "sha256:34a2ba4b08ad5d19ec930db7484fb79746a1784be9e1a5f8218f9af8656a141f"},
]
[package.dependencies]
jupyter-server = ">=2.4.0,<3"
jupyterlab = ">=4.1.1,<4.2"
jupyterlab-server = ">=2.22.1,<3"
jupyterlab = ">=4.2.0,<4.3"
jupyterlab-server = ">=2.27.1,<3"
notebook-shim = ">=0.2,<0.3"
tornado = ">=6.2.0"
[package.extras]
dev = ["hatch", "pre-commit"]
docs = ["myst-parser", "nbsphinx", "pydata-sphinx-theme", "sphinx (>=1.3.6)", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"]
test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.22.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"]
test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.27.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"]
[[package]]
name = "notebook-shim"
@ -2226,13 +2226,13 @@ files = [
[[package]]
name = "platformdirs"
version = "4.2.1"
version = "4.2.2"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
optional = false
python-versions = ">=3.8"
files = [
{file = "platformdirs-4.2.1-py3-none-any.whl", hash = "sha256:17d5a1161b3fd67b390023cb2d3b026bbd40abde6fdb052dfbd3a29c3ba22ee1"},
{file = "platformdirs-4.2.1.tar.gz", hash = "sha256:031cd18d4ec63ec53e82dceaac0417d218a6863f7745dfcc9efe7793b7039bdf"},
{file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"},
{file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"},
]
[package.extras]
@ -2901,90 +2901,90 @@ rpds-py = ">=0.7.0"
[[package]]
name = "regex"
version = "2024.5.10"
version = "2024.5.15"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
files = [
{file = "regex-2024.5.10-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eda3dd46df535da787ffb9036b5140f941ecb91701717df91c9daf64cabef953"},
{file = "regex-2024.5.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1d5bd666466c8f00a06886ce1397ba8b12371c1f1c6d1bef11013e9e0a1464a8"},
{file = "regex-2024.5.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:32e5f3b8e32918bfbdd12eca62e49ab3031125c454b507127ad6ecbd86e62fca"},
{file = "regex-2024.5.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:534efd2653ebc4f26fc0e47234e53bf0cb4715bb61f98c64d2774a278b58c846"},
{file = "regex-2024.5.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:193b7c6834a06f722f0ce1ba685efe80881de7c3de31415513862f601097648c"},
{file = "regex-2024.5.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:160ba087232c5c6e2a1e7ad08bd3a3f49b58c815be0504d8c8aacfb064491cd8"},
{file = "regex-2024.5.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:951be1eae7b47660412dc4938777a975ebc41936d64e28081bf2e584b47ec246"},
{file = "regex-2024.5.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8a0f0ab5453e409586b11ebe91c672040bc804ca98d03a656825f7890cbdf88"},
{file = "regex-2024.5.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9e6d4d6ae1827b2f8c7200aaf7501c37cf3f3896c86a6aaf2566448397c823dd"},
{file = "regex-2024.5.10-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:161a206c8f3511e2f5fafc9142a2cc25d7fe9a1ec5ad9b4ad2496a7c33e1c5d2"},
{file = "regex-2024.5.10-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:44b3267cea873684af022822195298501568ed44d542f9a2d9bebc0212e99069"},
{file = "regex-2024.5.10-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:560278c9975694e1f0bc50da187abf2cdc1e4890739ea33df2bc4a85eeef143e"},
{file = "regex-2024.5.10-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:70364a097437dd0a90b31cd77f09f7387ad9ac60ef57590971f43b7fca3082a5"},
{file = "regex-2024.5.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:42be5de7cc8c1edac55db92d82b68dc8e683b204d6f5414c5a51997a323d7081"},
{file = "regex-2024.5.10-cp310-cp310-win32.whl", hash = "sha256:9a8625849387b9d558d528e263ecc9c0fbde86cfa5c2f0eef43fff480ae24d71"},
{file = "regex-2024.5.10-cp310-cp310-win_amd64.whl", hash = "sha256:903350bf44d7e4116b4d5898b30b15755d61dcd3161e3413a49c7db76f0bee5a"},
{file = "regex-2024.5.10-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bf9596cba92ce7b1fd32c7b07c6e3212c7eed0edc271757e48bfcd2b54646452"},
{file = "regex-2024.5.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:45cc13d398b6359a7708986386f72bd156ae781c3e83a68a6d4cee5af04b1ce9"},
{file = "regex-2024.5.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ad45f3bccfcb00868f2871dce02a755529838d2b86163ab8a246115e80cfb7d6"},
{file = "regex-2024.5.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33d19f0cde6838c81acffff25c7708e4adc7dd02896c9ec25c3939b1500a1778"},
{file = "regex-2024.5.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0a9f89d7db5ef6bdf53e5cc8e6199a493d0f1374b3171796b464a74ebe8e508a"},
{file = "regex-2024.5.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c6c71cf92b09e5faa72ea2c68aa1f61c9ce11cb66fdc5069d712f4392ddfd00"},
{file = "regex-2024.5.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7467ad8b0eac0b28e52679e972b9b234b3de0ea5cee12eb50091d2b68145fe36"},
{file = "regex-2024.5.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc0db93ad039fc2fe32ccd3dd0e0e70c4f3d6e37ae83f0a487e1aba939bd2fbd"},
{file = "regex-2024.5.10-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fa9335674d7c819674467c7b46154196c51efbaf5f5715187fd366814ba3fa39"},
{file = "regex-2024.5.10-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7dda3091838206969c2b286f9832dff41e2da545b99d1cfaea9ebd8584d02708"},
{file = "regex-2024.5.10-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:504b5116e2bd1821efd815941edff7535e93372a098e156bb9dffde30264e798"},
{file = "regex-2024.5.10-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:91b53dea84415e8115506cc62e441a2b54537359c63d856d73cb1abe05af4c9a"},
{file = "regex-2024.5.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1a3903128f9e17a500618e80c68165c78c741ebb17dd1a0b44575f92c3c68b02"},
{file = "regex-2024.5.10-cp311-cp311-win32.whl", hash = "sha256:236cace6c1903effd647ed46ce6dd5d76d54985fc36dafc5256032886736c85d"},
{file = "regex-2024.5.10-cp311-cp311-win_amd64.whl", hash = "sha256:12446827f43c7881decf2c126762e11425de5eb93b3b0d8b581344c16db7047a"},
{file = "regex-2024.5.10-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:14905ed75c7a6edf423eb46c213ed3f4507c38115f1ed3c00f4ec9eafba50e58"},
{file = "regex-2024.5.10-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4fad420b14ae1970a1f322e8ae84a1d9d89375eb71e1b504060ab2d1bfe68f3c"},
{file = "regex-2024.5.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c46a76a599fcbf95f98755275c5527304cc4f1bb69919434c1e15544d7052910"},
{file = "regex-2024.5.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0faecb6d5779753a6066a3c7a0471a8d29fe25d9981ca9e552d6d1b8f8b6a594"},
{file = "regex-2024.5.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aab65121229c2ecdf4a31b793d99a6a0501225bd39b616e653c87b219ed34a49"},
{file = "regex-2024.5.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:50e7e96a527488334379e05755b210b7da4a60fc5d6481938c1fa053e0c92184"},
{file = "regex-2024.5.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba034c8db4b264ef1601eb33cd23d87c5013b8fb48b8161debe2e5d3bd9156b0"},
{file = "regex-2024.5.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:031219782d97550c2098d9a68ce9e9eaefe67d2d81d8ff84c8354f9c009e720c"},
{file = "regex-2024.5.10-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:62b5f7910b639f3c1d122d408421317c351e213ca39c964ad4121f27916631c6"},
{file = "regex-2024.5.10-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cd832bd9b6120d6074f39bdfbb3c80e416848b07ac72910f1c7f03131a6debc3"},
{file = "regex-2024.5.10-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:e91b1976358e17197157b405cab408a5f4e33310cda211c49fc6da7cffd0b2f0"},
{file = "regex-2024.5.10-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:571452362d552de508c37191b6abbbb660028b8b418e2d68c20779e0bc8eaaa8"},
{file = "regex-2024.5.10-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5253dcb0bfda7214523de58b002eb0090cb530d7c55993ce5f6d17faf953ece7"},
{file = "regex-2024.5.10-cp312-cp312-win32.whl", hash = "sha256:2f30a5ab8902f93930dc6f627c4dd5da2703333287081c85cace0fc6e21c25af"},
{file = "regex-2024.5.10-cp312-cp312-win_amd64.whl", hash = "sha256:3799e36d60a35162bb35b2246d8bb012192b7437dff807ef79c14e7352706306"},
{file = "regex-2024.5.10-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:bbdc5db2c98ac2bf1971ffa1410c87ca7a15800415f788971e8ba8520fc0fda9"},
{file = "regex-2024.5.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6ccdeef4584450b6f0bddd5135354908dacad95425fcb629fe36d13e48b60f32"},
{file = "regex-2024.5.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:29d839829209f3c53f004e1de8c3113efce6d98029f044fa5cfee666253ee7e6"},
{file = "regex-2024.5.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0709ba544cf50bd5cb843df4b8bb6701bae2b70a8e88da9add8386cbca5c1385"},
{file = "regex-2024.5.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:972b49f2fe1047b9249c958ec4fa1bdd2cf8ce305dc19d27546d5a38e57732d8"},
{file = "regex-2024.5.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cdbb1998da94607d5eec02566b9586f0e70d6438abf1b690261aac0edda7ab6"},
{file = "regex-2024.5.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7c8ee4861d9ef5b1120abb75846828c811f932d63311596ad25fa168053e00"},
{file = "regex-2024.5.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d35d4cc9270944e95f9c88af757b0c9fc43f396917e143a5756608462c5223b"},
{file = "regex-2024.5.10-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8722f72068b3e1156a4b2e1afde6810f1fc67155a9fa30a4b9d5b4bc46f18fb0"},
{file = "regex-2024.5.10-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:696639a73ca78a380acfaa0a1f6dd8220616a99074c05bba9ba8bb916914b224"},
{file = "regex-2024.5.10-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea057306ab469130167014b662643cfaed84651c792948891d003cf0039223a5"},
{file = "regex-2024.5.10-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b43b78f9386d3d932a6ce5af4b45f393d2e93693ee18dc4800d30a8909df700e"},
{file = "regex-2024.5.10-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c43395a3b7cc9862801a65c6994678484f186ce13c929abab44fb8a9e473a55a"},
{file = "regex-2024.5.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0bc94873ba11e34837bffd7e5006703abeffc4514e2f482022f46ce05bd25e67"},
{file = "regex-2024.5.10-cp38-cp38-win32.whl", hash = "sha256:1118ba9def608250250f4b3e3f48c62f4562ba16ca58ede491b6e7554bfa09ff"},
{file = "regex-2024.5.10-cp38-cp38-win_amd64.whl", hash = "sha256:458d68d34fb74b906709735c927c029e62f7d06437a98af1b5b6258025223210"},
{file = "regex-2024.5.10-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:15e593386ec6331e0ab4ac0795b7593f02ab2f4b30a698beb89fbdc34f92386a"},
{file = "regex-2024.5.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ca23b41355ba95929e9505ee04e55495726aa2282003ed9b012d86f857d3e49b"},
{file = "regex-2024.5.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2c8982ee19ccecabbaeac1ba687bfef085a6352a8c64f821ce2f43e6d76a9298"},
{file = "regex-2024.5.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7117cb7d6ac7f2e985f3d18aa8a1728864097da1a677ffa69e970ca215baebf1"},
{file = "regex-2024.5.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b66421f8878a0c82fc0c272a43e2121c8d4c67cb37429b764f0d5ad70b82993b"},
{file = "regex-2024.5.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:224a9269f133564109ce668213ef3cb32bc72ccf040b0b51c72a50e569e9dc9e"},
{file = "regex-2024.5.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab98016541543692a37905871a5ffca59b16e08aacc3d7d10a27297b443f572d"},
{file = "regex-2024.5.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51d27844763c273a122e08a3e86e7aefa54ee09fb672d96a645ece0454d8425e"},
{file = "regex-2024.5.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:853cc36e756ff673bf984e9044ccc8fad60b95a748915dddeab9488aea974c73"},
{file = "regex-2024.5.10-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4e7eaf9df15423d07b6050fb91f86c66307171b95ea53e2d87a7993b6d02c7f7"},
{file = "regex-2024.5.10-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:169fd0acd7a259f58f417e492e93d0e15fc87592cd1e971c8c533ad5703b5830"},
{file = "regex-2024.5.10-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:334b79ce9c08f26b4659a53f42892793948a613c46f1b583e985fd5a6bf1c149"},
{file = "regex-2024.5.10-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:f03b1dbd4d9596dd84955bb40f7d885204d6aac0d56a919bb1e0ff2fb7e1735a"},
{file = "regex-2024.5.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfa6d61a76c77610ba9274c1a90a453062bdf6887858afbe214d18ad41cf6bde"},
{file = "regex-2024.5.10-cp39-cp39-win32.whl", hash = "sha256:249fbcee0a277c32a3ce36d8e36d50c27c968fdf969e0fbe342658d4e010fbc8"},
{file = "regex-2024.5.10-cp39-cp39-win_amd64.whl", hash = "sha256:0ce56a923f4c01d7568811bfdffe156268c0a7aae8a94c902b92fe34c4bde785"},
{file = "regex-2024.5.10.tar.gz", hash = "sha256:304e7e2418146ae4d0ef0e9ffa28f881f7874b45b4994cc2279b21b6e7ae50c8"},
{file = "regex-2024.5.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a81e3cfbae20378d75185171587cbf756015ccb14840702944f014e0d93ea09f"},
{file = "regex-2024.5.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b59138b219ffa8979013be7bc85bb60c6f7b7575df3d56dc1e403a438c7a3f6"},
{file = "regex-2024.5.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0bd000c6e266927cb7a1bc39d55be95c4b4f65c5be53e659537537e019232b1"},
{file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eaa7ddaf517aa095fa8da0b5015c44d03da83f5bd49c87961e3c997daed0de7"},
{file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba68168daedb2c0bab7fd7e00ced5ba90aebf91024dea3c88ad5063c2a562cca"},
{file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e8d717bca3a6e2064fc3a08df5cbe366369f4b052dcd21b7416e6d71620dca1"},
{file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1337b7dbef9b2f71121cdbf1e97e40de33ff114801263b275aafd75303bd62b5"},
{file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9ebd0a36102fcad2f03696e8af4ae682793a5d30b46c647eaf280d6cfb32796"},
{file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9efa1a32ad3a3ea112224897cdaeb6aa00381627f567179c0314f7b65d354c62"},
{file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1595f2d10dff3d805e054ebdc41c124753631b6a471b976963c7b28543cf13b0"},
{file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b802512f3e1f480f41ab5f2cfc0e2f761f08a1f41092d6718868082fc0d27143"},
{file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a0981022dccabca811e8171f913de05720590c915b033b7e601f35ce4ea7019f"},
{file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:19068a6a79cf99a19ccefa44610491e9ca02c2be3305c7760d3831d38a467a6f"},
{file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1b5269484f6126eee5e687785e83c6b60aad7663dafe842b34691157e5083e53"},
{file = "regex-2024.5.15-cp310-cp310-win32.whl", hash = "sha256:ada150c5adfa8fbcbf321c30c751dc67d2f12f15bd183ffe4ec7cde351d945b3"},
{file = "regex-2024.5.15-cp310-cp310-win_amd64.whl", hash = "sha256:ac394ff680fc46b97487941f5e6ae49a9f30ea41c6c6804832063f14b2a5a145"},
{file = "regex-2024.5.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f5b1dff3ad008dccf18e652283f5e5339d70bf8ba7c98bf848ac33db10f7bc7a"},
{file = "regex-2024.5.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c6a2b494a76983df8e3d3feea9b9ffdd558b247e60b92f877f93a1ff43d26656"},
{file = "regex-2024.5.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a32b96f15c8ab2e7d27655969a23895eb799de3665fa94349f3b2fbfd547236f"},
{file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10002e86e6068d9e1c91eae8295ef690f02f913c57db120b58fdd35a6bb1af35"},
{file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec54d5afa89c19c6dd8541a133be51ee1017a38b412b1321ccb8d6ddbeb4cf7d"},
{file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10e4ce0dca9ae7a66e6089bb29355d4432caed736acae36fef0fdd7879f0b0cb"},
{file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e507ff1e74373c4d3038195fdd2af30d297b4f0950eeda6f515ae3d84a1770f"},
{file = "regex-2024.5.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1f059a4d795e646e1c37665b9d06062c62d0e8cc3c511fe01315973a6542e40"},
{file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0721931ad5fe0dda45d07f9820b90b2148ccdd8e45bb9e9b42a146cb4f695649"},
{file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:833616ddc75ad595dee848ad984d067f2f31be645d603e4d158bba656bbf516c"},
{file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:287eb7f54fc81546346207c533ad3c2c51a8d61075127d7f6d79aaf96cdee890"},
{file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:19dfb1c504781a136a80ecd1fff9f16dddf5bb43cec6871778c8a907a085bb3d"},
{file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:119af6e56dce35e8dfb5222573b50c89e5508d94d55713c75126b753f834de68"},
{file = "regex-2024.5.15-cp311-cp311-win32.whl", hash = "sha256:1c1c174d6ec38d6c8a7504087358ce9213d4332f6293a94fbf5249992ba54efa"},
{file = "regex-2024.5.15-cp311-cp311-win_amd64.whl", hash = "sha256:9e717956dcfd656f5055cc70996ee2cc82ac5149517fc8e1b60261b907740201"},
{file = "regex-2024.5.15-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:632b01153e5248c134007209b5c6348a544ce96c46005d8456de1d552455b014"},
{file = "regex-2024.5.15-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e64198f6b856d48192bf921421fdd8ad8eb35e179086e99e99f711957ffedd6e"},
{file = "regex-2024.5.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68811ab14087b2f6e0fc0c2bae9ad689ea3584cad6917fc57be6a48bbd012c49"},
{file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ec0c2fea1e886a19c3bee0cd19d862b3aa75dcdfb42ebe8ed30708df64687a"},
{file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0c0c0003c10f54a591d220997dd27d953cd9ccc1a7294b40a4be5312be8797b"},
{file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2431b9e263af1953c55abbd3e2efca67ca80a3de8a0437cb58e2421f8184717a"},
{file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a605586358893b483976cffc1723fb0f83e526e8f14c6e6614e75919d9862cf"},
{file = "regex-2024.5.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391d7f7f1e409d192dba8bcd42d3e4cf9e598f3979cdaed6ab11288da88cb9f2"},
{file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9ff11639a8d98969c863d4617595eb5425fd12f7c5ef6621a4b74b71ed8726d5"},
{file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4eee78a04e6c67e8391edd4dad3279828dd66ac4b79570ec998e2155d2e59fd5"},
{file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8fe45aa3f4aa57faabbc9cb46a93363edd6197cbc43523daea044e9ff2fea83e"},
{file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d0a3d8d6acf0c78a1fff0e210d224b821081330b8524e3e2bc5a68ef6ab5803d"},
{file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c486b4106066d502495b3025a0a7251bf37ea9540433940a23419461ab9f2a80"},
{file = "regex-2024.5.15-cp312-cp312-win32.whl", hash = "sha256:c49e15eac7c149f3670b3e27f1f28a2c1ddeccd3a2812cba953e01be2ab9b5fe"},
{file = "regex-2024.5.15-cp312-cp312-win_amd64.whl", hash = "sha256:673b5a6da4557b975c6c90198588181029c60793835ce02f497ea817ff647cb2"},
{file = "regex-2024.5.15-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:87e2a9c29e672fc65523fb47a90d429b70ef72b901b4e4b1bd42387caf0d6835"},
{file = "regex-2024.5.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3bea0ba8b73b71b37ac833a7f3fd53825924165da6a924aec78c13032f20850"},
{file = "regex-2024.5.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bfc4f82cabe54f1e7f206fd3d30fda143f84a63fe7d64a81558d6e5f2e5aaba9"},
{file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5bb9425fe881d578aeca0b2b4b3d314ec88738706f66f219c194d67179337cb"},
{file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64c65783e96e563103d641760664125e91bd85d8e49566ee560ded4da0d3e704"},
{file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf2430df4148b08fb4324b848672514b1385ae3807651f3567871f130a728cc3"},
{file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5397de3219a8b08ae9540c48f602996aa6b0b65d5a61683e233af8605c42b0f2"},
{file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:455705d34b4154a80ead722f4f185b04c4237e8e8e33f265cd0798d0e44825fa"},
{file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2b6f1b3bb6f640c1a92be3bbfbcb18657b125b99ecf141fb3310b5282c7d4ed"},
{file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3ad070b823ca5890cab606c940522d05d3d22395d432f4aaaf9d5b1653e47ced"},
{file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5b5467acbfc153847d5adb21e21e29847bcb5870e65c94c9206d20eb4e99a384"},
{file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e6662686aeb633ad65be2a42b4cb00178b3fbf7b91878f9446075c404ada552f"},
{file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:2b4c884767504c0e2401babe8b5b7aea9148680d2e157fa28f01529d1f7fcf67"},
{file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3cd7874d57f13bf70078f1ff02b8b0aa48d5b9ed25fc48547516c6aba36f5741"},
{file = "regex-2024.5.15-cp38-cp38-win32.whl", hash = "sha256:e4682f5ba31f475d58884045c1a97a860a007d44938c4c0895f41d64481edbc9"},
{file = "regex-2024.5.15-cp38-cp38-win_amd64.whl", hash = "sha256:d99ceffa25ac45d150e30bd9ed14ec6039f2aad0ffa6bb87a5936f5782fc1569"},
{file = "regex-2024.5.15-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13cdaf31bed30a1e1c2453ef6015aa0983e1366fad2667657dbcac7b02f67133"},
{file = "regex-2024.5.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cac27dcaa821ca271855a32188aa61d12decb6fe45ffe3e722401fe61e323cd1"},
{file = "regex-2024.5.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7dbe2467273b875ea2de38ded4eba86cbcbc9a1a6d0aa11dcf7bd2e67859c435"},
{file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64f18a9a3513a99c4bef0e3efd4c4a5b11228b48aa80743be822b71e132ae4f5"},
{file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d347a741ea871c2e278fde6c48f85136c96b8659b632fb57a7d1ce1872547600"},
{file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1878b8301ed011704aea4c806a3cadbd76f84dece1ec09cc9e4dc934cfa5d4da"},
{file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4babf07ad476aaf7830d77000874d7611704a7fcf68c9c2ad151f5d94ae4bfc4"},
{file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35cb514e137cb3488bce23352af3e12fb0dbedd1ee6e60da053c69fb1b29cc6c"},
{file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cdd09d47c0b2efee9378679f8510ee6955d329424c659ab3c5e3a6edea696294"},
{file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:72d7a99cd6b8f958e85fc6ca5b37c4303294954eac1376535b03c2a43eb72629"},
{file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a094801d379ab20c2135529948cb84d417a2169b9bdceda2a36f5f10977ebc16"},
{file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c0c18345010870e58238790a6779a1219b4d97bd2e77e1140e8ee5d14df071aa"},
{file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:16093f563098448ff6b1fa68170e4acbef94e6b6a4e25e10eae8598bb1694b5d"},
{file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e38a7d4e8f633a33b4c7350fbd8bad3b70bf81439ac67ac38916c4a86b465456"},
{file = "regex-2024.5.15-cp39-cp39-win32.whl", hash = "sha256:71a455a3c584a88f654b64feccc1e25876066c4f5ef26cd6dd711308aa538694"},
{file = "regex-2024.5.15-cp39-cp39-win_amd64.whl", hash = "sha256:cab12877a9bdafde5500206d1020a584355a97884dfd388af3699e9137bf7388"},
{file = "regex-2024.5.15.tar.gz", hash = "sha256:d3ee02d9e5f482cc8309134a91eeaacbdd2261ba111b0fef3748eeb4913e6a2c"},
]
[[package]]
@ -3919,18 +3919,18 @@ files = [
[[package]]
name = "zipp"
version = "3.18.1"
version = "3.18.2"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.8"
files = [
{file = "zipp-3.18.1-py3-none-any.whl", hash = "sha256:206f5a15f2af3dbaee80769fb7dc6f249695e940acca08dfb2a4769fe61e538b"},
{file = "zipp-3.18.1.tar.gz", hash = "sha256:2884ed22e7d8961de1c9a05142eb69a247f120291bc0206a00a7642f09b5b715"},
{file = "zipp-3.18.2-py3-none-any.whl", hash = "sha256:dce197b859eb796242b0622af1b8beb0a722d52aa2f57133ead08edd5bf5374e"},
{file = "zipp-3.18.2.tar.gz", hash = "sha256:6278d9ddbcfb1f1089a88fde84481528b07b0e10474e09dcfe53dad4069fa059"},
]
[package.extras]
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
[extras]
extended-testing = ["beautifulsoup4", "lxml"]
@ -3938,4 +3938,4 @@ extended-testing = ["beautifulsoup4", "lxml"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "46deb6e5ef12180d34bdab47dc65df21150267f2b78d78f7674e8b5d21253e1c"
content-hash = "c4a0a1636ef6117db91919a711bf8ebff3f10ab32356f9c9545ffc06d4c1b628"

@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-text-splitters"
version = "0.0.2"
version = "0.2.0"
description = "LangChain text splitting utilities"
authors = []
license = "MIT"
@ -10,7 +10,7 @@ repository = "https://github.com/langchain-ai/langchain"
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = ">=0.1.28,<0.3"
langchain-core = "^0.2.0"
lxml = {version = ">=4.9.3,<6.0", optional = true}
beautifulsoup4 = {version = "^4.12.3", optional = true}
