Bump deps in langserve (#11234)

Bump deps in langserve lockfile
Eugene Yurtsev committed 11 months ago (via GitHub)
parent 4c97a10bd0
commit f91ce4eddf

@@ -1626,7 +1626,7 @@ test = ["hatch", "ipykernel", "openapi-core (>=0.18.0,<0.19.0)", "openapi-spec-v
 [[package]]
 name = "langchain"
-version = "0.0.304"
+version = "0.0.305"
 description = "Building applications with LLMs through composability"
 optional = false
 python-versions = ">=3.8.1,<4.0"
@@ -1655,7 +1655,7 @@ clarifai = ["clarifai (>=9.1.0)"]
 cohere = ["cohere (>=4,<5)"]
 docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"]
 embeddings = ["sentence-transformers (>=2,<3)"]
-extended-testing = ["amazon-textract-caller (<2)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "dashvector (>=1.0.1,<2.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "openai (>=0,<1)", "openai (>=0,<1)", "openapi-schema-pydantic (>=1.2,<2.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
+extended-testing = ["amazon-textract-caller (<2)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "dashvector (>=1.0.1,<2.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "openai (>=0,<1)", "openai (>=0,<1)", "openapi-schema-pydantic (>=1.2,<2.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
 javascript = ["esprima (>=4.0.1,<5.0.0)"]
 llms = ["clarifai (>=9.1.0)", "cohere (>=4,<5)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
 openai = ["openai (>=0,<1)", "tiktoken (>=0.3.2,<0.6.0)"]
@@ -1666,7 +1666,7 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
 type = "git"
 url = "https://github.com/langchain-ai/langchain"
 reference = "HEAD"
-resolved_reference = "4ad0f3de2b1c423d2900d925e26c0097b8741e90"
+resolved_reference = "8b4cb4eb60e3935eea895aa955d68ca0afce788c"
 subdirectory = "libs/langchain"

 [[package]]

@@ -4,6 +4,7 @@ from typing import Any
 import pytest
 from langchain.schema.messages import (
     HumanMessage,
+    HumanMessageChunk,
     SystemMessage,
 )
@@ -32,6 +33,7 @@ from langserve.serialization import simple_dumps, simple_loads
                     "additional_kwargs": {},
                     "type": "human",
                     "example": False,
+                    "is_chunk": False,
                 }
             ]
         },
@@ -44,6 +46,7 @@ from langserve.serialization import simple_dumps, simple_loads
             "content": "Hello",
             "example": False,
             "type": "human",
+            "is_chunk": False,
         },
     ),
     # Test with a list containing mixed elements
@@ -55,50 +58,60 @@ from langserve.serialization import simple_dumps, simple_loads
                 "content": "Hello",
                 "example": False,
                 "type": "human",
+                "is_chunk": False,
+            },
+            {
+                "additional_kwargs": {},
+                "content": "Hi",
+                "type": "system",
+                "is_chunk": False,
             },
-            {"additional_kwargs": {}, "content": "Hi", "type": "system"},
             42,
             "world",
         ],
     ),
-    # # Attention: This test is not correct right now
-    # # Test with full and chunk messages
-    # (
-    #     [HumanMessage(content="Hello"), HumanMessageChunk(content="Hi")],
-    #     [
-    #         {
-    #             "additional_kwargs": {},
-    #             "content": "Hello",
-    #             "example": False,
-    #             "type": "human",
-    #         },
-    #         {
-    #             "additional_kwargs": {},
-    #             "content": "Hi",
-    #             "example": False,
-    #             "type": "human",
-    #         },
-    #     ],
-    # ),
-    # # Attention: This test is not correct right now
-    # # Test with full and chunk messages
-    # (
-    #     [HumanMessageChunk(content="Hello"), HumanMessage(content="Hi")],
-    #     [
-    #         {
-    #             "additional_kwargs": {},
-    #             "content": "Hello",
-    #             "example": False,
-    #             "type": "human",
-    #         },
-    #         {
-    #             "additional_kwargs": {},
-    #             "content": "Hi",
-    #             "example": False,
-    #             "type": "human",
-    #         },
-    #     ],
-    # ),
+    # Attention: This test is not correct right now
+    # Test with full and chunk messages
+    (
+        [HumanMessage(content="Hello"), HumanMessageChunk(content="Hi")],
+        [
+            {
+                "additional_kwargs": {},
+                "content": "Hello",
+                "example": False,
+                "type": "human",
+                "is_chunk": False,
+            },
+            {
+                "additional_kwargs": {},
+                "content": "Hi",
+                "example": False,
+                "type": "human",
+                "is_chunk": True,
+            },
+        ],
+    ),
+    # Attention: This test is not correct right now
+    # Test with full and chunk messages
+    (
+        [HumanMessageChunk(content="Hello"), HumanMessage(content="Hi")],
+        [
+            {
+                "additional_kwargs": {},
+                "content": "Hello",
+                "example": False,
+                "type": "human",
+                "is_chunk": True,
+            },
+            {
+                "additional_kwargs": {},
+                "content": "Hi",
+                "example": False,
+                "type": "human",
+                "is_chunk": False,
+            },
+        ],
+    ),
     # Test with a dictionary containing mixed elements
     (
         {
@@ -112,6 +125,7 @@ from langserve.serialization import simple_dumps, simple_loads
                 "content": "Greetings",
                 "example": False,
                 "type": "human",
+                "is_chunk": False,
             },
             "numbers": [1, 2, 3],
             "boom": "Hello, world!",
