Remove packages for pydantic compatibility (#9217)

# Poetry updates

This PR updates LangChain's Poetry file to remove
any dependencies that aren't yet compatible with pydantic v2.

All of the removed packages remain usable under pydantic v1 and can be
installed separately.

## Bumping the following packages:

* langsmith

## Removing the following packages:

Not used in the extended unit tests:

* zep-python, anthropic, jina, spacy, steamship, betabageldb

Not used at all:

* octoai-sdk

Also cleaned up the extras that referenced the removed packages.

## Snapshots updated

Some test snapshots had to be updated due to a change in the langsmith data
model: `RunType` used to be a `Union` of an enum and a string, and is now a
string only.
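
A hedged sketch of the shape of that change (the enum name and members below are illustrative, not copied from langsmith):

```python
from enum import Enum
from typing import Union

# Before (illustrative): a run's type could be either an enum member or a raw
# string, so serialized runs could carry an enum value.
class RunTypeEnum(str, Enum):
    llm = "llm"
    chain = "chain"
    tool = "tool"

RunTypeOld = Union[RunTypeEnum, str]

# After (illustrative): the run type is a plain string, so serialized runs (and
# the snapshots built from them) always contain e.g. "llm" rather than an enum
# member, which is why the stored snapshots had to be regenerated.
RunTypeNew = str
```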
Commit 0470198fb5 (parent e986afa13a), authored by Eugene Yurtsev and committed via GitHub.

File diff suppressed because it is too large.

@@ -26,7 +26,6 @@ elasticsearch = {version = "^8", optional = true}
 opensearch-py = {version = "^2.0.0", optional = true}
 redis = {version = "^4", optional = true}
 manifest-ml = {version = "^0.0.1", optional = true}
-spacy = {version = "^3", optional = true}
 nltk = {version = "^3", optional = true}
 transformers = {version = "^4", optional = true}
 beautifulsoup4 = {version = "^4", optional = true}
@@ -42,7 +41,6 @@ marqo = {version = "^0.11.0", optional=true}
 google-api-python-client = {version = "2.70.0", optional = true}
 google-auth = {version = "^2.18.1", optional = true}
 wolframalpha = {version = "5.0.0", optional = true}
-anthropic = {version = "^0.3", optional = true}
 qdrant-client = {version = "^1.3.1", optional = true, python = ">=3.8.1,<3.12"}
 dataclasses-json = "^0.5.7"
 tensorflow-text = {version = "^2.11.0", optional = true, python = "^3.10, <3.12"}
@@ -52,8 +50,6 @@ openai = {version = "^0", optional = true}
 nlpcloud = {version = "^1", optional = true}
 nomic = {version = "^1.0.43", optional = true}
 huggingface_hub = {version = "^0", optional = true}
-octoai-sdk = {version = "^0.1.1", optional = true}
-jina = {version = "^3.14", optional = true}
 google-search-results = {version = "^2", optional = true}
 sentence-transformers = {version = "^2", optional = true}
 aiohttp = "^3.8.3"
@@ -81,7 +77,6 @@ pexpect = {version = "^4.8.0", optional = true}
 pyvespa = {version = "^0.33.0", optional = true}
 O365 = {version = "^2.0.26", optional = true}
 jq = {version = "^1.4.1", optional = true}
-steamship = {version = "^2.16.9", optional = true}
 pdfminer-six = {version = "^20221105", optional = true}
 docarray = {version="^0.32.0", extras=["hnswlib"], optional=true}
 lxml = {version = "^4.9.2", optional = true}
@@ -91,7 +86,6 @@ gql = {version = "^3.4.1", optional = true}
 pandas = {version = "^2.0.1", optional = true}
 telethon = {version = "^1.28.5", optional = true}
 neo4j = {version = "^5.8.1", optional = true}
-zep-python = {version = ">=1.0.1,<1.1.0", optional = true}
 langkit = {version = ">=0.0.6, <0.1.0", optional = true}
 chardet = {version="^5.1.0", optional=true}
 requests-toolbelt = {version = "^1.0.0", optional = true}
@@ -120,11 +114,10 @@ cassio = {version = "^0.0.7", optional = true}
 rdflib = {version = "^6.3.2", optional = true}
 sympy = {version = "^1.12", optional = true}
 rapidfuzz = {version = "^3.1.1", optional = true}
-langsmith = "~0.0.11"
+langsmith = "~0.0.21"
 rank-bm25 = {version = "^0.2.2", optional = true}
 amadeus = {version = ">=8.1.0", optional = true}
 geopandas = {version = "^0.13.1", optional = true}
-xinference = {version = "^0.0.6", optional = true}
 python-arango = {version = "^7.5.9", optional = true}
 gitpython = {version = "^3.1.32", optional = true}
 librosa = {version="^0.10.0.post2", optional = true }
@@ -133,7 +126,6 @@ newspaper3k = {version = "^0.2.8", optional = true}
 amazon-textract-caller = {version = "<2", optional = true}
 xata = {version = "^1.0.0a7", optional = true}
 xmltodict = {version = "^0.13.0", optional = true}
-betabageldb = {version = "0.2.32", optional = true, python = ">=3.7,<3.11"}
 
 [tool.poetry.group.test.dependencies]
@@ -205,7 +197,7 @@ playwright = "^1.28.0"
 setuptools = "^67.6.1"
 
 [tool.poetry.extras]
-llms = ["anthropic", "clarifai", "cohere", "openai", "openllm", "openlm", "nlpcloud", "huggingface_hub", "manifest-ml", "torch", "transformers", "xinference"]
+llms = ["clarifai", "cohere", "openai", "openllm", "openlm", "nlpcloud", "huggingface_hub", "manifest-ml", "torch", "transformers", ]
 qdrant = ["qdrant-client"]
 openai = ["openai", "tiktoken"]
 text_helpers = ["chardet"]
@@ -225,13 +217,11 @@ azure = [
     "azure-search-documents",
 ]
 all = [
-    "anthropic",
     "clarifai",
     "cohere",
     "openai",
     "nlpcloud",
     "huggingface_hub",
-    "jina",
     "manifest-ml",
     "elasticsearch",
     "opensearch-py",
@@ -239,7 +229,6 @@ all = [
     "faiss-cpu",
     "sentence-transformers",
     "transformers",
-    "spacy",
     "nltk",
     "wikipedia",
     "beautifulsoup4",
@@ -283,7 +272,6 @@ all = [
     "O365",
     "jq",
     "docarray",
-    "steamship",
     "pdfminer-six",
     "lxml",
     "requests-toolbelt",
@@ -298,10 +286,8 @@ all = [
     "nebula3-python",
     "awadb",
     "esprima",
-    "octoai-sdk",
     "rdflib",
     "amadeus",
-    "xinference",
     "librosa",
     "python-arango",
 ]
@@ -330,7 +316,6 @@ extended_testing = [
     "pandas",
     "telethon",
     "psychicapi",
-    "zep-python",
     "gql",
     "requests_toolbelt",
     "html2text",
@@ -345,14 +330,11 @@ extended_testing = [
     "rank_bm25",
     "geopandas",
     "jinja2",
-    "xinference",
     "gitpython",
     "newspaper3k",
     "feedparser",
     "xata",
     "xmltodict",
-    "betabageldb",
-    "anthropic",
 ]
 
 [tool.ruff]

@@ -95,3 +95,19 @@ def test_formatting() -> None:
     chat_messages = [HumanMessage(content="Hello"), AIMessage(content="Answer:")]
     result = chat._convert_messages_to_prompt(chat_messages)
     assert result == "\n\nHuman: Hello\n\nAssistant: Answer:"
+
+
+def test_anthropic_model_kwargs() -> None:
+    llm = ChatAnthropic(model_kwargs={"foo": "bar"})
+    assert llm.model_kwargs == {"foo": "bar"}
+
+
+def test_anthropic_invalid_model_kwargs() -> None:
+    with pytest.raises(ValueError):
+        ChatAnthropic(model_kwargs={"max_tokens_to_sample": 5})
+
+
+def test_anthropic_incorrect_field() -> None:
+    with pytest.warns(match="not default parameter"):
+        llm = ChatAnthropic(foo="bar")
+    assert llm.model_kwargs == {"foo": "bar"}
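
The new tests above pin down a contract: unknown keyword arguments are swept into `model_kwargs` with a warning, while parameters that are already modeled as fields must not be passed through `model_kwargs`. The sketch below shows how that contract is typically implemented with a pydantic v1 `root_validator`; `FakeChatModel` and its fields are illustrative, not LangChain's actual `ChatAnthropic` implementation:

```python
import warnings
from typing import Any, Dict

from pydantic import BaseModel, root_validator  # pydantic v1 API


class FakeChatModel(BaseModel):
    """Illustrative model mirroring the model_kwargs contract the tests exercise."""

    max_tokens_to_sample: int = 256
    model_kwargs: Dict[str, Any] = {}

    @root_validator(pre=True)
    def collect_extra_kwargs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        declared = set(cls.__fields__)
        extra: Dict[str, Any] = dict(values.get("model_kwargs", {}))
        # Explicitly modeled parameters must be passed as fields, not via model_kwargs.
        overlapping = declared.intersection(extra)
        if overlapping:
            raise ValueError(f"Parameters {overlapping} should be specified explicitly.")
        # Unknown keyword arguments are moved into model_kwargs, with a warning.
        for name in list(values):
            if name not in declared:
                warnings.warn(f"{name} is not default parameter.")
                extra[name] = values.pop(name)
        values["model_kwargs"] = extra
        return values
```

With this pattern, `FakeChatModel(foo="bar")` warns and yields `model_kwargs == {"foo": "bar"}`, while `FakeChatModel(model_kwargs={"max_tokens_to_sample": 5})` raises `ValueError`, which mirrors what the tests assert against `ChatAnthropic`.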

File diff suppressed because one or more lines are too long

@@ -1,4 +1,3 @@
-import warnings
 from typing import Any, Dict, List, Optional
 from uuid import UUID
@@ -35,7 +34,6 @@ from langchain.schema.runnable import (
     RunnableSequence,
     RunnableWithFallbacks,
 )
-from langchain.utils.pydantic import PYDANTIC_MAJOR_VERSION
 
 class FakeTracer(BaseTracer):
@@ -253,13 +251,7 @@ async def test_prompt_with_chat_model(
         ]
     )
-    if PYDANTIC_MAJOR_VERSION == 1:
-        assert tracer.runs == snapshot
-    else:
-        warnings.warn(
-            f"testing snapshot for pydantic version {PYDANTIC_MAJOR_VERSION} is "
-            f"not supported yet"
-        )
+    assert tracer.runs == snapshot
     mocker.stop(prompt_spy)
     mocker.stop(chat_spy)
@@ -361,13 +353,7 @@ async def test_prompt_with_llm(
             HumanMessage(content="What is your name?"),
         ]
     )
-    if PYDANTIC_MAJOR_VERSION == 1:
-        assert tracer.runs == snapshot
-    else:
-        warnings.warn(
-            f"testing snapshot for pydantic version {PYDANTIC_MAJOR_VERSION} is "
-            f"not supported yet"
-        )
+    assert tracer.runs == snapshot
     mocker.stop(prompt_spy)
     mocker.stop(llm_spy)
@@ -400,13 +386,7 @@ async def test_prompt_with_llm(
             ]
         ),
     ]
-    if PYDANTIC_MAJOR_VERSION == 1:
-        assert tracer.runs == snapshot
-    else:
-        warnings.warn(
-            f"testing snapshot for pydantic version {PYDANTIC_MAJOR_VERSION} is "
-            f"not supported yet"
-        )
+    assert tracer.runs == snapshot
     mocker.stop(prompt_spy)
     mocker.stop(llm_spy)
@@ -465,13 +445,7 @@ def test_prompt_with_chat_model_and_parser(
     )
     assert parser_spy.call_args.args[1] == AIMessage(content="foo, bar")
-    if PYDANTIC_MAJOR_VERSION == 1:
-        assert tracer.runs == snapshot
-    else:
-        warnings.warn(
-            f"testing snapshot for pydantic version {PYDANTIC_MAJOR_VERSION} is "
-            f"not supported yet"
-        )
+    assert tracer.runs == snapshot
 
 @freeze_time("2023-01-01")
@@ -529,13 +503,8 @@ def test_combining_sequences(
     assert combined_chain.invoke(
         {"question": "What is your name?"}, dict(callbacks=[tracer])
     ) == ["baz", "qux"]
-    if PYDANTIC_MAJOR_VERSION == 1:
-        assert tracer.runs == snapshot
-    else:
-        warnings.warn(
-            f"testing snapshot for pydantic version {PYDANTIC_MAJOR_VERSION} is "
-            f"not supported yet"
-        )
+    assert tracer.runs == snapshot
 
 @freeze_time("2023-01-01")
