core[patch], langchain[patch]: fix required deps (#14373)

pull/14415/head
Bagatur committed 5 months ago (committed by GitHub)
parent 7186faefb2
commit b2280fd874

@@ -48,7 +48,6 @@ jobs:
compile-integration-tests:
uses: ./.github/workflows/_compile_integration_test.yml
if: ${{ inputs.working-directory != 'libs/core' }}
with:
working-directory: ${{ inputs.working-directory }}
secrets: inherit
@@ -68,7 +67,6 @@ jobs:
- "3.9"
- "3.10"
- "3.11"
if: ${{ inputs.working-directory == 'libs/langchain' }}
name: Python ${{ matrix.python-version }} extended tests
defaults:
run:
@@ -88,12 +86,7 @@ jobs:
shell: bash
run: |
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing
- name: Install langchain core editable
shell: bash
run: |
poetry run pip install -e ../core
poetry install -E extended_testing --with test
- name: Run extended tests
run: make extended_tests

@@ -38,7 +38,7 @@ jobs:
- name: Install integration dependencies
shell: bash
run: poetry install --with=test_integration
run: poetry install --with=test_integration,test
- name: Check integration tests compile
shell: bash

@@ -90,4 +90,31 @@ jobs:
- name: Analysing the code with our lint
working-directory: ${{ inputs.working-directory }}
run: |
make lint
make lint_package
- name: Install test dependencies
# Also installs dev/lint/test/typing dependencies, to ensure we have
# type hints for as many of our libraries as possible.
# This helps catch errors that can only be spotted once dependencies are installed, for example:
# https://github.com/langchain-ai/langchain/pull/10249/files#diff-935185cd488d015f026dcd9e19616ff62863e8cde8c0bee70318d3ccbca98341
#
# If you change this configuration, make sure to change the `cache-key`
# in the `poetry_setup` action above to stop using the old cache.
# It doesn't matter how you change it, any change will cause a cache-bust.
working-directory: ${{ inputs.working-directory }}
run: |
poetry install --with test
- name: Get .mypy_cache to speed up mypy
uses: actions/cache@v3
env:
SEGMENT_DOWNLOAD_TIMEOUT_MIN: "2"
with:
path: |
${{ env.WORKDIR }}/.mypy_cache
key: mypy-test-${{ runner.os }}-${{ runner.arch }}-py${{ matrix.python-version }}-${{ inputs.working-directory }}-${{ hashFiles(format('{0}/poetry.lock', env.WORKDIR)) }}
- name: Analysing the code with our lint
working-directory: ${{ inputs.working-directory }}
run: |
make lint_tests

@@ -42,7 +42,7 @@ jobs:
- name: Install dependencies
shell: bash
run: poetry install
run: poetry install --with test
- name: Install langchain editable
working-directory: ${{ inputs.working-directory }}

@@ -33,5 +33,4 @@ jobs:
./.github/workflows/_lint.yml
with:
working-directory: templates
langchain-location: ../libs/langchain
secrets: inherit

@@ -41,7 +41,7 @@ spell_fix:
# LINTING AND FORMATTING
######################
lint:
lint lint_package lint_tests:
poetry run ruff docs templates cookbook
poetry run ruff format docs templates cookbook --diff
poetry run ruff --select I docs templates cookbook

@@ -15,6 +15,9 @@ tests:
test_watch:
poetry run ptw --snapshot-update --now . -- -vv -x tests/unit_tests
extended_tests:
poetry run pytest --only-extended $(TEST_FILE)
######################
# LINTING AND FORMATTING
@@ -24,8 +27,10 @@ test_watch:
PYTHON_FILES=.
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/experimental --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langchain_core
lint_tests: PYTHON_FILES=tests
lint lint_diff:
lint lint_diff lint_package lint_tests:
./scripts/check_pydantic.sh .
./scripts/check_imports.sh
poetry run ruff .

@@ -105,7 +105,7 @@ class AgentFinish(Serializable):
def _convert_agent_action_to_messages(
agent_action: AgentAction
agent_action: AgentAction,
) -> Sequence[BaseMessage]:
"""Convert an agent action to a message.

@@ -6,11 +6,16 @@ __all__ = [
"Run",
"RunLog",
"RunLogPatch",
"LogStreamCallbackHandler",
]
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.evaluation import EvaluatorCallbackHandler
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.log_stream import RunLog, RunLogPatch
from langchain_core.tracers.log_stream import (
LogStreamCallbackHandler,
RunLog,
RunLogPatch,
)
from langchain_core.tracers.schemas import Run
from langchain_core.tracers.stdout import ConsoleCallbackHandler

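With this change, LogStreamCallbackHandler is re-exported from the package root alongside the other tracer symbols. A minimal sketch of what the widened surface allows (assuming langchain-core >= 0.0.11; the constructor arguments are believed to be all optional):

from langchain_core.tracers import LogStreamCallbackHandler

# No need to reach into langchain_core.tracers.log_stream directly anymore.
handler = LogStreamCallbackHandler()
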
@@ -2508,6 +2508,31 @@ files = [
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
test = ["argcomplete (>=3.0.3)", "mypy (>=1.6.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"]
[[package]]
name = "types-jinja2"
version = "2.11.9"
description = "Typing stubs for Jinja2"
optional = false
python-versions = "*"
files = [
{file = "types-Jinja2-2.11.9.tar.gz", hash = "sha256:dbdc74a40aba7aed520b7e4d89e8f0fe4286518494208b35123bcf084d4b8c81"},
{file = "types_Jinja2-2.11.9-py3-none-any.whl", hash = "sha256:60a1e21e8296979db32f9374d8a239af4cb541ff66447bb915d8ad398f9c63b2"},
]
[package.dependencies]
types-MarkupSafe = "*"
[[package]]
name = "types-markupsafe"
version = "1.1.10"
description = "Typing stubs for MarkupSafe"
optional = false
python-versions = "*"
files = [
{file = "types-MarkupSafe-1.1.10.tar.gz", hash = "sha256:85b3a872683d02aea3a5ac2a8ef590193c344092032f58457287fbf8e06711b1"},
{file = "types_MarkupSafe-1.1.10-py3-none-any.whl", hash = "sha256:ca2bee0f4faafc45250602567ef38d533e877d2ddca13003b319c551ff5b3cc5"},
]
[[package]]
name = "types-python-dateutil"
version = "2.8.19.14"
@@ -2703,7 +2728,10 @@ files = [
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
[extras]
extended-testing = ["jinja2"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "b08d47f726dd194af0f801d300402b174c8db96a4184cc1136cb8e5a0e287190"
content-hash = "64fa7ef31713835d12d5213f04b52adf7423299d023f9558b8b4e65ce1e5262f"

@@ -14,19 +14,37 @@ pydantic = ">=1,<3"
langsmith = "~0.0.63"
tenacity = "^8.1.0"
jsonpatch = "^1.33"
anyio = ">=3,<5"
PyYAML = ">=5.3"
requests = "^2"
packaging = "^23.2"
jinja2 = {version = "^3", optional = true}
[tool.poetry.group.lint]
optional = true
[tool.poetry.group.lint.dependencies]
ruff = "^0.1.5"
[tool.poetry.group.typing]
optional = true
[tool.poetry.group.typing.dependencies]
mypy = "^0.991"
types-pyyaml = "^6.0.12.2"
types-requests = "^2.28.11.5"
types-jinja2 = "^2.11.9"
[tool.poetry.group.dev]
optional = true
[tool.poetry.group.dev.dependencies]
jupyter = "^1.0.0"
setuptools = "^67.6.1"
[tool.poetry.group.test]
optional = true
[tool.poetry.group.test.dependencies]
# The only dependencies that should be added are
# dependencies used for running tests (e.g., pytest, freezegun, response).
@@ -43,6 +61,9 @@ pytest-asyncio = "^0.21.1"
optional = true
dependencies = {}
[tool.poetry.extras]
extended_testing = ["jinja2"]
[tool.ruff]
select = [
"E", # pycodestyle

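The new extended_testing extra gates jinja2, which core only needs when rendering jinja2-format prompt templates. A hedged sketch of the code path the extra enables, assuming PromptTemplate is exported from langchain_core.prompts and mirroring the constructor arguments used elsewhere in this diff:

from langchain_core.prompts import PromptTemplate

# Raises ImportError at render time unless jinja2 is installed, e.g. via
# `pip install "langchain-core[extended_testing]"`.
prompt = PromptTemplate(
    input_variables=["name"],
    template="Hello {{ name }}!",
    template_format="jinja2",
)
print(prompt.format(name="world"))  # -> Hello world!
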
@@ -0,0 +1,7 @@
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass

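The placeholder gives pytest something to collect when CI only wants to prove that integration tests import cleanly. A hedged sketch of that kind of invocation (the real entry point is a make target not shown here, and the `compile` marker is assumed to be registered in pyproject.toml):

import pytest

# Collect the integration suite but run only tests marked `compile`,
# which import the modules under test without touching live services.
pytest.main(["tests/integration_tests", "-m", "compile"])
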
@@ -0,0 +1,87 @@
"""Configuration for unit tests."""
from importlib import util
from typing import Dict, Sequence
import pytest
from pytest import Config, Function, Parser
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: Dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if only_extended and only_core:
raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test.")
)

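For reference, a sketch of how a test opts into this machinery, matching the marker syntax documented in the docstring above (the `--only-extended` flag consumed here is what the Makefile's `extended_tests` target passes):

import pytest

@pytest.mark.requires("jinja2")
def test_something_with_jinja2() -> None:
    # Skipped automatically when jinja2 is absent; fails loudly under
    # --only-extended if the package is missing.
    import jinja2

    assert jinja2.Template("{{ x }}").render(x=1) == "1"
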
@@ -233,7 +233,7 @@ def test_partial() -> None:
@pytest.mark.requires("jinja2")
def test_prompt_jinja2_functionality(
example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]]
example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]],
) -> None:
prefix = "Starting with {{ foo }}"
suffix = "Ending with {{ bar }}"
@@ -256,7 +256,7 @@ def test_prompt_jinja2_functionality(
@pytest.mark.requires("jinja2")
def test_prompt_jinja2_missing_input_variables(
example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]]
example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]],
) -> None:
"""Test error is raised when input variables are not provided."""
prefix = "Starting with {{ foo }}"
@@ -303,7 +303,7 @@ def test_prompt_jinja2_missing_input_variables(
@pytest.mark.requires("jinja2")
def test_prompt_jinja2_extra_input_variables(
example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]]
example_jinja2_prompt: Tuple[PromptTemplate, List[Dict[str, str]]],
) -> None:
"""Test error is raised when there are too many input variables."""
prefix = "Starting with {{ foo }}"

@@ -8,6 +8,7 @@ EXPECTED_ALL = [
"Run",
"RunLog",
"RunLogPatch",
"LogStreamCallbackHandler",
]

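The assertion that consumes this list sits outside the hunk; it presumably compares the package's `__all__` against EXPECTED_ALL, along these lines (a sketch, not the verbatim test body):

from langchain_core.tracers import __all__

def test_all_imports() -> None:
    assert set(__all__) == set(EXPECTED_ALL)
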
@@ -30,8 +30,10 @@ integration_tests:
PYTHON_FILES=.
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/experimental --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langchain_experimental
lint_tests: PYTHON_FILES=tests
lint lint_diff:
lint lint_diff lint_package lint_tests:
poetry run ruff .
poetry run ruff format $(PYTHON_FILES) --diff
poetry run ruff --select I $(PYTHON_FILES)

@@ -7,30 +7,32 @@ from langchain.schema import BaseOutputParser
from langchain_experimental.tot.thought import ThoughtValidity
COT_PROMPT = PromptTemplate(
template_format="jinja2",
input_variables=["problem_description", "thoughts"],
template=dedent(
"""
You are an intelligent agent that is generating one thought at a time in
a tree of thoughts setting.
PROBLEM
{{problem_description}}
{% if thoughts %}
THOUGHTS
{% for thought in thoughts %}
{{ thought }}
{% endfor %}
{% endif %}
Let's think step by step.
"""
).strip(),
)
def get_cot_prompt() -> PromptTemplate:
return PromptTemplate(
template_format="jinja2",
input_variables=["problem_description", "thoughts"],
template=dedent(
"""
You are an intelligent agent that is generating one thought at a time in
a tree of thoughts setting.
PROBLEM
{{problem_description}}
{% if thoughts %}
THOUGHTS
{% for thought in thoughts %}
{{ thought }}
{% endfor %}
{% endif %}
Let's think step by step.
"""
).strip(),
)
class JSONListOutputParser(BaseOutputParser):
@@ -50,45 +52,46 @@ class JSONListOutputParser(BaseOutputParser):
return []
PROPOSE_PROMPT = PromptTemplate(
template_format="jinja2",
input_variables=["problem_description", "thoughts", "n"],
output_parser=JSONListOutputParser(),
template=dedent(
"""
You are an intelligent agent that is generating thoughts in a tree of
thoughts setting.
The output should be a markdown code snippet formatted as a JSON list of
strings, including the leading and trailing "```json" and "```":
```json
[
"<thought-1>",
"<thought-2>",
"<thought-3>"
]
```
PROBLEM
{{ problem_description }}
{% if thoughts %}
VALID THOUGHTS
{% for thought in thoughts %}
{{ thought }}
{% endfor %}
Possible next {{ n }} valid thoughts based on the last valid thought:
{% else %}
Possible next {{ n }} valid thoughts based on the PROBLEM:
{%- endif -%}
"""
).strip(),
)
def get_propose_prompt() -> PromptTemplate:
return PromptTemplate(
template_format="jinja2",
input_variables=["problem_description", "thoughts", "n"],
output_parser=JSONListOutputParser(),
template=dedent(
"""
You are an intelligent agent that is generating thoughts in a tree of
thoughts setting.
The output should be a markdown code snippet formatted as a JSON list of
strings, including the leading and trailing "```json" and "```":
```json
[
"<thought-1>",
"<thought-2>",
"<thought-3>"
]
```
PROBLEM
{{ problem_description }}
{% if thoughts %}
VALID THOUGHTS
{% for thought in thoughts %}
{{ thought }}
{% endfor %}
Possible next {{ n }} valid thoughts based on the last valid thought:
{% else %}
Possible next {{ n }} valid thoughts based on the PROBLEM:
{%- endif -%}
"""
).strip(),
)
class CheckerOutputParser(BaseOutputParser):

@@ -13,7 +13,7 @@ from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain_experimental.pydantic_v1 import Field
from langchain_experimental.tot.prompts import COT_PROMPT, PROPOSE_PROMPT
from langchain_experimental.tot.prompts import get_cot_prompt, get_propose_prompt
class BaseThoughtGenerationStrategy(LLMChain):
@@ -46,7 +46,7 @@ class SampleCoTStrategy(BaseThoughtGenerationStrategy):
lead to diversity, which helps to avoid repetition.
"""
prompt: BasePromptTemplate = COT_PROMPT
prompt: BasePromptTemplate = Field(default_factory=get_cot_prompt)
def next_thought(
self,
@@ -69,7 +69,7 @@ class ProposePromptStrategy(BaseThoughtGenerationStrategy):
in the same prompt completion helps to avoid duplication.
"""
prompt: BasePromptTemplate = PROPOSE_PROMPT
prompt: BasePromptTemplate = Field(default_factory=get_propose_prompt)
tot_memory: Dict[Tuple[str, ...], List[str]] = Field(default_factory=dict)
def next_thought(

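Two things motivate swapping the module-level COT_PROMPT/PROPOSE_PROMPT constants for Field(default_factory=...): the prompt is no longer built at import time (so the now-optional jinja2 dependency is only touched when a strategy is instantiated), and each model instance gets its own prompt object. A minimal sketch of the pattern, assuming a pydantic v1-compatible Field:

from pydantic import BaseModel, Field

def make_default() -> dict:
    # Built lazily, once per instance, rather than once at module import.
    return {"template": "Let's think step by step."}

class Strategy(BaseModel):
    prompt: dict = Field(default_factory=make_default)

a, b = Strategy(), Strategy()
assert a.prompt is not b.prompt  # no shared mutable default
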
@@ -1642,7 +1642,7 @@ files = [
[[package]]
name = "langchain"
version = "0.0.346"
version = "0.0.347"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -1651,11 +1651,10 @@ develop = true
[package.dependencies]
aiohttp = "^3.8.3"
anyio = "<4.0"
async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""}
dataclasses-json = ">= 0.5.7, < 0.7"
jsonpatch = "^1.33"
langchain-core = ">=0.0.10,<0.1"
langchain-core = ">=0.0.11,<0.1"
langsmith = "~0.0.63"
numpy = "^1"
pydantic = ">=1,<3"
@@ -1685,7 +1684,7 @@ url = "../langchain"
[[package]]
name = "langchain-core"
version = "0.0.10"
version = "0.0.11"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -1693,11 +1692,18 @@ files = []
develop = true
[package.dependencies]
anyio = ">=3,<5"
jsonpatch = "^1.33"
langsmith = "~0.0.63"
packaging = "^23.2"
pydantic = ">=1,<3"
PyYAML = ">=5.3"
requests = "^2"
tenacity = "^8.1.0"
[package.extras]
extended-testing = ["jinja2 (>=3,<4)"]
[package.source]
type = "directory"
url = "../core"
@@ -4926,9 +4932,9 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
[extras]
extended-testing = ["faker", "presidio-analyzer", "presidio-anonymizer", "sentence-transformers", "vowpal-wabbit-next"]
extended-testing = ["faker", "jinja2", "presidio-analyzer", "presidio-anonymizer", "sentence-transformers", "vowpal-wabbit-next"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "82bebfc5475be48f180bcb5013850eb88f451ffdc1f126a12112e10ed56f6529"
content-hash = "4bb3c4e436aba1fdab5159d3a3919173b7ae63f5816dc2e12accd2d76f58c29c"

@@ -17,19 +17,32 @@ presidio-analyzer = {version = "^2.2.33", optional = true}
faker = {version = "^19.3.1", optional = true}
vowpal-wabbit-next = {version = "0.6.0", optional = true}
sentence-transformers = {version = "^2", optional = true}
jinja2 = {version = "^3", optional = true}
[tool.poetry.group.lint]
optional = true
[tool.poetry.group.lint.dependencies]
ruff = "^0.1.5"
[tool.poetry.group.typing]
optional = true
[tool.poetry.group.typing.dependencies]
mypy = "^0.991"
types-pyyaml = "^6.0.12.2"
types-requests = "^2.28.11.5"
[tool.poetry.group.dev]
optional = true
[tool.poetry.group.dev.dependencies]
jupyter = "^1.0.0"
setuptools = "^67.6.1"
[tool.poetry.group.test]
optional = true
[tool.poetry.group.test.dependencies]
# The only dependencies that should be added are
# dependencies used for running tests (e.g., pytest, freezegun, response).
@@ -54,6 +67,7 @@ extended_testing = [
"faker",
"vowpal-wabbit-next",
"sentence-transformers",
"jinja2",
]
[tool.ruff]

@@ -51,6 +51,7 @@ class SudokuChecker(ToTChecker):
return ThoughtValidity.INVALID
@pytest.mark.requires("jinja2")
def test_solve_sudoku(fake_llm_sudoku: FakeLLM) -> None:
"""Test simple question that should not need python."""
tot_chain = ToTChain(
@@ -64,6 +65,7 @@ def test_solve_sudoku(fake_llm_sudoku: FakeLLM) -> None:
assert output == sudoku_solution
@pytest.mark.requires("jinja2")
def test_solve_sudoku_k_too_small(fake_llm_sudoku: FakeLLM) -> None:
"""Test simple question that should not need python."""
tot_chain = ToTChain(

@@ -48,8 +48,10 @@ docker_tests:
PYTHON_FILES=.
lint format: PYTHON_FILES=.
lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/langchain --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$')
lint_package: PYTHON_FILES=langchain
lint_tests: PYTHON_FILES=tests
lint lint_diff:
lint lint_diff lint_package lint_tests:
./scripts/check_pydantic.sh .
./scripts/check_imports.sh
poetry run ruff .

@@ -8,7 +8,6 @@ from concurrent.futures import ThreadPoolExecutor
from typing import Any, Iterable, List, Optional, Tuple
import requests
from google.protobuf.struct_pb2 import Struct
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
@@ -87,6 +86,7 @@ class Clarifai(VectorStore):
"""
try:
from clarifai.client.input import Inputs
from google.protobuf.struct_pb2 import Struct
except ImportError as e:
raise ImportError(
"Could not import clarifai python package. "

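Moving the protobuf import inside the try block keeps google.protobuf effectively optional: the module imports cleanly without it, and the error only surfaces when the vector store is actually used. The same pattern as a standalone sketch (the error text mirrors the handler above; `_get_struct_class` is a hypothetical name):

def _get_struct_class():
    try:
        # Deferred import: evaluated only when a caller actually needs it.
        from google.protobuf.struct_pb2 import Struct
    except ImportError as e:
        raise ImportError(
            "Could not import clarifai python package. "
            "Please install it with `pip install clarifai`."
        ) from e
    return Struct
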
@@ -4294,9 +4294,13 @@ files = []
develop = true
[package.dependencies]
anyio = ">=3,<5"
jsonpatch = "^1.33"
langsmith = "~0.0.63"
packaging = "^23.2"
pydantic = ">=1,<3"
PyYAML = ">=5.3"
requests = "^2"
tenacity = "^8.1.0"
[package.source]
@@ -11519,4 +11523,4 @@ text-helpers = ["chardet"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "e1bf5b9a8cdfa1f5f57401a618fdadd806617a2a3c40c38d3ff377bde0774d83"
content-hash = "83aadfddcd7f6ee30b3fcaf0c8b661ba9c096c5f7acd589cf9bec17fb791d358"

@@ -20,7 +20,6 @@ PyYAML = ">=5.3"
numpy = "^1"
aiohttp = "^3.8.3"
tenacity = "^8.1.0"
anyio = "<4.0"
jsonpatch = "^1.33"
azure-core = {version = "^1.26.4", optional=true}
tqdm = {version = ">=4.48.0", optional = true}
@@ -151,6 +150,9 @@ couchbase = {version = "^4.1.9", optional = true}
dgml-utils = {version = "^0.3.0", optional = true}
datasets = {version = "^2.15.0", optional = true}
[tool.poetry.group.test]
optional = true
[tool.poetry.group.test.dependencies]
# The only dependencies that should be added are
# dependencies used for running tests (e.g., pytest, freezegun, response).
@@ -171,6 +173,9 @@ syrupy = "^4.0.2"
requests-mock = "^1.11.0"
langchain-core = {path = "../core", develop = true}
[tool.poetry.group.codespell]
optional = true
[tool.poetry.group.codespell.dependencies]
codespell = "^2.2.0"
@@ -201,18 +206,24 @@ cassio = "^0.1.0"
tiktoken = "^0.3.2"
anthropic = "^0.3.11"
[tool.poetry.group.lint]
optional = true
[tool.poetry.group.lint.dependencies]
ruff = "^0.1.5"
types-toml = "^0.10.8.1"
types-redis = "^4.3.21.6"
types-pytz = "^2023.3.0.0"
types-chardet = "^5.0.4.6"
mypy-protobuf = "^3.0.0"
[tool.poetry.group.typing]
optional = true
[tool.poetry.group.typing.dependencies]
mypy = "^0.991"
types-pyyaml = "^6.0.12.2"
types-requests = "^2.28.11.5"
types-toml = "^0.10.8.1"
types-redis = "^4.3.21.6"
types-pytz = "^2023.3.0.0"
types-chardet = "^5.0.4.6"
mypy-protobuf = "^3.0.0"
[tool.poetry.group.dev]
optional = true

@@ -12,7 +12,7 @@ import pytest
from langchain.document_loaders.google_speech_to_text import GoogleSpeechToTextLoader
@pytest.mark.requires("google_api_core")
@pytest.mark.requires("google.api_core")
def test_initialization() -> None:
loader = GoogleSpeechToTextLoader(
project_id="test_project_id", file_path="./testfile.mp3"

@@ -23,7 +23,7 @@ from langchain.retrievers.google_vertex_ai_search import (
)
@pytest.mark.requires("google_api_core")
@pytest.mark.requires("google.api_core")
def test_google_vertex_ai_search_get_relevant_documents() -> None:
"""Test the get_relevant_documents() method."""
retriever = GoogleVertexAISearchRetriever()
@@ -36,7 +36,7 @@ def test_google_vertex_ai_search_get_relevant_documents() -> None:
assert doc.metadata["source"]
@pytest.mark.requires("google_api_core")
@pytest.mark.requires("google.api_core")
def test_google_vertex_ai_multiturnsearch_get_relevant_documents() -> None:
"""Test the get_relevant_documents() method."""
retriever = GoogleVertexAIMultiTurnSearchRetriever()
@@ -49,7 +49,7 @@ def test_google_vertex_ai_multiturnsearch_get_relevant_documents() -> None:
assert doc.metadata["source"]
@pytest.mark.requires("google_api_core")
@pytest.mark.requires("google.api_core")
def test_google_vertex_ai_search_enterprise_search_deprecation() -> None:
"""Test the deprecation of GoogleCloudEnterpriseSearchRetriever."""
with pytest.warns(

@@ -59,7 +59,11 @@ def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) ->
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
required_pkgs_info[pkg] = util.find_spec(pkg) is not None
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:

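The guard matters because util.find_spec on a dotted name imports the parent package first and raises when it is missing, which is exactly what the corrected `google.api_core` markers above can trigger. A small sketch of both behaviors (assuming neither package is installed):

from importlib import util

# A name that is not a real top-level module simply yields None ...
assert util.find_spec("google_api_core") is None

# ... but a dotted name raises ModuleNotFoundError when the parent
# package ("google") is absent, hence the try/except above.
try:
    util.find_spec("google.api_core")
except ModuleNotFoundError:
    pass
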
@@ -39,7 +39,6 @@ def test_required_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"PyYAML",
"SQLAlchemy",
"aiohttp",
"anyio",
"async-timeout",
"dataclasses-json",
"jsonpatch",

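With anyio dropped from langchain's required dependencies (see the pyproject hunk above), it is removed from the expected list here. A hedged sketch of how such a list can be derived, assuming poetry_conf is the parsed [tool.poetry] table from pyproject.toml:

from typing import Any, List, Mapping

def required_dependencies(poetry_conf: Mapping[str, Any]) -> List[str]:
    deps = poetry_conf["dependencies"]
    return sorted(
        name
        for name, spec in deps.items()
        if name != "python"
        and not (isinstance(spec, dict) and spec.get("optional", False))
    )
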
@@ -1,4 +1,4 @@
lint lint_diff:
lint lint_diff lint_package lint_tests:
poetry run poe lint
test:

templates/poetry.lock (generated, 1186 lines changed): file diff suppressed because it is too large.

@@ -11,15 +15,30 @@ python = "^3.10"
# dev, test, lint, typing
[tool.poetry.group.dev]
optional = true
[tool.poetry.group.dev.dependencies]
poethepoet = "^0.24.1"
pytest-watch = "^4.2.0"
[tool.poetry.group.test]
optional = true
[tool.poetry.group.test.dependencies]
pytest = "^7.4.3"
[tool.poetry.group.lint]
optional = true
[tool.poetry.group.lint.dependencies]
poethepoet = "^0.24.1"
ruff = "^0.1.3"
langchain = { path = "../libs/langchain", develop = true }
langchain-core = { path = "../libs/core", develop = true }
[tool.poetry.group.typing]
optional = true
[tool.poetry.group.typing.dependencies]
