[tool.poetry]
name = "langchain-openai"
version = "0.1.8"
description = "An integration package connecting OpenAI and LangChain"
authors = []
readme = "README.md"
repository = "https://github.com/langchain-ai/langchain"
license = "MIT"

[tool.poetry.urls]
"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/openai"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = ">=0.2.2,<0.3"
# openai >= 1.26.0 is required for the chat completions `stream_options` parameter
# (https://platform.openai.com/docs/changelog/added-chat-completions-stream-usage).
# With `stream_options={"include_usage": True}`, which langchain-openai enables by default,
# the API appends one extra "empty" chunk after the chunk carrying
# `response_metadata={'finish_reason': 'stop'}`; its token counts are propagated to
# `AIMessage.usage_metadata`, e.g. `{'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}`.
# Pass `stream_options={"include_usage": False}` to (a)stream to restore the old behavior.
# This does not yet appear to be implemented in Azure, so it is enabled only for ChatOpenAI
# (#21548). A short usage sketch follows this dependency table.
openai = "^1.26.0"
tiktoken = ">=0.7,<1"
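# A minimal sketch (not code from this repository) of how the streamed usage metadata
# described above surfaces, assuming a valid OPENAI_API_KEY in the environment and an
# illustrative model name:
#
#     from langchain_openai import ChatOpenAI
#
#     llm = ChatOpenAI(model="gpt-3.5-turbo")  # model choice is illustrative
#     usage = None
#     for chunk in llm.stream("Say hello"):
#         # Most chunks carry content; the extra final chunk carries usage_metadata.
#         if chunk.usage_metadata is not None:
#             usage = chunk.usage_metadata
#     print(usage)  # e.g. {'input_tokens': 8, 'output_tokens': 9, 'total_tokens': 17}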
[tool.poetry.group.test]
optional = true

[tool.poetry.group.test.dependencies]
pytest = "^7.3.0"
freezegun = "^1.2.2"
pytest-mock = "^3.10.0"
syrupy = "^4.0.2"
pytest-watcher = "^0.3.4"
pytest-asyncio = "^0.21.1"
langchain-core = { path = "../../core", develop = true }
pytest-cov = "^4.1.0"
langchain-standard-tests = { path = "../../standard-tests", develop = true }
numpy = "^1.24"

[tool.poetry.group.codespell]
optional = true

[tool.poetry.group.codespell.dependencies]
codespell = "^2.2.0"

[tool.poetry.group.lint]
optional = true

[tool.poetry.group.lint.dependencies]
ruff = "^0.1.5"

[tool.poetry.group.typing.dependencies]
mypy = "^0.991"
langchain-core = { path = "../../core", develop = true }
types-tqdm = "^4.66.0.5"

[tool.poetry.group.dev]
optional = true

[tool.poetry.group.dev.dependencies]
langchain-core = { path = "../../core", develop = true }

[tool.poetry.group.test_integration]
optional = true

[tool.poetry.group.test_integration.dependencies]
numpy = "^1"

[tool.ruff.lint]
select = [
    "E",    # pycodestyle
    "F",    # pyflakes
    "I",    # isort
    "T201", # print
]

[tool.mypy]
disallow_untyped_defs = "True"

[[tool.mypy.overrides]]
module = "transformers"
ignore_missing_imports = true

[tool.coverage.run]
omit = ["tests/*"]

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

[tool.pytest.ini_options]
# --strict-markers will raise errors on unknown marks.
# https://docs.pytest.org/en/7.1.x/how-to/mark.html#raising-errors-on-unknown-marks
#
# https://docs.pytest.org/en/7.1.x/reference/reference.html
# --strict-config       any warnings encountered while parsing the `pytest`
#                       section of the configuration file raise errors.
#
# https://github.com/tophat/syrupy
# --snapshot-warn-unused    Prints a warning on unused snapshots rather than fail the test suite.
addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5 --cov=langchain_openai"
# Registering custom markers.
# https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers
markers = [
    "requires: mark tests as requiring a specific library",
    "asyncio: mark tests as requiring asyncio",
    "compile: mark placeholder test used to compile integration tests without running them",
    "scheduled: mark tests to run in scheduled testing",
]
asyncio_mode = "auto"
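# For reference, a test opts into one of the markers registered above like this
# (a minimal, hypothetical example rather than an actual test from this package):
#
#     import pytest
#
#     @pytest.mark.scheduled
#     def test_scheduled_smoke() -> None:
#         assert True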