# Package metadata for the langchain-experimental distribution.
[tool.poetry]
name = "langchain-experimental"
version = "0.0.57"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
readme = "README.md"
repository = "https://github.com/langchain-ai/langchain"
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain-core = "^0.1.41"
langchain = "^0.1.15"
# The optional dependencies below are installed only via the
# `extended_testing` extra declared in [tool.poetry.extras].
presidio-anonymizer = {version = "^2.2.352", optional = true}
presidio-analyzer = {version = "^2.2.352", optional = true}
faker = {version = "^19.3.1", optional = true}
vowpal-wabbit-next = {version = "0.6.0", optional = true}
sentence-transformers = {version = "^2", optional = true}
jinja2 = {version = "^3", optional = true}
pandas = {version = "^2.0.1", optional = true}
tabulate = {version = "^0.9.0", optional = true}
# Linting tools; opt-in group (`poetry install --with lint`).
[tool.poetry.group.lint]
optional = true

[tool.poetry.group.lint.dependencies]
ruff = "^0.1.5"
# Static-typing tools; opt-in group (`poetry install --with typing`).
[tool.poetry.group.typing]
optional = true

[tool.poetry.group.typing.dependencies]
mypy = "^0.991"
types-pyyaml = "^6.0.12.2"
types-requests = "^2.28.11.5"
# Sibling monorepo packages in editable (develop) mode so type checks
# run against the local sources rather than published releases.
langchain = {path = "../langchain", develop = true}
langchain-core = {path = "../core", develop = true}
langchain-community = {path = "../community", develop = true}
# Developer conveniences; opt-in group (`poetry install --with dev`).
[tool.poetry.group.dev]
optional = true

[tool.poetry.group.dev.dependencies]
jupyter = "^1.0.0"
setuptools = "^67.6.1"
# Sibling monorepo packages in editable (develop) mode.
langchain = {path = "../langchain", develop = true}
langchain-core = {path = "../core", develop = true}
langchain-community = {path = "../community", develop = true}
# Unit-test tooling; opt-in group (`poetry install --with test`).
[tool.poetry.group.test]
optional = true

[tool.poetry.group.test.dependencies]
# The only dependencies that should be added are
# dependencies used for running tests (e.g., pytest, freezegun, response).
# Any dependencies that do not meet that criteria will be removed.
pytest = "^7.3.0"
pytest-asyncio = "^0.20.3"
# Sibling monorepo packages in editable (develop) mode so tests run
# against the local sources rather than published releases.
langchain = {path = "../langchain", develop = true}
langchain-core = {path = "../core", develop = true}
langchain-community = {path = "../community", develop = true}
# Integration-test tooling; opt-in group (`poetry install --with test_integration`).
[tool.poetry.group.test_integration]
optional = true

[tool.poetry.group.test_integration.dependencies]
# Sibling monorepo packages in editable (develop) mode.
langchain = {path = "../langchain", develop = true}
langchain-core = {path = "../core", develop = true}
langchain-community = {path = "../community", develop = true}
langchain-openai = {path = "../partners/openai", develop = true}
# An extra used to be able to add extended testing.
# Please use new-line on formatting to make it easier to add new packages without
# merge-conflicts
[tool.poetry.extras]
extended_testing = [
    "presidio-anonymizer",
    "presidio-analyzer",
    "faker",
    "vowpal-wabbit-next",
    "sentence-transformers",
    "jinja2",
    "pandas",
    "tabulate",
]
[tool.ruff.lint]
select = [
    "E", # pycodestyle
    "F", # pyflakes
    "I", # isort
    "T201", # print
]
[tool.mypy]
# NOTE(review): these are strings rather than native TOML booleans
# (`true`); presumably mypy coerces them — confirm before normalizing.
ignore_missing_imports = "True"
disallow_untyped_defs = "True"
# Skip directories containing notebooks/sample data, not library code.
exclude = ["notebooks", "examples", "example_data"]
[tool.coverage.run]
# Exclude the test suite itself from coverage measurement.
omit = [
    "tests/*",
]
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.pytest.ini_options]
# --strict-markers will raise errors on unknown marks.
# https://docs.pytest.org/en/7.1.x/how-to/mark.html#raising-errors-on-unknown-marks
#
# https://docs.pytest.org/en/7.1.x/reference/reference.html
# --strict-config any warnings encountered while parsing the `pytest`
# section of the configuration file raise errors.
#
# https://github.com/tophat/syrupy
# --snapshot-warn-unused Prints a warning on unused snapshots rather than fail the test suite.
addopts = "--strict-markers --strict-config --durations=5"
# Registering custom markers.
# https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers
markers = [
    "requires: mark tests as requiring a specific library",
    "asyncio: mark tests as requiring asyncio",
    "compile: mark placeholder test used to compile integration tests without running them",
]
asyncio_mode = "auto"