mirror of
https://github.com/hwchase17/langchain
synced 2024-10-31 15:20:26 +00:00
cd4c54282a
Refactors the docs build in order to: - run the same `make build` command in both vercel and local build - incrementally build artifacts in 2 distinct steps, instead of building all docs in-place (in vercel) or in a _dist dir (locally) Highlights: - introduces `make build` in order to build the docs - collects and generates all files for the build in `docs/build/intermediate` - renders those jupyter notebook + markdown files into `docs/build/outputs` And now the outputs to host are in `docs/build/outputs`, which will need a vercel settings change. Todo: - [ ] figure out how to point the right directory (right now deleting and moving docs dir in vercel_build.sh isn't great)
235 lines
9.1 KiB
Python
235 lines
9.1 KiB
Python
import os
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
from langchain_community import chat_models, llms
|
|
from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
|
|
from langchain_core.language_models.llms import LLM, BaseLLM
|
|
|
|
# LLM classes excluded from the generated table (test fakes / superseded entries).
LLM_IGNORE = ("FakeListLLM", "OpenAIChat", "PromptLayerOpenAIChat")
# Manual overrides for LLM feature detection where introspection gets it wrong.
# NOTE: the merge in get_llm_table() replaces a model's ENTIRE feature dict
# with the entry below, not individual keys.
LLM_FEAT_TABLE_CORRECTION = {
    "TextGen": {"_astream": False, "_agenerate": False},
    "Ollama": {
        "_stream": False,
    },
    "PromptLayerOpenAI": {"batch_generate": False, "batch_agenerate": False},
}
# Chat-model classes excluded from the generated table (test fakes / interactive stubs).
CHAT_MODEL_IGNORE = ("FakeListChatModel", "HumanInputChatModel")

# Manual overrides/additions for chat-model features. Entries here also carry
# columns that cannot be introspected from langchain_community classes at all:
# "tool_calling", "structured_output", and the distribution "package" name.
# NOTE: as above, the merge replaces a model's entire feature dict.
CHAT_MODEL_FEAT_TABLE_CORRECTION = {
    "ChatMLflowAIGateway": {"_agenerate": False},
    "PromptLayerChatOpenAI": {"_stream": False, "_astream": False},
    "ChatKonko": {"_astream": False, "_agenerate": False},
    "ChatAnthropic": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-anthropic",
    },
    "ChatMistralAI": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-mistralai",
    },
    "ChatFireworks": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-fireworks",
    },
    "AzureChatOpenAI": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-openai",
    },
    "ChatOpenAI": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-openai",
    },
    "ChatVertexAI": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-google-vertexai",
    },
    "ChatGroq": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-groq",
    },
    "ChatCohere": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-cohere",
    },
}
|
|
|
|
|
|
LLM_TEMPLATE = """\
|
|
---
|
|
sidebar_position: 1
|
|
sidebar_class_name: hidden
|
|
---
|
|
|
|
# LLMs
|
|
|
|
## Features (natively supported)
|
|
All LLMs implement the Runnable interface, which comes with default implementations of all methods, ie. `ainvoke`, `batch`, `abatch`, `stream`, `astream`. This gives all LLMs basic support for async, streaming and batch, which by default is implemented as below:
|
|
- *Async* support defaults to calling the respective sync method in asyncio's default thread pool executor. This lets other async functions in your application make progress while the LLM is being executed, by moving this call to a background thread.
|
|
- *Streaming* support defaults to returning an `Iterator` (or `AsyncIterator` in the case of async streaming) of a single value, the final result returned by the underlying LLM provider. This obviously doesn't give you token-by-token streaming, which requires native support from the LLM provider, but ensures your code that expects an iterator of tokens can work for any of our LLM integrations.
|
|
- *Batch* support defaults to calling the underlying LLM in parallel for each input by making use of a thread pool executor (in the sync batch case) or `asyncio.gather` (in the async batch case). The concurrency can be controlled with the `max_concurrency` key in `RunnableConfig`.
|
|
|
|
Each LLM integration can optionally provide native implementations for async, streaming or batch, which, for providers that support it, can be more efficient. The table shows, for each integration, which features have been implemented with native support.
|
|
|
|
{table}
|
|
|
|
""" # noqa: E501
|
|
|
|
CHAT_MODEL_TEMPLATE = """\
|
|
---
|
|
sidebar_position: 0
|
|
sidebar_class_name: hidden
|
|
---
|
|
|
|
# Chat models
|
|
|
|
## Features (natively supported)
|
|
All ChatModels implement the Runnable interface, which comes with default implementations of all methods, ie. `ainvoke`, `batch`, `abatch`, `stream`, `astream`. This gives all ChatModels basic support for async, streaming and batch, which by default is implemented as below:
|
|
- *Async* support defaults to calling the respective sync method in asyncio's default thread pool executor. This lets other async functions in your application make progress while the ChatModel is being executed, by moving this call to a background thread.
|
|
- *Streaming* support defaults to returning an `Iterator` (or `AsyncIterator` in the case of async streaming) of a single value, the final result returned by the underlying ChatModel provider. This obviously doesn't give you token-by-token streaming, which requires native support from the ChatModel provider, but ensures your code that expects an iterator of tokens can work for any of our ChatModel integrations.
|
|
- *Batch* support defaults to calling the underlying ChatModel in parallel for each input by making use of a thread pool executor (in the sync batch case) or `asyncio.gather` (in the async batch case). The concurrency can be controlled with the `max_concurrency` key in `RunnableConfig`.
|
|
|
|
Each ChatModel integration can optionally provide native implementations to truly enable async or streaming.
|
|
The table shows, for each integration, which features have been implemented with native support.
|
|
|
|
{table}
|
|
|
|
""" # noqa: E501
|
|
|
|
|
|
def get_llm_table():
    """Render the LLM feature-support table as a markdown string.

    A feature counts as natively supported when the class overrides the
    corresponding method of its base class (LLM or BaseLLM); manual
    corrections and the ignore list are then applied.
    """
    feature_map = {}
    for model_name in llms.__all__:
        cls = getattr(llms, model_name)
        # Pick the comparison base and the (attribute, column-label) probes.
        if issubclass(cls, LLM):
            base = LLM
            probes = (
                ("_stream", "_stream"),
                ("_astream", "_astream"),
                ("_acall", "_agenerate"),
            )
        else:
            base = BaseLLM
            probes = (
                ("_stream", "_stream"),
                ("_astream", "_astream"),
                ("_generate", "batch_generate"),
                ("_agenerate", "_agenerate"),
                ("_agenerate", "batch_agenerate"),
            )
        # An overridden method differs from the base-class attribute.
        feature_map[model_name] = {
            label: getattr(cls, attr) != getattr(base, attr)
            for attr, label in probes
        }

    # Corrections replace a model's whole feature dict; then drop ignored models.
    corrected = {**feature_map, **LLM_FEAT_TABLE_CORRECTION}
    final_feats = {
        name: feats for name, feats in corrected.items() if name not in LLM_IGNORE
    }

    header = [
        "model",
        "_agenerate",
        "_stream",
        "_astream",
        "batch_generate",
        "batch_agenerate",
        "tool_calling",
    ]
    title = [
        "Model",
        "Invoke",
        "Async invoke",
        "Stream",
        "Async stream",
        "Batch",
        "Async batch",
        "Tool calling",
    ]
    rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
    for model_name, feats in sorted(final_feats.items()):
        # "Invoke" is always supported; the rest come from the feature flags.
        cells = ["✅" if feats.get(col) else "❌" for col in header[1:]]
        rows.append([model_name, "✅"] + cells)
    return "\n".join("|".join(row) for row in rows)
|
|
|
|
|
|
def get_chat_model_table() -> str:
    """Render the chat-model feature-support table as a markdown string.

    A feature counts as natively supported when the class overrides the
    corresponding method of its closest base class (SimpleChatModel or
    BaseChatModel); manual corrections and the ignore list are then applied.
    """
    feat_table = {}
    for cm in chat_models.__all__:
        feat_table[cm] = {}
        cls = getattr(chat_models, cm)
        # Compare against the closest base so only genuine overrides count.
        if issubclass(cls, SimpleChatModel):
            comparison_cls = SimpleChatModel
        else:
            comparison_cls = BaseChatModel
        for feat in ("_stream", "_astream", "_agenerate"):
            feat_table[cm][feat] = getattr(cls, feat) != getattr(comparison_cls, feat)
    # Corrections replace a model's whole feature dict; then drop ignored models.
    final_feats = {
        k: v
        for k, v in {**feat_table, **CHAT_MODEL_FEAT_TABLE_CORRECTION}.items()
        if k not in CHAT_MODEL_IGNORE
    }
    header = [
        "model",
        "_agenerate",
        "_stream",
        "_astream",
        "tool_calling",
        "structured_output",
        "package",
    ]
    title = [
        "Model",
        "Invoke",
        "Async invoke",
        "Stream",
        "Async stream",
        "[Tool calling](/docs/modules/model_io/chat/function_calling/)",
        "[Structured output](/docs/modules/model_io/chat/structured_output/)",
        "Python Package",
    ]
    rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
    for llm, feats in sorted(final_feats.items()):
        # Fields are in the order of the header; "Invoke" is always supported.
        row = [llm, "✅"]
        for h in header[1:]:
            value = feats.get(h)
            # (removed dead `index = header.index(h)` — unused and O(n) per cell)
            if h == "package":
                # Models without an explicit package live in langchain-community.
                row.append(value or "langchain-community")
            elif value == "partial":
                row.append("🟡")
            elif value is True:
                row.append("✅")
            else:
                row.append("❌")
        rows.append(row)
    return "\n".join(["|".join(row) for row in rows])
|
|
|
|
|
|
if __name__ == "__main__":
    # Usage: python <script> <output_dir>
    # Writes the rendered feature tables to
    # <output_dir>/integrations/llms/index.mdx and .../chat/index.mdx.
    output_dir = Path(sys.argv[1])
    output_integrations_dir = output_dir / "integrations"
    output_integrations_dir_llms = output_integrations_dir / "llms"
    output_integrations_dir_chat = output_integrations_dir / "chat"
    output_integrations_dir_llms.mkdir(parents=True, exist_ok=True)
    output_integrations_dir_chat.mkdir(parents=True, exist_ok=True)

    # Reuse the precomputed directory paths (the original rebuilt
    # `output_integrations_dir / "llms"` / `/ "chat"` inconsistently) and
    # write via pathlib instead of manual open/close.
    llm_page = LLM_TEMPLATE.format(table=get_llm_table())
    (output_integrations_dir_llms / "index.mdx").write_text(llm_page)

    chat_model_page = CHAT_MODEL_TEMPLATE.format(table=get_chat_model_table())
    (output_integrations_dir_chat / "index.mdx").write_text(chat_model_page)