# docs/scripts/model_feat_table.py
# Generates the "LLMs" and "Chat models" feature-table index pages for the docs site.

import sys
from pathlib import Path

from langchain_community import llms
from langchain_core.language_models.llms import LLM, BaseLLM
# LLM classes to exclude from the generated table entirely.
LLM_IGNORE = ("FakeListLLM", "OpenAIChat", "PromptLayerOpenAIChat")

# Manual overrides applied on top of the auto-detected feature flags, for
# integrations whose overridden methods don't reflect real native support.
LLM_FEAT_TABLE_CORRECTION = {
    "TextGen": {"_astream": False, "_agenerate": False},
    "Ollama": {
        "_stream": False,
    },
    "PromptLayerOpenAI": {"batch_generate": False, "batch_agenerate": False},
}

# Chat-model classes to exclude from the generated table entirely.
CHAT_MODEL_IGNORE = ("FakeListChatModel", "HumanInputChatModel")
# Hand-maintained feature matrix for chat models. Keys mirror the `header`
# list in `get_chat_model_table`: tool_calling, structured_output, json_mode,
# local, multimodal, plus the distribution `package` and the docs `link`.
# Absent feature keys render as unsupported.
CHAT_MODEL_FEAT_TABLE = {
    "ChatAnthropic": {
        "tool_calling": True,
        "structured_output": True,
        "multimodal": True,
        "package": "langchain-anthropic",
        "link": "/docs/integrations/chat/anthropic/",
    },
    "ChatMistralAI": {
        "tool_calling": True,
        "structured_output": True,
        # NOTE: key fixed from "json_model" — the table reads "json_mode".
        "json_mode": True,
        "package": "langchain-mistralai",
        "link": "/docs/integrations/chat/mistralai/",
    },
    "ChatFireworks": {
        "tool_calling": True,
        "structured_output": True,
        "json_mode": True,
        "package": "langchain-fireworks",
        "link": "/docs/integrations/chat/fireworks/",
    },
    "AzureChatOpenAI": {
        "tool_calling": True,
        "structured_output": True,
        "json_mode": True,
        "multimodal": True,
        "package": "langchain-openai",
        "link": "/docs/integrations/chat/azure_chat_openai/",
    },
    "ChatOpenAI": {
        "tool_calling": True,
        "structured_output": True,
        "json_mode": True,
        "multimodal": True,
        "package": "langchain-openai",
        "link": "/docs/integrations/chat/openai/",
    },
    "ChatTogether": {
        "tool_calling": True,
        "structured_output": True,
        "json_mode": True,
        "package": "langchain-together",
        "link": "/docs/integrations/chat/together/",
    },
    "ChatVertexAI": {
        "tool_calling": True,
        "structured_output": True,
        "multimodal": True,
        "package": "langchain-google-vertexai",
        "link": "/docs/integrations/chat/google_vertex_ai_palm/",
    },
    "ChatGoogleGenerativeAI": {
        "tool_calling": True,
        "multimodal": True,
        "package": "langchain-google-genai",
        "link": "/docs/integrations/chat/google_generative_ai/",
    },
    "ChatGroq": {
        "tool_calling": True,
        "structured_output": True,
        "json_mode": True,
        "package": "langchain-groq",
        "link": "/docs/integrations/chat/groq/",
    },
    "ChatCohere": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-cohere",
        "link": "/docs/integrations/chat/cohere/",
    },
    "ChatBedrock": {
        "tool_calling": True,
        "package": "langchain-aws",
        "link": "/docs/integrations/chat/bedrock/",
    },
    "ChatHuggingFace": {
        "tool_calling": True,
        "local": True,
        "package": "langchain-huggingface",
        "link": "/docs/integrations/chat/huggingface/",
    },
    "ChatOllama": {
        "local": True,
        "json_mode": True,
        "package": "langchain-community",
        "link": "/docs/integrations/chat/ollama/",
    },
    "vLLM Chat (via ChatOpenAI)": {
        "local": True,
        "package": "langchain-community",
        "link": "/docs/integrations/chat/vllm/",
    },
    "ChatEdenAI": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-community",
        "link": "/docs/integrations/chat/edenai/",
    },
    "ChatLlamaCpp": {
        "tool_calling": True,
        "structured_output": True,
        "local": True,
        "package": "langchain-community",
        # Trailing slash added for consistency with every other link.
        "link": "/docs/integrations/chat/llamacpp/",
    },
}
# Page template for the generated LLM feature table; `{table}` is filled by
# `get_llm_table()`. The leading `---` section is Docusaurus front matter.
LLM_TEMPLATE = """\
---
sidebar_position: 1
sidebar_class_name: hidden
keywords: [compatibility]
custom_edit_url:
---

# LLMs

## Features (natively supported)

All LLMs implement the Runnable interface, which comes with default implementations of all methods, ie. `ainvoke`, `batch`, `abatch`, `stream`, `astream`. This gives all LLMs basic support for async, streaming and batch, which by default is implemented as below:
- *Async* support defaults to calling the respective sync method in asyncio's default thread pool executor. This lets other async functions in your application make progress while the LLM is being executed, by moving this call to a background thread.
- *Streaming* support defaults to returning an `Iterator` (or `AsyncIterator` in the case of async streaming) of a single value, the final result returned by the underlying LLM provider. This obviously doesn't give you token-by-token streaming, which requires native support from the LLM provider, but ensures your code that expects an iterator of tokens can work for any of our LLM integrations.
- *Batch* support defaults to calling the underlying LLM in parallel for each input by making use of a thread pool executor (in the sync batch case) or `asyncio.gather` (in the async batch case). The concurrency can be controlled with the `max_concurrency` key in `RunnableConfig`.

Each LLM integration can optionally provide native implementations for async, streaming or batch, which, for providers that support it, can be more efficient. The table shows, for each integration, which features have been implemented with native support.

{table}
"""
# Page template for the generated chat-model feature table; `{table}` is
# filled by `get_chat_model_table()`.
CHAT_MODEL_TEMPLATE = """\
---
sidebar_position: 0
sidebar_class_name: hidden
keywords: [compatibility]
custom_edit_url:
hide_table_of_contents: true
---

# Chat models

## Advanced features

The following table shows all the chat models that support one or more advanced features.

{table}
"""
def get_llm_table():
    """Build the markdown feature table for LLM integrations.

    For each class exported from ``langchain_community.llms``, a feature
    counts as natively supported when the class overrides the corresponding
    hook of its base class (``LLM`` for simple integrations, ``BaseLLM``
    otherwise). Overrides from ``LLM_FEAT_TABLE_CORRECTION`` are applied on
    top, and names in ``LLM_IGNORE`` are dropped.

    Returns:
        The table as a markdown string (header, alignment row, one row per model).
    """
    llm_feat_table = {}
    for cm in llms.__all__:
        llm_feat_table[cm] = {}
        cls = getattr(llms, cm)
        if issubclass(cls, LLM):
            # Simple `LLM` subclasses: async invoke is provided via `_acall`.
            for feat in ("_stream", "_astream", ("_acall", "_agenerate")):
                if isinstance(feat, tuple):
                    feat, name = feat
                else:
                    feat, name = feat, feat
                # Overriding the base hook is taken as native support.
                llm_feat_table[cm][name] = getattr(cls, feat) != getattr(LLM, feat)
        else:
            # Full `BaseLLM` subclasses: `_generate`/`_agenerate` also imply
            # native batch support, hence the aliased (attr, column) pairs.
            for feat in [
                "_stream",
                "_astream",
                ("_generate", "batch_generate"),
                "_agenerate",
                ("_agenerate", "batch_agenerate"),
            ]:
                if isinstance(feat, tuple):
                    feat, name = feat
                else:
                    feat, name = feat, feat
                llm_feat_table[cm][name] = getattr(cls, feat) != getattr(BaseLLM, feat)
    final_feats = {
        k: v
        for k, v in {**llm_feat_table, **LLM_FEAT_TABLE_CORRECTION}.items()
        if k not in LLM_IGNORE
    }
    header = [
        "model",
        "_agenerate",
        "_stream",
        "_astream",
        "batch_generate",
        "batch_agenerate",
    ]
    title = [
        "Model",
        "Invoke",
        "Async invoke",
        "Stream",
        "Async stream",
        "Batch",
        "Async batch",
    ]
    rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
    for llm, feats in sorted(final_feats.items()):
        # Sync invoke is always natively supported, hence the unconditional ✅
        # in the first feature column.
        rows += [[llm, "✅"] + ["✅" if feats.get(h) else "❌" for h in header[1:]]]
    return "\n".join(["|".join(row) for row in rows])
def get_chat_model_table() -> str:
    """Build the markdown feature table for chat models.

    Rows come from the hand-maintained ``CHAT_MODEL_FEAT_TABLE``; missing
    feature keys render as unsupported (❌), ``"partial"`` as 🟡, and a
    missing ``package`` falls back to ``langchain-community``.

    Returns:
        The table as a markdown string (header, alignment row, one row per model).
    """
    header = [
        "model",
        "tool_calling",
        "structured_output",
        "json_mode",
        "local",
        "multimodal",
        "package",
    ]
    title = [
        "Model",
        "[Tool calling](/docs/how_to/tool_calling/)",
        "[Structured output](/docs/how_to/structured_output/)",
        "JSON mode",
        "Local",
        "[Multimodal](/docs/how_to/multimodal_inputs/)",
        "Package",
    ]
    rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
    for llm, feats in sorted(CHAT_MODEL_FEAT_TABLE.items()):
        # Fields are in the order of the header
        row = [
            f"[{llm}]({feats['link']})",
        ]
        for h in header[1:]:
            value = feats.get(h)
            if h == "package":
                row.append(value or "langchain-community")
            else:
                if value == "partial":
                    row.append("🟡")
                elif value is True:
                    row.append("✅")
                else:
                    row.append("❌")
        rows.append(row)
    return "\n".join(["|".join(row) for row in rows])
if __name__ == "__main__":
    # The docs build output root is passed as the first CLI argument.
    output_dir = Path(sys.argv[1])
    output_integrations_dir = output_dir / "integrations"
    output_integrations_dir_llms = output_integrations_dir / "llms"
    output_integrations_dir_chat = output_integrations_dir / "chat"
    output_integrations_dir_llms.mkdir(parents=True, exist_ok=True)
    output_integrations_dir_chat.mkdir(parents=True, exist_ok=True)

    # Render and write the LLM feature-table page.
    llm_page = LLM_TEMPLATE.format(table=get_llm_table())
    with open(output_integrations_dir_llms / "index.mdx", "w") as f:
        f.write(llm_page)

    # Render and write the chat-model feature-table page.
    chat_model_page = CHAT_MODEL_TEMPLATE.format(table=get_chat_model_table())
    with open(output_integrations_dir_chat / "index.mdx", "w") as f:
        f.write(chat_model_page)