2023-09-22 08:10:27 +00:00
import os
2024-05-02 00:34:05 +00:00
import sys
2023-09-22 08:10:27 +00:00
from pathlib import Path
2024-01-08 19:40:35 +00:00
from langchain_community import chat_models , llms
docs[patch], templates[patch]: Import from core (#14575)
Update imports to use core for the low-hanging fruit changes. Ran
following
```bash
git grep -l 'langchain.schema.runnable' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.runnable/langchain_core.runnables/g'
git grep -l 'langchain.schema.output_parser' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.output_parser/langchain_core.output_parsers/g'
git grep -l 'langchain.schema.messages' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.messages/langchain_core.messages/g'
git grep -l 'langchain.schema.chat_history' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.chat_history/langchain_core.chat_history/g'
git grep -l 'langchain.schema.prompt_template' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.prompt_template/langchain_core.prompts/g'
git grep -l 'from langchain.pydantic_v1' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.pydantic_v1/from langchain_core.pydantic_v1/g'
git grep -l 'from langchain.tools.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.tools\.base/from langchain_core.tools/g'
git grep -l 'from langchain.chat_models.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.chat_models.base/from langchain_core.language_models.chat_models/g'
git grep -l 'from langchain.llms.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.llms\.base\ /from langchain_core.language_models.llms\ /g'
git grep -l 'from langchain.embeddings.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.embeddings\.base/from langchain_core.embeddings/g'
git grep -l 'from langchain.vectorstores.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.vectorstores\.base/from langchain_core.vectorstores/g'
git grep -l 'from langchain.agents.tools' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.agents\.tools/from langchain_core.tools/g'
git grep -l 'from langchain.schema.output' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.output\ /from langchain_core.outputs\ /g'
git grep -l 'from langchain.schema.embeddings' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.embeddings/from langchain_core.embeddings/g'
git grep -l 'from langchain.schema.document' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.document/from langchain_core.documents/g'
git grep -l 'from langchain.schema.agent' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.agent/from langchain_core.agents/g'
git grep -l 'from langchain.schema.prompt ' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.prompt\ /from langchain_core.prompt_values /g'
git grep -l 'from langchain.schema.language_model' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.language_model/from langchain_core.language_models/g'
```
2023-12-12 00:49:10 +00:00
from langchain_core . language_models . chat_models import BaseChatModel , SimpleChatModel
from langchain_core . language_models . llms import LLM , BaseLLM
2023-09-22 08:10:27 +00:00
LLM_IGNORE = ( " FakeListLLM " , " OpenAIChat " , " PromptLayerOpenAIChat " )
LLM_FEAT_TABLE_CORRECTION = {
" TextGen " : { " _astream " : False , " _agenerate " : False } ,
" Ollama " : {
" _stream " : False ,
} ,
" PromptLayerOpenAI " : { " batch_generate " : False , " batch_agenerate " : False } ,
}
CHAT_MODEL_IGNORE = ( " FakeListChatModel " , " HumanInputChatModel " )
2024-04-11 16:22:49 +00:00
2023-09-22 08:10:27 +00:00
# Manual overrides/additions applied on top of the introspected chat model
# feature flags. Besides the native-method flags (_stream/_astream/_agenerate),
# entries may set "tool_calling", "structured_output" and "package" columns,
# which are not discoverable by introspection in get_chat_model_table().
CHAT_MODEL_FEAT_TABLE_CORRECTION = {
    "ChatMLflowAIGateway": {"_agenerate": False},
    "PromptLayerChatOpenAI": {"_stream": False, "_astream": False},
    "ChatKonko": {"_astream": False, "_agenerate": False},
    "ChatAnthropic": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-anthropic",
    },
    "ChatMistralAI": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-mistralai",
    },
    "ChatFireworks": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-fireworks",
    },
    "AzureChatOpenAI": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-openai",
    },
    "ChatOpenAI": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-openai",
    },
    "ChatVertexAI": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-google-vertexai",
    },
    "ChatGroq": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-groq",
    },
    "ChatCohere": {
        "tool_calling": True,
        "structured_output": True,
        "package": "langchain-cohere",
    },
}
2024-04-11 16:22:49 +00:00
2023-09-22 08:10:27 +00:00
# Docusaurus page template for the LLM feature table; {table} is filled in
# by get_llm_table() in __main__.
LLM_TEMPLATE = """\
---
sidebar_position: 1
sidebar_class_name: hidden
keywords: [compatibility]
---

# LLMs

## Features (natively supported)

All LLMs implement the Runnable interface, which comes with default implementations of all methods, ie. `ainvoke`, `batch`, `abatch`, `stream`, `astream`. This gives all LLMs basic support for async, streaming and batch, which by default is implemented as below:

- *Async* support defaults to calling the respective sync method in asyncio's default thread pool executor. This lets other async functions in your application make progress while the LLM is being executed, by moving this call to a background thread.
- *Streaming* support defaults to returning an `Iterator` (or `AsyncIterator` in the case of async streaming) of a single value, the final result returned by the underlying LLM provider. This obviously doesn't give you token-by-token streaming, which requires native support from the LLM provider, but ensures your code that expects an iterator of tokens can work for any of our LLM integrations.
- *Batch* support defaults to calling the underlying LLM in parallel for each input by making use of a thread pool executor (in the sync batch case) or `asyncio.gather` (in the async batch case). The concurrency can be controlled with the `max_concurrency` key in `RunnableConfig`.

Each LLM integration can optionally provide native implementations for async, streaming or batch, which, for providers that support it, can be more efficient. The table shows, for each integration, which features have been implemented with native support.

{table}

"""  # noqa: E501
2023-09-22 08:10:27 +00:00
# Docusaurus page template for the chat model feature table; {table} is
# filled in by get_chat_model_table() in __main__.
CHAT_MODEL_TEMPLATE = """\
---
sidebar_position: 0
sidebar_class_name: hidden
keywords: [compatibility, bind_tools, tool calling, function calling, structured output, with_structured_output]
---

# Chat models

## Features (natively supported)

All ChatModels implement the Runnable interface, which comes with default implementations of all methods, ie. `ainvoke`, `batch`, `abatch`, `stream`, `astream`. This gives all ChatModels basic support for async, streaming and batch, which by default is implemented as below:

- *Async* support defaults to calling the respective sync method in asyncio's default thread pool executor. This lets other async functions in your application make progress while the ChatModel is being executed, by moving this call to a background thread.
- *Streaming* support defaults to returning an `Iterator` (or `AsyncIterator` in the case of async streaming) of a single value, the final result returned by the underlying ChatModel provider. This obviously doesn't give you token-by-token streaming, which requires native support from the ChatModel provider, but ensures your code that expects an iterator of tokens can work for any of our ChatModel integrations.
- *Batch* support defaults to calling the underlying ChatModel in parallel for each input by making use of a thread pool executor (in the sync batch case) or `asyncio.gather` (in the async batch case). The concurrency can be controlled with the `max_concurrency` key in `RunnableConfig`.

Each ChatModel integration can optionally provide native implementations to truly enable async or streaming.
The table shows, for each integration, which features have been implemented with native support.

{table}

"""  # noqa: E501
2023-09-22 08:10:27 +00:00
def get_llm_table() -> str:
    """Build the markdown feature table for LLM integrations.

    Introspects every class exported by ``langchain_community.llms`` and
    marks a feature as natively supported when the class overrides the
    corresponding base-class method. Manual corrections from
    ``LLM_FEAT_TABLE_CORRECTION`` are layered on top, and classes listed in
    ``LLM_IGNORE`` are dropped.

    Returns:
        The feature table as a markdown-formatted string.
    """
    llm_feat_table = {}
    for cm in llms.__all__:
        llm_feat_table[cm] = {}
        cls = getattr(llms, cm)
        if issubclass(cls, LLM):
            # Simple LLM subclasses: async support comes from overriding
            # _acall, reported under the _agenerate column.
            base = LLM
            feat_specs = ("_stream", "_astream", ("_acall", "_agenerate"))
        else:
            # BaseLLM subclasses implement generation/batching directly.
            base = BaseLLM
            feat_specs = (
                "_stream",
                "_astream",
                ("_generate", "batch_generate"),
                "_agenerate",
                ("_agenerate", "batch_agenerate"),
            )
        for spec in feat_specs:
            # A (method, column) tuple maps one override onto a differently
            # named table column; a bare string uses the method name as-is.
            feat, name = spec if isinstance(spec, tuple) else (spec, spec)
            llm_feat_table[cm][name] = getattr(cls, feat) != getattr(base, feat)
    final_feats = {
        k: v
        for k, v in {**llm_feat_table, **LLM_FEAT_TABLE_CORRECTION}.items()
        if k not in LLM_IGNORE
    }
    header = [
        "model",
        "_agenerate",
        "_stream",
        "_astream",
        "batch_generate",
        "batch_agenerate",
    ]
    title = [
        "Model",
        "Invoke",
        "Async invoke",
        "Stream",
        "Async stream",
        "Batch",
        "Async batch",
    ]
    rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
    for llm, feats in sorted(final_feats.items()):
        # Sync invoke is always supported, hence the unconditional check mark.
        rows += [[llm, "✅"] + ["✅" if feats.get(h) else "❌" for h in header[1:]]]
    return "\n".join(["|".join(row) for row in rows])
2024-04-11 16:22:49 +00:00
def get_chat_model_table() -> str:
    """Build the markdown feature table for chat model integrations.

    A feature counts as natively supported when the class overrides the
    corresponding method of its comparison base class. Corrections and
    extra columns (tool_calling, structured_output, package) come from
    ``CHAT_MODEL_FEAT_TABLE_CORRECTION``; classes in ``CHAT_MODEL_IGNORE``
    are excluded.

    Returns:
        The feature table as a markdown-formatted string.
    """
    feat_table = {}
    for cm in chat_models.__all__:
        feat_table[cm] = {}
        cls = getattr(chat_models, cm)
        # Compare against the nearest base so SimpleChatModel's own
        # overrides don't count as native support.
        if issubclass(cls, SimpleChatModel):
            comparison_cls = SimpleChatModel
        else:
            comparison_cls = BaseChatModel
        for feat in ("_stream", "_astream", "_agenerate"):
            feat_table[cm][feat] = getattr(cls, feat) != getattr(comparison_cls, feat)
    final_feats = {
        k: v
        for k, v in {**feat_table, **CHAT_MODEL_FEAT_TABLE_CORRECTION}.items()
        if k not in CHAT_MODEL_IGNORE
    }
    header = [
        "model",
        "_agenerate",
        "_stream",
        "_astream",
        "tool_calling",
        "structured_output",
        "package",
    ]
    title = [
        "Model",
        "Invoke",
        "Async invoke",
        "Stream",
        "Async stream",
        "[Tool calling](/docs/modules/model_io/chat/function_calling/)",
        "[Structured output](/docs/modules/model_io/chat/structured_output/)",
        "Python Package",
    ]
    rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
    for llm, feats in sorted(final_feats.items()):
        # Fields are in the order of the header
        row = [llm, "✅"]
        for h in header[1:]:
            value = feats.get(h)
            if h == "package":
                # Integrations without a dedicated package live in community.
                row.append(value or "langchain-community")
            elif value == "partial":
                row.append("🟡")
            elif value is True:
                row.append("✅")
            else:
                row.append("❌")
        rows.append(row)
    return "\n".join(["|".join(row) for row in rows])
if __name__ == "__main__":
    # Output root is the single CLI argument: python <script> <docs_dir>.
    output_dir = Path(sys.argv[1])
    output_integrations_dir = output_dir / "integrations"
    output_integrations_dir_llms = output_integrations_dir / "llms"
    output_integrations_dir_chat = output_integrations_dir / "chat"
    output_integrations_dir_llms.mkdir(parents=True, exist_ok=True)
    output_integrations_dir_chat.mkdir(parents=True, exist_ok=True)

    # Render each template with its generated table and write the index page.
    llm_page = LLM_TEMPLATE.format(table=get_llm_table())
    with open(output_integrations_dir_llms / "index.mdx", "w") as f:
        f.write(llm_page)
    chat_model_page = CHAT_MODEL_TEMPLATE.format(table=get_chat_model_table())
    with open(output_integrations_dir_chat / "index.mdx", "w") as f:
        f.write(chat_model_page)