2024-05-02 00:34:05 +00:00
import sys
2023-09-22 08:10:27 +00:00
from pathlib import Path
2024-05-16 19:58:51 +00:00
from langchain_community import llms
docs[patch], templates[patch]: Import from core (#14575)
Update imports to use core for the low-hanging fruit changes. Ran
following
```bash
git grep -l 'langchain.schema.runnable' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.runnable/langchain_core.runnables/g'
git grep -l 'langchain.schema.output_parser' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.output_parser/langchain_core.output_parsers/g'
git grep -l 'langchain.schema.messages' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.messages/langchain_core.messages/g'
git grep -l 'langchain.schema.chat_histry' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.chat_history/langchain_core.chat_history/g'
git grep -l 'langchain.schema.prompt_template' {docs,templates,cookbook} | xargs sed -i '' 's/langchain\.schema\.prompt_template/langchain_core.prompts/g'
git grep -l 'from langchain.pydantic_v1' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.pydantic_v1/from langchain_core.pydantic_v1/g'
git grep -l 'from langchain.tools.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.tools\.base/from langchain_core.tools/g'
git grep -l 'from langchain.chat_models.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.chat_models.base/from langchain_core.language_models.chat_models/g'
git grep -l 'from langchain.llms.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.llms\.base\ /from langchain_core.language_models.llms\ /g'
git grep -l 'from langchain.embeddings.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.embeddings\.base/from langchain_core.embeddings/g'
git grep -l 'from langchain.vectorstores.base' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.vectorstores\.base/from langchain_core.vectorstores/g'
git grep -l 'from langchain.agents.tools' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.agents\.tools/from langchain_core.tools/g'
git grep -l 'from langchain.schema.output' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.output\ /from langchain_core.outputs\ /g'
git grep -l 'from langchain.schema.embeddings' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.embeddings/from langchain_core.embeddings/g'
git grep -l 'from langchain.schema.document' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.document/from langchain_core.documents/g'
git grep -l 'from langchain.schema.agent' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.agent/from langchain_core.agents/g'
git grep -l 'from langchain.schema.prompt ' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.prompt\ /from langchain_core.prompt_values /g'
git grep -l 'from langchain.schema.language_model' {docs,templates,cookbook} | xargs sed -i '' 's/from langchain\.schema\.language_model/from langchain_core.language_models/g'
```
2023-12-12 00:49:10 +00:00
from langchain_core . language_models . llms import LLM , BaseLLM
2023-09-22 08:10:27 +00:00
LLM_IGNORE = ( " FakeListLLM " , " OpenAIChat " , " PromptLayerOpenAIChat " )
LLM_FEAT_TABLE_CORRECTION = {
" TextGen " : { " _astream " : False , " _agenerate " : False } ,
" Ollama " : {
" _stream " : False ,
} ,
" PromptLayerOpenAI " : { " batch_generate " : False , " batch_agenerate " : False } ,
}
CHAT_MODEL_IGNORE = ( " FakeListChatModel " , " HumanInputChatModel " )
2024-04-11 16:22:49 +00:00
2024-05-16 19:58:51 +00:00
CHAT_MODEL_FEAT_TABLE = {
2024-04-17 02:14:26 +00:00
" ChatAnthropic " : {
" tool_calling " : True ,
2024-06-13 09:26:55 +00:00
" multimodal " : True ,
2024-04-17 02:14:26 +00:00
" package " : " langchain-anthropic " ,
2024-05-16 19:58:51 +00:00
" link " : " /docs/integrations/chat/anthropic/ " ,
2024-04-17 02:14:26 +00:00
} ,
" ChatMistralAI " : {
" tool_calling " : True ,
2024-05-27 21:16:52 +00:00
" json_model " : True ,
2024-04-17 02:14:26 +00:00
" package " : " langchain-mistralai " ,
2024-05-16 19:58:51 +00:00
" link " : " /docs/integrations/chat/mistralai/ " ,
2024-04-17 02:14:26 +00:00
} ,
" ChatFireworks " : {
" tool_calling " : True ,
2024-05-16 19:58:51 +00:00
" json_mode " : True ,
2024-04-17 02:14:26 +00:00
" package " : " langchain-fireworks " ,
2024-05-16 19:58:51 +00:00
" link " : " /docs/integrations/chat/fireworks/ " ,
2024-04-17 02:14:26 +00:00
} ,
" AzureChatOpenAI " : {
" tool_calling " : True ,
2024-05-16 19:58:51 +00:00
" json_mode " : True ,
2024-06-13 09:26:55 +00:00
" multimodal " : True ,
2024-04-17 02:14:26 +00:00
" package " : " langchain-openai " ,
2024-05-16 19:58:51 +00:00
" link " : " /docs/integrations/chat/azure_chat_openai/ " ,
2024-04-17 02:14:26 +00:00
} ,
" ChatOpenAI " : {
" tool_calling " : True ,
2024-05-16 19:58:51 +00:00
" json_mode " : True ,
2024-06-13 09:26:55 +00:00
" multimodal " : True ,
2024-04-17 02:14:26 +00:00
" package " : " langchain-openai " ,
2024-05-16 19:58:51 +00:00
" link " : " /docs/integrations/chat/openai/ " ,
} ,
" ChatTogether " : {
" tool_calling " : True ,
" json_mode " : True ,
" package " : " langchain-together " ,
" link " : " /docs/integrations/chat/together/ " ,
2024-04-17 02:14:26 +00:00
} ,
" ChatVertexAI " : {
" tool_calling " : True ,
2024-06-13 09:26:55 +00:00
" multimodal " : True ,
2024-04-17 02:14:26 +00:00
" package " : " langchain-google-vertexai " ,
2024-05-16 19:58:51 +00:00
" link " : " /docs/integrations/chat/google_vertex_ai_palm/ " ,
2024-04-17 02:14:26 +00:00
} ,
2024-06-06 15:07:13 +00:00
" ChatGoogleGenerativeAI " : {
" tool_calling " : True ,
2024-06-13 09:26:55 +00:00
" multimodal " : True ,
2024-06-06 15:07:13 +00:00
" package " : " langchain-google-genai " ,
" link " : " /docs/integrations/chat/google_generative_ai/ " ,
} ,
2024-04-17 02:14:26 +00:00
" ChatGroq " : {
2024-04-25 19:05:43 +00:00
" tool_calling " : True ,
2024-05-16 19:58:51 +00:00
" json_mode " : True ,
2024-04-17 02:14:26 +00:00
" package " : " langchain-groq " ,
2024-05-16 19:58:51 +00:00
" link " : " /docs/integrations/chat/groq/ " ,
2024-04-17 02:14:26 +00:00
} ,
" ChatCohere " : {
2024-04-25 19:05:43 +00:00
" tool_calling " : True ,
2024-04-17 02:14:26 +00:00
" package " : " langchain-cohere " ,
2024-05-16 19:58:51 +00:00
" link " : " /docs/integrations/chat/cohere/ " ,
} ,
" ChatBedrock " : {
" tool_calling " : True ,
" package " : " langchain-aws " ,
" link " : " /docs/integrations/chat/bedrock/ " ,
} ,
" ChatHuggingFace " : {
2024-05-23 22:09:30 +00:00
" tool_calling " : True ,
2024-05-16 19:58:51 +00:00
" local " : True ,
" package " : " langchain-huggingface " ,
" link " : " /docs/integrations/chat/huggingface/ " ,
} ,
" ChatOllama " : {
2024-07-23 20:33:23 +00:00
" tool_calling " : True ,
2024-05-16 19:58:51 +00:00
" local " : True ,
2024-06-14 23:27:55 +00:00
" json_mode " : True ,
2024-07-23 20:33:23 +00:00
" package " : " langchain-ollama " ,
2024-05-16 19:58:51 +00:00
" link " : " /docs/integrations/chat/ollama/ " ,
} ,
" vLLM Chat (via ChatOpenAI) " : {
" local " : True ,
2024-06-28 23:09:36 +00:00
" package " : " langchain-openai " ,
2024-05-16 19:58:51 +00:00
" link " : " /docs/integrations/chat/vllm/ " ,
2024-04-17 02:14:26 +00:00
} ,
2024-06-04 17:29:28 +00:00
" ChatEdenAI " : {
" tool_calling " : True ,
" package " : " langchain-community " ,
" link " : " /docs/integrations/chat/edenai/ " ,
} ,
2024-06-14 14:51:43 +00:00
" ChatLlamaCpp " : {
" tool_calling " : True ,
" local " : True ,
" package " : " langchain-community " ,
" link " : " /docs/integrations/chat/llamacpp " ,
} ,
2023-09-22 08:10:27 +00:00
}
2024-06-28 03:38:49 +00:00
for feats in CHAT_MODEL_FEAT_TABLE . values ( ) :
feats [ " structured_output " ] = feats . get ( " tool_calling " , False )
2024-04-11 16:22:49 +00:00
2023-09-22 08:10:27 +00:00
LLM_TEMPLATE = """ \
- - -
2023-11-10 04:35:55 +00:00
sidebar_position : 1
2023-09-22 08:10:27 +00:00
sidebar_class_name : hidden
2024-05-02 21:04:29 +00:00
keywords : [ compatibility ]
2024-05-24 19:44:46 +00:00
custom_edit_url :
2023-09-22 08:10:27 +00:00
- - -
# LLMs
## Features (natively supported)
2023-09-22 17:29:12 +00:00
All LLMs implement the Runnable interface , which comes with default implementations of all methods , ie . ` ainvoke ` , ` batch ` , ` abatch ` , ` stream ` , ` astream ` . This gives all LLMs basic support for async , streaming and batch , which by default is implemented as below :
2023-09-22 16:16:40 +00:00
- * Async * support defaults to calling the respective sync method in asyncio ' s default thread pool executor. This lets other async functions in your application make progress while the LLM is being executed, by moving this call to a background thread.
- * Streaming * support defaults to returning an ` Iterator ` ( or ` AsyncIterator ` in the case of async streaming ) of a single value , the final result returned by the underlying LLM provider . This obviously doesn ' t give you token-by-token streaming, which requires native support from the LLM provider, but ensures your code that expects an iterator of tokens can work for any of our LLM integrations.
- * Batch * support defaults to calling the underlying LLM in parallel for each input by making use of a thread pool executor ( in the sync batch case ) or ` asyncio . gather ` ( in the async batch case ) . The concurrency can be controlled with the ` max_concurrency ` key in ` RunnableConfig ` .
2023-09-22 08:10:27 +00:00
2023-09-22 17:29:12 +00:00
Each LLM integration can optionally provide native implementations for async , streaming or batch , which , for providers that support it , can be more efficient . The table shows , for each integration , which features have been implemented with native support .
2023-09-22 08:10:27 +00:00
{ table }
2024-05-22 22:21:08 +00:00
"""
2023-09-22 08:10:27 +00:00
CHAT_MODEL_TEMPLATE = """ \
- - -
2023-11-10 04:35:55 +00:00
sidebar_position : 0
2023-09-22 08:10:27 +00:00
sidebar_class_name : hidden
2024-06-13 10:40:29 +00:00
keywords : [ compatibility ]
2024-05-24 19:44:46 +00:00
custom_edit_url :
2024-06-13 09:26:55 +00:00
hide_table_of_contents : true
2023-09-22 08:10:27 +00:00
- - -
# Chat models
2024-05-16 19:58:51 +00:00
## Advanced features
2023-09-22 08:10:27 +00:00
2024-07-19 20:05:59 +00:00
The following table shows all the chat model classes that support one or more advanced features .
: : : info
While all these LangChain classes support the indicated advanced feature , you may have
to open the provider - specific documentation to learn which hosted models or backends support
the feature .
: : :
2023-09-22 16:37:03 +00:00
2023-09-22 08:10:27 +00:00
{ table }
2024-05-22 22:21:08 +00:00
"""
2023-09-22 08:10:27 +00:00
def get_llm_table ( ) :
llm_feat_table = { }
for cm in llms . __all__ :
llm_feat_table [ cm ] = { }
cls = getattr ( llms , cm )
if issubclass ( cls , LLM ) :
for feat in ( " _stream " , " _astream " , ( " _acall " , " _agenerate " ) ) :
if isinstance ( feat , tuple ) :
feat , name = feat
else :
feat , name = feat , feat
llm_feat_table [ cm ] [ name ] = getattr ( cls , feat ) != getattr ( LLM , feat )
else :
for feat in [
" _stream " ,
" _astream " ,
( " _generate " , " batch_generate " ) ,
" _agenerate " ,
( " _agenerate " , " batch_agenerate " ) ,
] :
if isinstance ( feat , tuple ) :
feat , name = feat
else :
feat , name = feat , feat
llm_feat_table [ cm ] [ name ] = getattr ( cls , feat ) != getattr ( BaseLLM , feat )
final_feats = {
k : v
for k , v in { * * llm_feat_table , * * LLM_FEAT_TABLE_CORRECTION } . items ( )
if k not in LLM_IGNORE
}
header = [
" model " ,
" _agenerate " ,
" _stream " ,
" _astream " ,
" batch_generate " ,
" batch_agenerate " ,
]
2023-10-06 17:09:41 +00:00
title = [
" Model " ,
" Invoke " ,
" Async invoke " ,
" Stream " ,
" Async stream " ,
" Batch " ,
" Async batch " ,
]
2023-09-22 08:10:27 +00:00
rows = [ title , [ " :- " ] + [ " :-: " ] * ( len ( title ) - 1 ) ]
for llm , feats in sorted ( final_feats . items ( ) ) :
rows + = [ [ llm , " ✅ " ] + [ " ✅ " if feats . get ( h ) else " ❌ " for h in header [ 1 : ] ] ]
return " \n " . join ( [ " | " . join ( row ) for row in rows ] )
2024-04-11 16:22:49 +00:00
def get_chat_model_table ( ) - > str :
""" Get the table of chat models. """
2024-04-11 17:29:42 +00:00
header = [
" model " ,
" tool_calling " ,
2024-04-17 02:14:26 +00:00
" structured_output " ,
2024-05-16 19:58:51 +00:00
" json_mode " ,
" local " ,
2024-06-13 09:26:55 +00:00
" multimodal " ,
2024-04-11 17:29:42 +00:00
" package " ,
]
2024-04-11 16:22:49 +00:00
title = [
" Model " ,
2024-06-25 20:15:08 +00:00
" [Tool calling](/docs/how_to/tool_calling) " ,
2024-05-10 13:49:59 +00:00
" [Structured output](/docs/how_to/structured_output/) " ,
2024-05-16 19:58:51 +00:00
" JSON mode " ,
" Local " ,
2024-06-13 09:26:55 +00:00
" [Multimodal](/docs/how_to/multimodal_inputs/) " ,
2024-05-16 19:58:51 +00:00
" Package " ,
2024-04-11 16:22:49 +00:00
]
2023-09-22 08:10:27 +00:00
rows = [ title , [ " :- " ] + [ " :-: " ] * ( len ( title ) - 1 ) ]
2024-05-16 19:58:51 +00:00
for llm , feats in sorted ( CHAT_MODEL_FEAT_TABLE . items ( ) ) :
2024-04-11 17:29:42 +00:00
# Fields are in the order of the header
2024-05-16 19:58:51 +00:00
row = [
f " [ { llm } ]( { feats [ ' link ' ] } ) " ,
]
2024-04-11 17:29:42 +00:00
for h in header [ 1 : ] :
value = feats . get ( h )
if h == " package " :
row . append ( value or " langchain-community " )
else :
if value == " partial " :
row . append ( " 🟡 " )
elif value is True :
row . append ( " ✅ " )
else :
row . append ( " ❌ " )
rows . append ( row )
2023-09-22 08:10:27 +00:00
return " \n " . join ( [ " | " . join ( row ) for row in rows ] )
if __name__ == " __main__ " :
2024-05-02 00:34:05 +00:00
output_dir = Path ( sys . argv [ 1 ] )
output_integrations_dir = output_dir / " integrations "
output_integrations_dir_llms = output_integrations_dir / " llms "
output_integrations_dir_chat = output_integrations_dir / " chat "
output_integrations_dir_llms . mkdir ( parents = True , exist_ok = True )
output_integrations_dir_chat . mkdir ( parents = True , exist_ok = True )
2023-09-22 08:10:27 +00:00
llm_page = LLM_TEMPLATE . format ( table = get_llm_table ( ) )
2024-05-02 00:34:05 +00:00
with open ( output_integrations_dir / " llms " / " index.mdx " , " w " ) as f :
2023-09-22 08:10:27 +00:00
f . write ( llm_page )
chat_model_page = CHAT_MODEL_TEMPLATE . format ( table = get_chat_model_table ( ) )
2024-05-02 00:34:05 +00:00
with open ( output_integrations_dir / " chat " / " index.mdx " , " w " ) as f :
2023-09-22 08:10:27 +00:00
f . write ( chat_model_page )