langchain: revert "init_chat_model() to support ChatOllama from langchain-ollama" (#24819)
Reverts langchain-ai/langchain#24818, which overlooked the discussion in https://github.com/langchain-ai/langchain/pull/24801.
parent 5abfc85fec
commit b7bbfc7c67
@@ -118,7 +118,7 @@ def init_chat_model(
             - mistralai (langchain-mistralai)
             - huggingface (langchain-huggingface)
             - groq (langchain-groq)
-            - ollama (langchain-ollama)
+            - ollama (langchain-community)
 
         Will attempt to infer model_provider from model if not specified. The
         following providers will be inferred based on these model prefixes:
@@ -336,8 +336,8 @@ def _init_chat_model_helper(
 
         return ChatFireworks(model=model, **kwargs)
     elif model_provider == "ollama":
-        _check_pkg("langchain_ollama")
-        from langchain_ollama import ChatOllama
+        _check_pkg("langchain_community")
+        from langchain_community.chat_models import ChatOllama
 
         return ChatOllama(model=model, **kwargs)
    elif model_provider == "together":
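After this revert, resolving the "ollama" provider goes back through langchain-community's ChatOllama rather than the langchain-ollama package. A minimal sketch of how a caller would exercise that path is below; the model name "llama3" and a locally running Ollama server are assumptions for illustration, not part of this commit.

from langchain.chat_models import init_chat_model

# With this revert applied, the "ollama" provider requires langchain-community
# to be installed; init_chat_model dispatches to its ChatOllama class.
# "llama3" is an illustrative model name assumed to be served by a local Ollama instance.
llm = init_chat_model("llama3", model_provider="ollama", temperature=0)
print(llm.invoke("Say hello in one word.").content)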