"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.

**Class hierarchy:**

.. code-block::

    BaseLanguageModel --> BaseLLM --> LLM --> <name>  # Examples: AI21, HuggingFaceHub, OpenAI

**Main helpers:**

.. code-block::

    LLMResult, PromptValue,
    CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
    CallbackManager, AsyncCallbackManager,
    AIMessage, BaseMessage
"""  # noqa: E501
|
from typing import Any, Callable, Dict, Type

from langchain_core._api.deprecation import warn_deprecated
from langchain_core.language_models.llms import BaseLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_ai21() -> Type[BaseLLM]:
    """Lazily import and return the ``AI21`` LLM class."""
    from langchain_community.llms.ai21 import AI21

    return AI21
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_aleph_alpha() -> Type[BaseLLM]:
    """Lazily import and return the ``AlephAlpha`` LLM class."""
    from langchain_community.llms.aleph_alpha import AlephAlpha

    return AlephAlpha
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_amazon_api_gateway() -> Type[BaseLLM]:
    """Lazily import and return the ``AmazonAPIGateway`` LLM class."""
    from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway

    return AmazonAPIGateway
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_anthropic() -> Type[BaseLLM]:
    """Lazily import and return the ``Anthropic`` LLM class."""
    from langchain_community.llms.anthropic import Anthropic

    return Anthropic
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_anyscale() -> Type[BaseLLM]:
    """Lazily import and return the ``Anyscale`` LLM class."""
    from langchain_community.llms.anyscale import Anyscale

    return Anyscale
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_aphrodite() -> Type[BaseLLM]:
    """Lazily import and return the ``Aphrodite`` LLM class."""
    from langchain_community.llms.aphrodite import Aphrodite

    return Aphrodite
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_arcee() -> Type[BaseLLM]:
    """Lazily import and return the ``Arcee`` LLM class."""
    from langchain_community.llms.arcee import Arcee

    return Arcee
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_aviary() -> Type[BaseLLM]:
    """Lazily import and return the ``Aviary`` LLM class."""
    from langchain_community.llms.aviary import Aviary

    return Aviary
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_azureml_endpoint() -> Type[BaseLLM]:
    """Lazily import and return the ``AzureMLOnlineEndpoint`` LLM class."""
    from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint

    return AzureMLOnlineEndpoint
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_baichuan() -> Type[BaseLLM]:
    """Lazily import and return the ``BaichuanLLM`` class."""
    from langchain_community.llms.baichuan import BaichuanLLM

    return BaichuanLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_baidu_qianfan_endpoint() -> Type[BaseLLM]:
    """Lazily import and return the ``QianfanLLMEndpoint`` class."""
    from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint

    return QianfanLLMEndpoint
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_bananadev() -> Type[BaseLLM]:
    """Lazily import and return the ``Banana`` LLM class."""
    from langchain_community.llms.bananadev import Banana

    return Banana
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_baseten() -> Type[BaseLLM]:
    """Lazily import and return the ``Baseten`` LLM class."""
    from langchain_community.llms.baseten import Baseten

    return Baseten
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_beam() -> Type[BaseLLM]:
    """Lazily import and return the ``Beam`` LLM class."""
    from langchain_community.llms.beam import Beam

    return Beam
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_bedrock() -> Type[BaseLLM]:
    """Lazily import and return the ``Bedrock`` LLM class."""
    from langchain_community.llms.bedrock import Bedrock

    return Bedrock
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_bittensor() -> Type[BaseLLM]:
    """Lazily import and return the ``NIBittensorLLM`` class."""
    from langchain_community.llms.bittensor import NIBittensorLLM

    return NIBittensorLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_cerebriumai() -> Type[BaseLLM]:
    """Lazily import and return the ``CerebriumAI`` LLM class."""
    from langchain_community.llms.cerebriumai import CerebriumAI

    return CerebriumAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_chatglm() -> Type[BaseLLM]:
    """Lazily import and return the ``ChatGLM`` LLM class."""
    from langchain_community.llms.chatglm import ChatGLM

    return ChatGLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_clarifai() -> Type[BaseLLM]:
    """Lazily import and return the ``Clarifai`` LLM class."""
    from langchain_community.llms.clarifai import Clarifai

    return Clarifai
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_cohere() -> Type[BaseLLM]:
    """Lazily import and return the ``Cohere`` LLM class."""
    from langchain_community.llms.cohere import Cohere

    return Cohere
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_ctransformers() -> Type[BaseLLM]:
    """Lazily import and return the ``CTransformers`` LLM class."""
    from langchain_community.llms.ctransformers import CTransformers

    return CTransformers
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_ctranslate2() -> Type[BaseLLM]:
    """Lazily import and return the ``CTranslate2`` LLM class."""
    from langchain_community.llms.ctranslate2 import CTranslate2

    return CTranslate2
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_databricks() -> Type[BaseLLM]:
    """Lazily import and return the ``Databricks`` LLM class."""
    from langchain_community.llms.databricks import Databricks

    return Databricks
|
|
|
|
|
|
|
|
|
|
|
|
def _import_databricks_chat() -> Any:
    """Lazily import and return the ``ChatDatabricks`` chat model class.

    This access path is deprecated: a deprecation warning is emitted
    pointing users at ``langchain_community.chat_models.ChatDatabricks``.
    """
    warn_deprecated(
        since="0.0.22",
        removal="0.2",
        alternative_import="langchain_community.chat_models.ChatDatabricks",
    )
    from langchain_community.chat_models.databricks import ChatDatabricks

    return ChatDatabricks
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_deepinfra() -> Type[BaseLLM]:
    """Lazily import and return the ``DeepInfra`` LLM class."""
    from langchain_community.llms.deepinfra import DeepInfra

    return DeepInfra
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_deepsparse() -> Type[BaseLLM]:
    """Lazily import and return the ``DeepSparse`` LLM class."""
    from langchain_community.llms.deepsparse import DeepSparse

    return DeepSparse
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_edenai() -> Type[BaseLLM]:
    """Lazily import and return the ``EdenAI`` LLM class."""
    from langchain_community.llms.edenai import EdenAI

    return EdenAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_fake() -> Type[BaseLLM]:
    """Lazily import and return the ``FakeListLLM`` test-double class."""
    from langchain_community.llms.fake import FakeListLLM

    return FakeListLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_fireworks() -> Type[BaseLLM]:
    """Lazily import and return the ``Fireworks`` LLM class."""
    from langchain_community.llms.fireworks import Fireworks

    return Fireworks
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_forefrontai() -> Type[BaseLLM]:
    """Lazily import and return the ``ForefrontAI`` LLM class."""
    from langchain_community.llms.forefrontai import ForefrontAI

    return ForefrontAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_gigachat() -> Type[BaseLLM]:
    """Lazily import and return the ``GigaChat`` LLM class."""
    from langchain_community.llms.gigachat import GigaChat

    return GigaChat
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_google_palm() -> Type[BaseLLM]:
    """Lazily import and return the ``GooglePalm`` LLM class."""
    from langchain_community.llms.google_palm import GooglePalm

    return GooglePalm
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_gooseai() -> Type[BaseLLM]:
    """Lazily import and return the ``GooseAI`` LLM class."""
    from langchain_community.llms.gooseai import GooseAI

    return GooseAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_gpt4all() -> Type[BaseLLM]:
    """Lazily import and return the ``GPT4All`` LLM class."""
    from langchain_community.llms.gpt4all import GPT4All

    return GPT4All
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_gradient_ai() -> Type[BaseLLM]:
    """Lazily import and return the ``GradientLLM`` class."""
    from langchain_community.llms.gradient_ai import GradientLLM

    return GradientLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_huggingface_endpoint() -> Type[BaseLLM]:
    """Lazily import and return the ``HuggingFaceEndpoint`` LLM class."""
    from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint

    return HuggingFaceEndpoint
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_huggingface_hub() -> Type[BaseLLM]:
    """Lazily import and return the ``HuggingFaceHub`` LLM class."""
    from langchain_community.llms.huggingface_hub import HuggingFaceHub

    return HuggingFaceHub
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_huggingface_pipeline() -> Type[BaseLLM]:
    """Lazily import and return the ``HuggingFacePipeline`` LLM class."""
    from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline

    return HuggingFacePipeline
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_huggingface_text_gen_inference() -> Type[BaseLLM]:
    """Lazily import and return the ``HuggingFaceTextGenInference`` LLM class."""
    from langchain_community.llms.huggingface_text_gen_inference import (
        HuggingFaceTextGenInference,
    )

    return HuggingFaceTextGenInference
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_human() -> Type[BaseLLM]:
    """Lazily import and return the ``HumanInputLLM`` class."""
    from langchain_community.llms.human import HumanInputLLM

    return HumanInputLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_javelin_ai_gateway() -> Type[BaseLLM]:
    """Lazily import and return the ``JavelinAIGateway`` LLM class."""
    from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway

    return JavelinAIGateway
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_koboldai() -> Type[BaseLLM]:
    """Lazily import and return the ``KoboldApiLLM`` class."""
    from langchain_community.llms.koboldai import KoboldApiLLM

    return KoboldApiLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_konko() -> Type[BaseLLM]:
    """Lazily import and return the ``Konko`` LLM class."""
    from langchain_community.llms.konko import Konko

    return Konko
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_llamacpp() -> Type[BaseLLM]:
    """Lazily import and return the ``LlamaCpp`` LLM class."""
    from langchain_community.llms.llamacpp import LlamaCpp

    return LlamaCpp
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_manifest() -> Type[BaseLLM]:
    """Lazily import and return the ``ManifestWrapper`` LLM class."""
    from langchain_community.llms.manifest import ManifestWrapper

    return ManifestWrapper
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_minimax() -> Type[BaseLLM]:
    """Lazily import and return the ``Minimax`` LLM class."""
    from langchain_community.llms.minimax import Minimax

    return Minimax
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_mlflow() -> Type[BaseLLM]:
    """Lazily import and return the ``Mlflow`` LLM class."""
    from langchain_community.llms.mlflow import Mlflow

    return Mlflow
|
|
|
|
|
|
|
|
|
|
|
|
def _import_mlflow_chat() -> Any:
    """Lazily import and return the ``ChatMlflow`` chat model class.

    This access path is deprecated: a deprecation warning is emitted
    pointing users at ``langchain_community.chat_models.ChatMlflow``.
    """
    warn_deprecated(
        since="0.0.22",
        removal="0.2",
        alternative_import="langchain_community.chat_models.ChatMlflow",
    )
    from langchain_community.chat_models.mlflow import ChatMlflow

    return ChatMlflow
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_mlflow_ai_gateway() -> Type[BaseLLM]:
    """Lazily import and return the ``MlflowAIGateway`` LLM class."""
    from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway

    return MlflowAIGateway
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_modal() -> Type[BaseLLM]:
    """Lazily import and return the ``Modal`` LLM class."""
    from langchain_community.llms.modal import Modal

    return Modal
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_mosaicml() -> Type[BaseLLM]:
    """Lazily import and return the ``MosaicML`` LLM class."""
    from langchain_community.llms.mosaicml import MosaicML

    return MosaicML
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_nlpcloud() -> Type[BaseLLM]:
    """Lazily import and return the ``NLPCloud`` LLM class."""
    from langchain_community.llms.nlpcloud import NLPCloud

    return NLPCloud
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_oci_md_tgi() -> Type[BaseLLM]:
    """Lazily import and return the ``OCIModelDeploymentTGI`` LLM class."""
    from langchain_community.llms.oci_data_science_model_deployment_endpoint import (
        OCIModelDeploymentTGI,
    )

    return OCIModelDeploymentTGI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_oci_md_vllm() -> Type[BaseLLM]:
    """Lazily import and return the ``OCIModelDeploymentVLLM`` LLM class."""
    from langchain_community.llms.oci_data_science_model_deployment_endpoint import (
        OCIModelDeploymentVLLM,
    )

    return OCIModelDeploymentVLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_oci_gen_ai() -> Type[BaseLLM]:
    """Lazily import and return the ``OCIGenAI`` LLM class."""
    from langchain_community.llms.oci_generative_ai import OCIGenAI

    return OCIGenAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_octoai_endpoint() -> Type[BaseLLM]:
    """Lazily import and return the ``OctoAIEndpoint`` LLM class."""
    from langchain_community.llms.octoai_endpoint import OctoAIEndpoint

    return OctoAIEndpoint
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_ollama() -> Type[BaseLLM]:
    """Lazily import and return the ``Ollama`` LLM class."""
    from langchain_community.llms.ollama import Ollama

    return Ollama
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_opaqueprompts() -> Type[BaseLLM]:
    """Lazily import and return the ``OpaquePrompts`` LLM class."""
    from langchain_community.llms.opaqueprompts import OpaquePrompts

    return OpaquePrompts
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_azure_openai() -> Type[BaseLLM]:
    """Lazily import and return the ``AzureOpenAI`` LLM class."""
    from langchain_community.llms.openai import AzureOpenAI

    return AzureOpenAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_openai() -> Type[BaseLLM]:
    """Lazily import and return the ``OpenAI`` LLM class."""
    from langchain_community.llms.openai import OpenAI

    return OpenAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_openai_chat() -> Type[BaseLLM]:
    """Lazily import and return the ``OpenAIChat`` LLM class."""
    from langchain_community.llms.openai import OpenAIChat

    return OpenAIChat
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_openllm() -> Type[BaseLLM]:
    """Lazily import and return the ``OpenLLM`` class."""
    from langchain_community.llms.openllm import OpenLLM

    return OpenLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_openlm() -> Type[BaseLLM]:
    """Lazily import and return the ``OpenLM`` class."""
    from langchain_community.llms.openlm import OpenLM

    return OpenLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_pai_eas_endpoint() -> Type[BaseLLM]:
    """Lazily import and return the ``PaiEasEndpoint`` LLM class."""
    from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint

    return PaiEasEndpoint
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_petals() -> Type[BaseLLM]:
    """Lazily import and return the ``Petals`` LLM class."""
    from langchain_community.llms.petals import Petals

    return Petals
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_pipelineai() -> Type[BaseLLM]:
    """Lazily import and return the ``PipelineAI`` LLM class."""
    from langchain_community.llms.pipelineai import PipelineAI

    return PipelineAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_predibase() -> Type[BaseLLM]:
    """Lazily import and return the ``Predibase`` LLM class."""
    from langchain_community.llms.predibase import Predibase

    return Predibase
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_predictionguard() -> Type[BaseLLM]:
    """Lazily import and return the ``PredictionGuard`` LLM class."""
    from langchain_community.llms.predictionguard import PredictionGuard

    return PredictionGuard
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_promptlayer() -> Type[BaseLLM]:
    """Lazily import and return the ``PromptLayerOpenAI`` LLM class."""
    from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI

    return PromptLayerOpenAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_promptlayer_chat() -> Type[BaseLLM]:
    """Lazily import and return the ``PromptLayerOpenAIChat`` LLM class."""
    from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat

    return PromptLayerOpenAIChat
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_replicate() -> Type[BaseLLM]:
    """Lazily import and return the ``Replicate`` LLM class."""
    from langchain_community.llms.replicate import Replicate

    return Replicate
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_rwkv() -> Type[BaseLLM]:
    """Lazily import and return the ``RWKV`` LLM class."""
    from langchain_community.llms.rwkv import RWKV

    return RWKV
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_sagemaker_endpoint() -> Type[BaseLLM]:
    """Lazily import and return the ``SagemakerEndpoint`` LLM class."""
    from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint

    return SagemakerEndpoint
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_self_hosted() -> Type[BaseLLM]:
    """Lazily import and return the ``SelfHostedPipeline`` LLM class."""
    from langchain_community.llms.self_hosted import SelfHostedPipeline

    return SelfHostedPipeline
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_self_hosted_hugging_face() -> Type[BaseLLM]:
    """Lazily import and return the ``SelfHostedHuggingFaceLLM`` class."""
    from langchain_community.llms.self_hosted_hugging_face import (
        SelfHostedHuggingFaceLLM,
    )

    return SelfHostedHuggingFaceLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_stochasticai() -> Type[BaseLLM]:
    """Lazily import and return the ``StochasticAI`` LLM class."""
    from langchain_community.llms.stochasticai import StochasticAI

    return StochasticAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_symblai_nebula() -> Type[BaseLLM]:
    """Lazily import and return the ``Nebula`` LLM class."""
    from langchain_community.llms.symblai_nebula import Nebula

    return Nebula
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_textgen() -> Type[BaseLLM]:
    """Lazily import and return the ``TextGen`` LLM class."""
    from langchain_community.llms.textgen import TextGen

    return TextGen
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_titan_takeoff() -> Type[BaseLLM]:
    """Lazily import and return the ``TitanTakeoff`` LLM class."""
    from langchain_community.llms.titan_takeoff import TitanTakeoff

    return TitanTakeoff
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_titan_takeoff_pro() -> Type[BaseLLM]:
    """Lazily import and return the ``TitanTakeoffPro`` LLM class."""
    from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro

    return TitanTakeoffPro
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_together() -> Type[BaseLLM]:
    """Lazily import and return the ``Together`` LLM class."""
    from langchain_community.llms.together import Together

    return Together
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_tongyi() -> Type[BaseLLM]:
    """Lazily import and return the ``Tongyi`` LLM class."""
    from langchain_community.llms.tongyi import Tongyi

    return Tongyi
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_vertex() -> Type[BaseLLM]:
    """Lazily import and return the ``VertexAI`` LLM class."""
    from langchain_community.llms.vertexai import VertexAI

    return VertexAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_vertex_model_garden() -> Type[BaseLLM]:
    """Lazily import and return the ``VertexAIModelGarden`` LLM class."""
    from langchain_community.llms.vertexai import VertexAIModelGarden

    return VertexAIModelGarden
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_vllm() -> Type[BaseLLM]:
    """Lazily import and return the ``VLLM`` class."""
    from langchain_community.llms.vllm import VLLM

    return VLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_vllm_openai() -> Type[BaseLLM]:
    """Lazily import and return the ``VLLMOpenAI`` class."""
    from langchain_community.llms.vllm import VLLMOpenAI

    return VLLMOpenAI
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_watsonxllm() -> Type[BaseLLM]:
    """Lazily import and return the ``WatsonxLLM`` class."""
    from langchain_community.llms.watsonxllm import WatsonxLLM

    return WatsonxLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_writer() -> Type[BaseLLM]:
    """Lazily import and return the ``Writer`` LLM class."""
    from langchain_community.llms.writer import Writer

    return Writer
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_xinference() -> Type[BaseLLM]:
    """Lazily import and return the ``Xinference`` LLM class."""
    from langchain_community.llms.xinference import Xinference

    return Xinference
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_yandex_gpt() -> Type[BaseLLM]:
    """Lazily import and return the ``YandexGPT`` LLM class."""
    from langchain_community.llms.yandex import YandexGPT

    return YandexGPT
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_yuan2() -> Type[BaseLLM]:
    """Lazily import and return the ``Yuan2`` LLM class."""
    from langchain_community.llms.yuan2 import Yuan2

    return Yuan2
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_volcengine_maas() -> Type[BaseLLM]:
    """Lazily import and return the ``VolcEngineMaasLLM`` class."""
    from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM

    return VolcEngineMaasLLM
|
|
|
|
|
|
|
|
|
2024-02-23 17:51:27 +00:00
|
|
|
def _import_sparkllm() -> Type[BaseLLM]:
    """Lazily import and return the ``SparkLLM`` class."""
    from langchain_community.llms.sparkllm import SparkLLM

    return SparkLLM
|
|
|
|
|
|
|
|
|
2023-12-11 21:53:30 +00:00
|
|
|
def __getattr__(name: str) -> Any:
|
|
|
|
if name == "AI21":
|
|
|
|
return _import_ai21()
|
|
|
|
elif name == "AlephAlpha":
|
|
|
|
return _import_aleph_alpha()
|
|
|
|
elif name == "AmazonAPIGateway":
|
|
|
|
return _import_amazon_api_gateway()
|
|
|
|
elif name == "Anthropic":
|
|
|
|
return _import_anthropic()
|
|
|
|
elif name == "Anyscale":
|
|
|
|
return _import_anyscale()
|
2023-12-20 06:16:57 +00:00
|
|
|
elif name == "Aphrodite":
|
|
|
|
return _import_aphrodite()
|
2023-12-11 21:53:30 +00:00
|
|
|
elif name == "Arcee":
|
|
|
|
return _import_arcee()
|
|
|
|
elif name == "Aviary":
|
|
|
|
return _import_aviary()
|
|
|
|
elif name == "AzureMLOnlineEndpoint":
|
|
|
|
return _import_azureml_endpoint()
|
2024-01-30 04:08:24 +00:00
|
|
|
elif name == "Baichuan":
|
|
|
|
return _import_baichuan()
|
2023-12-11 21:53:30 +00:00
|
|
|
elif name == "QianfanLLMEndpoint":
|
|
|
|
return _import_baidu_qianfan_endpoint()
|
|
|
|
elif name == "Banana":
|
|
|
|
return _import_bananadev()
|
|
|
|
elif name == "Baseten":
|
|
|
|
return _import_baseten()
|
|
|
|
elif name == "Beam":
|
|
|
|
return _import_beam()
|
|
|
|
elif name == "Bedrock":
|
|
|
|
return _import_bedrock()
|
|
|
|
elif name == "NIBittensorLLM":
|
|
|
|
return _import_bittensor()
|
|
|
|
elif name == "CerebriumAI":
|
|
|
|
return _import_cerebriumai()
|
|
|
|
elif name == "ChatGLM":
|
|
|
|
return _import_chatglm()
|
|
|
|
elif name == "Clarifai":
|
|
|
|
return _import_clarifai()
|
|
|
|
elif name == "Cohere":
|
|
|
|
return _import_cohere()
|
|
|
|
elif name == "CTransformers":
|
|
|
|
return _import_ctransformers()
|
|
|
|
elif name == "CTranslate2":
|
|
|
|
return _import_ctranslate2()
|
|
|
|
elif name == "Databricks":
|
|
|
|
return _import_databricks()
|
|
|
|
elif name == "DeepInfra":
|
|
|
|
return _import_deepinfra()
|
|
|
|
elif name == "DeepSparse":
|
|
|
|
return _import_deepsparse()
|
|
|
|
elif name == "EdenAI":
|
|
|
|
return _import_edenai()
|
|
|
|
elif name == "FakeListLLM":
|
|
|
|
return _import_fake()
|
|
|
|
elif name == "Fireworks":
|
|
|
|
return _import_fireworks()
|
|
|
|
elif name == "ForefrontAI":
|
|
|
|
return _import_forefrontai()
|
|
|
|
elif name == "GigaChat":
|
|
|
|
return _import_gigachat()
|
|
|
|
elif name == "GooglePalm":
|
|
|
|
return _import_google_palm()
|
|
|
|
elif name == "GooseAI":
|
|
|
|
return _import_gooseai()
|
|
|
|
elif name == "GPT4All":
|
|
|
|
return _import_gpt4all()
|
|
|
|
elif name == "GradientLLM":
|
|
|
|
return _import_gradient_ai()
|
|
|
|
elif name == "HuggingFaceEndpoint":
|
|
|
|
return _import_huggingface_endpoint()
|
|
|
|
elif name == "HuggingFaceHub":
|
|
|
|
return _import_huggingface_hub()
|
|
|
|
elif name == "HuggingFacePipeline":
|
|
|
|
return _import_huggingface_pipeline()
|
|
|
|
elif name == "HuggingFaceTextGenInference":
|
|
|
|
return _import_huggingface_text_gen_inference()
|
|
|
|
elif name == "HumanInputLLM":
|
|
|
|
return _import_human()
|
|
|
|
elif name == "JavelinAIGateway":
|
|
|
|
return _import_javelin_ai_gateway()
|
|
|
|
elif name == "KoboldApiLLM":
|
|
|
|
return _import_koboldai()
|
2024-01-24 02:22:32 +00:00
|
|
|
elif name == "Konko":
|
|
|
|
return _import_konko()
|
2023-12-11 21:53:30 +00:00
|
|
|
elif name == "LlamaCpp":
|
|
|
|
return _import_llamacpp()
|
|
|
|
elif name == "ManifestWrapper":
|
|
|
|
return _import_manifest()
|
|
|
|
elif name == "Minimax":
|
|
|
|
return _import_minimax()
|
|
|
|
elif name == "Mlflow":
|
|
|
|
return _import_mlflow()
|
|
|
|
elif name == "MlflowAIGateway":
|
|
|
|
return _import_mlflow_ai_gateway()
|
|
|
|
elif name == "Modal":
|
|
|
|
return _import_modal()
|
|
|
|
elif name == "MosaicML":
|
|
|
|
return _import_mosaicml()
|
|
|
|
elif name == "NLPCloud":
|
|
|
|
return _import_nlpcloud()
|
2023-12-20 19:52:20 +00:00
|
|
|
elif name == "OCIModelDeploymentTGI":
|
|
|
|
return _import_oci_md_tgi()
|
|
|
|
elif name == "OCIModelDeploymentVLLM":
|
|
|
|
return _import_oci_md_vllm()
|
community[minor]: Add OCI Generative AI integration (#16548)
<!-- Thank you for contributing to LangChain!
Please title your PR "<package>: <description>", where <package> is
whichever of langchain, community, core, experimental, etc. is being
modified.
Replace this entire comment with:
- **Description:** Adding Oracle Cloud Infrastructure Generative AI
integration. Oracle Cloud Infrastructure (OCI) Generative AI is a fully
managed service that provides a set of state-of-the-art, customizable
large language models (LLMs) that cover a wide range of use cases, and
which is available through a single API. Using the OCI Generative AI
service you can access ready-to-use pretrained models, or create and
host your own fine-tuned custom models based on your own data on
dedicated AI clusters.
https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm
- **Issue:** None,
- **Dependencies:** OCI Python SDK,
- **Twitter handle:** we announce bigger features on Twitter. If your PR
gets announced, and you'd like a mention, we'll gladly shout you out!
Please make sure your PR is passing linting and testing before
submitting. Run `make format`, `make lint` and `make test` from the root
of the package you've modified to check this locally.
Passed
See contribution guidelines for more information on how to write/run
tests, lint, etc: https://python.langchain.com/docs/contributing/
If you're adding a new integration, please include:
1. a test for the integration, preferably unit tests that do not rely on
network access,
2. an example notebook showing its use. It lives in
`docs/docs/integrations` directory.
we provide unit tests. However, we cannot provide integration tests due
to Oracle policies that prohibit public sharing of api keys.
If no one reviews your PR within a few days, please @-mention one of
@baskaryan, @eyurtsev, @hwchase17.
-->
---------
Co-authored-by: Arthur Cheng <arthur.cheng@oracle.com>
Co-authored-by: Bagatur <baskaryan@gmail.com>
2024-01-25 02:23:50 +00:00
|
|
|
elif name == "OCIGenAI":
|
|
|
|
return _import_oci_gen_ai()
|
2023-12-11 21:53:30 +00:00
|
|
|
elif name == "OctoAIEndpoint":
|
|
|
|
return _import_octoai_endpoint()
|
|
|
|
elif name == "Ollama":
|
|
|
|
return _import_ollama()
|
|
|
|
elif name == "OpaquePrompts":
|
|
|
|
return _import_opaqueprompts()
|
|
|
|
elif name == "AzureOpenAI":
|
|
|
|
return _import_azure_openai()
|
|
|
|
elif name == "OpenAI":
|
|
|
|
return _import_openai()
|
|
|
|
elif name == "OpenAIChat":
|
|
|
|
return _import_openai_chat()
|
|
|
|
elif name == "OpenLLM":
|
|
|
|
return _import_openllm()
|
|
|
|
elif name == "OpenLM":
|
|
|
|
return _import_openlm()
|
|
|
|
elif name == "PaiEasEndpoint":
|
|
|
|
return _import_pai_eas_endpoint()
|
|
|
|
elif name == "Petals":
|
|
|
|
return _import_petals()
|
|
|
|
elif name == "PipelineAI":
|
|
|
|
return _import_pipelineai()
|
|
|
|
elif name == "Predibase":
|
|
|
|
return _import_predibase()
|
|
|
|
elif name == "PredictionGuard":
|
|
|
|
return _import_predictionguard()
|
|
|
|
elif name == "PromptLayerOpenAI":
|
|
|
|
return _import_promptlayer()
|
|
|
|
elif name == "PromptLayerOpenAIChat":
|
|
|
|
return _import_promptlayer_chat()
|
|
|
|
elif name == "Replicate":
|
|
|
|
return _import_replicate()
|
|
|
|
elif name == "RWKV":
|
|
|
|
return _import_rwkv()
|
|
|
|
elif name == "SagemakerEndpoint":
|
|
|
|
return _import_sagemaker_endpoint()
|
|
|
|
elif name == "SelfHostedPipeline":
|
|
|
|
return _import_self_hosted()
|
|
|
|
elif name == "SelfHostedHuggingFaceLLM":
|
|
|
|
return _import_self_hosted_hugging_face()
|
|
|
|
elif name == "StochasticAI":
|
|
|
|
return _import_stochasticai()
|
|
|
|
elif name == "Nebula":
|
|
|
|
return _import_symblai_nebula()
|
|
|
|
elif name == "TextGen":
|
|
|
|
return _import_textgen()
|
|
|
|
elif name == "TitanTakeoff":
|
|
|
|
return _import_titan_takeoff()
|
|
|
|
elif name == "TitanTakeoffPro":
|
|
|
|
return _import_titan_takeoff_pro()
|
|
|
|
elif name == "Together":
|
|
|
|
return _import_together()
|
|
|
|
elif name == "Tongyi":
|
|
|
|
return _import_tongyi()
|
|
|
|
elif name == "VertexAI":
|
|
|
|
return _import_vertex()
|
|
|
|
elif name == "VertexAIModelGarden":
|
|
|
|
return _import_vertex_model_garden()
|
|
|
|
elif name == "VLLM":
|
|
|
|
return _import_vllm()
|
|
|
|
elif name == "VLLMOpenAI":
|
|
|
|
return _import_vllm_openai()
|
|
|
|
elif name == "WatsonxLLM":
|
|
|
|
return _import_watsonxllm()
|
|
|
|
elif name == "Writer":
|
|
|
|
return _import_writer()
|
|
|
|
elif name == "Xinference":
|
|
|
|
return _import_xinference()
|
|
|
|
elif name == "YandexGPT":
|
|
|
|
return _import_yandex_gpt()
|
2024-02-14 19:46:20 +00:00
|
|
|
elif name == "Yuan2":
|
|
|
|
return _import_yuan2()
|
2023-12-11 21:53:30 +00:00
|
|
|
elif name == "VolcEngineMaasLLM":
|
|
|
|
return _import_volcengine_maas()
|
|
|
|
elif name == "type_to_cls_dict":
|
|
|
|
# for backwards compatibility
|
|
|
|
type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
|
|
|
|
k: v() for k, v in get_type_to_cls_dict().items()
|
|
|
|
}
|
|
|
|
return type_to_cls_dict
|
2024-02-20 19:23:47 +00:00
|
|
|
elif name == "SparkLLM":
|
|
|
|
return _import_sparkllm()
|
2023-12-11 21:53:30 +00:00
|
|
|
else:
|
|
|
|
raise AttributeError(f"Could not find: {name}")
|
|
|
|
|
|
|
|
|
|
|
|
# Public names re-exported lazily by this module's __getattr__.
# NOTE: order is preserved from the original list (it is not fully sorted);
# do not reorder without checking downstream tooling that diffs this list.
__all__ = [
    "AI21",
    "AlephAlpha",
    "AmazonAPIGateway",
    "Anthropic",
    "Anyscale",
    "Aphrodite",
    "Arcee",
    "Aviary",
    "AzureMLOnlineEndpoint",
    "AzureOpenAI",
    "Banana",
    "Baseten",
    "Beam",
    "Bedrock",
    "CTransformers",
    "CTranslate2",
    "CerebriumAI",
    "ChatGLM",
    "Clarifai",
    "Cohere",
    "Databricks",
    "DeepInfra",
    "DeepSparse",
    "EdenAI",
    "FakeListLLM",
    "Fireworks",
    "ForefrontAI",
    "GigaChat",
    "GPT4All",
    "GooglePalm",
    "GooseAI",
    "GradientLLM",
    "HuggingFaceEndpoint",
    "HuggingFaceHub",
    "HuggingFacePipeline",
    "HuggingFaceTextGenInference",
    "HumanInputLLM",
    "KoboldApiLLM",
    "Konko",
    "LlamaCpp",
    "TextGen",
    "ManifestWrapper",
    "Minimax",
    "MlflowAIGateway",
    "Modal",
    "MosaicML",
    "Nebula",
    "NIBittensorLLM",
    "NLPCloud",
    "OCIModelDeploymentTGI",
    "OCIModelDeploymentVLLM",
    "OCIGenAI",
    "Ollama",
    "OpenAI",
    "OpenAIChat",
    "OpenLLM",
    "OpenLM",
    "PaiEasEndpoint",
    "Petals",
    "PipelineAI",
    "Predibase",
    "PredictionGuard",
    "PromptLayerOpenAI",
    "PromptLayerOpenAIChat",
    "OpaquePrompts",
    "RWKV",
    "Replicate",
    "SagemakerEndpoint",
    "SelfHostedHuggingFaceLLM",
    "SelfHostedPipeline",
    "StochasticAI",
    "TitanTakeoff",
    "TitanTakeoffPro",
    "Tongyi",
    "VertexAI",
    "VertexAIModelGarden",
    "VLLM",
    "VLLMOpenAI",
    "WatsonxLLM",
    "Writer",
    "OctoAIEndpoint",
    "Xinference",
    "JavelinAIGateway",
    "QianfanLLMEndpoint",
    "YandexGPT",
    "Yuan2",
    "VolcEngineMaasLLM",
    "SparkLLM",
]

def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
    """Return a mapping from serialized LLM type name to a lazy importer.

    Each value is a zero-argument callable that performs the (possibly
    expensive) ``langchain_community`` import on demand and returns the
    LLM class, so that merely building this dict imports nothing.

    Returns:
        Dict mapping the provider key used in serialized configs
        (e.g. ``"openai"``, ``"anthropic"``) to its ``_import_*`` factory.
    """
    return {
        "ai21": _import_ai21,
        "aleph_alpha": _import_aleph_alpha,
        "amazon_api_gateway": _import_amazon_api_gateway,
        "amazon_bedrock": _import_bedrock,
        "anthropic": _import_anthropic,
        "anyscale": _import_anyscale,
        "arcee": _import_arcee,
        "aviary": _import_aviary,
        "azure": _import_azure_openai,
        "azureml_endpoint": _import_azureml_endpoint,
        "bananadev": _import_bananadev,
        "baseten": _import_baseten,
        "beam": _import_beam,
        "cerebriumai": _import_cerebriumai,
        "chat_glm": _import_chatglm,
        "clarifai": _import_clarifai,
        "cohere": _import_cohere,
        "ctransformers": _import_ctransformers,
        "ctranslate2": _import_ctranslate2,
        "databricks": _import_databricks,
        "databricks-chat": _import_databricks_chat,
        "deepinfra": _import_deepinfra,
        "deepsparse": _import_deepsparse,
        "edenai": _import_edenai,
        "fake-list": _import_fake,
        "forefrontai": _import_forefrontai,
        "giga-chat-model": _import_gigachat,
        "google_palm": _import_google_palm,
        "gooseai": _import_gooseai,
        "gradient": _import_gradient_ai,
        "gpt4all": _import_gpt4all,
        "huggingface_endpoint": _import_huggingface_endpoint,
        "huggingface_hub": _import_huggingface_hub,
        "huggingface_pipeline": _import_huggingface_pipeline,
        "huggingface_textgen_inference": _import_huggingface_text_gen_inference,
        "human-input": _import_human,
        "koboldai": _import_koboldai,
        "konko": _import_konko,
        "llamacpp": _import_llamacpp,
        "textgen": _import_textgen,
        "minimax": _import_minimax,
        "mlflow": _import_mlflow,
        "mlflow-chat": _import_mlflow_chat,
        "mlflow-ai-gateway": _import_mlflow_ai_gateway,
        "modal": _import_modal,
        "mosaic": _import_mosaicml,
        "nebula": _import_symblai_nebula,
        "nibittensor": _import_bittensor,
        "nlpcloud": _import_nlpcloud,
        "oci_model_deployment_tgi_endpoint": _import_oci_md_tgi,
        "oci_model_deployment_vllm_endpoint": _import_oci_md_vllm,
        "oci_generative_ai": _import_oci_gen_ai,
        "ollama": _import_ollama,
        "openai": _import_openai,
        "openlm": _import_openlm,
        "pai_eas_endpoint": _import_pai_eas_endpoint,
        "petals": _import_petals,
        "pipelineai": _import_pipelineai,
        "predibase": _import_predibase,
        "opaqueprompts": _import_opaqueprompts,
        "replicate": _import_replicate,
        "rwkv": _import_rwkv,
        "sagemaker_endpoint": _import_sagemaker_endpoint,
        "self_hosted": _import_self_hosted,
        "self_hosted_hugging_face": _import_self_hosted_hugging_face,
        "stochasticai": _import_stochasticai,
        "together": _import_together,
        "tongyi": _import_tongyi,
        "titan_takeoff": _import_titan_takeoff,
        "titan_takeoff_pro": _import_titan_takeoff_pro,
        "vertexai": _import_vertex,
        "vertexai_model_garden": _import_vertex_model_garden,
        "openllm": _import_openllm,
        # openllm_client deliberately shares the openllm factory.
        "openllm_client": _import_openllm,
        "vllm": _import_vllm,
        "vllm_openai": _import_vllm_openai,
        "watsonxllm": _import_watsonxllm,
        "writer": _import_writer,
        "xinference": _import_xinference,
        "javelin-ai-gateway": _import_javelin_ai_gateway,
        "qianfan_endpoint": _import_baidu_qianfan_endpoint,
        "yandex_gpt": _import_yandex_gpt,
        "yuan2": _import_yuan2,
        # NOTE: these two keys use the class name rather than a snake_case
        # provider key, unlike the rest of the table — kept for compatibility.
        "VolcEngineMaasLLM": _import_volcengine_maas,
        "SparkLLM": _import_sparkllm,
    }