diff --git a/libs/langchain/langchain/agents/openai_assistant/base.py b/libs/langchain/langchain/agents/openai_assistant/base.py
index 7939542858..fa827adca5 100644
--- a/libs/langchain/langchain/agents/openai_assistant/base.py
+++ b/libs/langchain/langchain/agents/openai_assistant/base.py
@@ -28,6 +28,10 @@ class OpenAIAssistantFinish(AgentFinish):
     run_id: str
     thread_id: str
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
 
 class OpenAIAssistantAction(AgentAction):
     """AgentAction with info needed to submit custom tool output to existing run."""
@@ -36,6 +40,10 @@ class OpenAIAssistantAction(AgentAction):
     run_id: str
     thread_id: str
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
 
 def _get_openai_client() -> openai.OpenAI:
     try:
diff --git a/libs/langchain/langchain/agents/schema.py b/libs/langchain/langchain/agents/schema.py
index 6c16cad9e6..e0c00fb95e 100644
--- a/libs/langchain/langchain/agents/schema.py
+++ b/libs/langchain/langchain/agents/schema.py
@@ -7,6 +7,10 @@ from langchain_core.prompts.chat import ChatPromptTemplate
 class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
     """Chat prompt template for the agent scratchpad."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     def _construct_agent_scratchpad(
         self, intermediate_steps: List[Tuple[AgentAction, str]]
     ) -> str:
diff --git a/libs/langchain/langchain/chains/api/openapi/requests_chain.py b/libs/langchain/langchain/chains/api/openapi/requests_chain.py
index 50179cc902..cca19165ee 100644
--- a/libs/langchain/langchain/chains/api/openapi/requests_chain.py
+++ b/libs/langchain/langchain/chains/api/openapi/requests_chain.py
@@ -40,6 +40,10 @@ class APIRequesterOutputParser(BaseOutputParser):
 class APIRequesterChain(LLMChain):
     """Get the request parser."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @classmethod
     def from_llm_and_typescript(
         cls,
diff --git a/libs/langchain/langchain/chains/api/openapi/response_chain.py b/libs/langchain/langchain/chains/api/openapi/response_chain.py
index 1c6156091f..06e7686b4a 100644
--- a/libs/langchain/langchain/chains/api/openapi/response_chain.py
+++ b/libs/langchain/langchain/chains/api/openapi/response_chain.py
@@ -40,6 +40,10 @@ class APIResponderOutputParser(BaseOutputParser):
 class APIResponderChain(LLMChain):
     """Get the response parser."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @classmethod
     def from_llm(
         cls, llm: BaseLanguageModel, verbose: bool = True, **kwargs: Any
diff --git a/libs/langchain/langchain/chains/conversation/base.py b/libs/langchain/langchain/chains/conversation/base.py
index d05e7920b5..f0c4221734 100644
--- a/libs/langchain/langchain/chains/conversation/base.py
+++ b/libs/langchain/langchain/chains/conversation/base.py
@@ -36,6 +36,10 @@ class ConversationChain(LLMChain):
         extra = Extra.forbid
         arbitrary_types_allowed = True
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def input_keys(self) -> List[str]:
         """Use this since so some prompt vars come from history."""
diff --git a/libs/langchain/langchain/chains/flare/base.py b/libs/langchain/langchain/chains/flare/base.py
index 638212d54e..de673f6cb8 100644
--- a/libs/langchain/langchain/chains/flare/base.py
+++ b/libs/langchain/langchain/chains/flare/base.py
@@ -29,6 +29,10 @@ class _ResponseChain(LLMChain):
 
     prompt: BasePromptTemplate = PROMPT
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def input_keys(self) -> List[str]:
         return self.prompt.input_variables
@@ -77,6 +81,10 @@ class QuestionGeneratorChain(LLMChain):
     prompt: BasePromptTemplate = QUESTION_GENERATOR_PROMPT
     """Prompt template for the chain."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def input_keys(self) -> List[str]:
         """Input keys for the chain."""
diff --git a/libs/langchain/langchain/chat_models/anyscale.py b/libs/langchain/langchain/chat_models/anyscale.py
index 27efc4a1ab..ff579d27f5 100644
--- a/libs/langchain/langchain/chat_models/anyscale.py
+++ b/libs/langchain/langchain/chat_models/anyscale.py
@@ -57,6 +57,10 @@ class ChatAnyscale(ChatOpenAI):
     def lc_secrets(self) -> Dict[str, str]:
         return {"anyscale_api_key": "ANYSCALE_API_KEY"}
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     anyscale_api_key: SecretStr
     """AnyScale Endpoints API keys."""
     model_name: str = Field(default=DEFAULT_MODEL, alias="model")
diff --git a/libs/langchain/langchain/chat_models/everlyai.py b/libs/langchain/langchain/chat_models/everlyai.py
index 95bddaba5b..046f00f6e7 100644
--- a/libs/langchain/langchain/chat_models/everlyai.py
+++ b/libs/langchain/langchain/chat_models/everlyai.py
@@ -51,6 +51,10 @@ class ChatEverlyAI(ChatOpenAI):
     def lc_secrets(self) -> Dict[str, str]:
         return {"everlyai_api_key": "EVERLYAI_API_KEY"}
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     everlyai_api_key: Optional[str] = None
     """EverlyAI Endpoints API keys."""
     model_name: str = Field(default=DEFAULT_MODEL, alias="model")
diff --git a/libs/langchain/langchain/chat_models/jinachat.py b/libs/langchain/langchain/chat_models/jinachat.py
index bdb77e25b2..abaf57b5ff 100644
--- a/libs/langchain/langchain/chat_models/jinachat.py
+++ b/libs/langchain/langchain/chat_models/jinachat.py
@@ -165,7 +165,7 @@ class JinaChat(BaseChatModel):
     @classmethod
     def is_lc_serializable(cls) -> bool:
         """Return whether this model can be serialized by Langchain."""
-        return True
+        return False
 
     client: Any  #: :meta private:
     temperature: float = 0.7
diff --git a/libs/langchain/langchain/chat_models/konko.py b/libs/langchain/langchain/chat_models/konko.py
index eaafb10810..8065c3cb10 100644
--- a/libs/langchain/langchain/chat_models/konko.py
+++ b/libs/langchain/langchain/chat_models/konko.py
@@ -57,7 +57,7 @@ class ChatKonko(BaseChatModel):
     @classmethod
     def is_lc_serializable(cls) -> bool:
         """Return whether this model can be serialized by Langchain."""
-        return True
+        return False
 
     client: Any = None  #: :meta private:
     model: str = Field(default=DEFAULT_MODEL, alias="model")
diff --git a/libs/langchain/langchain/chat_models/ollama.py b/libs/langchain/langchain/chat_models/ollama.py
index b356733566..819f657fb9 100644
--- a/libs/langchain/langchain/chat_models/ollama.py
+++ b/libs/langchain/langchain/chat_models/ollama.py
@@ -50,7 +50,7 @@ class ChatOllama(BaseChatModel, _OllamaCommon):
     @classmethod
     def is_lc_serializable(cls) -> bool:
         """Return whether this model can be serialized by Langchain."""
-        return True
+        return False
 
     def _format_message_as_text(self, message: BaseMessage) -> str:
         if isinstance(message, ChatMessage):
diff --git a/libs/langchain/langchain/chat_models/promptlayer_openai.py b/libs/langchain/langchain/chat_models/promptlayer_openai.py
index e7146b6eca..bab7a1ffdb 100644
--- a/libs/langchain/langchain/chat_models/promptlayer_openai.py
+++ b/libs/langchain/langchain/chat_models/promptlayer_openai.py
@@ -39,6 +39,10 @@ class PromptLayerChatOpenAI(ChatOpenAI):
     pl_tags: Optional[List[str]]
     return_pl_id: Optional[bool] = False
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     def _generate(
         self,
         messages: List[BaseMessage],
diff --git a/libs/langchain/langchain/chat_models/volcengine_maas.py b/libs/langchain/langchain/chat_models/volcengine_maas.py
index 5ce8378d94..2af4754179 100644
--- a/libs/langchain/langchain/chat_models/volcengine_maas.py
+++ b/libs/langchain/langchain/chat_models/volcengine_maas.py
@@ -76,7 +76,7 @@ class VolcEngineMaasChat(BaseChatModel, VolcEngineMaasBase):
     @classmethod
     def is_lc_serializable(cls) -> bool:
         """Return whether this model can be serialized by Langchain."""
-        return True
+        return False
 
     @property
     def _identifying_params(self) -> Dict[str, Any]:
diff --git a/libs/langchain/langchain/document_transformers/embeddings_redundant_filter.py b/libs/langchain/langchain/document_transformers/embeddings_redundant_filter.py
index d7772a3092..276ac2061f 100644
--- a/libs/langchain/langchain/document_transformers/embeddings_redundant_filter.py
+++ b/libs/langchain/langchain/document_transformers/embeddings_redundant_filter.py
@@ -15,6 +15,10 @@ class _DocumentWithState(Document):
     state: dict = Field(default_factory=dict)
     """State associated with the document."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     def to_document(self) -> Document:
         """Convert the DocumentWithState to a Document."""
         return Document(page_content=self.page_content, metadata=self.metadata)
diff --git a/libs/langchain/langchain/evaluation/comparison/eval_chain.py b/libs/langchain/langchain/evaluation/comparison/eval_chain.py
index 65f8fc939a..aaf97b2217 100644
--- a/libs/langchain/langchain/evaluation/comparison/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/comparison/eval_chain.py
@@ -188,6 +188,10 @@ class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
         default_factory=PairwiseStringResultOutputParser
     )
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     class Config:
         """Configuration for the PairwiseStringEvalChain."""
 
diff --git a/libs/langchain/langchain/evaluation/criteria/eval_chain.py b/libs/langchain/langchain/evaluation/criteria/eval_chain.py
index ac3bb1211c..ab167477c4 100644
--- a/libs/langchain/langchain/evaluation/criteria/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/criteria/eval_chain.py
@@ -232,6 +232,10 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
     """The name of the criterion being evaluated."""
     output_key: str = "results"  #: :meta private:
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     class Config:
         """Configuration for the QAEvalChain."""
 
@@ -508,6 +512,10 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
 class LabeledCriteriaEvalChain(CriteriaEvalChain):
     """Criteria evaluation chain that requires references."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def requires_reference(self) -> bool:
         """Whether the evaluation requires a reference text."""
diff --git a/libs/langchain/langchain/evaluation/qa/eval_chain.py b/libs/langchain/langchain/evaluation/qa/eval_chain.py
index ae63aa343f..9a465d0255 100644
--- a/libs/langchain/langchain/evaluation/qa/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/qa/eval_chain.py
@@ -77,6 +77,10 @@ class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
 
         extra = Extra.ignore
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def evaluation_name(self) -> str:
         return "correctness"
@@ -204,6 +208,10 @@ class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
 class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
     """LLM Chain for evaluating QA w/o GT based on context"""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def requires_reference(self) -> bool:
         """Whether the chain requires a reference string."""
@@ -328,6 +336,10 @@ class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
 class CotQAEvalChain(ContextQAEvalChain):
     """LLM Chain for evaluating QA using chain of thought reasoning."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def evaluation_name(self) -> str:
         return "COT Contextual Accuracy"
diff --git a/libs/langchain/langchain/evaluation/qa/generate_chain.py b/libs/langchain/langchain/evaluation/qa/generate_chain.py
index 46854fae00..219978f739 100644
--- a/libs/langchain/langchain/evaluation/qa/generate_chain.py
+++ b/libs/langchain/langchain/evaluation/qa/generate_chain.py
@@ -22,6 +22,10 @@ class QAGenerateChain(LLMChain):
     output_parser: BaseLLMOutputParser = Field(default=_QA_OUTPUT_PARSER)
     output_key: str = "qa_pairs"
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @classmethod
     def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain:
         """Load QA Generate Chain from LLM."""
diff --git a/libs/langchain/langchain/evaluation/scoring/eval_chain.py b/libs/langchain/langchain/evaluation/scoring/eval_chain.py
index 3f7449bb0d..dab0d4842f 100644
--- a/libs/langchain/langchain/evaluation/scoring/eval_chain.py
+++ b/libs/langchain/langchain/evaluation/scoring/eval_chain.py
@@ -185,6 +185,10 @@ class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
 
         extra = Extra.ignore
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def requires_reference(self) -> bool:
         """Return whether the chain requires a reference.
diff --git a/libs/langchain/langchain/indexes/_api.py b/libs/langchain/langchain/indexes/_api.py
index 5637a12511..29bf6dfb5e 100644
--- a/libs/langchain/langchain/indexes/_api.py
+++ b/libs/langchain/langchain/indexes/_api.py
@@ -58,6 +58,10 @@ class _HashedDocument(Document):
     metadata_hash: str
     """The hash of the document metadata."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @root_validator(pre=True)
     def calculate_hashes(cls, values: Dict[str, Any]) -> Dict[str, Any]:
         """Root validator to calculate content and metadata hash."""
diff --git a/libs/langchain/langchain/llms/anyscale.py b/libs/langchain/langchain/llms/anyscale.py
index e2f26463ec..a58bbb183d 100644
--- a/libs/langchain/langchain/llms/anyscale.py
+++ b/libs/langchain/langchain/llms/anyscale.py
@@ -90,6 +90,10 @@ class Anyscale(BaseOpenAI):
 
     prefix_messages: List = Field(default_factory=list)
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
diff --git a/libs/langchain/langchain/llms/openlm.py b/libs/langchain/langchain/llms/openlm.py
index fcbf3af584..13231e8564 100644
--- a/libs/langchain/langchain/llms/openlm.py
+++ b/libs/langchain/langchain/llms/openlm.py
@@ -8,6 +8,10 @@ from langchain.llms.openai import BaseOpenAI
 class OpenLM(BaseOpenAI):
     """OpenLM models."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def _invocation_params(self) -> Dict[str, Any]:
         return {**{"model": self.model_name}, **super()._invocation_params}
diff --git a/libs/langchain/langchain/llms/promptlayer_openai.py b/libs/langchain/langchain/llms/promptlayer_openai.py
index b8037cdbfb..025347aabe 100644
--- a/libs/langchain/langchain/llms/promptlayer_openai.py
+++ b/libs/langchain/langchain/llms/promptlayer_openai.py
@@ -37,6 +37,10 @@ class PromptLayerOpenAI(OpenAI):
     pl_tags: Optional[List[str]]
     return_pl_id: Optional[bool] = False
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     def _generate(
         self,
         prompts: List[str],
diff --git a/libs/langchain/langchain/llms/tongyi.py b/libs/langchain/langchain/llms/tongyi.py
index 1ecd238cd7..3606e6289f 100644
--- a/libs/langchain/langchain/llms/tongyi.py
+++ b/libs/langchain/langchain/llms/tongyi.py
@@ -106,7 +106,7 @@ class Tongyi(LLM):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        return True
+        return False
 
     client: Any  #: :meta private:
     model_name: str = "qwen-plus-v1"
diff --git a/libs/langchain/langchain/llms/vllm.py b/libs/langchain/langchain/llms/vllm.py
index 4ccb5a743b..0b86ac6947 100644
--- a/libs/langchain/langchain/llms/vllm.py
+++ b/libs/langchain/langchain/llms/vllm.py
@@ -147,6 +147,10 @@ class VLLM(BaseLLM):
 class VLLMOpenAI(BaseOpenAI):
     """vLLM OpenAI-compatible API client"""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def _invocation_params(self) -> Dict[str, Any]:
         """Get the parameters used to invoke the model."""
diff --git a/libs/langchain/langchain/llms/watsonxllm.py b/libs/langchain/langchain/llms/watsonxllm.py
index 4000543781..a316da2fb5 100644
--- a/libs/langchain/langchain/llms/watsonxllm.py
+++ b/libs/langchain/langchain/llms/watsonxllm.py
@@ -97,7 +97,7 @@ class WatsonxLLM(BaseLLM):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        return True
+        return False
 
     @property
     def lc_secrets(self) -> Dict[str, str]: