revoke serialization (#14456)

pull/14460/head
Harrison Chase 6 months ago committed by GitHub
parent ff0d5514c1
commit 02ee0073cf
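This commit opts the classes touched below out of LangChain serialization: each either gains an `is_lc_serializable` override that returns False, or has an existing override flipped from True to False.

For context, a minimal sketch of what the flag controls (not part of this diff; it assumes the `langchain_core.load` helpers, and `Opaque` is a hypothetical class used only for illustration):

    from langchain_core.load import dumpd
    from langchain_core.load.serializable import Serializable

    class Opaque(Serializable):
        """Hypothetical class that opts out of serialization."""

        @classmethod
        def is_lc_serializable(cls) -> bool:
            # Serializable.to_json() consults this hook: when it returns
            # False, dumpd()/dumps() emit a "not_implemented" placeholder
            # instead of a payload that loads() could reconstruct.
            return False

    print(dumpd(Opaque()))
    # expected shape: {"lc": 1, "type": "not_implemented", "id": [...], "repr": "..."}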

@@ -28,6 +28,10 @@ class OpenAIAssistantFinish(AgentFinish):
     run_id: str
     thread_id: str
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
 
 class OpenAIAssistantAction(AgentAction):
     """AgentAction with info needed to submit custom tool output to existing run."""
@@ -36,6 +40,10 @@ class OpenAIAssistantAction(AgentAction):
     run_id: str
     thread_id: str
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
 
 def _get_openai_client() -> openai.OpenAI:
     try:

@@ -7,6 +7,10 @@ from langchain_core.prompts.chat import ChatPromptTemplate
 class AgentScratchPadChatPromptTemplate(ChatPromptTemplate):
     """Chat prompt template for the agent scratchpad."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     def _construct_agent_scratchpad(
         self, intermediate_steps: List[Tuple[AgentAction, str]]
     ) -> str:

@@ -40,6 +40,10 @@ class APIRequesterOutputParser(BaseOutputParser):
 class APIRequesterChain(LLMChain):
     """Get the request parser."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @classmethod
     def from_llm_and_typescript(
         cls,

@@ -40,6 +40,10 @@ class APIResponderOutputParser(BaseOutputParser):
 class APIResponderChain(LLMChain):
     """Get the response parser."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @classmethod
     def from_llm(
         cls, llm: BaseLanguageModel, verbose: bool = True, **kwargs: Any

@@ -36,6 +36,10 @@ class ConversationChain(LLMChain):
         extra = Extra.forbid
         arbitrary_types_allowed = True
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def input_keys(self) -> List[str]:
         """Use this since so some prompt vars come from history."""

@@ -29,6 +29,10 @@ class _ResponseChain(LLMChain):
 
     prompt: BasePromptTemplate = PROMPT
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def input_keys(self) -> List[str]:
         return self.prompt.input_variables
@@ -77,6 +81,10 @@ class QuestionGeneratorChain(LLMChain):
     prompt: BasePromptTemplate = QUESTION_GENERATOR_PROMPT
     """Prompt template for the chain."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def input_keys(self) -> List[str]:
         """Input keys for the chain."""

@@ -57,6 +57,10 @@ class ChatAnyscale(ChatOpenAI):
     def lc_secrets(self) -> Dict[str, str]:
         return {"anyscale_api_key": "ANYSCALE_API_KEY"}
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     anyscale_api_key: SecretStr
     """AnyScale Endpoints API keys."""
     model_name: str = Field(default=DEFAULT_MODEL, alias="model")

@@ -51,6 +51,10 @@ class ChatEverlyAI(ChatOpenAI):
     def lc_secrets(self) -> Dict[str, str]:
         return {"everlyai_api_key": "EVERLYAI_API_KEY"}
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     everlyai_api_key: Optional[str] = None
     """EverlyAI Endpoints API keys."""
     model_name: str = Field(default=DEFAULT_MODEL, alias="model")

@@ -165,7 +165,7 @@ class JinaChat(BaseChatModel):
     @classmethod
     def is_lc_serializable(cls) -> bool:
         """Return whether this model can be serialized by Langchain."""
-        return True
+        return False
 
     client: Any  #: :meta private:
     temperature: float = 0.7

@@ -57,7 +57,7 @@ class ChatKonko(BaseChatModel):
     @classmethod
     def is_lc_serializable(cls) -> bool:
         """Return whether this model can be serialized by Langchain."""
-        return True
+        return False
 
     client: Any = None  #: :meta private:
     model: str = Field(default=DEFAULT_MODEL, alias="model")

@@ -50,7 +50,7 @@ class ChatOllama(BaseChatModel, _OllamaCommon):
     @classmethod
     def is_lc_serializable(cls) -> bool:
         """Return whether this model can be serialized by Langchain."""
-        return True
+        return False
 
     def _format_message_as_text(self, message: BaseMessage) -> str:
         if isinstance(message, ChatMessage):

@@ -39,6 +39,10 @@ class PromptLayerChatOpenAI(ChatOpenAI):
     pl_tags: Optional[List[str]]
     return_pl_id: Optional[bool] = False
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     def _generate(
         self,
         messages: List[BaseMessage],

@@ -76,7 +76,7 @@ class VolcEngineMaasChat(BaseChatModel, VolcEngineMaasBase):
     @classmethod
     def is_lc_serializable(cls) -> bool:
         """Return whether this model can be serialized by Langchain."""
-        return True
+        return False
 
     @property
     def _identifying_params(self) -> Dict[str, Any]:

@@ -15,6 +15,10 @@ class _DocumentWithState(Document):
     state: dict = Field(default_factory=dict)
     """State associated with the document."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     def to_document(self) -> Document:
         """Convert the DocumentWithState to a Document."""
         return Document(page_content=self.page_content, metadata=self.metadata)

@@ -188,6 +188,10 @@ class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
         default_factory=PairwiseStringResultOutputParser
     )
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     class Config:
         """Configuration for the PairwiseStringEvalChain."""

@@ -232,6 +232,10 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
     """The name of the criterion being evaluated."""
     output_key: str = "results"  #: :meta private:
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     class Config:
         """Configuration for the QAEvalChain."""
@@ -508,6 +512,10 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
 class LabeledCriteriaEvalChain(CriteriaEvalChain):
     """Criteria evaluation chain that requires references."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def requires_reference(self) -> bool:
         """Whether the evaluation requires a reference text."""

@@ -77,6 +77,10 @@ class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
 
         extra = Extra.ignore
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def evaluation_name(self) -> str:
         return "correctness"
@@ -204,6 +208,10 @@ class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
 class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
     """LLM Chain for evaluating QA w/o GT based on context"""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def requires_reference(self) -> bool:
         """Whether the chain requires a reference string."""
@@ -328,6 +336,10 @@ class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
 class CotQAEvalChain(ContextQAEvalChain):
     """LLM Chain for evaluating QA using chain of thought reasoning."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def evaluation_name(self) -> str:
         return "COT Contextual Accuracy"

@@ -22,6 +22,10 @@ class QAGenerateChain(LLMChain):
     output_parser: BaseLLMOutputParser = Field(default=_QA_OUTPUT_PARSER)
     output_key: str = "qa_pairs"
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @classmethod
     def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain:
         """Load QA Generate Chain from LLM."""

@@ -185,6 +185,10 @@ class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
 
         extra = Extra.ignore
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def requires_reference(self) -> bool:
         """Return whether the chain requires a reference.

@@ -58,6 +58,10 @@ class _HashedDocument(Document):
     metadata_hash: str
     """The hash of the document metadata."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @root_validator(pre=True)
     def calculate_hashes(cls, values: Dict[str, Any]) -> Dict[str, Any]:
         """Root validator to calculate content and metadata hash."""

@@ -90,6 +90,10 @@ class Anyscale(BaseOpenAI):
 
     prefix_messages: List = Field(default_factory=list)
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @root_validator()
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""

@@ -8,6 +8,10 @@ from langchain.llms.openai import BaseOpenAI
 class OpenLM(BaseOpenAI):
     """OpenLM models."""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def _invocation_params(self) -> Dict[str, Any]:
         return {**{"model": self.model_name}, **super()._invocation_params}

@@ -37,6 +37,10 @@ class PromptLayerOpenAI(OpenAI):
     pl_tags: Optional[List[str]]
     return_pl_id: Optional[bool] = False
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     def _generate(
         self,
         prompts: List[str],

@@ -106,7 +106,7 @@ class Tongyi(LLM):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        return True
+        return False
 
     client: Any  #: :meta private:
     model_name: str = "qwen-plus-v1"

@@ -147,6 +147,10 @@ class VLLM(BaseLLM):
 class VLLMOpenAI(BaseOpenAI):
     """vLLM OpenAI-compatible API client"""
 
+    @classmethod
+    def is_lc_serializable(cls) -> bool:
+        return False
+
     @property
     def _invocation_params(self) -> Dict[str, Any]:
         """Get the parameters used to invoke the model."""

@@ -97,7 +97,7 @@ class WatsonxLLM(BaseLLM):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        return True
+        return False
 
     @property
     def lc_secrets(self) -> Dict[str, str]:
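
A quick way to spot-check the new behavior (a sketch, assuming a build that includes this commit; any class touched above could stand in for ChatOllama):

    from langchain.chat_models import ChatOllama
    from langchain_core.load import dumps

    # Before this change ChatOllama claimed to be serializable; now
    # dumps() yields a "not_implemented" placeholder instead of a
    # payload that loads() could reconstruct.
    print(dumps(ChatOllama(model="llama2")))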
