Removed duplicate BaseModel dependencies (#2471)

Removed duplicate `BaseModel` base classes from class inheritance lists.
Also sorted imports with `isort`.
leo-gan 2023-04-06 12:45:16 -07:00 committed by GitHub
parent b6a101d121
commit fd69cc7e42
99 changed files with 187 additions and 257 deletions
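
The pattern removed throughout this commit is redundant multiple inheritance: each of these classes already inherits from a pydantic model (e.g. `Chain`, `LLM`, `BaseChatMemory`), so listing `BaseModel` again in the bases changes neither the MRO nor validation. A minimal sketch of the equivalence, using stand-in classes rather than the real langchain ones:

from pydantic import BaseModel


class Chain(BaseModel):  # stand-in: the real Chain also subclasses BaseModel
    verbose: bool = False


class WithDuplicate(Chain, BaseModel):  # the pattern this commit removes
    name: str


class WithoutDuplicate(Chain):  # the replacement
    name: str


# Both classes linearize to the same bases and validate fields identically.
assert WithDuplicate.__mro__[1:] == WithoutDuplicate.__mro__[1:]
assert WithoutDuplicate(name="x", verbose=True).verbose is True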

@@ -548,7 +548,7 @@ class Agent(BaseSingleActionAgent):
         }


-class AgentExecutor(Chain, BaseModel):
+class AgentExecutor(Chain):
     """Consists of an agent using tools."""

     agent: Union[BaseSingleActionAgent, BaseMultiActionAgent]

@@ -19,9 +19,7 @@ from langchain.agents.agent_toolkits.openapi.planner_prompt import (
     REQUESTS_GET_TOOL_DESCRIPTION,
     REQUESTS_POST_TOOL_DESCRIPTION,
 )
-from langchain.agents.agent_toolkits.openapi.spec import (
-    ReducedOpenAPISpec,
-)
+from langchain.agents.agent_toolkits.openapi.spec import ReducedOpenAPISpec
 from langchain.agents.mrkl.base import ZeroShotAgent
 from langchain.agents.tools import Tool
 from langchain.chains.llm import LLMChain

@@ -2,7 +2,6 @@
 from langchain.prompts.prompt import PromptTemplate
-

 API_PLANNER_PROMPT = """You are a planner that plans a sequence of API calls to assist with user queries against an API.

 You should:

@@ -1,11 +1,11 @@
 # flake8: noqa
 """Load tools."""
-from typing import Any, List, Optional
 import warnings
+from typing import Any, List, Optional

 from langchain.agents.tools import Tool
 from langchain.callbacks.base import BaseCallbackManager
-from langchain.chains.api import news_docs, open_meteo_docs, tmdb_docs, podcast_docs
+from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs
 from langchain.chains.api.base import APIChain
 from langchain.chains.llm_math.base import LLMMathChain
 from langchain.chains.pal.base import PALChain
@@ -14,16 +14,16 @@ from langchain.requests import TextRequestsWrapper
 from langchain.tools.base import BaseTool
 from langchain.tools.bing_search.tool import BingSearchRun
 from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
-from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun
 from langchain.tools.human.tool import HumanInputRun
 from langchain.tools.python.tool import PythonREPLTool
 from langchain.tools.requests.tool import (
-    RequestsGetTool,
-    RequestsPostTool,
-    RequestsPatchTool,
-    RequestsPutTool,
     RequestsDeleteTool,
+    RequestsGetTool,
+    RequestsPatchTool,
+    RequestsPostTool,
+    RequestsPutTool,
 )
+from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun
 from langchain.tools.wikipedia.tool import WikipediaQueryRun
 from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
 from langchain.utilities.apify import ApifyWrapper
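
The reshuffles above reflect isort's default ordering: plain `import x` statements come before `from x import y` within a section, each group and each parenthesized name list is alphabetized, and sections (standard library, then third party) are separated by a blank line. A hedged sketch of the same normalization via isort's public Python API (`isort.code`, isort 5+):

import isort

messy = "from typing import Any, List, Optional\nimport warnings\n"
print(isort.code(messy), end="")
# import warnings
# from typing import Any, List, Optional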

@@ -2,8 +2,6 @@
 import re
 from typing import Any, List, Optional, Sequence, Tuple

-from pydantic import BaseModel
-
 from langchain.agents.agent import Agent, AgentExecutor
 from langchain.agents.agent_types import AgentType
 from langchain.agents.react.textworld_prompt import TEXTWORLD_PROMPT
@@ -16,7 +14,7 @@ from langchain.prompts.base import BasePromptTemplate
 from langchain.tools.base import BaseTool


-class ReActDocstoreAgent(Agent, BaseModel):
+class ReActDocstoreAgent(Agent):
     """Agent for the ReAct chain."""

     @property
@@ -124,7 +122,7 @@ class DocstoreExplorer:
         return self.document.page_content.split("\n\n")


-class ReActTextWorldAgent(ReActDocstoreAgent, BaseModel):
+class ReActTextWorldAgent(ReActDocstoreAgent):
     """Agent for the ReAct TextWorld chain."""

     @classmethod

@@ -77,11 +77,7 @@ class ArizeCallbackHandler(BaseCallbackHandler):

     def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
         """Log data to Arize when an LLM ends."""
-        from arize.utils.types import (
-            Embedding,
-            Environments,
-            ModelTypes,
-        )
+        from arize.utils.types import Embedding, Environments, ModelTypes

         # Record token usage of the LLM
         if response.llm_output is not None:

@@ -3,7 +3,7 @@ from __future__ import annotations

 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel, Field, root_validator
+from pydantic import Field, root_validator

 from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
 from langchain.chains.base import Chain
@@ -13,7 +13,7 @@ from langchain.requests import TextRequestsWrapper
 from langchain.schema import BaseLanguageModel


-class APIChain(Chain, BaseModel):
+class APIChain(Chain):
     """Chain that makes API calls and summarizes the responses to answer a question."""

     api_request_chain: LLMChain

@@ -3,14 +3,14 @@
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Tuple

-from pydantic import BaseModel, Field
+from pydantic import Field

 from langchain.chains.base import Chain
 from langchain.docstore.document import Document
 from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter


-class BaseCombineDocumentsChain(Chain, BaseModel, ABC):
+class BaseCombineDocumentsChain(Chain, ABC):
     """Base interface for chains combining documents."""

     input_key: str = "input_documents"  #: :meta private:
@@ -66,7 +66,7 @@ class BaseCombineDocumentsChain(Chain, BaseModel, ABC):
         return extra_return_dict


-class AnalyzeDocumentChain(Chain, BaseModel):
+class AnalyzeDocumentChain(Chain):
     """Chain that splits documents, then analyzes it in pieces."""

     input_key: str = "input_document"  #: :meta private:

@@ -4,7 +4,7 @@ from __future__ import annotations

 from typing import Any, Callable, Dict, List, Optional, Protocol, Tuple

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
 from langchain.chains.llm import LLMChain
@@ -59,7 +59,7 @@ def _collapse_docs(
     return Document(page_content=result, metadata=combined_metadata)


-class MapReduceDocumentsChain(BaseCombineDocumentsChain, BaseModel):
+class MapReduceDocumentsChain(BaseCombineDocumentsChain):
     """Combining documents by mapping a chain over them, then combining results."""

     llm_chain: LLMChain

@@ -4,7 +4,7 @@ from __future__ import annotations

 from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
 from langchain.chains.llm import LLMChain
@@ -12,7 +12,7 @@ from langchain.docstore.document import Document
 from langchain.output_parsers.regex import RegexParser


-class MapRerankDocumentsChain(BaseCombineDocumentsChain, BaseModel):
+class MapRerankDocumentsChain(BaseCombineDocumentsChain):
     """Combining documents by mapping a chain over them, then reranking results."""

     llm_chain: LLMChain

@@ -4,7 +4,7 @@ from __future__ import annotations

 from typing import Any, Dict, List, Tuple

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
 from langchain.chains.llm import LLMChain
@@ -17,7 +17,7 @@ def _get_default_document_prompt() -> PromptTemplate:
     return PromptTemplate(input_variables=["page_content"], template="{page_content}")


-class RefineDocumentsChain(BaseCombineDocumentsChain, BaseModel):
+class RefineDocumentsChain(BaseCombineDocumentsChain):
     """Combine documents by doing a first pass and then refining on more documents."""

     initial_llm_chain: LLMChain

@@ -2,7 +2,7 @@

 from typing import Any, Dict, List, Optional, Tuple

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
 from langchain.chains.llm import LLMChain
@@ -15,7 +15,7 @@ def _get_default_document_prompt() -> PromptTemplate:
     return PromptTemplate(input_variables=["page_content"], template="{page_content}")


-class StuffDocumentsChain(BaseCombineDocumentsChain, BaseModel):
+class StuffDocumentsChain(BaseCombineDocumentsChain):
     """Chain that combines documents by stuffing into context."""

     llm_chain: LLMChain

@@ -1,5 +1,6 @@
 # flake8: noqa
 from typing import Dict
+
 from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple

 PRINCIPLES: Dict[str, ConstitutionalPrinciple] = {}

@@ -1,7 +1,7 @@
 """Chain that carries on a conversation and calls an LLM."""
 from typing import Dict, List

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.chains.conversation.prompt import PROMPT
 from langchain.chains.llm import LLMChain
@@ -10,7 +10,7 @@ from langchain.prompts.base import BasePromptTemplate
 from langchain.schema import BaseMemory


-class ConversationChain(LLMChain, BaseModel):
+class ConversationChain(LLMChain):
     """Chain to have a conversation and load context from memory.

     Example:

@@ -6,7 +6,7 @@ from abc import abstractmethod
 from pathlib import Path
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.chains.base import Chain
 from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
@@ -28,7 +28,7 @@ def _get_chat_history(chat_history: List[Tuple[str, str]]) -> str:
     return buffer


-class BaseConversationalRetrievalChain(Chain, BaseModel):
+class BaseConversationalRetrievalChain(Chain):
     """Chain for chatting with an index."""

     combine_docs_chain: BaseCombineDocumentsChain
@@ -116,7 +116,7 @@ class BaseConversationalRetrievalChain(Chain, BaseModel):
         super().save(file_path)


-class ConversationalRetrievalChain(BaseConversationalRetrievalChain, BaseModel):
+class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
     """Chain for chatting with an index."""

     retriever: BaseRetriever
@@ -175,7 +175,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain, BaseModel):
         )


-class ChatVectorDBChain(BaseConversationalRetrievalChain, BaseModel):
+class ChatVectorDBChain(BaseConversationalRetrievalChain):
     """Chain for chatting with a vector database."""

     vectorstore: VectorStore = Field(alias="vectorstore")

@@ -7,7 +7,7 @@ from __future__ import annotations
 from typing import Dict, List

 import numpy as np
-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.chains.base import Chain
 from langchain.chains.hyde.prompts import PROMPT_MAP
@@ -16,7 +16,7 @@ from langchain.embeddings.base import Embeddings
 from langchain.llms.base import BaseLLM


-class HypotheticalDocumentEmbedder(Chain, Embeddings, BaseModel):
+class HypotheticalDocumentEmbedder(Chain, Embeddings):
     """Generate hypothetical document for query, and then embed that.

     Based on https://arxiv.org/abs/2212.10496

@@ -3,7 +3,7 @@ from __future__ import annotations

 from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.chains.base import Chain
 from langchain.input import get_colored_text
@@ -12,7 +12,7 @@ from langchain.prompts.prompt import PromptTemplate
 from langchain.schema import BaseLanguageModel, LLMResult, PromptValue


-class LLMChain(Chain, BaseModel):
+class LLMChain(Chain):
     """Chain to run queries against LLMs.

     Example:

@@ -1,7 +1,7 @@
 """Chain that interprets a prompt and executes bash code to perform bash operations."""
 from typing import Dict, List

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -11,7 +11,7 @@ from langchain.schema import BaseLanguageModel
 from langchain.utilities.bash import BashProcess


-class LLMBashChain(Chain, BaseModel):
+class LLMBashChain(Chain):
     """Chain that interprets a prompt and executes bash code to perform bash operations.

     Example:

@@ -3,7 +3,7 @@

 from typing import Dict, List

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -18,7 +18,7 @@ from langchain.llms.base import BaseLLM
 from langchain.prompts import PromptTemplate


-class LLMCheckerChain(Chain, BaseModel):
+class LLMCheckerChain(Chain):
     """Chain for question-answering with self-verification.

     Example:

@@ -1,7 +1,7 @@
 """Chain that interprets a prompt and executes python code to do math."""
 from typing import Dict, List

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -11,7 +11,7 @@ from langchain.python import PythonREPL
 from langchain.schema import BaseLanguageModel


-class LLMMathChain(Chain, BaseModel):
+class LLMMathChain(Chain):
     """Chain that interprets a prompt and executes python code to do math.

     Example:

@@ -3,7 +3,7 @@ from __future__ import annotations

 from typing import Dict, List

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.chains import LLMChain
 from langchain.chains.base import Chain
@@ -14,7 +14,7 @@ DEFAULT_HEADERS = {
 }


-class LLMRequestsChain(Chain, BaseModel):
+class LLMRequestsChain(Chain):
     """Chain that hits a URL and then uses an LLM to parse results."""

     llm_chain: LLMChain

@@ -3,7 +3,7 @@

 from pathlib import Path
 from typing import Dict, List

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -27,7 +27,7 @@ ARE_ALL_TRUE_PROMPT = PromptTemplate.from_file(
 )


-class LLMSummarizationCheckerChain(Chain, BaseModel):
+class LLMSummarizationCheckerChain(Chain):
     """Chain for question-answering with self-verification.

     Example:

@@ -7,7 +7,7 @@ from __future__ import annotations

 from typing import Dict, List

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.chains.base import Chain
 from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
@@ -20,7 +20,7 @@ from langchain.prompts.base import BasePromptTemplate
 from langchain.text_splitter import TextSplitter


-class MapReduceChain(Chain, BaseModel):
+class MapReduceChain(Chain):
     """Map-reduce chain."""

     combine_documents_chain: BaseCombineDocumentsChain

@@ -1,13 +1,13 @@
 """Pass input through a moderation endpoint."""
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel, root_validator
+from pydantic import root_validator

 from langchain.chains.base import Chain
 from langchain.utils import get_from_dict_or_env


-class OpenAIModerationChain(Chain, BaseModel):
+class OpenAIModerationChain(Chain):
     """Pass input through a moderation endpoint.

     To use, you should have the ``openai`` python package installed, and the

@@ -3,7 +3,7 @@ from __future__ import annotations

 from typing import Dict, List

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -12,7 +12,7 @@ from langchain.llms.base import BaseLLM
 from langchain.llms.openai import OpenAI


-class NatBotChain(Chain, BaseModel):
+class NatBotChain(Chain):
     """Implement an LLM driven browser.

     Example:

@@ -6,7 +6,7 @@ from __future__ import annotations

 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -17,7 +17,7 @@ from langchain.python import PythonREPL
 from langchain.schema import BaseLanguageModel


-class PALChain(Chain, BaseModel):
+class PALChain(Chain):
     """Implements Program-Aided Language Models."""

     llm: BaseLanguageModel

@@ -6,7 +6,7 @@ import re
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.chains.base import Chain
 from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
@@ -24,7 +24,7 @@ from langchain.prompts.base import BasePromptTemplate
 from langchain.schema import BaseLanguageModel


-class BaseQAWithSourcesChain(Chain, BaseModel, ABC):
+class BaseQAWithSourcesChain(Chain, ABC):
     """Question answering with sources over documents."""

     combine_documents_chain: BaseCombineDocumentsChain
@@ -149,7 +149,7 @@ class BaseQAWithSourcesChain(Chain, BaseModel, ABC):
         return result


-class QAWithSourcesChain(BaseQAWithSourcesChain, BaseModel):
+class QAWithSourcesChain(BaseQAWithSourcesChain):
     """Question answering with sources over documents."""

     input_docs_key: str = "docs"  #: :meta private:

@@ -2,7 +2,7 @@

 from typing import Any, Dict, List

-from pydantic import BaseModel, Field
+from pydantic import Field

 from langchain.chains.combine_documents.stuff import StuffDocumentsChain
 from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
@@ -10,7 +10,7 @@ from langchain.docstore.document import Document
 from langchain.schema import BaseRetriever


-class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain, BaseModel):
+class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
     """Question-answering with sources over an index."""

     retriever: BaseRetriever = Field(exclude=True)

@@ -3,7 +3,7 @@

 import warnings
 from typing import Any, Dict, List

-from pydantic import BaseModel, Field, root_validator
+from pydantic import Field, root_validator

 from langchain.chains.combine_documents.stuff import StuffDocumentsChain
 from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
@@ -11,7 +11,7 @@ from langchain.docstore.document import Document
 from langchain.vectorstores.base import VectorStore


-class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain, BaseModel):
+class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
     """Question-answering with sources over a vector database."""

     vectorstore: VectorStore = Field(exclude=True)

@@ -1,14 +1,11 @@
 # flake8: noqa
-from langchain.prompts.prompt import PromptTemplate
+from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
 from langchain.prompts.chat import (
-    SystemMessagePromptTemplate,
-    HumanMessagePromptTemplate,
     ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    SystemMessagePromptTemplate,
 )
-from langchain.chains.prompt_selector import (
-    ConditionalPromptSelector,
-    is_chat_model,
-)
+from langchain.prompts.prompt import PromptTemplate

 question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question.
 Return any relevant text verbatim.

@@ -1,6 +1,6 @@
 # flake8: noqa
-from langchain.prompts import PromptTemplate
 from langchain.output_parsers.regex import RegexParser
+from langchain.prompts import PromptTemplate

 output_parser = RegexParser(
     regex=r"(.*?)\nScore: (.*)",

@@ -1,16 +1,12 @@
 # flake8: noqa
-from langchain.prompts.prompt import PromptTemplate
+from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
 from langchain.prompts.chat import (
-    SystemMessagePromptTemplate,
-    HumanMessagePromptTemplate,
-    ChatPromptTemplate,
     AIMessagePromptTemplate,
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    SystemMessagePromptTemplate,
 )
-from langchain.chains.prompt_selector import (
-    ConditionalPromptSelector,
-    is_chat_model,
-)
+from langchain.prompts.prompt import PromptTemplate

 DEFAULT_REFINE_PROMPT_TMPL = (
     "The original question is as follows: {question}\n"

@@ -1,16 +1,12 @@
 # flake8: noqa
+from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
 from langchain.prompts import PromptTemplate
-from langchain.chains.prompt_selector import (
-    ConditionalPromptSelector,
-    is_chat_model,
-)
 from langchain.prompts.chat import (
     ChatPromptTemplate,
-    SystemMessagePromptTemplate,
     HumanMessagePromptTemplate,
+    SystemMessagePromptTemplate,
 )

 prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.

 {context}

@@ -5,7 +5,7 @@ import warnings
 from abc import abstractmethod
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.chains.base import Chain
 from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
@@ -18,7 +18,7 @@ from langchain.schema import BaseLanguageModel, BaseRetriever, Document
 from langchain.vectorstores.base import VectorStore


-class BaseRetrievalQA(Chain, BaseModel):
+class BaseRetrievalQA(Chain):
     combine_documents_chain: BaseCombineDocumentsChain
     """Chain to use to combine the documents."""
     input_key: str = "query"  #: :meta private:
@@ -143,7 +143,7 @@ class BaseRetrievalQA(Chain, BaseModel):
         return {self.output_key: answer}


-class RetrievalQA(BaseRetrievalQA, BaseModel):
+class RetrievalQA(BaseRetrievalQA):
     """Chain for question-answering against an index.

     Example:
@@ -166,7 +166,7 @@ class RetrievalQA(BaseRetrievalQA, BaseModel):
         return await self.retriever.aget_relevant_documents(question)


-class VectorDBQA(BaseRetrievalQA, BaseModel):
+class VectorDBQA(BaseRetrievalQA):
     """Chain for question-answering against a vector database."""

     vectorstore: VectorStore = Field(exclude=True, alias="vectorstore")

@@ -1,13 +1,13 @@
 """Chain pipeline where the outputs of one step feed directly into next."""
 from typing import Dict, List

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.chains.base import Chain
 from langchain.input import get_color_mapping


-class SequentialChain(Chain, BaseModel):
+class SequentialChain(Chain):
     """Chain where the outputs of one chain feed directly into next."""

     chains: List[Chain]
@@ -94,7 +94,7 @@ class SequentialChain(Chain, BaseModel):
         return {k: known_values[k] for k in self.output_variables}


-class SimpleSequentialChain(Chain, BaseModel):
+class SimpleSequentialChain(Chain):
     """Simple chain where the outputs of one step feed directly into next."""

     chains: List[Chain]

@@ -3,7 +3,7 @@ from __future__ import annotations

 from typing import Any, Dict, List

-from pydantic import BaseModel, Extra, Field
+from pydantic import Extra, Field

 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -13,7 +13,7 @@ from langchain.schema import BaseLanguageModel
 from langchain.sql_database import SQLDatabase


-class SQLDatabaseChain(Chain, BaseModel):
+class SQLDatabaseChain(Chain):
     """Chain for interacting with SQL Database.

     Example:
@@ -107,7 +107,7 @@ class SQLDatabaseChain(Chain, BaseModel):
         return "sql_database_chain"


-class SQLDatabaseSequentialChain(Chain, BaseModel):
+class SQLDatabaseSequentialChain(Chain):
     """Chain for querying SQL database that is a sequential chain.

     The chain is as follows:

@@ -1,12 +1,10 @@
 """Chain that runs an arbitrary python function."""
 from typing import Callable, Dict, List

-from pydantic import BaseModel
-
 from langchain.chains.base import Chain


-class TransformChain(Chain, BaseModel):
+class TransformChain(Chain):
     """Chain transform chain output.

     Example:

@@ -6,9 +6,7 @@ from typing import Any, Dict

 from pydantic import root_validator

-from langchain.chat_models.openai import (
-    ChatOpenAI,
-)
+from langchain.chat_models.openai import ChatOpenAI
 from langchain.utils import get_from_dict_or_env

 logger = logging.getLogger(__file__)

@@ -2,7 +2,7 @@ import asyncio
 from abc import ABC, abstractmethod
 from typing import List, Optional

-from pydantic import BaseModel, Extra, Field, validator
+from pydantic import Extra, Field, validator

 import langchain
 from langchain.callbacks import get_callback_manager
@@ -23,7 +23,7 @@ def _get_verbosity() -> bool:
     return langchain.verbose


-class BaseChatModel(BaseLanguageModel, BaseModel, ABC):
+class BaseChatModel(BaseLanguageModel, ABC):
     verbose: bool = Field(default_factory=_get_verbosity)
     """Whether to print out response text."""
     callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager)

@@ -5,7 +5,7 @@ import logging
 import sys
 from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator
 from tenacity import (
     before_sleep_log,
     retry,
@@ -91,7 +91,7 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:
     return message_dict


-class ChatOpenAI(BaseChatModel, BaseModel):
+class ChatOpenAI(BaseChatModel):
     """Wrapper around OpenAI Chat large language models.

     To use, you should have the ``openai`` python package installed, and the

@@ -2,13 +2,11 @@
 import datetime
 from typing import List, Optional

-from pydantic import BaseModel
-
 from langchain.chat_models import ChatOpenAI
 from langchain.schema import BaseMessage, ChatResult


-class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
+class PromptLayerChatOpenAI(ChatOpenAI):
     """Wrapper around OpenAI Chat large language models and PromptLayer.

     To use, you should have the ``openai`` and ``promptlayer`` python

@@ -11,9 +11,7 @@ from langchain.document_loaders.azure_blob_storage_file import (
 )
 from langchain.document_loaders.bigquery import BigQueryLoader
 from langchain.document_loaders.blackboard import BlackboardLoader
-from langchain.document_loaders.college_confidential import (
-    CollegeConfidentialLoader,
-)
+from langchain.document_loaders.college_confidential import CollegeConfidentialLoader
 from langchain.document_loaders.conllu import CoNLLULoader
 from langchain.document_loaders.csv_loader import CSVLoader
 from langchain.document_loaders.dataframe import DataFrameLoader
@@ -66,9 +64,7 @@ from langchain.document_loaders.url import UnstructuredURLLoader
 from langchain.document_loaders.url_selenium import SeleniumURLLoader
 from langchain.document_loaders.web_base import WebBaseLoader
 from langchain.document_loaders.whatsapp_chat import WhatsAppChatLoader
-from langchain.document_loaders.word_document import (
-    UnstructuredWordDocumentLoader,
-)
+from langchain.document_loaders.word_document import UnstructuredWordDocumentLoader
 from langchain.document_loaders.youtube import (
     GoogleApiClient,
     GoogleApiYoutubeLoader,

@@ -54,9 +54,7 @@ class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings):
             values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
         )
         try:
-            from aleph_alpha_client import (
-                Client,
-            )
+            from aleph_alpha_client import Client
         except ImportError:
             raise ValueError(
                 "Could not import aleph_alpha_client python package. "

@@ -1,7 +1,7 @@
 """Running custom embedding models on self-hosted remote hardware."""
 from typing import Any, Callable, List

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.embeddings.base import Embeddings
 from langchain.llms import SelfHostedPipeline
@@ -16,7 +16,7 @@ def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
     return pipeline(*args, **kwargs)


-class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings, BaseModel):
+class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings):
     """Runs custom embedding models on self-hosted remote hardware.

     Supported hardware includes auto-launched instances on AWS, GCP, Azure,

@@ -3,8 +3,6 @@ import importlib
 import logging
 from typing import Any, Callable, List, Optional

-from pydantic import BaseModel
-
 from langchain.embeddings.self_hosted import SelfHostedEmbeddings

 DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
@@ -59,7 +57,7 @@ def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0)
     return client


-class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings, BaseModel):
+class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
     """Runs sentence_transformers embedding models on self-hosted remote hardware.

     Supported hardware includes auto-launched instances on AWS, GCP, Azure,

@@ -1,6 +1,6 @@
 # flake8: noqa
-from langchain.prompts import PromptTemplate
 from langchain.output_parsers.regex import RegexParser
+from langchain.prompts import PromptTemplate

 template = """You are a teacher coming up with questions to ask on a quiz.
 Given the following document, please generate a question and answer based on that document.

@@ -19,7 +19,7 @@ class AI21PenaltyData(BaseModel):
     applyToEmojis: bool = True


-class AI21(LLM, BaseModel):
+class AI21(LLM):
     """Wrapper around AI21 large language models.

     To use, you should have the environment variable ``AI21_API_KEY``

@@ -1,14 +1,14 @@
 """Wrapper around Aleph Alpha APIs."""
 from typing import Any, Dict, List, Optional, Sequence

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
 from langchain.utils import get_from_dict_or_env


-class AlephAlpha(LLM, BaseModel):
+class AlephAlpha(LLM):
     """Wrapper around Aleph Alpha large language models.

     To use, you should have the ``aleph_alpha_client`` python package installed, and the

@@ -2,13 +2,13 @@
 import re
 from typing import Any, Dict, Generator, List, Mapping, Optional

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM
 from langchain.utils import get_from_dict_or_env


-class Anthropic(LLM, BaseModel):
+class Anthropic(LLM):
     r"""Wrapper around Anthropic large language models.

     To use, you should have the ``anthropic`` python package installed, and the

@@ -2,7 +2,7 @@
 import logging
 from typing import Any, Dict, List, Mapping, Optional

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -11,7 +11,7 @@ from langchain.utils import get_from_dict_or_env
 logger = logging.getLogger(__name__)


-class Banana(LLM, BaseModel):
+class Banana(LLM):
     """Wrapper around Banana large language models.

     To use, you should have the ``banana-dev`` python package installed,

@@ -5,7 +5,7 @@ from pathlib import Path
 from typing import Any, Dict, List, Mapping, Optional, Tuple, Union

 import yaml
-from pydantic import BaseModel, Extra, Field, validator
+from pydantic import Extra, Field, validator

 import langchain
 from langchain.callbacks import get_callback_manager
@@ -53,7 +53,7 @@ def update_cache(
     return llm_output


-class BaseLLM(BaseLanguageModel, BaseModel, ABC):
+class BaseLLM(BaseLanguageModel, ABC):
     """LLM wrapper should take in a prompt and return a string."""

     cache: Optional[bool] = None

@@ -2,7 +2,7 @@
 import logging
 from typing import Any, Dict, List, Mapping, Optional

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -11,7 +11,7 @@ from langchain.utils import get_from_dict_or_env
 logger = logging.getLogger(__name__)


-class CerebriumAI(LLM, BaseModel):
+class CerebriumAI(LLM):
     """Wrapper around CerebriumAI large language models.

     To use, you should have the ``cerebrium`` python package installed, and the

@@ -2,7 +2,7 @@
 import logging
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -11,7 +11,7 @@ from langchain.utils import get_from_dict_or_env
 logger = logging.getLogger(__name__)


-class Cohere(LLM, BaseModel):
+class Cohere(LLM):
     """Wrapper around Cohere large language models.

     To use, you should have the ``cohere`` python package installed, and the

@@ -2,7 +2,7 @@
 from typing import Any, Dict, List, Mapping, Optional

 import requests
-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -11,7 +11,7 @@ from langchain.utils import get_from_dict_or_env
 DEFAULT_MODEL_ID = "google/flan-t5-xl"


-class DeepInfra(LLM, BaseModel):
+class DeepInfra(LLM):
     """Wrapper around DeepInfra deployed models.

     To use, you should have the ``requests`` python package installed, and the

@@ -1,12 +1,10 @@
 """Fake LLM wrapper for testing purposes."""
 from typing import Any, List, Mapping, Optional

-from pydantic import BaseModel
-
 from langchain.llms.base import LLM


-class FakeListLLM(LLM, BaseModel):
+class FakeListLLM(LLM):
     """Fake LLM wrapper for testing purposes."""

     responses: List

@@ -2,14 +2,14 @@
 from typing import Any, Dict, List, Mapping, Optional

 import requests
-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
 from langchain.utils import get_from_dict_or_env


-class ForefrontAI(LLM, BaseModel):
+class ForefrontAI(LLM):
     """Wrapper around ForefrontAI large language models.

     To use, you should have the environment variable ``FOREFRONTAI_API_KEY``

@@ -2,7 +2,7 @@
 import logging
 from typing import Any, Dict, List, Mapping, Optional

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.llms.base import LLM
 from langchain.utils import get_from_dict_or_env
@@ -10,7 +10,7 @@ from langchain.utils import get_from_dict_or_env
 logger = logging.getLogger(__name__)


-class GooseAI(LLM, BaseModel):
+class GooseAI(LLM):
     """Wrapper around OpenAI large language models.

     To use, you should have the ``openai`` python package installed, and the

@@ -1,13 +1,13 @@
 """Wrapper for the GPT4All model."""
 from typing import Any, Dict, List, Mapping, Optional, Set

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens


-class GPT4All(LLM, BaseModel):
+class GPT4All(LLM):
     r"""Wrapper around GPT4All language models.

     To use, you should have the ``pyllamacpp`` python package installed, the

@@ -2,7 +2,7 @@
 from typing import Any, Dict, List, Mapping, Optional

 import requests
-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -11,7 +11,7 @@ from langchain.utils import get_from_dict_or_env
 VALID_TASKS = ("text2text-generation", "text-generation")


-class HuggingFaceEndpoint(LLM, BaseModel):
+class HuggingFaceEndpoint(LLM):
     """Wrapper around HuggingFaceHub Inference Endpoints.

     To use, you should have the ``huggingface_hub`` python package installed, and the

@@ -1,7 +1,7 @@
 """Wrapper around HuggingFace APIs."""
 from typing import Any, Dict, List, Mapping, Optional

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -11,7 +11,7 @@ DEFAULT_REPO_ID = "gpt2"
 VALID_TASKS = ("text2text-generation", "text-generation")


-class HuggingFaceHub(LLM, BaseModel):
+class HuggingFaceHub(LLM):
     """Wrapper around HuggingFaceHub models.

     To use, you should have the ``huggingface_hub`` python package installed, and the

@@ -3,7 +3,7 @@ import importlib.util
 import logging
 from typing import Any, List, Mapping, Optional

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -15,7 +15,7 @@ VALID_TASKS = ("text2text-generation", "text-generation")
 logger = logging.getLogger()


-class HuggingFacePipeline(LLM, BaseModel):
+class HuggingFacePipeline(LLM):
     """Wrapper around HuggingFace Pipeline API.

     To use, you should have the ``transformers`` python package installed.

@@ -2,14 +2,14 @@
 import logging
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel, Field, root_validator
+from pydantic import Field, root_validator

 from langchain.llms.base import LLM

 logger = logging.getLogger(__name__)


-class LlamaCpp(LLM, BaseModel):
+class LlamaCpp(LLM):
     """Wrapper around the llama.cpp model.

     To use, you should have the llama-cpp-python library installed, and provide the

@@ -1,12 +1,12 @@
 """Wrapper around HazyResearch's Manifest library."""
 from typing import Any, Dict, List, Mapping, Optional

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM


-class ManifestWrapper(LLM, BaseModel):
+class ManifestWrapper(LLM):
     """Wrapper around HazyResearch's Manifest library."""

     client: Any  #: :meta private:

@@ -3,7 +3,7 @@ import logging
 from typing import Any, Dict, List, Mapping, Optional

 import requests
-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -11,7 +11,7 @@ from langchain.llms.utils import enforce_stop_tokens
 logger = logging.getLogger(__name__)


-class Modal(LLM, BaseModel):
+class Modal(LLM):
     """Wrapper around Modal large language models.

     To use, you should have the ``modal-client`` python package installed.

@@ -1,13 +1,13 @@
 """Wrapper around NLPCloud APIs."""
 from typing import Any, Dict, List, Mapping, Optional

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM
 from langchain.utils import get_from_dict_or_env


-class NLPCloud(LLM, BaseModel):
+class NLPCloud(LLM):
     """Wrapper around NLPCloud large language models.

     To use, you should have the ``nlpcloud`` python package installed, and the

@@ -17,7 +17,7 @@ from typing import (
     Union,
 )

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator
 from tenacity import (
     before_sleep_log,
     retry,
@@ -113,7 +113,7 @@ async def acompletion_with_retry(
     return await _completion_with_retry(**kwargs)


-class BaseOpenAI(BaseLLM, BaseModel):
+class BaseOpenAI(BaseLLM):
     """Wrapper around OpenAI large language models.

     To use, you should have the ``openai`` python package installed, and the
@@ -534,7 +534,7 @@ class AzureOpenAI(BaseOpenAI):
         return {**{"engine": self.deployment_name}, **super()._invocation_params}


-class OpenAIChat(BaseLLM, BaseModel):
+class OpenAIChat(BaseLLM):
     """Wrapper around OpenAI Chat large language models.

     To use, you should have the ``openai`` python package installed, and the

@@ -2,7 +2,7 @@
 import logging
 from typing import Any, Dict, List, Mapping, Optional

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -11,7 +11,7 @@ from langchain.utils import get_from_dict_or_env
 logger = logging.getLogger(__name__)


-class Petals(LLM, BaseModel):
+class Petals(LLM):
     """Wrapper around Petals Bloom models.

     To use, you should have the ``petals`` python package installed, and the

@@ -2,13 +2,11 @@
 import datetime
 from typing import List, Optional

-from pydantic import BaseModel
-
 from langchain.llms import OpenAI, OpenAIChat
 from langchain.schema import LLMResult


-class PromptLayerOpenAI(OpenAI, BaseModel):
+class PromptLayerOpenAI(OpenAI):
     """Wrapper around OpenAI large language models.

     To use, you should have the ``openai`` and ``promptlayer`` python
@@ -106,7 +104,7 @@ class PromptLayerOpenAI(OpenAI, BaseModel):
         return generated_responses


-class PromptLayerOpenAIChat(OpenAIChat, BaseModel):
+class PromptLayerOpenAIChat(OpenAIChat):
     """Wrapper around OpenAI large language models.

     To use, you should have the ``openai`` and ``promptlayer`` python

@@ -2,7 +2,7 @@
 import logging
 from typing import Any, Dict, List, Mapping, Optional

-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.llms.base import LLM
 from langchain.utils import get_from_dict_or_env
@@ -10,7 +10,7 @@ from langchain.utils import get_from_dict_or_env
 logger = logging.getLogger(__name__)


-class Replicate(LLM, BaseModel):
+class Replicate(LLM):
     """Wrapper around Replicate models.

     To use, you should have the ``replicate`` python package installed,

@@ -2,7 +2,7 @@
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Mapping, Optional, Union

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -55,7 +55,7 @@ class ContentHandlerBase(ABC):
     """


-class SagemakerEndpoint(LLM, BaseModel):
+class SagemakerEndpoint(LLM):
     """Wrapper around custom Sagemaker Inference Endpoints.

     To use, you must supply the endpoint name from your deployed

@@ -4,7 +4,7 @@ import logging
 import pickle
 from typing import Any, Callable, List, Mapping, Optional

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -61,7 +61,7 @@ def _send_pipeline_to_device(pipeline: Any, device: int) -> Any:
     return pipeline


-class SelfHostedPipeline(LLM, BaseModel):
+class SelfHostedPipeline(LLM):
     """Run model inference on self-hosted remote hardware.

     Supported hardware includes auto-launched instances on AWS, GCP, Azure,

@@ -3,7 +3,7 @@ import importlib.util
 import logging
 from typing import Any, Callable, List, Mapping, Optional

-from pydantic import BaseModel, Extra
+from pydantic import Extra

 from langchain.llms.self_hosted import SelfHostedPipeline
 from langchain.llms.utils import enforce_stop_tokens
@@ -108,7 +108,7 @@ def _load_transformer(
     return pipeline


-class SelfHostedHuggingFaceLLM(SelfHostedPipeline, BaseModel):
+class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
     """Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.

     Supported hardware includes auto-launched instances on AWS, GCP, Azure,

@@ -4,7 +4,7 @@ import time
 from typing import Any, Dict, List, Mapping, Optional

 import requests
-from pydantic import BaseModel, Extra, Field, root_validator
+from pydantic import Extra, Field, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
@@ -13,7 +13,7 @@ from langchain.utils import get_from_dict_or_env
 logger = logging.getLogger(__name__)


-class StochasticAI(LLM, BaseModel):
+class StochasticAI(LLM):
     """Wrapper around StochasticAI large language models.

     To use, you should have the environment variable ``STOCHASTICAI_API_KEY``

@@ -2,14 +2,14 @@
 from typing import Any, Dict, List, Mapping, Optional

 import requests
-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens
 from langchain.utils import get_from_dict_or_env


-class Writer(LLM, BaseModel):
+class Writer(LLM):
     """Wrapper around Writer large language models.

     To use, you should have the environment variable ``WRITER_API_KEY``

@@ -1,13 +1,13 @@
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel, root_validator
+from pydantic import root_validator

 from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
 from langchain.memory.utils import get_prompt_input_key
 from langchain.schema import get_buffer_string


-class ConversationBufferMemory(BaseChatMemory, BaseModel):
+class ConversationBufferMemory(BaseChatMemory):
     """Buffer for storing conversation memory."""

     human_prefix: str = "Human"
@@ -39,7 +39,7 @@ class ConversationBufferMemory(BaseChatMemory, BaseModel):
         return {self.memory_key: self.buffer}


-class ConversationStringBufferMemory(BaseMemory, BaseModel):
+class ConversationStringBufferMemory(BaseMemory):
     """Buffer for storing conversation memory."""

     human_prefix: str = "Human"

@@ -1,12 +1,10 @@
 from typing import Any, Dict, List

-from pydantic import BaseModel
-
 from langchain.memory.chat_memory import BaseChatMemory
 from langchain.schema import BaseMessage, get_buffer_string


-class ConversationBufferWindowMemory(BaseChatMemory, BaseModel):
+class ConversationBufferWindowMemory(BaseChatMemory):
     """Buffer for storing conversation memory."""

     human_prefix: str = "Human"

@@ -5,10 +5,7 @@ from pydantic import Field

 from langchain.memory.chat_message_histories.in_memory import ChatMessageHistory
 from langchain.memory.utils import get_prompt_input_key
-from langchain.schema import (
-    BaseChatMessageHistory,
-    BaseMemory,
-)
+from langchain.schema import BaseChatMessageHistory, BaseMemory


 class BaseChatMemory(BaseMemory, ABC):

@@ -1,11 +1,9 @@
 from typing import Any, Dict, List

-from pydantic import BaseModel
-
 from langchain.schema import BaseMemory


-class CombinedMemory(BaseMemory, BaseModel):
+class CombinedMemory(BaseMemory):
     """Class for combining multiple memories' data together."""

     memories: List[BaseMemory]

@@ -1,7 +1,5 @@
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel
-
 from langchain.chains.llm import LLMChain
 from langchain.memory.chat_memory import BaseChatMemory
 from langchain.memory.prompt import (
@@ -13,7 +11,7 @@ from langchain.prompts.base import BasePromptTemplate
 from langchain.schema import BaseLanguageModel, BaseMessage, get_buffer_string


-class ConversationEntityMemory(BaseChatMemory, BaseModel):
+class ConversationEntityMemory(BaseChatMemory):
     """Entity extractor & summarizer to memory."""

     human_prefix: str = "Human"

@@ -1,6 +1,6 @@
 from typing import Any, Dict, List, Type, Union

-from pydantic import BaseModel, Field
+from pydantic import Field

 from langchain.chains.llm import LLMChain
 from langchain.graphs import NetworkxEntityGraph
@@ -20,7 +20,7 @@ from langchain.schema import (
 )


-class ConversationKGMemory(BaseChatMemory, BaseModel):
+class ConversationKGMemory(BaseChatMemory):
     """Knowledge graph memory for storing conversation memory.

     Integrates with external knowledge graph to store and retrieve

@@ -1,11 +1,9 @@
 from typing import Any, Dict, List

-from pydantic import BaseModel
-
 from langchain.schema import BaseMemory


-class SimpleMemory(BaseMemory, BaseModel):
+class SimpleMemory(BaseMemory):
     """Simple memory for storing context or other bits of information that shouldn't
     ever change between prompts.
     """

@@ -34,7 +34,7 @@ class SummarizerMixin(BaseModel):
         return chain.predict(summary=existing_summary, new_lines=new_lines)


-class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin, BaseModel):
+class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
     """Conversation summarizer to memory."""

     buffer: str = ""

@@ -1,13 +1,13 @@
 from typing import Any, Dict, List

-from pydantic import BaseModel, root_validator
+from pydantic import root_validator

 from langchain.memory.chat_memory import BaseChatMemory
 from langchain.memory.summary import SummarizerMixin
 from langchain.schema import BaseMessage, get_buffer_string


-class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin, BaseModel):
+class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
     """Buffer with summarizer for storing conversation memory."""

     max_token_limit: int = 2000

@@ -1,12 +1,10 @@
 from typing import Any, Dict, List

-from pydantic import BaseModel
-
 from langchain.memory.chat_memory import BaseChatMemory
 from langchain.schema import BaseLanguageModel, BaseMessage, get_buffer_string


-class ConversationTokenBufferMemory(BaseChatMemory, BaseModel):
+class ConversationTokenBufferMemory(BaseChatMemory):
     """Buffer for storing conversation memory."""

     human_prefix: str = "Human"

@@ -3,12 +3,10 @@ from __future__ import annotations
 import re
 from typing import Dict, List, Optional

-from pydantic import BaseModel
-
 from langchain.schema import BaseOutputParser


-class RegexParser(BaseOutputParser, BaseModel):
+class RegexParser(BaseOutputParser):
     """Class to parse the output into a dictionary."""

     regex: str

@@ -3,12 +3,10 @@ from __future__ import annotations
 import re
 from typing import Dict, Optional

-from pydantic import BaseModel
-
 from langchain.schema import BaseOutputParser


-class RegexDictParser(BaseOutputParser, BaseModel):
+class RegexDictParser(BaseOutputParser):
     """Class to parse the output into a dictionary."""

     regex_pattern: str = r"{}:\s?([^.'\n']*)\.?"  #: :meta private:

@@ -20,8 +20,8 @@ def ngram_overlap_score(source: List[str], example: List[str]) -> float:
     https://www.nltk.org/_modules/nltk/translate/bleu_score.html
     https://aclanthology.org/P02-1040.pdf
     """
-    from nltk.translate.bleu_score import (  # type: ignore
-        SmoothingFunction,
+    from nltk.translate.bleu_score import (
+        SmoothingFunction,  # type: ignore
         sentence_bleu,
     )

@@ -99,7 +99,7 @@ class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel):
         return cls(vectorstore=vectorstore, k=k, input_keys=input_keys)


-class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector, BaseModel):
+class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector):
     """ExampleSelector that selects examples based on Max Marginal Relevance.

     This was shown to improve performance in this paper:

@@ -1,7 +1,7 @@
 """Prompt template that contains few shot examples."""
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.prompts.base import (
     DEFAULT_FORMATTER_MAPPING,
@@ -12,7 +12,7 @@ from langchain.prompts.example_selector.base import BaseExampleSelector
 from langchain.prompts.prompt import PromptTemplate


-class FewShotPromptTemplate(StringPromptTemplate, BaseModel):
+class FewShotPromptTemplate(StringPromptTemplate):
     """Prompt template that contains few shot examples."""

     examples: Optional[List[dict]] = None

@@ -1,17 +1,14 @@
 """Prompt template that contains few shot examples."""
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

-from langchain.prompts.base import (
-    DEFAULT_FORMATTER_MAPPING,
-    StringPromptTemplate,
-)
+from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate
 from langchain.prompts.example_selector.base import BaseExampleSelector
 from langchain.prompts.prompt import PromptTemplate


-class FewShotPromptWithTemplates(StringPromptTemplate, BaseModel):
+class FewShotPromptWithTemplates(StringPromptTemplate):
     """Prompt template that contains few shot examples."""

     examples: Optional[List[dict]] = None

@@ -5,7 +5,7 @@ from pathlib import Path
 from string import Formatter
 from typing import Any, Dict, List, Union

-from pydantic import BaseModel, Extra, root_validator
+from pydantic import Extra, root_validator

 from langchain.prompts.base import (
     DEFAULT_FORMATTER_MAPPING,
@@ -14,7 +14,7 @@ from langchain.prompts.base import (
 )


-class PromptTemplate(StringPromptTemplate, BaseModel):
+class PromptTemplate(StringPromptTemplate):
     """Schema to represent a prompt for an LLM.

     Example:

@@ -6,9 +6,7 @@ from sqlalchemy.orm import Session

 from langchain.docstore.document import Document
 from langchain.vectorstores.pgvector import PGVector
-from tests.integration_tests.vectorstores.fake_embeddings import (
-    FakeEmbeddings,
-)
+from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings

 CONNECTION_STRING = PGVector.connection_string_from_db_params(
     driver=os.environ.get("TEST_PGVECTOR_DRIVER", "psycopg2"),

@@ -2,8 +2,6 @@

 from typing import Any, List, Mapping, Optional

-from pydantic import BaseModel
-
 from langchain.agents import AgentExecutor, AgentType, initialize_agent
 from langchain.agents.tools import Tool
 from langchain.callbacks.base import CallbackManager
@@ -11,7 +9,7 @@ from langchain.llms.base import LLM
 from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler


-class FakeListLLM(LLM, BaseModel):
+class FakeListLLM(LLM):
     """Fake LLM for testing that outputs elements of a list."""

     responses: List[str]

@@ -2,8 +2,6 @@

 from typing import Any, List, Mapping, Optional, Union

-from pydantic import BaseModel
-
 from langchain.agents.react.base import ReActChain, ReActDocstoreAgent
 from langchain.agents.tools import Tool
 from langchain.docstore.base import Docstore
@@ -23,7 +21,7 @@ Made in 2022."""
 _FAKE_PROMPT = PromptTemplate(input_variables=["input"], template="{input}")


-class FakeListLLM(LLM, BaseModel):
+class FakeListLLM(LLM):
     """Fake LLM for testing that outputs elements of a list."""

     responses: List[str]

@@ -2,7 +2,6 @@
 from typing import Any, Dict, List, Optional

 import pytest
-from pydantic import BaseModel

 from langchain.callbacks.base import CallbackManager
 from langchain.chains.base import Chain
@@ -10,7 +9,7 @@ from langchain.schema import BaseMemory
 from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler


-class FakeMemory(BaseMemory, BaseModel):
+class FakeMemory(BaseMemory):
     """Fake memory class for testing purposes."""

     @property
@@ -33,7 +32,7 @@ class FakeMemory(BaseMemory, BaseModel):
         pass


-class FakeChain(Chain, BaseModel):
+class FakeChain(Chain):
     """Fake chain class for testing purposes."""

     be_correct: bool = True

@@ -2,7 +2,6 @@
 from typing import List, Optional

 import numpy as np
-from pydantic import BaseModel

 from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
 from langchain.chains.hyde.prompts import PROMPT_MAP
@@ -23,7 +22,7 @@ class FakeEmbeddings(Embeddings):
         return list(np.random.uniform(0, 1, 10))


-class FakeLLM(BaseLLM, BaseModel):
+class FakeLLM(BaseLLM):
     """Fake LLM wrapper for testing purposes."""

     n: int = 1

@@ -2,13 +2,11 @@

 from typing import Any, List, Mapping, Optional

-from pydantic import BaseModel
-
 from langchain.chains.natbot.base import NatBotChain
 from langchain.llms.base import LLM


-class FakeLLM(LLM, BaseModel):
+class FakeLLM(LLM):
     """Fake LLM wrapper for testing purposes."""

     def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:

@@ -2,14 +2,13 @@
 from typing import Dict, List

 import pytest
-from pydantic import BaseModel

 from langchain.chains.base import Chain
 from langchain.chains.sequential import SequentialChain, SimpleSequentialChain
 from langchain.memory.simple import SimpleMemory


-class FakeChain(Chain, BaseModel):
+class FakeChain(Chain):
     """Fake Chain for testing purposes."""

     input_variables: List[str]

@@ -1,12 +1,10 @@
 """Fake LLM wrapper for testing purposes."""
 from typing import Any, List, Mapping, Optional

-from pydantic import BaseModel
-
 from langchain.llms.base import LLM


-class FakeLLM(LLM, BaseModel):
+class FakeLLM(LLM):
     """Fake LLM wrapper for testing purposes."""

     queries: Optional[Mapping] = None