change to baselanguagemodel (#1496)

This commit is contained in:
Harrison Chase 2023-03-07 09:29:59 -08:00 committed by GitHub
parent f276bfad8e
commit 8e6f599822
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 6 additions and 9 deletions

View File

@@ -3,7 +3,6 @@ from typing import Any, Dict, List, Optional
 from pydantic import BaseModel
 from langchain.chains.llm import LLMChain
-from langchain.llms.base import BaseLLM
 from langchain.memory.chat_memory import BaseChatMemory
 from langchain.memory.prompt import (
     ENTITY_EXTRACTION_PROMPT,
@@ -11,7 +10,7 @@ from langchain.memory.prompt import (
 )
 from langchain.memory.utils import get_buffer_string, get_prompt_input_key
 from langchain.prompts.base import BasePromptTemplate
-from langchain.schema import BaseMessage
+from langchain.schema import BaseLanguageModel, BaseMessage
class ConversationEntityMemory(BaseChatMemory, BaseModel):
@@ -19,7 +18,7 @@ class ConversationEntityMemory(BaseChatMemory, BaseModel):
     human_prefix: str = "Human"
     ai_prefix: str = "AI"
-    llm: BaseLLM
+    llm: BaseLanguageModel
     entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
     entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT
     store: Dict[str, Optional[str]] = {}

View File

@@ -5,7 +5,6 @@ from pydantic import BaseModel, Field
 from langchain.chains.llm import LLMChain
 from langchain.graphs import NetworkxEntityGraph
 from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples
-from langchain.llms.base import BaseLLM
 from langchain.memory.chat_memory import BaseChatMemory
 from langchain.memory.prompt import (
     ENTITY_EXTRACTION_PROMPT,
@@ -13,7 +12,7 @@ from langchain.memory.prompt import (
 )
 from langchain.memory.utils import get_buffer_string, get_prompt_input_key
 from langchain.prompts.base import BasePromptTemplate
-from langchain.schema import SystemMessage
+from langchain.schema import BaseLanguageModel, SystemMessage
class ConversationKGMemory(BaseChatMemory, BaseModel):
@@ -29,7 +28,7 @@ class ConversationKGMemory(BaseChatMemory, BaseModel):
     kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
     knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
     entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
-    llm: BaseLLM
+    llm: BaseLanguageModel
     """Number of previous utterances to include in the context."""
     memory_key: str = "history" #: :meta private:

View File

@@ -3,18 +3,17 @@ from typing import Any, Dict, List
 from pydantic import BaseModel, root_validator
 from langchain.chains.llm import LLMChain
-from langchain.llms.base import BaseLLM
 from langchain.memory.chat_memory import BaseChatMemory
 from langchain.memory.prompt import SUMMARY_PROMPT
 from langchain.memory.utils import get_buffer_string
 from langchain.prompts.base import BasePromptTemplate
-from langchain.schema import BaseMessage, SystemMessage
+from langchain.schema import BaseLanguageModel, BaseMessage, SystemMessage
 class SummarizerMixin(BaseModel):
     human_prefix: str = "Human"
     ai_prefix: str = "AI"
-    llm: BaseLLM
+    llm: BaseLanguageModel
     prompt: BasePromptTemplate = SUMMARY_PROMPT
     def predict_new_summary(