"""Fake Chat Model wrapper for testing purposes."""
from typing import Any, List, Mapping, Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.chat_models.base import SimpleChatModel
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import AIMessage, BaseMessage


class FakeChatModel(SimpleChatModel):
    """Fake Chat Model wrapper for testing purposes."""

    def _call(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
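        """Return a fixed string for any input; useful for unit tests."""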
        return "fake response"

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
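        """Async path: wrap the same canned reply in a ChatResult."""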
        output_str = "fake response"
        message = AIMessage(content=output_str)
        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])

    @property
    def _llm_type(self) -> str:
        return "fake-chat-model"

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {"key": "fake"}
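

# A minimal usage sketch (an illustrative addition, not part of the original
# fixture), assuming the 0.0.x-era langchain API in which a chat model is
# callable with a list of messages and returns a single message:
if __name__ == "__main__":
    from langchain.schema.messages import HumanMessage

    model = FakeChatModel()
    # Any input yields the same canned reply produced by _call.
    response = model([HumanMessage(content="hello")])
    assert response.content == "fake response"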