Mirror of https://github.com/hwchase17/langchain, synced 2024-11-10 01:10:59 +00:00
f4d3cf2dfb
### Add Invocation Params to Logged Run

Adds an llm type to each chat model as well as an override of the dict() method to log the invocation parameters for each call.

Co-authored-by: Ankush Gola <ankush.gola@gmail.com>
41 lines
1.2 KiB
Python
"""Fake Chat Model wrapper for testing purposes."""
|
|
from typing import Any, List, Mapping, Optional
|
|
|
|
from langchain.callbacks.manager import (
|
|
AsyncCallbackManagerForLLMRun,
|
|
CallbackManagerForLLMRun,
|
|
)
|
|
from langchain.chat_models.base import SimpleChatModel
|
|
from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult
|
|
|
|
|
|
class FakeChatModel(SimpleChatModel):
|
|
"""Fake Chat Model wrapper for testing purposes."""
|
|
|
|
def _call(
|
|
self,
|
|
messages: List[BaseMessage],
|
|
stop: Optional[List[str]] = None,
|
|
run_manager: Optional[CallbackManagerForLLMRun] = None,
|
|
) -> str:
|
|
return "fake response"
|
|
|
|
async def _agenerate(
|
|
self,
|
|
messages: List[BaseMessage],
|
|
stop: Optional[List[str]] = None,
|
|
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
|
|
) -> ChatResult:
|
|
output_str = "fake response"
|
|
message = AIMessage(content=output_str)
|
|
generation = ChatGeneration(message=message)
|
|
return ChatResult(generations=[generation])
|
|
|
|
@property
|
|
def _llm_type(self) -> str:
|
|
return "fake-chat-model"
|
|
|
|
@property
|
|
def _identifying_params(self) -> Mapping[str, Any]:
|
|
return {"key": "fake"}
|
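A brief usage sketch follows, tying the class above to the commit description (the llm type plus the dict() override that logs invocation parameters). It assumes FakeChatModel from the file above is in scope and that dict() returns the identifying params together with a "_type" key; the exact output shape depends on the langchain version.

from langchain.schema import HumanMessage

model = FakeChatModel()

# Synchronous path: _call() ignores the input and returns the canned string.
response = model([HumanMessage(content="hello")])
print(response.content)  # "fake response"

# Invocation parameters as described in the commit message (assumed shape).
print(model._llm_type)   # "fake-chat-model"
print(model.dict())      # e.g. {"key": "fake", "_type": "fake-chat-model"}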