From f4d3cf2dfba90fc38e147c8701a85099f0814248 Mon Sep 17 00:00:00 2001
From: Zander Chase <130414180+vowelparrot@users.noreply.github.com>
Date: Thu, 11 May 2023 15:34:06 -0700
Subject: [PATCH] Add Invocation Params (#4509)

### Add Invocation Params to Logged Run

Adds an LLM type to each chat model, as well as an override of the dict()
method, to log the invocation parameters for each call.

---------

Co-authored-by: Ankush Gola
---
 langchain/chat_models/azure_openai.py       | 11 ++++++++-
 langchain/chat_models/base.py               | 27 ++++++++++++++++++---
 langchain/chat_models/google_palm.py        | 17 ++++++++++++-
 langchain/chat_models/openai.py             |  5 ++++
 langchain/chat_models/promptlayer_openai.py | 14 ++++++++++-
 tests/unit_tests/llms/fake_chat_model.py    | 10 +++++++-
 6 files changed, 77 insertions(+), 7 deletions(-)

diff --git a/langchain/chat_models/azure_openai.py b/langchain/chat_models/azure_openai.py
index ed79be35..4d4792e8 100644
--- a/langchain/chat_models/azure_openai.py
+++ b/langchain/chat_models/azure_openai.py
@@ -2,7 +2,7 @@
 from __future__ import annotations
 
 import logging
-from typing import Any, Dict
+from typing import Any, Dict, Mapping
 
 from pydantic import root_validator
 
@@ -110,3 +110,12 @@ class AzureChatOpenAI(ChatOpenAI):
             **super()._default_params,
             "engine": self.deployment_name,
         }
+
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        """Get the identifying parameters."""
+        return {**self._default_params}
+
+    @property
+    def _llm_type(self) -> str:
+        return "azure-openai-chat"
diff --git a/langchain/chat_models/base.py b/langchain/chat_models/base.py
index 3a8bfb1a..14b8e486 100644
--- a/langchain/chat_models/base.py
+++ b/langchain/chat_models/base.py
@@ -2,7 +2,7 @@
 import asyncio
 import inspect
 import warnings
 from abc import ABC, abstractmethod
-from typing import Dict, List, Optional
+from typing import Any, Dict, List, Mapping, Optional
 
 from pydantic import Extra, Field, root_validator
 
@@ -65,11 +65,14 @@ class BaseChatModel(BaseLanguageModel, ABC):
     ) -> LLMResult:
         """Top Level call"""
 
+        params = self.dict()
+        params["stop"] = stop
+
         callback_manager = CallbackManager.configure(
             callbacks, self.callbacks, self.verbose
         )
         run_manager = callback_manager.on_chat_model_start(
-            {"name": self.__class__.__name__}, messages
+            {"name": self.__class__.__name__}, messages, invocation_params=params
         )
 
         new_arg_supported = inspect.signature(self._generate).parameters.get(
@@ -98,12 +101,14 @@ class BaseChatModel(BaseLanguageModel, ABC):
         callbacks: Callbacks = None,
     ) -> LLMResult:
         """Top Level call"""
+        params = self.dict()
+        params["stop"] = stop
 
         callback_manager = AsyncCallbackManager.configure(
             callbacks, self.callbacks, self.verbose
         )
         run_manager = await callback_manager.on_chat_model_start(
-            {"name": self.__class__.__name__}, messages
+            {"name": self.__class__.__name__}, messages, invocation_params=params
         )
 
         new_arg_supported = inspect.signature(self._agenerate).parameters.get(
@@ -181,6 +186,22 @@ class BaseChatModel(BaseLanguageModel, ABC):
         result = self([HumanMessage(content=message)], stop=stop)
         return result.content
 
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        """Get the identifying parameters."""
+        return {}
+
+    @property
+    @abstractmethod
+    def _llm_type(self) -> str:
+        """Return type of chat model."""
+
+    def dict(self, **kwargs: Any) -> Dict:
+        """Return a dictionary of the LLM."""
+        starter_dict = dict(self._identifying_params)
+        starter_dict["_type"] = self._llm_type
+        return starter_dict
+
 
 class SimpleChatModel(BaseChatModel):
     def _generate(
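Reviewer note (illustrative, not part of the patch): the sketch below shows what the new BaseChatModel.dict() contract produces for a subclass. The MyChatModel class and its temperature field are invented for illustration; the dict() body mirrors the one added to base.py above, merging _identifying_params with the subclass's _llm_type under the "_type" key, after which generate() adds "stop" and forwards the result as invocation_params.

```python
from typing import Any, Dict, Mapping


class MyChatModel:
    """Hypothetical stand-in for a BaseChatModel subclass."""

    temperature: float = 0.7

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {"temperature": self.temperature}

    @property
    def _llm_type(self) -> str:
        return "my-chat-model"

    def dict(self, **kwargs: Any) -> Dict:
        # Same logic as the BaseChatModel.dict() added in this patch.
        starter_dict = dict(self._identifying_params)
        starter_dict["_type"] = self._llm_type
        return starter_dict


params = MyChatModel().dict()
params["stop"] = None  # generate() adds the stop sequence the same way
print(params)  # {'temperature': 0.7, '_type': 'my-chat-model', 'stop': None}
```
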
diff --git a/langchain/chat_models/google_palm.py b/langchain/chat_models/google_palm.py
index 4431918e..0e9a2a15 100644
--- a/langchain/chat_models/google_palm.py
+++ b/langchain/chat_models/google_palm.py
@@ -1,7 +1,7 @@
 """Wrapper around Google's PaLM Chat API."""
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional
 
 from pydantic import BaseModel, root_validator
 
@@ -256,3 +256,18 @@ class ChatGooglePalm(BaseChatModel, BaseModel):
         )
 
         return _response_to_result(response, stop)
+
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        """Get the identifying parameters."""
+        return {
+            "model_name": self.model_name,
+            "temperature": self.temperature,
+            "top_p": self.top_p,
+            "top_k": self.top_k,
+            "n": self.n,
+        }
+
+    @property
+    def _llm_type(self) -> str:
+        return "google-palm-chat"
diff --git a/langchain/chat_models/openai.py b/langchain/chat_models/openai.py
index a4d2ac87..cf60fdc4 100644
--- a/langchain/chat_models/openai.py
+++ b/langchain/chat_models/openai.py
@@ -347,6 +347,11 @@ class ChatOpenAI(BaseChatModel):
         """Get the identifying parameters."""
         return {**{"model_name": self.model_name}, **self._default_params}
 
+    @property
+    def _llm_type(self) -> str:
+        """Return type of chat model."""
+        return "openai-chat"
+
     def get_num_tokens(self, text: str) -> int:
         """Calculate num tokens with tiktoken package."""
         # tiktoken NOT supported for Python 3.7 or below
diff --git a/langchain/chat_models/promptlayer_openai.py b/langchain/chat_models/promptlayer_openai.py
index 6f9b9a08..59695994 100644
--- a/langchain/chat_models/promptlayer_openai.py
+++ b/langchain/chat_models/promptlayer_openai.py
@@ -1,6 +1,6 @@
 """PromptLayer wrapper."""
 import datetime
-from typing import List, Optional
+from typing import Any, List, Mapping, Optional
 
 from langchain.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
@@ -109,3 +109,15 @@ class PromptLayerChatOpenAI(ChatOpenAI):
                 generation.generation_info = {}
             generation.generation_info["pl_request_id"] = pl_request_id
         return generated_responses
+
+    @property
+    def _llm_type(self) -> str:
+        return "promptlayer-openai-chat"
+
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        return {
+            **super()._identifying_params,
+            "pl_tags": self.pl_tags,
+            "return_pl_id": self.return_pl_id,
+        }
diff --git a/tests/unit_tests/llms/fake_chat_model.py b/tests/unit_tests/llms/fake_chat_model.py
index 1f0a8e28..c8705d1c 100644
--- a/tests/unit_tests/llms/fake_chat_model.py
+++ b/tests/unit_tests/llms/fake_chat_model.py
@@ -1,5 +1,5 @@
 """Fake Chat Model wrapper for testing purposes."""
-from typing import List, Optional
+from typing import Any, List, Mapping, Optional
 
 from langchain.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
@@ -30,3 +30,11 @@ class FakeChatModel(SimpleChatModel):
         message = AIMessage(content=output_str)
         generation = ChatGeneration(message=message)
         return ChatResult(generations=[generation])
+
+    @property
+    def _llm_type(self) -> str:
+        return "fake-chat-model"
+
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        return {"key": "fake"}
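
Reviewer note (illustrative, not part of the patch): with these overrides in place, the invocation parameters reach callback handlers through the kwargs that the callback manager forwards from on_chat_model_start. Below is a minimal sketch of a handler that reads them; the InvocationParamLogger name is invented, and it assumes the handler is registered via the model's callbacks argument and that the manager passes invocation_params through kwargs, as the base.py hunks above do.

```python
from typing import Any, Dict, List

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import BaseMessage


class InvocationParamLogger(BaseCallbackHandler):
    """Hypothetical handler that prints the params logged by this patch."""

    def on_chat_model_start(
        self,
        serialized: Dict[str, Any],
        messages: List[List[BaseMessage]],
        **kwargs: Any,
    ) -> None:
        # invocation_params holds _identifying_params plus "_type" and "stop".
        params = kwargs.get("invocation_params", {})
        print(f"{serialized.get('name')}: {params}")
```

For example, a ChatOpenAI(callbacks=[InvocationParamLogger()]) instance would print a dict containing model_name, the default params, "_type": "openai-chat", and the stop sequence on each generate call.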