Enhancements and bug fixes for `LLMonitorCallbackHandler` (#10297)

Hi @baskaryan,

I've made updates to `LLMonitorCallbackHandler` to address a few bugs reported by users. These changes don't alter the fundamental behavior of the callback handler.

Thank you!

---------

Co-authored-by: vincelwt <vince@lyser.io>
authored by Hugues, committed via GitHub
parent c902a1545b
commit 3e5a143625

@@ -1,19 +1,23 @@
# LLMonitor
-[LLMonitor](https://llmonitor.com) is an open-source observability platform that provides cost tracking, user tracking and powerful agent tracing.
+[LLMonitor](https://llmonitor.com?utm_source=langchain&utm_medium=py&utm_campaign=docs) is an open-source observability platform that provides cost and usage analytics, user tracking, tracing and evaluation tools.
+
+<video controls width='100%' >
+  <source src='https://llmonitor.com/videos/demo-annotated.mp4'/>
+</video>
## Setup
-Create an account on [llmonitor.com](https://llmonitor.com), create an `App`, and then copy the associated `tracking id`.
+Create an account on [llmonitor.com](https://llmonitor.com?utm_source=langchain&utm_medium=py&utm_campaign=docs), then copy your new app's `tracking id`.
Once you have it, set it as an environment variable by running:
```bash
export LLMONITOR_APP_ID="..."
```
If you'd prefer not to set an environment variable, you can pass the key directly when initializing the callback handler:
```python
from langchain.callbacks import LLMonitorCallbackHandler
@@ -21,12 +25,13 @@ handler = LLMonitorCallbackHandler(app_id="...")
```
## Usage with LLM/Chat models
```python
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import LLMonitorCallbackHandler
-handler = LLMonitorCallbackHandler(app_id="...")
+handler = LLMonitorCallbackHandler()

llm = OpenAI(
    callbacks=[handler],
@@ -38,26 +43,63 @@ chat = ChatOpenAI(
)
```
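Attaching the handler only registers it; a call on the model is what emits events. A minimal sketch, assuming `OPENAI_API_KEY` and `LLMONITOR_APP_ID` are set in the environment:

```python
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import LLMonitorCallbackHandler

handler = LLMonitorCallbackHandler()
chat = ChatOpenAI(temperature=0, callbacks=[handler])

# Triggers on_chat_model_start / on_llm_end, which post "start" and "end"
# events to the LLMonitor API.
print(chat.predict("Tell me a joke"))
```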
-## Usage with agents
+## Usage with chains and agents
+
+Make sure to pass the callback handler to the `run` method so that all related chains and LLM calls are correctly tracked.
+
+It is also recommended to pass `agent_name` in the metadata to be able to distinguish between agents in the dashboard.
+
+Example:
+
+```python
+from langchain.chat_models import ChatOpenAI
+from langchain.schema import SystemMessage, HumanMessage
+from langchain.agents import OpenAIFunctionsAgent, AgentExecutor, tool
+from langchain.callbacks import LLMonitorCallbackHandler
+
+llm = ChatOpenAI(temperature=0)
+handler = LLMonitorCallbackHandler()
+
+@tool
+def get_word_length(word: str) -> int:
+    """Returns the length of a word."""
+    return len(word)
+
+tools = [get_word_length]
+
+prompt = OpenAIFunctionsAgent.create_prompt(
+    system_message=SystemMessage(
+        content="You are a very powerful assistant, but bad at calculating lengths of words."
+    )
+)
+
+agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt, verbose=True)
+agent_executor = AgentExecutor(
+    agent=agent, tools=tools, verbose=True, metadata={"agent_name": "WordCount"}  # <- recommended, assign a custom name
+)
+agent_executor.run("how many letters in the word educa?", callbacks=[handler])
+```
+
+Another example:
```python
from langchain.agents import load_tools, initialize_agent, AgentType
from langchain.llms import OpenAI
from langchain.callbacks import LLMonitorCallbackHandler
-handler = LLMonitorCallbackHandler(app_id="...")
+handler = LLMonitorCallbackHandler()
llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
-agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
+agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, metadata={ "agent_name": "GirlfriendAgeFinder" })  # <- recommended, assign a custom name
agent.run(
    "Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?",
    callbacks=[handler],
-    metadata={
-        "agentName": "Leo DiCaprio's girlfriend",  # you can assign a custom agent in the metadata
-    },
)
```
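This commit also adds a `verbose` flag and accepts snake_case `user_id` metadata (see `_get_user_id` below). A minimal sketch combining both; `user-123` is a placeholder:

```python
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import LLMonitorCallbackHandler

# verbose=True prints each event payload before sending, which helps when
# debugging missing traces (setting LLMONITOR_VERBOSE=1 works as well).
handler = LLMonitorCallbackHandler(verbose=True)

chat = ChatOpenAI(
    temperature=0,
    callbacks=[handler],
    metadata={"user_id": "user-123"},  # snake_case now works alongside "userId"
)
chat.predict("Tell me a joke")
```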
## Support
For any questions or issues with the integration, you can reach out to the LLMonitor team on [Discord](http://discord.com/invite/8PafSG58kK) or via [email](mailto:vince@llmonitor.com).

@@ -14,6 +14,70 @@ from langchain.schema.output import LLMResult
DEFAULT_API_URL = "https://app.llmonitor.com"

+def _serialize(obj: Any) -> Union[Dict[str, Any], List[Any], Any]:
+    if hasattr(obj, "to_json"):
+        return obj.to_json()
+
+    if isinstance(obj, dict):
+        return {key: _serialize(value) for key, value in obj.items()}
+
+    if isinstance(obj, list):
+        return [_serialize(element) for element in obj]
+
+    return obj
+
+
+def _parse_input(raw_input: Any) -> Any:
+    if not raw_input:
+        return None
+
+    if not isinstance(raw_input, dict):
+        return _serialize(raw_input)
+
+    input_value = raw_input.get("input")
+    inputs_value = raw_input.get("inputs")
+    question_value = raw_input.get("question")
+    query_value = raw_input.get("query")
+
+    if input_value:
+        return input_value
+    if inputs_value:
+        return inputs_value
+    if question_value:
+        return question_value
+    if query_value:
+        return query_value
+
+    return _serialize(raw_input)
+
+
+def _parse_output(raw_output: dict) -> Any:
+    if not raw_output:
+        return None
+
+    if not isinstance(raw_output, dict):
+        return _serialize(raw_output)
+
+    text_value = raw_output.get("text")
+    output_value = raw_output.get("output")
+    output_text_value = raw_output.get("output_text")
+    answer_value = raw_output.get("answer")
+    result_value = raw_output.get("result")
+
+    if text_value:
+        return text_value
+    if answer_value:
+        return answer_value
+    if output_value:
+        return output_value
+    if output_text_value:
+        return output_text_value
+    if result_value:
+        return result_value
+
+    return _serialize(raw_output)
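The new helpers prefer a handful of well-known keys and only fall back to full serialization when none match. Expected behavior, assuming plain dict payloads:

```python
_parse_input({"question": "What is 2+2?"})  # -> "What is 2+2?"
_parse_input({"foo": "bar"})                # -> {"foo": "bar"} (fallback to _serialize)
_parse_output({"output_text": "4"})         # -> "4"
_parse_output({"verdict": True})            # -> {"verdict": True}
```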
def _parse_lc_role(
    role: str,
) -> Union[Literal["user", "ai", "system", "function"], None]:
@@ -29,8 +93,27 @@ def _parse_lc_role(
    return None

-def _serialize_lc_message(message: BaseMessage) -> Dict[str, Any]:
-    return {"text": message.content, "role": _parse_lc_role(message.type)}

+def _get_user_id(metadata: Any) -> Any:
+    metadata = metadata or {}
+    user_id = metadata.get("user_id")
+    if user_id is None:
+        user_id = metadata.get("userId")
+    return user_id
+
+
+def _parse_lc_message(message: BaseMessage) -> Dict[str, Any]:
+    parsed = {"text": message.content, "role": _parse_lc_role(message.type)}
+
+    function_call = (message.additional_kwargs or {}).get("function_call")
+    if function_call is not None:
+        parsed["functionCall"] = function_call
+
+    return parsed
+
+
+def _parse_lc_messages(messages: Union[List[BaseMessage], Any]) -> List[Dict[str, Any]]:
+    return [_parse_lc_message(message) for message in messages]
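`_get_user_id` makes both key styles equivalent, and `_parse_lc_message` now carries OpenAI function calls through to the event. Illustrative behavior:

```python
from langchain.schema import AIMessage

_get_user_id({"user_id": "u1"})  # -> "u1"
_get_user_id({"userId": "u1"})   # -> "u1"

msg = AIMessage(
    content="",
    additional_kwargs={"function_call": {"name": "get_word_length", "arguments": '{"word": "educa"}'}},
)
_parse_lc_message(msg)
# -> {"text": "", "role": "ai", "functionCall": {"name": "get_word_length", ...}}
```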
class LLMonitorCallbackHandler(BaseCallbackHandler):
@@ -62,14 +145,20 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
    __api_url: str
    __app_id: str
+    __verbose: bool

    def __init__(
-        self, app_id: Union[str, None] = None, api_url: Union[str, None] = None
+        self,
+        app_id: Union[str, None] = None,
+        api_url: Union[str, None] = None,
+        verbose: bool = False,
    ) -> None:
        super().__init__()

        self.__api_url = api_url or os.getenv("LLMONITOR_API_URL") or DEFAULT_API_URL
+        self.__verbose = verbose or bool(os.getenv("LLMONITOR_VERBOSE"))

        _app_id = app_id or os.getenv("LLMONITOR_APP_ID")
        if _app_id is None:
            raise ValueError(
@@ -89,7 +178,12 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
    def __send_event(self, event: Dict[str, Any]) -> None:
        headers = {"Content-Type": "application/json"}
        event = {**event, "app": self.__app_id, "timestamp": str(datetime.utcnow())}

+        if self.__verbose:
+            print("llmonitor_callback", event)

        data = {"events": event}
        requests.post(headers=headers, url=f"{self.__api_url}/api/report", json=data)
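For reference, a sketch of the JSON body this posts to `{api_url}/api/report`; the field names come from `__send_event` and the `on_*` handlers below, the values are made up:

```python
{
    "events": {
        "event": "start",
        "type": "llm",
        "app": "<LLMONITOR_APP_ID>",
        "timestamp": "2023-09-06 12:00:00.000000",
        "runId": "<uuid4>",
        "parentRunId": None,
        "input": "Tell me a joke",
        "tags": [],
        "metadata": {},
    }
}
```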
@@ -110,7 +204,7 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
            "userId": (metadata or {}).get("userId"),
            "runId": str(run_id),
            "parentRunId": str(parent_run_id) if parent_run_id else None,
-            "input": prompts[0],
+            "input": _parse_input(prompts),
            "name": kwargs.get("invocation_params", {}).get("model_name"),
            "tags": tags,
            "metadata": metadata,
@@ -128,13 +222,15 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
        metadata: Union[Dict[str, Any], None] = None,
        **kwargs: Any,
    ) -> Any:
+        user_id = _get_user_id(metadata)
+
        event = {
            "event": "start",
            "type": "llm",
-            "userId": (metadata or {}).get("userId"),
+            "userId": user_id,
            "runId": str(run_id),
            "parentRunId": str(parent_run_id) if parent_run_id else None,
-            "input": [_serialize_lc_message(message[0]) for message in messages],
+            "input": _parse_lc_messages(messages[0]),
            "name": kwargs.get("invocation_params", {}).get("model_name"),
            "tags": tags,
            "metadata": metadata,
@@ -151,36 +247,26 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
    ) -> None:
        token_usage = (response.llm_output or {}).get("token_usage", {})

+        parsed_output = _parse_lc_messages(
+            map(
+                lambda o: o.message if hasattr(o, "message") else None,
+                response.generations[0],
+            )
+        )
+
        event = {
            "event": "end",
            "type": "llm",
            "runId": str(run_id),
            "parent_run_id": str(parent_run_id) if parent_run_id else None,
-            "output": {"text": response.generations[0][0].text, "role": "ai"},
+            "output": parsed_output,
            "tokensUsage": {
-                "prompt": token_usage.get("prompt_tokens", 0),
-                "completion": token_usage.get("completion_tokens", 0),
+                "prompt": token_usage.get("prompt_tokens"),
+                "completion": token_usage.get("completion_tokens"),
            },
        }
        self.__send_event(event)
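`on_llm_end` now reports every message of the first generation, roles and function calls included, instead of only the first generation's text. A sketch of the resulting `output` for a function-calling completion:

```python
[
    {
        "text": "",
        "role": "ai",
        "functionCall": {"name": "get_word_length", "arguments": '{"word": "educa"}'},
    }
]
```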
-    def on_llm_error(
-        self,
-        error: Union[Exception, KeyboardInterrupt],
-        *,
-        run_id: UUID,
-        parent_run_id: Union[UUID, None] = None,
-        **kwargs: Any,
-    ) -> Any:
-        event = {
-            "event": "error",
-            "type": "llm",
-            "runId": str(run_id),
-            "parent_run_id": str(parent_run_id) if parent_run_id else None,
-            "error": {"message": str(error), "stack": traceback.format_exc()},
-        }
-        self.__send_event(event)
    def on_tool_start(
        self,
        serialized: Dict[str, Any],
@@ -192,10 +278,11 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
        metadata: Union[Dict[str, Any], None] = None,
        **kwargs: Any,
    ) -> None:
+        user_id = _get_user_id(metadata)
        event = {
            "event": "start",
            "type": "tool",
-            "userId": (metadata or {}).get("userId"),
+            "userId": user_id,
            "runId": str(run_id),
            "parentRunId": str(parent_run_id) if parent_run_id else None,
            "name": serialized.get("name"),
@@ -236,25 +323,34 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
    ) -> Any:
        name = serialized.get("id", [None, None, None, None])[3]
        type = "chain"
-        agentName = (metadata or {}).get("agentName")
+        metadata = metadata or {}
+
+        agentName = metadata.get("agent_name")
+        if agentName is None:
+            agentName = metadata.get("agentName")
+
        if agentName is not None:
            type = "agent"
            name = agentName
+        if name == "AgentExecutor" or name == "PlanAndExecute":
+            type = "agent"
+        if parent_run_id is not None:
+            type = "chain"
+
+        user_id = _get_user_id(metadata)
+
        event = {
            "event": "start",
            "type": type,
-            "userId": (metadata or {}).get("userId"),
+            "userId": user_id,
            "runId": str(run_id),
            "parentRunId": str(parent_run_id) if parent_run_id else None,
-            "input": inputs.get("input", inputs),
+            "input": _parse_input(inputs),
            "tags": tags,
            "metadata": metadata,
-            "name": serialized.get("id", [None, None, None, None])[3],
+            "name": name,
        }
        self.__send_event(event)
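The run type is now derived from metadata and the executor's class name. A condensed sketch of the rules; `classify` is a hypothetical helper, not part of the commit:

```python
def classify(name, metadata, parent_run_id):
    agent_name = (metadata or {}).get("agent_name") or (metadata or {}).get("agentName")
    type_ = "agent" if agent_name or name in ("AgentExecutor", "PlanAndExecute") else "chain"
    if parent_run_id is not None:
        type_ = "chain"  # nested runs are always reported as chains
    return agent_name or name, type_

assert classify("AgentExecutor", {"agent_name": "WordCount"}, None) == ("WordCount", "agent")
assert classify("LLMChain", None, "parent-uuid") == ("LLMChain", "chain")
```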
    def on_chain_end(
@@ -269,7 +365,42 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
            "event": "end",
            "type": "chain",
            "runId": str(run_id),
-            "output": outputs.get("output", outputs),
+            "output": _parse_output(outputs),
        }
        self.__send_event(event)
+    def on_agent_action(
+        self,
+        action: AgentAction,
+        *,
+        run_id: UUID,
+        parent_run_id: Union[UUID, None] = None,
+        **kwargs: Any,
+    ) -> Any:
+        event = {
+            "event": "start",
+            "type": "tool",
+            "runId": str(run_id),
+            "parentRunId": str(parent_run_id) if parent_run_id else None,
+            "name": action.tool,
+            "input": _parse_input(action.tool_input),
+        }
+        self.__send_event(event)
+
+    def on_agent_finish(
+        self,
+        finish: AgentFinish,
+        *,
+        run_id: UUID,
+        parent_run_id: Union[UUID, None] = None,
+        **kwargs: Any,
+    ) -> Any:
+        event = {
+            "event": "end",
+            "type": "agent",
+            "runId": str(run_id),
+            "parentRunId": str(parent_run_id) if parent_run_id else None,
+            "output": _parse_output(finish.return_values),
+        }
+        self.__send_event(event)
@@ -290,38 +421,37 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
        }
        self.__send_event(event)

-    def on_agent_action(
+    def on_tool_error(
        self,
-        action: AgentAction,
+        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: UUID,
        parent_run_id: Union[UUID, None] = None,
        **kwargs: Any,
    ) -> Any:
        event = {
-            "event": "start",
+            "event": "error",
            "type": "tool",
            "runId": str(run_id),
-            "parentRunId": str(parent_run_id) if parent_run_id else None,
-            "name": action.tool,
-            "input": action.tool_input,
+            "parent_run_id": str(parent_run_id) if parent_run_id else None,
+            "error": {"message": str(error), "stack": traceback.format_exc()},
        }
        self.__send_event(event)

-    def on_agent_finish(
+    def on_llm_error(
        self,
-        finish: AgentFinish,
+        error: Union[Exception, KeyboardInterrupt],
        *,
        run_id: UUID,
        parent_run_id: Union[UUID, None] = None,
        **kwargs: Any,
    ) -> Any:
        event = {
-            "event": "end",
-            "type": "agent",
+            "event": "error",
+            "type": "llm",
            "runId": str(run_id),
-            "parentRunId": str(parent_run_id) if parent_run_id else None,
-            "output": finish.return_values,
+            "parent_run_id": str(parent_run_id) if parent_run_id else None,
+            "error": {"message": str(error), "stack": traceback.format_exc()},
        }
        self.__send_event(event)
