Merge branch 'master' into patch-1

pull/18885/head
crissebasbol 3 months ago committed by GitHub
commit d85378f183

@@ -1068,7 +1068,7 @@
 "\n",
 "    def on_tool_end(\n",
 "        self,\n",
-"        output: str,\n",
+"        output: Any,\n",
 "        *,\n",
 "        run_id: UUID,\n",
 "        parent_run_id: Optional[UUID] = None,\n",
@@ -1076,7 +1076,7 @@
 "    ) -> Any:\n",
 "        \"\"\"Run when tool ends running.\"\"\"\n",
 "        print(\"Tool end\")\n",
-"        print(output)\n",
+"        print(str(output))\n",
 "\n",
 "    async def on_llm_end(\n",
 "        self,\n",

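The notebook hunk above shows the widened signature in user-facing docs. As a self-contained sketch of the pattern this commit standardizes on (accept `Any`, call `str()` only where text is needed), a handler might look like the following; the class name `MyToolLoggingHandler` and the sample call are illustrative, not part of the diff:

```python
# Minimal sketch of the on_tool_end pattern this commit adopts:
# accept Any, coerce with str() only where text is actually needed.
from typing import Any, Optional
from uuid import UUID, uuid4

from langchain_core.callbacks import BaseCallbackHandler


class MyToolLoggingHandler(BaseCallbackHandler):
    def on_tool_end(
        self,
        output: Any,  # was `str` before this change
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        """Run when tool ends running."""
        # Tools may now hand back arbitrary objects; render as text here.
        print(f"Tool end: {str(output)}")


# A dict flows through unchanged until the handler stringifies it.
MyToolLoggingHandler().on_tool_end({"rows": 3}, run_id=uuid4())
```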
@@ -52,7 +52,7 @@
 }
 ],
 "source": [
-"from langchain.callbacks.base import BaseCallbackHandler\n",
+"from langchain_core.callbacks import BaseCallbackHandler\n",
 "from langchain_core.messages import HumanMessage\n",
 "from langchain_openai import ChatOpenAI\n",
 "\n",
@@ -66,7 +66,7 @@
 "# Additionally, we pass in a list with our custom handler\n",
 "chat = ChatOpenAI(max_tokens=25, streaming=True, callbacks=[MyCustomHandler()])\n",
 "\n",
-"chat([HumanMessage(content=\"Tell me a joke\")])"
+"chat.invoke([HumanMessage(content=\"Tell me a joke\")])"
 ]
 },
 {

@@ -59,7 +59,7 @@ class BaseCallbackHandler:
     ) -> Any:
         """Run when tool starts running."""

-    def on_tool_end(self, output: str, **kwargs: Any) -> Any:
+    def on_tool_end(self, output: Any, **kwargs: Any) -> Any:
         """Run when tool ends running."""

     def on_tool_error(
@@ -79,15 +79,15 @@ class BaseCallbackHandler:
 ## Get started

-LangChain provides a few built-in handlers that you can use to get started. These are available in the `langchain/callbacks` module. The most basic handler is the `StdOutCallbackHandler`, which simply logs all events to `stdout`.
+LangChain provides a few built-in handlers that you can use to get started. These are available in the `langchain_core/callbacks` module. The most basic handler is the `StdOutCallbackHandler`, which simply logs all events to `stdout`.

 **Note**: when the `verbose` flag on the object is set to true, the `StdOutCallbackHandler` will be invoked even without being explicitly passed in.

 ```python
-from langchain.callbacks import StdOutCallbackHandler
+from langchain_core.callbacks import StdOutCallbackHandler
 from langchain.chains import LLMChain
 from langchain_openai import OpenAI
-from langchain.prompts import PromptTemplate
+from langchain_core.prompts import PromptTemplate

 handler = StdOutCallbackHandler()
 llm = OpenAI()
@@ -160,5 +160,5 @@ The `verbose` argument is available on most objects throughout the API (Chains,
 ### When do you want to use each of these?

 - Constructor callbacks are most useful for use cases such as logging, monitoring, etc., which are _not specific to a single request_, but rather to the entire chain. For example, if you want to log all the requests made to an `LLMChain`, you would pass a handler to the constructor.
-- Request callbacks are most useful for use cases such as streaming, where you want to stream the output of a single request to a specific websocket connection, or other similar use cases. For example, if you want to stream the output of a single request to a websocket, you would pass a handler to the `call()` method
+- Request callbacks are most useful for use cases such as streaming, where you want to stream the output of a single request to a specific websocket connection, or other similar use cases. For example, if you want to stream the output of a single request to a websocket, you would pass a handler to the `invoke()` method
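
As a sketch of the two scopes this hunk distinguishes, reusing the `StdOutCallbackHandler` chain from the docs example above (the second `invoke` call passes the handler per request via the standard `config` argument):

```python
from langchain.chains import LLMChain
from langchain_core.callbacks import StdOutCallbackHandler
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

handler = StdOutCallbackHandler()
prompt = PromptTemplate.from_template("1 + {number} = ")

# Constructor callbacks: attached for the lifetime of the chain,
# fired on every request the chain ever makes.
chain = LLMChain(llm=OpenAI(), prompt=prompt, callbacks=[handler])
chain.invoke({"number": 2})

# Request callbacks: scoped to this one invoke() call only.
chain = LLMChain(llm=OpenAI(), prompt=prompt)
chain.invoke({"number": 2}, {"callbacks": [handler]})
```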

@@ -15,7 +15,7 @@
 "source": [
 "# Graphs\n",
 "\n",
-"One of the common types of databases that we can build Q&A systems for are graph databases. LangChain comes with a number of built-in chains and agents that are compatible with graph query language dialects like Cypher, SparQL, and others (e.g., Neo4j, MemGraph, Amazon Neptune, OntoText, Tigegraph). They enable use cases such as:\n",
+"One of the common types of databases that we can build Q&A systems for are graph databases. LangChain comes with a number of built-in chains and agents that are compatible with graph query language dialects like Cypher, SparQL, and others (e.g., Neo4j, MemGraph, Amazon Neptune, Kùzu, OntoText, Tigergraph). They enable use cases such as:\n",
 "\n",
 "* Generating queries that will be run based on natural language questions,\n",
 "* Creating chatbots that can answer questions based on database data,\n",

@@ -314,8 +314,9 @@ class AimCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
         self._run.track(aim.Text(input_str), name="on_tool_start", context=resp)

-    def on_tool_end(self, output: str, **kwargs: Any) -> None:
+    def on_tool_end(self, output: Any, **kwargs: Any) -> None:
         """Run when tool ends running."""
+        output = str(output)
         aim = import_aim()
         self.step += 1
         self.tool_ends += 1

@@ -328,7 +328,7 @@ class ArgillaCallbackHandler(BaseCallbackHandler):
     def on_tool_end(
         self,
-        output: str,
+        output: Any,
         observation_prefix: Optional[str] = None,
         llm_prefix: Optional[str] = None,
         **kwargs: Any,

@@ -196,7 +196,7 @@ class ArizeCallbackHandler(BaseCallbackHandler):
     def on_tool_end(
         self,
-        output: str,
+        output: Any,
         observation_prefix: Optional[str] = None,
         llm_prefix: Optional[str] = None,
         **kwargs: Any,

@@ -279,7 +279,7 @@ class ArthurCallbackHandler(BaseCallbackHandler):
     def on_tool_end(
         self,
-        output: str,
+        output: Any,
         observation_prefix: Optional[str] = None,
         llm_prefix: Optional[str] = None,
         **kwargs: Any,

@@ -243,8 +243,9 @@ class ClearMLCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
         if self.stream_logs:
             self.logger.report_text(resp)

-    def on_tool_end(self, output: str, **kwargs: Any) -> None:
+    def on_tool_end(self, output: Any, **kwargs: Any) -> None:
         """Run when tool ends running."""
+        output = str(output)
         self.step += 1
         self.tool_ends += 1
         self.ends += 1

@@ -303,8 +303,9 @@ class CometCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
         resp.update({"input_str": input_str})
         self.action_records.append(resp)

-    def on_tool_end(self, output: str, **kwargs: Any) -> None:
+    def on_tool_end(self, output: Any, **kwargs: Any) -> None:
         """Run when tool ends running."""
+        output = str(output)
         self.step += 1
         self.tool_ends += 1
         self.ends += 1

@@ -162,7 +162,7 @@ class DeepEvalCallbackHandler(BaseCallbackHandler):
     def on_tool_end(
         self,
-        output: str,
+        output: Any,
         observation_prefix: Optional[str] = None,
         llm_prefix: Optional[str] = None,
         **kwargs: Any,

@@ -465,13 +465,14 @@ class LLMonitorCallbackHandler(BaseCallbackHandler):
     def on_tool_end(
         self,
-        output: str,
+        output: Any,
         *,
         run_id: UUID,
         parent_run_id: Union[UUID, None] = None,
         tags: Union[List[str], None] = None,
         **kwargs: Any,
     ) -> None:
+        output = str(output)
         if self.__has_valid_config is False:
             return
         try:

@@ -518,8 +518,9 @@ class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
         self.records["action_records"].append(resp)
         self.mlflg.jsonf(resp, f"tool_start_{tool_starts}")

-    def on_tool_end(self, output: str, **kwargs: Any) -> None:
+    def on_tool_end(self, output: Any, **kwargs: Any) -> None:
         """Run when tool ends running."""
+        output = str(output)
         self.metrics["step"] += 1
         self.metrics["tool_ends"] += 1
         self.metrics["ends"] += 1

@@ -186,8 +186,9 @@ class SageMakerCallbackHandler(BaseCallbackHandler):
         self.jsonf(resp, self.temp_dir, f"tool_start_{tool_starts}")

-    def on_tool_end(self, output: str, **kwargs: Any) -> None:
+    def on_tool_end(self, output: Any, **kwargs: Any) -> None:
         """Run when tool ends running."""
+        output = str(output)
         self.metrics["step"] += 1
         self.metrics["tool_ends"] += 1
         self.metrics["ends"] += 1

@@ -183,13 +183,13 @@ class LLMThought:
     def on_tool_end(
         self,
-        output: str,
+        output: Any,
         color: Optional[str] = None,
         observation_prefix: Optional[str] = None,
         llm_prefix: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
-        self._container.markdown(f"**{output}**")
+        self._container.markdown(f"**{str(output)}**")

     def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
         self._container.markdown("**Tool encountered an error...**")
@@ -363,12 +363,13 @@ class StreamlitCallbackHandler(BaseCallbackHandler):
     def on_tool_end(
         self,
-        output: str,
+        output: Any,
         color: Optional[str] = None,
         observation_prefix: Optional[str] = None,
         llm_prefix: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
+        output = str(output)
         self._require_current_thought().on_tool_end(
             output, color, observation_prefix, llm_prefix, **kwargs
         )

@@ -356,8 +356,9 @@ class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
         if self.stream_logs:
             self.run.log(resp)

-    def on_tool_end(self, output: str, **kwargs: Any) -> None:
+    def on_tool_end(self, output: Any, **kwargs: Any) -> None:
         """Run when tool ends running."""
+        output = str(output)
         self.step += 1
         self.tool_ends += 1
         self.ends += 1

@@ -133,7 +133,7 @@ class ToolManagerMixin:
     def on_tool_end(
         self,
-        output: str,
+        output: Any,
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
@@ -440,7 +440,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
     async def on_tool_end(
         self,
-        output: str,
+        output: Any,
         *,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,

@@ -976,14 +976,15 @@ class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin):
     def on_tool_end(
         self,
-        output: str,
+        output: Any,
         **kwargs: Any,
     ) -> None:
         """Run when tool ends running.

         Args:
-            output (str): The output of the tool.
+            output (Any): The output of the tool.
         """
+        output = str(output)
         handle_event(
             self.handlers,
             "on_tool_end",
@@ -1038,12 +1039,13 @@ class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin):
         )

     @shielded
-    async def on_tool_end(self, output: str, **kwargs: Any) -> None:
+    async def on_tool_end(self, output: Any, **kwargs: Any) -> None:
         """Run when tool ends running.

         Args:
-            output (str): The output of the tool.
+            output (Any): The output of the tool.
         """
+        output = str(output)
         await ahandle_event(
             self.handlers,
             "on_tool_end",

@@ -37,13 +37,14 @@ class StdOutCallbackHandler(BaseCallbackHandler):
     def on_tool_end(
         self,
-        output: str,
+        output: Any,
         color: Optional[str] = None,
         observation_prefix: Optional[str] = None,
         llm_prefix: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
         """If not the final action, print out observation."""
+        output = str(output)
         if observation_prefix is not None:
             print_text(f"\n{observation_prefix}")
         print_text(output, color=color or self.color)

@@ -59,7 +59,7 @@ class StreamingStdOutCallbackHandler(BaseCallbackHandler):
         """Run on agent action."""
         pass

-    def on_tool_end(self, output: str, **kwargs: Any) -> None:
+    def on_tool_end(self, output: Any, **kwargs: Any) -> None:
         """Run when tool ends running."""

     def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:

@@ -410,17 +410,13 @@ class ChildTool(BaseTool):
                     f"Got unexpected type of `handle_tool_error`. Expected bool, str "
                     f"or callable. Received: {self.handle_tool_error}"
                 )
-            run_manager.on_tool_end(
-                str(observation), color="red", name=self.name, **kwargs
-            )
+            run_manager.on_tool_end(observation, color="red", name=self.name, **kwargs)
             return observation
         except (Exception, KeyboardInterrupt) as e:
             run_manager.on_tool_error(e)
             raise e
         else:
-            run_manager.on_tool_end(
-                str(observation), color=color, name=self.name, **kwargs
-            )
+            run_manager.on_tool_end(observation, color=color, name=self.name, **kwargs)
             return observation

     async def arun(
@@ -502,7 +498,7 @@ class ChildTool(BaseTool):
                     f"or callable. Received: {self.handle_tool_error}"
                 )
             await run_manager.on_tool_end(
-                str(observation), color="red", name=self.name, **kwargs
+                observation, color="red", name=self.name, **kwargs
             )
             return observation
         except (Exception, KeyboardInterrupt) as e:
@@ -510,7 +506,7 @@ class ChildTool(BaseTool):
             raise e
         else:
             await run_manager.on_tool_end(
-                str(observation), color=color, name=self.name, **kwargs
+                observation, color=color, name=self.name, **kwargs
             )
             return observation

@@ -504,8 +504,9 @@ class BaseTracer(BaseCallbackHandler, ABC):
         self._on_tool_start(tool_run)
         return tool_run

-    def on_tool_end(self, output: str, *, run_id: UUID, **kwargs: Any) -> Run:
+    def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any) -> Run:
        """End a trace for a tool run."""
+        output = str(output)
         tool_run = self._get_run(run_id, run_type="tool")
         tool_run.outputs = {"output": output}
         tool_run.end_time = datetime.now(timezone.utc)

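The net effect of the hunks above: `run_manager.on_tool_end` now receives the raw observation instead of a pre-stringified one, and each handler performs its own `str(output)` coercion. A hedged sketch of why that matters, assuming the `@tool` decorator from `langchain_core.tools` (the tool itself is hypothetical):

```python
from langchain_core.tools import tool


@tool
def fetch_scores() -> dict:
    """Return raw scores as a dict rather than a pre-rendered string."""
    return {"alpha": 0.91, "beta": 0.87}


# With this change, on_tool_end callbacks see the dict itself; handlers
# that need text apply str(), mirroring the added `output = str(output)`.
result = fetch_scores.invoke({})
print(type(result), str(result))
```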