langchain/libs/core/langchain_core/runnables/__init__.py

"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL based programs to scale better
for higher concurrent loads.
**Streaming** of intermediate outputs as they're being generated allows for
creating more responsive UX.
This module contains schema and implementation of LangChain Runnables primitives.
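As a minimal sketch (the lambdas below are illustrative placeholders, not part
of this module), Runnables compose with the ``|`` operator into a
``RunnableSequence``:

.. code-block:: python

    from langchain_core.runnables import RunnableLambda

    # Each step is a Runnable; | chains them into a RunnableSequence.
    sequence = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)

    sequence.invoke(1)           # -> 4
    sequence.batch([1, 2, 3])    # -> [4, 6, 8]

    # The same sequence also exposes async and streaming entry points,
    # e.g. await sequence.ainvoke(1) or iterating over sequence.stream(1).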
"""
from langchain_core.runnables.base import (
    Runnable,
    RunnableBinding,
    RunnableGenerator,
    RunnableLambda,
    RunnableMap,
    RunnableParallel,
    RunnableSequence,
    RunnableSerializable,
    chain,
)
from langchain_core.runnables.branch import RunnableBranch
from langchain_core.runnables.config import (
    RunnableConfig,
    ensure_config,
    get_config_list,
    patch_config,
    run_in_executor,
)
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
from langchain_core.runnables.passthrough import (
    RunnableAssign,
    RunnablePassthrough,
    RunnablePick,
)
from langchain_core.runnables.router import RouterInput, RouterRunnable
from langchain_core.runnables.utils import (
    AddableDict,
    ConfigurableField,
    ConfigurableFieldMultiOption,
    ConfigurableFieldSingleOption,
    ConfigurableFieldSpec,
    aadd,
    add,
)

__all__ = [
"chain",
"AddableDict",
"ConfigurableField",
"ConfigurableFieldSingleOption",
"ConfigurableFieldMultiOption",
"ConfigurableFieldSpec",
"ensure_config",
"run_in_executor",
"patch_config",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableSerializable",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnableAssign",
"RunnablePick",
"RunnableSequence",
"RunnableWithFallbacks",
"get_config_list",
"aadd",
"add",
]