community[minor]: add allow_dangerous_requests for OpenAPI toolkits (#19493)
**OpenAPI allow_dangerous_requests**: community: add `allow_dangerous_requests` for OpenAPI toolkits

**Description:** Due to the `BaseRequestsTool` changes, `allow_dangerous_requests` now has to be passed explicitly: b617085af0/libs/community/langchain_community/tools/requests/tool.py (L26-L46)
The OpenAPI toolkits, however, did not pass it through: b617085af0/libs/community/langchain_community/agent_toolkits/openapi/planner.py (L262-L269)

**Issue:** Fixes https://github.com/langchain-ai/langchain/issues/19440. Without `allow_dangerous_requests`, the agent is unable to make any requests.

**Dependencies:** None.

---------

Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
Co-authored-by: Eugene Yurtsev <eyurtsev@gmail.com>
This commit is contained in:
parent 301dc3dfd2
commit 0394c6e126
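For readers skimming the diff: the notebook changes below boil down to the opt-in pattern sketched here. This is a minimal sketch rather than part of the commit; the spec file name (`openapi.yaml`) and the empty headers are placeholders, and the imports mirror the ones the notebook already uses.

```python
import yaml

from langchain_community.agent_toolkits.openapi import planner
from langchain_community.agent_toolkits.openapi.spec import reduce_openapi_spec
from langchain_community.utilities.requests import RequestsWrapper
from langchain_openai import ChatOpenAI

# Explicit opt-in: without this flag the underlying request tools refuse to run.
ALLOW_DANGEROUS_REQUEST = True

# Placeholder spec file; any OpenAPI/Swagger yaml works here.
with open("openapi.yaml") as f:
    raw_spec = yaml.load(f, Loader=yaml.Loader)
api_spec = reduce_openapi_spec(raw_spec)

requests_wrapper = RequestsWrapper(headers={})  # add auth headers as needed
llm = ChatOpenAI(model_name="gpt-4", temperature=0.0)

agent = planner.create_openapi_agent(
    api_spec,
    requests_wrapper,
    llm,
    allow_dangerous_requests=ALLOW_DANGEROUS_REQUEST,
)
agent.invoke("make me a playlist with the first song from kind of blue. call it machine blues.")
```

The same `ALLOW_DANGEROUS_REQUEST` constant is threaded through the rest of the notebook, which is what most of the notebook hunks below change.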
OpenAPI toolkit docs notebook

@@ -10,6 +10,18 @@
     "We can construct agents to consume arbitrary APIs, here APIs conformant to the `OpenAPI`/`Swagger` specification."
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "798a442b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# NOTE: In this example, we must set `allow_dangerous_requests=True` to enable the OpenAPI Agent to automatically use the Request Tool.\n",
+    "# This can be dangerous, since it allows the agent to make unwanted requests. Please make sure your custom OpenAPI spec (yaml) is safe.\n",
+    "ALLOW_DANGEROUS_REQUEST = True"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "a389367b",
@@ -46,6 +58,14 @@
     "import yaml"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "816011d8",
+   "metadata": {},
+   "source": [
+    "You will be able to get OpenAPI specs from here: [APIs-guru/openapi-directory](https://github.com/APIs-guru/openapi-directory)"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": 2,
@@ -261,9 +281,9 @@
    ],
    "source": [
     "from langchain_community.agent_toolkits.openapi import planner\n",
-    "from langchain_openai import OpenAI\n",
+    "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "llm = OpenAI(model_name=\"gpt-4\", temperature=0.0)"
+    "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0.0)"
    ]
   },
   {
@@ -335,11 +355,17 @@
     }
    ],
    "source": [
-    "spotify_agent = planner.create_openapi_agent(spotify_api_spec, requests_wrapper, llm)\n",
+    "# NOTE: set allow_dangerous_requests manually because of security concerns: https://python.langchain.com/docs/security\n",
+    "spotify_agent = planner.create_openapi_agent(\n",
+    "    spotify_api_spec,\n",
+    "    requests_wrapper,\n",
+    "    llm,\n",
+    "    allow_dangerous_requests=ALLOW_DANGEROUS_REQUEST,\n",
+    ")\n",
     "user_query = (\n",
     "    \"make me a playlist with the first song from kind of blue. call it machine blues.\"\n",
     ")\n",
-    "spotify_agent.run(user_query)"
+    "spotify_agent.invoke(user_query)"
    ]
   },
   {
@@ -420,7 +446,7 @@
    ],
    "source": [
     "user_query = \"give me a song I'd like, make it blues-ey\"\n",
-    "spotify_agent.run(user_query)"
+    "spotify_agent.invoke(user_query)"
    ]
   },
   {
@@ -549,12 +575,12 @@
    ],
    "source": [
     "# Meta!\n",
-    "llm = OpenAI(model_name=\"gpt-4\", temperature=0.25)\n",
+    "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0.25)\n",
     "openai_agent = planner.create_openapi_agent(\n",
     "    openai_api_spec, openai_requests_wrapper, llm\n",
     ")\n",
     "user_query = \"generate a short piece of advice\"\n",
-    "openai_agent.run(user_query)"
+    "openai_agent.invoke(user_query)"
    ]
   },
   {
@@ -606,7 +632,10 @@
     "    OpenAI(temperature=0), json_spec, openai_requests_wrapper, verbose=True\n",
     ")\n",
     "openapi_agent_executor = create_openapi_agent(\n",
-    "    llm=OpenAI(temperature=0), toolkit=openapi_toolkit, verbose=True\n",
+    "    llm=OpenAI(temperature=0),\n",
+    "    toolkit=openapi_toolkit,\n",
+    "    allow_dangerous_requests=ALLOW_DANGEROUS_REQUEST,\n",
+    "    verbose=True,\n",
     ")"
    ]
   },
libs/community/langchain_community/agent_toolkits/openapi/planner.py

@@ -1,4 +1,5 @@
 """Agent that interacts with OpenAPI APIs via a hierarchical planning approach."""
+
 import json
 import re
 from functools import partial
@@ -252,6 +253,7 @@ def _create_api_controller_agent(
     api_docs: str,
     requests_wrapper: RequestsWrapper,
     llm: BaseLanguageModel,
+    allow_dangerous_requests: bool,
 ) -> Any:
     from langchain.agents.agent import AgentExecutor
     from langchain.agents.mrkl.base import ZeroShotAgent
@@ -261,10 +263,14 @@ def _create_api_controller_agent(
     post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT)
     tools: List[BaseTool] = [
         RequestsGetToolWithParsing(
-            requests_wrapper=requests_wrapper, llm_chain=get_llm_chain
+            requests_wrapper=requests_wrapper,
+            llm_chain=get_llm_chain,
+            allow_dangerous_requests=allow_dangerous_requests,
         ),
         RequestsPostToolWithParsing(
-            requests_wrapper=requests_wrapper, llm_chain=post_llm_chain
+            requests_wrapper=requests_wrapper,
+            llm_chain=post_llm_chain,
+            allow_dangerous_requests=allow_dangerous_requests,
         ),
     ]
     prompt = PromptTemplate(
@@ -290,6 +296,7 @@ def _create_api_controller_tool(
     api_spec: ReducedOpenAPISpec,
     requests_wrapper: RequestsWrapper,
     llm: BaseLanguageModel,
+    allow_dangerous_requests: bool,
 ) -> Tool:
     """Expose controller as a tool.

@@ -318,7 +325,9 @@ def _create_api_controller_tool(
             if not found_match:
                 raise ValueError(f"{endpoint_name} endpoint does not exist.")

-        agent = _create_api_controller_agent(base_url, docs_str, requests_wrapper, llm)
+        agent = _create_api_controller_agent(
+            base_url, docs_str, requests_wrapper, llm, allow_dangerous_requests
+        )
         return agent.run(plan_str)

     return Tool(
@@ -336,6 +345,7 @@ def create_openapi_agent(
     callback_manager: Optional[BaseCallbackManager] = None,
     verbose: bool = True,
     agent_executor_kwargs: Optional[Dict[str, Any]] = None,
+    allow_dangerous_requests: bool = False,
     **kwargs: Any,
 ) -> Any:
     """Instantiate OpenAI API planner and controller for a given spec.
@@ -345,6 +355,14 @@
     We use a top-level "orchestrator" agent to invoke the planner and controller,
     rather than a top-level planner
     that invokes a controller with its plan. This is to keep the planner simple.
+
+    You need to set allow_dangerous_requests to True to use Agent with BaseRequestsTool.
+    Requests can be dangerous and can lead to security vulnerabilities.
+    For example, users can ask a server to make a request to an internal
+    server. It's recommended to use requests through a proxy server
+    and avoid accepting inputs from untrusted sources without proper sandboxing.
+    Please see: https://python.langchain.com/docs/security
+    for further security information.
     """
     from langchain.agents.agent import AgentExecutor
     from langchain.agents.mrkl.base import ZeroShotAgent
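To make the new docstring concrete: with the flag left at its default of `False`, the planner agent is still constructed, but its request tools refuse to build the first time the controller tool runs. A sketch, not part of the diff, reusing the placeholder `api_spec`, `requests_wrapper`, and `llm` objects from the sketch near the top of this page:

```python
from langchain_community.agent_toolkits.openapi import planner

# Default (allow_dangerous_requests=False): the orchestrator is built, but the
# first time its api_controller tool runs, BaseRequestsTool raises
# ValueError("You must set allow_dangerous_requests to True to use this tool. ..."),
# so no HTTP request is ever issued.
agent = planner.create_openapi_agent(api_spec, requests_wrapper, llm)

# Explicit opt-in, after reviewing https://python.langchain.com/docs/security
agent = planner.create_openapi_agent(
    api_spec, requests_wrapper, llm, allow_dangerous_requests=True
)
```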
@@ -352,7 +370,9 @@ def create_openapi_agent(

     tools = [
         _create_api_planner_tool(api_spec, llm),
-        _create_api_controller_tool(api_spec, requests_wrapper, llm),
+        _create_api_controller_tool(
+            api_spec, requests_wrapper, llm, allow_dangerous_requests
+        ),
     ]
     prompt = PromptTemplate(
         template=API_ORCHESTRATOR_PROMPT,
libs/community/langchain_community/tools/requests/tool.py

@@ -35,8 +35,8 @@ class BaseRequestsTool(BaseModel):
         if not kwargs.get("allow_dangerous_requests", False):
             raise ValueError(
                 "You must set allow_dangerous_requests to True to use this tool. "
-                "Request scan be dangerous and can lead to security vulnerabilities. "
-                "For example, users can ask a server to make a request to an internal"
+                "Requests can be dangerous and can lead to security vulnerabilities. "
+                "For example, users can ask a server to make a request to an internal "
                 "server. It's recommended to use requests through a proxy server "
                 "and avoid accepting inputs from untrusted sources without proper "
                 "sandboxing."
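The guard whose message this hunk corrects can be exercised directly. A minimal sketch, assuming only `RequestsGetTool` and `TextRequestsWrapper` from `langchain_community` (the empty headers are a placeholder):

```python
from langchain_community.tools.requests.tool import RequestsGetTool
from langchain_community.utilities.requests import TextRequestsWrapper

wrapper = TextRequestsWrapper(headers={})

# By default the tool refuses to construct, so no request can ever be made.
try:
    RequestsGetTool(requests_wrapper=wrapper)
except ValueError as err:
    print(err)  # "You must set allow_dangerous_requests to True to use this tool. ..."

# Explicit opt-in, which the OpenAPI toolkits now forward on the caller's behalf.
tool = RequestsGetTool(requests_wrapper=wrapper, allow_dangerous_requests=True)
```

The `*WithParsing` variants used by the planner add an `llm_chain` on top of this, but inherit the same constructor check from `BaseRequestsTool`.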
@@ -50,7 +50,10 @@ class RequestsGetTool(BaseRequestsTool, BaseTool):
     """Tool for making a GET request to an API endpoint."""

     name: str = "requests_get"
-    description: str = "A portal to the internet. Use this when you need to get specific content from a website. Input should be a url (i.e. https://www.google.com). The output will be the text response of the GET request."
+    description: str = """A portal to the internet. Use this when you need to get specific
+    content from a website. Input should be a url (i.e. https://www.google.com).
+    The output will be the text response of the GET request.
+    """

     def _run(
         self, url: str, run_manager: Optional[CallbackManagerForToolRun] = None
@@ -182,7 +185,11 @@ class RequestsDeleteTool(BaseRequestsTool, BaseTool):
     """Tool for making a DELETE request to an API endpoint."""

     name: str = "requests_delete"
-    description: str = "A portal to the internet. Use this when you need to make a DELETE request to a URL. Input should be a specific url, and the output will be the text response of the DELETE request."
+    description: str = """A portal to the internet.
+    Use this when you need to make a DELETE request to a URL.
+    Input should be a specific url, and the output will be the text
+    response of the DELETE request.
+    """

     def _run(
         self,