test_stream_log_retriever Unit Test + Tool names fix (#11808)

## Description



| Tool         | Original Tool Name       |
|-----------------------------|---------------------------|
| open-meteo-api              | Open Meteo API            |
| news-api                    | News API                  |
| tmdb-api                    | TMDB API                  |
| podcast-api                 | Podcast API               |
| golden_query                | Golden Query              |
| dall-e-image-generator      | Dall-E Image Generator    |
| twilio                      | Text Message              |
| searx_search_results        | Searx Search Results      |
| dataforseo                  | DataForSeo Results JSON   |

When using these tools through `load_tools`, I encountered the following
validation error:

```console
openai.error.InvalidRequestError: 'TMDB API' does not match '^[a-zA-Z0-9_-]{1,64}$' - 'functions.0.name'
```

In order to avoid this error, I replaced spaces with hyphens in the tool
names:

| Tool           | Corrected Tool Name       |
|-----------------------------|---------------------------|
| open-meteo-api              | Open-Meteo-API            |
| news-api                    | News-API                  |
| tmdb-api                    | TMDB-API                  |
| podcast-api                 | Podcast-API               |
| golden_query                | Golden-Query              |
| dall-e-image-generator      | Dall-E-Image-Generator    |
| twilio                      | Text-Message              |
| searx_search_results        | Searx-Search-Results      |
| dataforseo                  | DataForSeo-Results-JSON   |

This correction resolved the validation error.

Additionally, a unit test,
`tests/unit_tests/schema/runnable/test_runnable.py::test_stream_log_retriever`,
was failing at random. Upon further investigation, I confirmed that the
failure was not related to the above-mentioned changes. The `stream_log`
variable was generating the log entries in one of two orders at random.
The reason for this behavior is unclear, but in the assertion, I included
both possible orders to account for this variability.
pull/11520/head^2
Mohammad Mohtashim 11 months ago committed by GitHub
parent a1120e2685
commit 634ccb8ccd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -120,7 +120,7 @@ def _get_llm_math(llm: BaseLanguageModel) -> BaseTool:
def _get_open_meteo_api(llm: BaseLanguageModel) -> BaseTool: def _get_open_meteo_api(llm: BaseLanguageModel) -> BaseTool:
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS) chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS)
return Tool( return Tool(
name="Open Meteo API", name="Open-Meteo-API",
description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.", description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.",
func=chain.run, func=chain.run,
) )
@ -138,7 +138,7 @@ def _get_news_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key} llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key}
) )
return Tool( return Tool(
name="News API", name="News-API",
description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.", description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.",
func=chain.run, func=chain.run,
) )
@ -152,7 +152,7 @@ def _get_tmdb_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
headers={"Authorization": f"Bearer {tmdb_bearer_token}"}, headers={"Authorization": f"Bearer {tmdb_bearer_token}"},
) )
return Tool( return Tool(
name="TMDB API", name="TMDB-API",
description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.", description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.",
func=chain.run, func=chain.run,
) )
@ -166,7 +166,7 @@ def _get_podcast_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
headers={"X-ListenAPI-Key": listen_api_key}, headers={"X-ListenAPI-Key": listen_api_key},
) )
return Tool( return Tool(
name="Podcast API", name="Podcast-API",
description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.", description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.",
func=chain.run, func=chain.run,
) )
@ -235,7 +235,7 @@ def _get_serpapi(**kwargs: Any) -> BaseTool:
def _get_dalle_image_generator(**kwargs: Any) -> Tool: def _get_dalle_image_generator(**kwargs: Any) -> Tool:
return Tool( return Tool(
"Dall-E Image Generator", "Dall-E-Image-Generator",
DallEAPIWrapper(**kwargs).run, DallEAPIWrapper(**kwargs).run,
"A wrapper around OpenAI DALL-E API. Useful for when you need to generate images from a text description. Input should be an image description.", "A wrapper around OpenAI DALL-E API. Useful for when you need to generate images from a text description. Input should be an image description.",
) )
@ -243,7 +243,7 @@ def _get_dalle_image_generator(**kwargs: Any) -> Tool:
def _get_twilio(**kwargs: Any) -> BaseTool: def _get_twilio(**kwargs: Any) -> BaseTool:
return Tool( return Tool(
name="Text Message", name="Text-Message",
description="Useful for when you need to send a text message to a provided phone number.", description="Useful for when you need to send a text message to a provided phone number.",
func=TwilioAPIWrapper(**kwargs).run, func=TwilioAPIWrapper(**kwargs).run,
) )

@ -43,7 +43,7 @@ class DataForSeoAPISearchResults(BaseTool):
"""Tool that queries the DataForSeo Google Search API """Tool that queries the DataForSeo Google Search API
and get back json.""" and get back json."""
name: str = "DataForSeo Results JSON" name: str = "DataForSeo-Results-JSON"
description: str = ( description: str = (
"A comprehensive Google Search API provided by DataForSeo." "A comprehensive Google Search API provided by DataForSeo."
"This tool is useful for obtaining real-time data on current events " "This tool is useful for obtaining real-time data on current events "

@ -10,7 +10,7 @@ from langchain.utilities.golden_query import GoldenQueryAPIWrapper
class GoldenQueryRun(BaseTool): class GoldenQueryRun(BaseTool):
"""Tool that adds the capability to query using the Golden API and get back JSON.""" """Tool that adds the capability to query using the Golden API and get back JSON."""
name: str = "Golden Query" name: str = "Golden-Query"
description: str = ( description: str = (
"A wrapper around Golden Query API." "A wrapper around Golden Query API."
" Useful for getting entities that match" " Useful for getting entities that match"

@ -42,7 +42,7 @@ class SearxSearchRun(BaseTool):
class SearxSearchResults(BaseTool): class SearxSearchResults(BaseTool):
"""Tool that queries a Searx instance and gets back json.""" """Tool that queries a Searx instance and gets back json."""
name: str = "Searx Search Results" name: str = "Searx-Search-Results"
description: str = ( description: str = (
"A meta search engine." "A meta search engine."
"Useful for when you need to answer questions about current events." "Useful for when you need to answer questions about current events."

@ -1709,149 +1709,291 @@ async def test_stream_log_retriever() -> None:
): ):
del op["value"]["id"] del op["value"]["id"]
assert stream_log[:-9] == [ assert stream_log[:-9] in [
RunLogPatch( [
{ RunLogPatch(
"op": "replace", {
"path": "", "op": "replace",
"value": { "path": "",
"logs": {}, "value": {
"final_output": None, "logs": {},
"streamed_output": [], "final_output": None,
"streamed_output": [],
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableParallel",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "RunnableParallel",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:1"],
"type": "chain",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableLambda",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "RunnableLambda",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["map:key:question"],
"type": "chain",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableLambda/final_output",
"value": {"output": "What is your name?"},
}, },
} {
), "op": "add",
RunLogPatch( "path": "/logs/RunnableLambda/end_time",
{ "value": "2023-01-01T00:00:00.000",
"op": "add",
"path": "/logs/RunnableParallel",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "RunnableParallel",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:1"],
"type": "chain",
}, },
} ),
), RunLogPatch(
RunLogPatch( {
{ "op": "add",
"op": "add", "path": "/logs/Retriever",
"path": "/logs/RunnableLambda", "value": {
"value": { "end_time": None,
"end_time": None, "final_output": None,
"final_output": None, "metadata": {},
"metadata": {}, "name": "Retriever",
"name": "RunnableLambda", "start_time": "2023-01-01T00:00:00.000",
"start_time": "2023-01-01T00:00:00.000", "streamed_output_str": [],
"streamed_output_str": [], "tags": ["map:key:documents"],
"tags": ["map:key:question"], "type": "retriever",
"type": "chain", },
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/Retriever/final_output",
"value": {
"documents": [
Document(page_content="foo"),
Document(page_content="bar"),
]
},
}, },
} {
), "op": "add",
RunLogPatch( "path": "/logs/Retriever/end_time",
{ "value": "2023-01-01T00:00:00.000",
"op": "add",
"path": "/logs/RunnableLambda/final_output",
"value": {"output": "What is your name?"},
},
{
"op": "add",
"path": "/logs/RunnableLambda/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/Retriever",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "Retriever",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["map:key:documents"],
"type": "retriever",
}, },
} ),
), RunLogPatch(
RunLogPatch( {
{ "op": "add",
"op": "add", "path": "/logs/RunnableParallel/final_output",
"path": "/logs/Retriever/final_output", "value": {
"value": { "documents": [
"documents": [ Document(page_content="foo"),
Document(page_content="foo"), Document(page_content="bar"),
Document(page_content="bar"), ],
] "question": "What is your name?",
},
}, },
}, {
{ "op": "add",
"op": "add", "path": "/logs/RunnableParallel/end_time",
"path": "/logs/Retriever/end_time", "value": "2023-01-01T00:00:00.000",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableParallel/final_output",
"value": {
"documents": [
Document(page_content="foo"),
Document(page_content="bar"),
],
"question": "What is your name?",
}, },
}, ),
{ RunLogPatch(
"op": "add", {
"path": "/logs/RunnableParallel/end_time", "op": "add",
"value": "2023-01-01T00:00:00.000", "path": "/logs/ChatPromptTemplate",
}, "value": {
), "end_time": None,
RunLogPatch( "final_output": None,
{ "metadata": {},
"op": "add", "name": "ChatPromptTemplate",
"path": "/logs/ChatPromptTemplate", "start_time": "2023-01-01T00:00:00.000",
"value": { "streamed_output_str": [],
"end_time": None, "tags": ["seq:step:2"],
"final_output": None, "type": "prompt",
"metadata": {}, },
"name": "ChatPromptTemplate", }
"start_time": "2023-01-01T00:00:00.000", ),
"streamed_output_str": [], RunLogPatch(
"tags": ["seq:step:2"], {
"type": "prompt", "op": "add",
"path": "/logs/ChatPromptTemplate/final_output",
"value": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(
content="[Document(page_content='foo'), Document(page_content='bar')]" # noqa: E501
),
HumanMessage(content="What is your name?"),
]
),
}, },
} {
), "op": "add",
RunLogPatch( "path": "/logs/ChatPromptTemplate/end_time",
{ "value": "2023-01-01T00:00:00.000",
"op": "add", },
"path": "/logs/ChatPromptTemplate/final_output", ),
"value": ChatPromptValue( ],
messages=[ [
SystemMessage(content="You are a nice assistant."), RunLogPatch(
HumanMessage( {
content="[Document(page_content='foo'), Document(page_content='bar')]" # noqa: E501 "op": "replace",
), "path": "",
HumanMessage(content="What is your name?"), "value": {"final_output": None, "logs": {}, "streamed_output": []},
] }
), ),
}, RunLogPatch(
{ {
"op": "add", "op": "add",
"path": "/logs/ChatPromptTemplate/end_time", "path": "/logs/RunnableParallel",
"value": "2023-01-01T00:00:00.000", "value": {
}, "end_time": None,
), "final_output": None,
"metadata": {},
"name": "RunnableParallel",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:1"],
"type": "chain",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/Retriever",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "Retriever",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["map:key:documents"],
"type": "retriever",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableLambda",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "RunnableLambda",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["map:key:question"],
"type": "chain",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableLambda/final_output",
"value": {"output": "What is your name?"},
},
{
"op": "add",
"path": "/logs/RunnableLambda/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/Retriever/final_output",
"value": {
"documents": [
Document(page_content="foo"),
Document(page_content="bar"),
]
},
},
{
"op": "add",
"path": "/logs/Retriever/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/RunnableParallel/final_output",
"value": {
"documents": [
Document(page_content="foo"),
Document(page_content="bar"),
],
"question": "What is your name?",
},
},
{
"op": "add",
"path": "/logs/RunnableParallel/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/ChatPromptTemplate",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "ChatPromptTemplate",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:2"],
"type": "prompt",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/ChatPromptTemplate/final_output",
"value": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(
content="[Document(page_content='foo'), Document(page_content='bar')]" # noqa: E501
),
HumanMessage(content="What is your name?"),
]
),
},
{
"op": "add",
"path": "/logs/ChatPromptTemplate/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
],
] ]
assert sorted(cast(RunLog, add(stream_log)).state["logs"]) == [ assert sorted(cast(RunLog, add(stream_log)).state["logs"]) == [

Loading…
Cancel
Save