Add in the async methods and link the run id (#5810)

branch: searx_updates
Zander Chase authored 11 months ago; committed by GitHub
parent ce7c11625f · commit d9fcc45d05

@@ -6,10 +6,13 @@ from typing import Any, Dict, List, Optional
from langchainplus_sdk import EvaluationResult, RunEvaluator
from langchainplus_sdk.schemas import Example, Run
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.schema import BaseOutputParser
from langchain.schema import RUN_KEY, BaseOutputParser
class RunEvalInputMapper:
@@ -59,8 +62,33 @@ class RunEvaluatorChain(Chain, RunEvaluator):
example: Optional[Example] = inputs.get("example")
chain_input = self.input_mapper.map(run, example)
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
chain_output = self.eval_chain(chain_input, callbacks=_run_manager.get_child())
callbacks = _run_manager.get_child()
chain_output = self.eval_chain(
chain_input, callbacks=callbacks, include_run_info=True
)
run_info = chain_output[RUN_KEY]
feedback = self.output_parser.parse_chain_output(chain_output)
feedback.evaluator_info[RUN_KEY] = run_info
return {"feedback": feedback}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> Dict[str, Any]:
run: Run = inputs["run"]
example: Optional[Example] = inputs.get("example")
chain_input = self.input_mapper.map(run, example)
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
chain_output = await self.eval_chain.acall(
chain_input,
callbacks=callbacks,
include_run_info=True,
)
run_info = chain_output[RUN_KEY]
feedback = self.output_parser.parse_chain_output(chain_output)
feedback.evaluator_info[RUN_KEY] = run_info
return {"feedback": feedback}
def evaluate_run(
@@ -68,3 +96,10 @@ class RunEvaluatorChain(Chain, RunEvaluator):
) -> EvaluationResult:
"""Evaluate an example."""
return self({"run": run, "example": example})["feedback"]
async def aevaluate_run(
self, run: Run, example: Optional[Example] = None
) -> EvaluationResult:
"""Evaluate an example."""
result = await self.acall({"run": run, "example": example})
return result["feedback"]
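The net effect of this hunk: both `_call` and the new `_acall` invoke the underlying `eval_chain` with `include_run_info=True`, so the evaluator's own run metadata comes back under `RUN_KEY` and is attached to the parsed feedback. A minimal usage sketch; `evaluator_chain` and `run` are hypothetical stand-ins for a configured `RunEvaluatorChain` and a traced `Run`:

```python
import asyncio

from langchain.schema import RUN_KEY

# Hypothetical inputs: a configured RunEvaluatorChain and a Run from the tracer.
feedback = evaluator_chain.evaluate_run(run)                # sync path (_call)
feedback = asyncio.run(evaluator_chain.aevaluate_run(run))  # async path (_acall)

# include_run_info=True links the feedback back to the evaluation run itself:
print(feedback.evaluator_info[RUN_KEY])
```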

@@ -43,7 +43,7 @@ class StringRunEvalInputMapper(RunEvalInputMapper, BaseModel):
def map(self, run: Run, example: Optional[Example] = None) -> Dict[str, str]:
"""Maps the Run and Optional[Example] to a dictionary"""
if run.outputs is None:
raise ValueError("Run outputs cannot be None.")
raise ValueError(f"Run {run.id} has no outputs.")
data = {
value: run.outputs.get(key) for key, value in self.prediction_map.items()
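For context, `map` pulls prediction strings out of `run.outputs` using `prediction_map` (run output key → eval prompt variable), and the reworded error now names the offending run so failures are traceable back to a trace. A small sketch with a hypothetical mapping:

```python
# Hypothetical prediction_map: maps a run output key to an eval prompt variable.
prediction_map = {"output": "prediction"}

if run.outputs is None:
    # The error message now carries the run id for debugging.
    raise ValueError(f"Run {run.id} has no outputs.")
data = {value: run.outputs.get(key) for key, value in prediction_map.items()}
# data == {"prediction": run.outputs.get("output")}
```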

@@ -120,11 +120,11 @@
},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.agents import initialize_agent, load_tools\n",
"from langchain.agents import AgentType\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"llm = ChatOpenAI(temperature=0)\n",
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False\n",
@@ -138,51 +138,7 @@
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"unknown format from LLM: This question cannot be answered using the numexpr library, as it does not involve any mathematical expressions.\n"
]
},
{
"data": {
"text/plain": [
"['39,566,248 people live in Canada as of 2023.',\n",
" \"Romain Gavras is Dua Lipa's boyfriend and his age raised to the .43 power is 4.9373857399466665.\",\n",
" '3.991298452658078',\n",
" 'The shortest distance (air line) between Boston and Paris is 3,437.00 mi (5,531.32 km).',\n",
" 'The total number of points scored in the 2023 Super Bowl raised to the .23 power is 2.3086081644669734.',\n",
" ValueError('unknown format from LLM: This question cannot be answered using the numexpr library, as it does not involve any mathematical expressions.'),\n",
" 'The 2023 Super Bowl scored 3 more points than the 2022 Super Bowl.',\n",
" '1.9347796717823205',\n",
" 'Devin Booker, Kendall Jenner\\'s boyfriend, is 6\\' 5\" tall and his height raised to the .13 power is 1.27335715306192.',\n",
" '1213 divided by 4345 is 0.2791714614499425']"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"import asyncio\n",
"\n",
@@ -206,13 +162,12 @@
" return await agent.arun(input_example)\n",
" except Exception as e:\n",
" # The agent sometimes makes mistakes! These will be captured by the tracing.\n",
" print(e)\n",
" return e\n",
"\n",
"\n",
"for input_example in inputs:\n",
" results.append(arun(agent, input_example))\n",
"await asyncio.gather(*results)"
"results = await asyncio.gather(*results)"
]
},
{
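The updated cell collects coroutines eagerly and then awaits them all at once; assigning the gathered values back means `results` ends up holding the agent outputs (or the caught exceptions) in the same order as `inputs`. A standalone sketch of the pattern, with `agent` and `inputs` assumed from earlier cells and top-level `await` relying on the notebook's running event loop:

```python
import asyncio

async def arun(agent, input_example):
    try:
        return await agent.arun(input_example)
    except Exception as e:
        # Mistakes are captured by tracing, so return the exception
        # rather than letting it abort the whole batch.
        return e

# gather preserves input order: results[i] corresponds to inputs[i].
coros = [arun(agent, x) for x in inputs]
results = await asyncio.gather(*coros)
```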
@@ -479,27 +434,6 @@
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Chain failed for example fb07a1d4-e96e-45fe-a3cd-5113e174b017. Error: unknown format from LLM: Sorry, I cannot answer this question as it requires information that is not currently available.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Processed examples: 2\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Chain failed for example f088cda6-3745-4f83-b8fa-e5c1038e81b2. Error: unknown format from LLM: Sorry, as an AI language model, I do not have access to personal information such as someone's age. Please provide a different math problem.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
@@ -511,36 +445,16 @@
"name": "stderr",
"output_type": "stream",
"text": [
"Chain failed for example abb7259c-8136-4903-80b3-04644eebcc82. Error: Parsing LLM output produced both a final answer and a parse-able action: I need to use the search engine to find out who Dua Lipa's boyfriend is and then use the calculator to raise his age to the .43 power.\n",
"Action 1: Search\n",
"Action Input 1: \"Dua Lipa boyfriend\"\n",
"Observation 1: Anwar Hadid is Dua Lipa's boyfriend.\n",
"Action 2: Calculator\n",
"Action Input 2: 21^0.43\n",
"Observation 2: Anwar Hadid's age raised to the 0.43 power is approximately 3.87.\n",
"Thought: I now know the final answer.\n",
"Final Answer: Anwar Hadid is Dua Lipa's boyfriend and his age raised to the 0.43 power is approximately 3.87.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Processed examples: 7\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Chain failed for example 2123b7f1-3d3d-4eca-ba30-faf0dff75399. Error: Could not parse LLM output: `I need to subtract the score of the`\n"
"Chain failed for example 59fb1b4d-d935-4e43-b2a7-bc33fde841bb. Error: LLMMathChain._evaluate(\"\n",
"round(0.2791714614499425, 2)\n",
"\") raised error: 'VariableNode' object is not callable. Please try again with a valid numerical expression\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Processed examples: 9\r"
"Processed examples: 5\r"
]
}
],
@@ -622,7 +536,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 14,
"id": "35db4025-9183-4e5f-ba14-0b1b380f49c7",
"metadata": {
"tags": []
@@ -644,39 +558,8 @@
},
{
"cell_type": "code",
"execution_count": 17,
"id": "20ab5a84-1d34-4532-8b4f-b12407f42a0e",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/html": [
"<a href=\"https://dev.langchain.plus\", target=\"_blank\" rel=\"noopener\">LangChain+ Client</a>"
],
"text/plain": [
"LangChainPlusClient (API URL: https://dev.api.langchain.plus)"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# TODO: Use this one above as well\n",
"from langchainplus_sdk import LangChainPlusClient\n",
"\n",
"client = LangChainPlusClient()\n",
"runs = list(client.list_runs(session_name=evaluation_session_name, execution_order=1, error=False))\n",
"client"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "58c23a51-1e0a-46d8-b04b-0e0627983232",
"execution_count": 27,
"id": "4c94a738-dcd3-442e-b8e7-dd36459f56e3",
"metadata": {
"tags": []
},
@@ -684,12 +567,12 @@
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "ddf4e207965345c7b1ac27a5e3e677e8",
"model_id": "a185493c1af74cbaa0f9b10f32cf81c6",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
" 0%| | 0/44 [00:00<?, ?it/s]"
"0it [00:00, ?it/s]"
]
},
"metadata": {},
@@ -698,19 +581,37 @@
],
"source": [
"from tqdm.notebook import tqdm\n",
"feedbacks = []\n",
"runs = client.list_runs(session_name=evaluation_session_name, execution_order=1, error=False)\n",
"for run in tqdm(runs):\n",
" eval_feedback = []\n",
" for evaluator in evaluators:\n",
" feedback = client.evaluate_run(run, evaluator)"
" eval_feedback.append(client.aevaluate_run(run, evaluator))\n",
" feedbacks.extend(await asyncio.gather(*eval_feedback)) "
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 29,
"id": "8696f167-dc75-4ef8-8bb3-ac1ce8324f30",
"metadata": {
"tags": []
},
"outputs": [],
"outputs": [
{
"data": {
"text/html": [
"<a href=\"https://dev.langchain.plus\", target=\"_blank\" rel=\"noopener\">LangChain+ Client</a>"
],
"text/plain": [
"LangChainPlusClient (API URL: https://dev.api.langchain.plus)"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"client"
]
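The new evaluation loop fans out `client.aevaluate_run` calls per run and gathers the feedback per batch. Given the RateLimitError retries visible earlier in the notebook, the same fan-out can be bounded with a semaphore; this is a hedged variation, not part of the commit (`client`, `runs`, and `evaluators` are assumed from earlier cells, and the limit is arbitrary):

```python
import asyncio

sem = asyncio.Semaphore(5)  # arbitrary cap on concurrent evaluator calls

async def bounded_eval(run, evaluator):
    async with sem:
        return await client.aevaluate_run(run, evaluator)

feedbacks = await asyncio.gather(
    *(bounded_eval(run, ev) for run in runs for ev in evaluators)
)
```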
@@ -718,7 +619,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "daf7dc7f-a5b0-49be-a695-2a87e283e588",
"id": "a5037e54-2c5a-4993-9b46-2a98773d3079",
"metadata": {},
"outputs": [],
"source": []

poetry.lock (generated)

@@ -1685,13 +1685,13 @@ files = [
[[package]]
name = "deeplake"
version = "3.5.4"
version = "3.6.0"
description = "Activeloop Deep Lake"
category = "main"
optional = false
python-versions = "*"
files = [
{file = "deeplake-3.5.4.tar.gz", hash = "sha256:a05eba10141fe1e5be5f0ed115a42da1320d6675d644b49790dc28485a87aa32"},
{file = "deeplake-3.6.0.tar.gz", hash = "sha256:bf502ed4fcd19624e750c649b8dd2fb892529a29384c8a816bbb09005b763db1"},
]
[package.dependencies]
@@ -1708,11 +1708,12 @@ pyjwt = "*"
tqdm = "*"
[package.extras]
all = ["IPython", "av (>=8.1.0)", "flask", "google-api-python-client (>=2.31.0,<2.32.0)", "google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "google-cloud-storage (>=1.42.0,<1.43.0)", "laspy", "libdeeplake (==0.0.54)", "nibabel", "oauth2client (>=4.1.3,<4.2.0)", "pydicom"]
all = ["IPython", "av (>=8.1.0)", "azure-cli", "azure-identity", "azure-storage-blob", "flask", "google-api-python-client (>=2.31.0,<2.32.0)", "google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "google-cloud-storage (>=1.42.0,<1.43.0)", "laspy", "libdeeplake (==0.0.55)", "nibabel", "oauth2client (>=4.1.3,<4.2.0)", "pydicom"]
audio = ["av (>=8.1.0)"]
av = ["av (>=8.1.0)"]
azure = ["azure-cli", "azure-identity", "azure-storage-blob"]
dicom = ["nibabel", "pydicom"]
enterprise = ["libdeeplake (==0.0.54)", "pyjwt"]
enterprise = ["libdeeplake (==0.0.55)", "pyjwt"]
gcp = ["google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "google-cloud-storage (>=1.42.0,<1.43.0)"]
gdrive = ["google-api-python-client (>=2.31.0,<2.32.0)", "google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "oauth2client (>=4.1.3,<4.2.0)"]
medical = ["nibabel", "pydicom"]
@@ -2524,14 +2525,14 @@ grpc = ["grpcio (>=1.44.0,<2.0.0dev)"]
[[package]]
name = "gptcache"
version = "0.1.29.1"
version = "0.1.30"
description = "GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, similar to how Redis works for traditional applications."
category = "main"
optional = false
python-versions = ">=3.8.1"
files = [
{file = "gptcache-0.1.29.1-py3-none-any.whl", hash = "sha256:e281f1c7bf1f2cc72e0193d6e15ad3d50e7cb358d04332ef96b98f732aee8a74"},
{file = "gptcache-0.1.29.1.tar.gz", hash = "sha256:c7c07c5300422feed302f4a140ff5e25e01f04885c81fb3388e07b3c3a82647a"},
{file = "gptcache-0.1.30-py3-none-any.whl", hash = "sha256:57c37babe85161fbbe547cb036f6780b33232d70557ae085daccf3c032bc7b14"},
{file = "gptcache-0.1.30.tar.gz", hash = "sha256:a69f600e286dee3e7f3b151c8b269778a4e6a7d5da409c01fbfbedf3239d0cd9"},
]
[package.dependencies]
@@ -3969,14 +3970,14 @@ tests = ["pytest", "pytest-mock"]
[[package]]
name = "langchainplus-sdk"
version = "0.0.4"
version = "0.0.6"
description = "Client library to connect to the LangChainPlus LLM Tracing and Evaluation Platform."
category = "main"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "langchainplus_sdk-0.0.4-py3-none-any.whl", hash = "sha256:ba72276b0e9cb148f572d531fbed41017d3a0072dc5a1110d8d2f3af06efce75"},
{file = "langchainplus_sdk-0.0.4.tar.gz", hash = "sha256:0c09af62e81975e33561c655ba9b32777d79834218c9377d28578a9558740044"},
{file = "langchainplus_sdk-0.0.6-py3-none-any.whl", hash = "sha256:43fe01c66442b88403c969b8812f6be81e023c0d2a6d5d3572a8d87961438658"},
{file = "langchainplus_sdk-0.0.6.tar.gz", hash = "sha256:c911a98fd2d02baa48f742b7d700fd6a55f11c9a545ee5d66b08825940c9a32e"},
]
[package.dependencies]
@@ -5591,14 +5592,14 @@ sympy = "*"
[[package]]
name = "openai"
version = "0.27.7"
version = "0.27.8"
description = "Python client library for the OpenAI API"
category = "main"
optional = false
python-versions = ">=3.7.1"
files = [
{file = "openai-0.27.7-py3-none-any.whl", hash = "sha256:788fb7fa85bf7caac6c1ed7eea5984254a1bdaf09ef485acf0e5718c8b2dc25a"},
{file = "openai-0.27.7.tar.gz", hash = "sha256:bca95fd4c3054ef38924def096396122130454442ec52005915ecf8269626b1d"},
{file = "openai-0.27.8-py3-none-any.whl", hash = "sha256:e0a7c2f7da26bdbe5354b03c6d4b82a2f34bd4458c7a17ae1a7092c3e397e03c"},
{file = "openai-0.27.8.tar.gz", hash = "sha256:2483095c7db1eee274cebac79e315a986c4e55207bb4fa7b82d185b3a2ed9536"},
]
[package.dependencies]
@@ -6340,20 +6341,20 @@ tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "pa
[[package]]
name = "pinecone-client"
version = "2.2.1"
version = "2.2.2"
description = "Pinecone client and SDK"
category = "main"
optional = false
python-versions = ">=3.6"
python-versions = ">=3.8"
files = [
{file = "pinecone-client-2.2.1.tar.gz", hash = "sha256:0878dcaee447c46c8d1b3d71c854689daa7e548e5009a171780907c7d4e74789"},
{file = "pinecone_client-2.2.1-py3-none-any.whl", hash = "sha256:6976a22aee57a9813378607506c8c36b0317dfa36a08a5397aaaeab2eef66c1b"},
{file = "pinecone-client-2.2.2.tar.gz", hash = "sha256:391fe413754efd4e0ef00154b44271d63c4cdd4bedf088d23111a5725d863210"},
{file = "pinecone_client-2.2.2-py3-none-any.whl", hash = "sha256:21fddb752668efee4d3c6b706346d9580e36a8b06b8d97afd60bd33ef2536e7e"},
]
[package.dependencies]
dnspython = ">=2.0.0"
loguru = ">=0.5.0"
numpy = "*"
numpy = ">=1.22.0"
python-dateutil = ">=2.5.3"
pyyaml = ">=5.4"
requests = ">=2.19.0"
@@ -6362,7 +6363,7 @@ typing-extensions = ">=3.7.4"
urllib3 = ">=1.21.1"
[package.extras]
grpc = ["googleapis-common-protos (>=1.53.0)", "grpc-gateway-protoc-gen-openapiv2 (==0.1.0)", "grpcio (>=1.44.0)", "lz4 (>=3.1.3)", "protobuf (==3.19.3)"]
grpc = ["googleapis-common-protos (>=1.53.0)", "grpc-gateway-protoc-gen-openapiv2 (==0.1.0)", "grpcio (>=1.44.0)", "lz4 (>=3.1.3)", "protobuf (>=3.19.5,<3.20.0)"]
[[package]]
name = "pinecone-text"
@@ -7159,16 +7160,15 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
[[package]]
name = "pylance"
version = "0.4.18"
version = "0.4.19"
description = "python wrapper for lance-rs"
category = "main"
optional = true
python-versions = ">=3.8"
files = [
{file = "pylance-0.4.18-cp38-abi3-macosx_10_15_x86_64.whl", hash = "sha256:9741880ede522b4f80ae9a43b499265096dca4eb2a2a2d72690cb01eb6ef6fed"},
{file = "pylance-0.4.18-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:8b2e2baf0dfeb65da8f993872f4c375d6ad16ada4472756f888f033ce72dbd86"},
{file = "pylance-0.4.18-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c81f891a6230fc38e60f01f16923263a082f7fd1646c15aad1e940d29c4e8a83"},
{file = "pylance-0.4.18-cp38-abi3-win_amd64.whl", hash = "sha256:eda511361a4545bb32fb98bcb270a1a41aaf022dd63cc306fdf5698310ba81ce"},
{file = "pylance-0.4.19-cp38-abi3-macosx_10_15_x86_64.whl", hash = "sha256:7f5dbf2c384de6c499a2d876be4aec5cbb8f8ef65d490ac93fbd84251ae61e7c"},
{file = "pylance-0.4.19-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:b2125d50ef28af11b5473e72f82ce9af30756ae655b982582c58267b72216043"},
{file = "pylance-0.4.19-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f98fc8cbc5f66d35ea7701eae691c195df4e45cca8c8f33d9e1a71f24077d7a6"},
]
[package.dependencies]
@@ -8905,20 +8905,20 @@ themes = ["myst-parser (>=0.12.9,<0.13.0)", "pydata-sphinx-theme (>=0.4.0,<0.5.0
[[package]]
name = "sphinx-rtd-theme"
version = "1.2.1"
version = "1.2.2"
description = "Read the Docs theme for Sphinx"
category = "dev"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
{file = "sphinx_rtd_theme-1.2.1-py2.py3-none-any.whl", hash = "sha256:2cc9351176cbf91944ce44cefd4fab6c3b76ac53aa9e15d6db45a3229ad7f866"},
{file = "sphinx_rtd_theme-1.2.1.tar.gz", hash = "sha256:cf9a7dc0352cf179c538891cb28d6fad6391117d4e21c891776ab41dd6c8ff70"},
{file = "sphinx_rtd_theme-1.2.2-py2.py3-none-any.whl", hash = "sha256:6a7e7d8af34eb8fc57d52a09c6b6b9c46ff44aea5951bc831eeb9245378f3689"},
{file = "sphinx_rtd_theme-1.2.2.tar.gz", hash = "sha256:01c5c5a72e2d025bd23d1f06c59a4831b06e6ce6c01fdd5ebfe9986c0a880fc7"},
]
[package.dependencies]
docutils = "<0.19"
sphinx = ">=1.6,<7"
sphinxcontrib-jquery = {version = ">=2.0.0,<3.0.0 || >3.0.0", markers = "python_version > \"3\""}
sphinxcontrib-jquery = ">=4,<5"
[package.extras]
dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client", "wheel"]
@@ -9219,14 +9219,14 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyam
[[package]]
name = "steamship"
version = "2.17.5"
version = "2.17.6"
description = "The fastest way to add language AI to your product."
category = "main"
optional = true
python-versions = "*"
files = [
{file = "steamship-2.17.5-py3-none-any.whl", hash = "sha256:4782281aca0d69f6c050623355687eb4a48d5a2108ac274d96e029779aad82f0"},
{file = "steamship-2.17.5.tar.gz", hash = "sha256:e0a6d0e46744b3208b306dffe87d20ecf46113d333bc76515191b0d5c9eb28bb"},
{file = "steamship-2.17.6-py3-none-any.whl", hash = "sha256:7d25db57f19d228f82ce445e15ace66b2a2e3ac25307d69c4828c27026e8e44c"},
{file = "steamship-2.17.6.tar.gz", hash = "sha256:ff3af96d7f0eef2036c222513af51ac356f0753123a7bf44bdb31eccd2d957df"},
]
[package.dependencies]
@@ -10385,14 +10385,14 @@ yarl = "*"
[[package]]
name = "wasabi"
version = "1.1.1"
version = "1.1.2"
description = "A lightweight console printing and formatting toolkit"
category = "main"
optional = true
python-versions = ">=3.6"
files = [
{file = "wasabi-1.1.1-py3-none-any.whl", hash = "sha256:32e44649d99a64e08e40c1c96cddb69fad460bd0cc33802a53cab6714dfb73f8"},
{file = "wasabi-1.1.1.tar.gz", hash = "sha256:f5ee7c609027811bd16e620f2fd7a7319466005848e41b051a62053ab8fd70d6"},
{file = "wasabi-1.1.2-py3-none-any.whl", hash = "sha256:0a3f933c4bf0ed3f93071132c1b87549733256d6c8de6473c5f7ed2e171b5cf9"},
{file = "wasabi-1.1.2.tar.gz", hash = "sha256:1aaef3aceaa32edb9c91330d29d3936c0c39fdb965743549c173cb54b16c30b5"},
]
[package.dependencies]
@@ -11241,4 +11241,4 @@ text-helpers = ["chardet"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "190059b07b111e19ec5b516be227b81728163fee14928c92cba2c0212d35ead0"
content-hash = "8c0ab1bdc8b506e38e6fa4cba40dcf2df47473212d47fa1086c6aae8ddf2c021"

@@ -103,7 +103,7 @@ momento = {version = "^1.5.0", optional = true}
bibtexparser = {version = "^1.4.0", optional = true}
pyspark = {version = "^3.4.0", optional = true}
tigrisdb = {version = "^1.0.0b6", optional = true}
langchainplus-sdk = "^0.0.4"
langchainplus-sdk = ">=0.0.6"
[tool.poetry.group.docs.dependencies]
autodoc_pydantic = "^1.8.0"
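The pyproject change swaps Poetry's caret pin for a plain lower bound: under caret semantics, `^0.0.4` expands to `>=0.0.4,<0.0.5`, which would have excluded the `0.0.6` release that this commit's lockfile picks up. A quick check of that reasoning with the `packaging` library:

```python
from packaging.specifiers import SpecifierSet

caret = SpecifierSet(">=0.0.4,<0.0.5")  # what Poetry expands "^0.0.4" to
print(caret.contains("0.0.6"))          # False: the old pin excluded 0.0.6
print(SpecifierSet(">=0.0.6").contains("0.0.6"))  # True: the new bound admits it
```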
