nvidia-ai-endpoints[patch]: Support User-Agent metadata and minor fixes. (#16942)

- **Description:** Several meta/usability updates, including User-Agent.
  - **Issue:** 
- User-Agent metadata for tracking connector engagement. @milesial
please check and advise.
- Better error messages; tries harder to find a request ID (requested by
@milesial).
- Client-side image resizing for multimodal models. Hope to upgrade to
Assets API solution in around a month.
- `client.payload_fn` allows you to modify payload before network
request. Use-case shown in doc notebook for kosmos_2.
- `client.last_inputs` put back in to allow for advanced
support/debugging.
  - **Dependencies:** 
- Attempts to pull in PIL for image resizing. If not installed, prints
out "please install" message, warns it might fail, and then tries
without resizing. We are waiting on a more permanent solution.

For LC viz: @hinthornw 
For NV viz: @fciannella @milesial @vinaybagade

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
pull/17035/head
Vadim Kudlay 5 months ago committed by GitHub
parent ae56fd020a
commit 75b6fa1134
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

File diff suppressed because one or more lines are too long

@ -2,6 +2,8 @@ from __future__ import annotations
import json
import logging
import time
from functools import partial
from typing import (
Any,
AsyncIterator,
@ -45,6 +47,7 @@ class NVEModel(BaseModel):
## Core defaults. These probably should not be changed
fetch_url_format: str = Field("https://api.nvcf.nvidia.com/v2/nvcf/pexec/status/")
call_invoke_base: str = Field("https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions")
func_list_format: str = Field("https://api.nvcf.nvidia.com/v2/nvcf/functions")
get_session_fn: Callable = Field(requests.Session)
get_asession_fn: Callable = Field(aiohttp.ClientSession)
@ -55,7 +58,10 @@ class NVEModel(BaseModel):
is_staging: bool = Field(False, description="Whether to use staging API")
## Generation arguments
max_tries: int = Field(5, ge=1)
timeout: float = Field(60, ge=0, description="Timeout for waiting on response (s)")
interval: float = Field(0.02, ge=0, description="Interval for pulling response")
last_inputs: dict = Field({}, description="Last inputs sent over to the server")
payload_fn: Callable = Field(lambda d: d, description="Function to process payload")
headers_tmpl: dict = Field(
...,
description="Headers template for API calls."
@ -85,34 +91,31 @@ class NVEModel(BaseModel):
)
if "nvapi-" not in values.get("nvidia_api_key", ""):
raise ValueError("Invalid NVAPI key detected. Should start with `nvapi-`")
is_staging = "nvapi-stg-" in values["nvidia_api_key"]
values["is_staging"] = is_staging
values["is_staging"] = "nvapi-stg-" in values["nvidia_api_key"]
if "headers_tmpl" not in values:
call_kvs = {
"Accept": "application/json",
}
stream_kvs = {
"Accept": "text/event-stream",
"content-type": "application/json",
}
shared_kvs = {
"Authorization": "Bearer {nvidia_api_key}",
"User-Agent": "langchain-nvidia-ai-endpoints",
}
values["headers_tmpl"] = {
"call": {
"Authorization": "Bearer {nvidia_api_key}",
"Accept": "application/json",
},
"stream": {
"Authorization": "Bearer {nvidia_api_key}",
"Accept": "text/event-stream",
"content-type": "application/json",
},
"call": {**call_kvs, **shared_kvs},
"stream": {**stream_kvs, **shared_kvs},
}
return values
values["fetch_url_format"] = cls._stagify(
is_staging,
values.get(
"fetch_url_format", "https://api.nvcf.nvidia.com/v2/nvcf/pexec/status/"
),
)
values["call_invoke_base"] = cls._stagify(
is_staging,
values.get(
"call_invoke_base",
"https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions",
),
)
@root_validator(pre=False)
def validate_model_post(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Additional validation after default values have been put in"""
values["stagify"] = partial(cls._stagify, is_staging=values["is_staging"])
values["fetch_url_format"] = values["stagify"](values.get("fetch_url_format"))
values["call_invoke_base"] = values["stagify"](values.get("call_invoke_base"))
return values
@property
@ -129,9 +132,7 @@ class NVEModel(BaseModel):
"""List the available functions that can be invoked."""
if self._available_functions is not None:
return self._available_functions
invoke_url = self._stagify(
self.is_staging, "https://api.nvcf.nvidia.com/v2/nvcf/functions"
)
invoke_url = self._stagify(self.func_list_format, self.is_staging)
query_res = self.query(invoke_url)
if "functions" not in query_res:
raise ValueError(
@ -140,8 +141,8 @@ class NVEModel(BaseModel):
self._available_functions = query_res["functions"]
return self._available_functions
@classmethod
def _stagify(cls, is_staging: bool, path: str) -> str:
@staticmethod
def _stagify(path: str, is_staging: bool) -> str:
"""Helper method to switch between staging and production endpoints"""
if is_staging and "stg.api" not in path:
return path.replace("api.", "stg.api.")
@ -154,56 +155,61 @@ class NVEModel(BaseModel):
def _post(self, invoke_url: str, payload: dict = {}) -> Tuple[Response, Any]:
"""Method for posting to the AI Foundation Model Function API."""
call_inputs = {
self.last_inputs = {
"url": invoke_url,
"headers": self.headers["call"],
"json": payload,
"json": self.payload_fn(payload),
"stream": False,
}
session = self.get_session_fn()
response = session.post(**call_inputs)
response = session.post(**self.last_inputs)
self._try_raise(response)
return response, session
def _get(self, invoke_url: str, payload: dict = {}) -> Tuple[Response, Any]:
"""Method for getting from the AI Foundation Model Function API."""
last_inputs = {
self.last_inputs = {
"url": invoke_url,
"headers": self.headers["call"],
"json": payload,
"json": self.payload_fn(payload),
"stream": False,
}
session = self.get_session_fn()
last_response = session.get(**last_inputs)
last_response = session.get(**self.last_inputs)
self._try_raise(last_response)
return last_response, session
def _wait(self, response: Response, session: Any) -> Response:
"""Wait for a response from API after an initial response is made."""
i = 1
"""Wait for a response from API after an initial response is made"""
start_time = time.time()
while response.status_code == 202:
time.sleep(self.interval)
if (time.time() - start_time) > self.timeout:
raise TimeoutError(
f"Timeout reached without a successful response."
f"\nLast response: {str(response)}"
)
request_id = response.headers.get("NVCF-REQID", "")
response = session.get(
self.fetch_url_format + request_id,
headers=self.headers["call"],
)
if response.status_code == 202:
try:
body = response.json()
except ValueError:
body = str(response)
if i > self.max_tries:
raise ValueError(f"Failed to get response with {i} tries: {body}")
self._try_raise(response)
return response
def _try_raise(self, response: Response) -> None:
"""Try to raise an error from a response"""
## (VK) Several systems can throw errors. This tries to coerce all of them
## If we can't predictably pull out request id, then dump response
try:
response.raise_for_status()
except requests.HTTPError as e:
except requests.HTTPError:
try:
rd = response.json()
if "detail" in rd and "reqId" in rd.get("detail", ""):
rd_buf = "- " + str(rd["detail"])
rd_buf = rd_buf.replace(": ", ", Error: ").replace(", ", "\n- ")
rd["detail"] = rd_buf
except json.JSONDecodeError:
rd = response.__dict__
rd = rd.get("_content", rd)
@ -213,9 +219,19 @@ class NVEModel(BaseModel):
rd = json.loads(rd)
except Exception:
rd = {"detail": rd}
title = f"[{rd.get('status', '###')}] {rd.get('title', 'Unknown Error')}"
body = f"{rd.get('detail', rd.get('type', rd))}"
raise Exception(f"{title}\n{body}") from e
status = rd.get("status", "###")
title = rd.get("title", rd.get("error", "Unknown Error"))
header = f"[{status}] {title}"
body = ""
if "requestId" in rd:
if "detail" in rd:
body += f"{rd['detail']}\n"
body += "RequestID: " + rd["requestId"]
else:
body = rd.get("detail", rd)
if str(status) == "401":
body += "\nPlease check or regenerate your API key."
raise Exception(f"{header}\n{body}") from None
####################################################################################
## Simple query interface to show the set of model options
@ -361,18 +377,18 @@ class NVEModel(BaseModel):
invoke_url = self._get_invoke_url(model, invoke_url)
if payload.get("stream", True) is False:
payload = {**payload, "stream": True}
last_inputs = {
self.last_inputs = {
"url": invoke_url,
"headers": self.headers["stream"],
"json": payload,
"stream": True,
}
response = self.get_session_fn().post(**last_inputs)
response = self.get_session_fn().post(**self.last_inputs)
self._try_raise(response)
call = self.copy()
def out_gen() -> Generator[dict, Any, Any]:
## Good for client, since it allows self.last_input
## Good for client, since it allows self.last_inputs
for line in response.iter_lines():
if line and line.strip() != b"data: [DONE]":
line = line.decode("utf-8")
@ -397,13 +413,13 @@ class NVEModel(BaseModel):
invoke_url = self._get_invoke_url(model, invoke_url)
if payload.get("stream", True) is False:
payload = {**payload, "stream": True}
last_inputs = {
self.last_inputs = {
"url": invoke_url,
"headers": self.headers["stream"],
"json": payload,
}
async with self.get_asession_fn() as session:
async with session.post(**last_inputs) as response:
async with session.post(**self.last_inputs) as response:
self._try_raise(response)
async for line in response.content.iter_any():
if line and line.strip() != b"data: [DONE]":
@ -451,6 +467,16 @@ class _NVIDIAClient(BaseModel):
"""Map the available models that can be invoked."""
return self.client.available_models
@staticmethod
def get_available_functions(**kwargs: Any) -> List[dict]:
    """List the invokable functions without an existing client instance.

    All keyword arguments are forwarded to the ``NVEModel`` constructor.
    """
    nve_client = NVEModel(**kwargs)
    return nve_client.available_functions
@staticmethod
def get_available_models(**kwargs: Any) -> dict:
    """List the invokable models without an existing client instance.

    All keyword arguments are forwarded to the ``NVEModel`` constructor.
    """
    nve_client = NVEModel(**kwargs)
    return nve_client.available_models
def get_model_details(self, model: Optional[str] = None) -> dict:
"""Get more meta-details about a model retrieved by a given name"""
if model is None:

@ -2,8 +2,10 @@
from __future__ import annotations
import base64
import io
import logging
import os
import sys
import urllib.parse
from typing import (
Any,
@ -28,6 +30,13 @@ from langchain_core.outputs import ChatGenerationChunk
from langchain_nvidia_ai_endpoints import _common as nvidia_ai_endpoints
try:
import PIL.Image
has_pillow = True
except ImportError:
has_pillow = False
logger = logging.getLogger(__name__)
@ -48,6 +57,27 @@ def _is_b64(s: str) -> bool:
return s.startswith("data:image")
def _resize_image(img_data: bytes, max_dim: int = 1024) -> str:
    """Shrink an image so its longest side is at most ``max_dim`` pixels.

    Used to keep multimodal payloads under the NVIDIA API size limit.

    Args:
        img_data: Raw image bytes in any format Pillow can open.
        max_dim: Maximum allowed length (px) of the longest image dimension.

    Returns:
        Base64-encoded string of the re-encoded (JPEG) image. If Pillow is
        not installed, the original bytes are encoded unchanged as a
        best-effort fallback, which may exceed the API payload limit.
    """
    if not has_pillow:
        print(
            "Pillow is required to resize images down to reasonable scale."
            " Please install it using `pip install pillow`."
            " For now, not resizing; may cause NVIDIA API to fail."
        )
        return base64.b64encode(img_data).decode("utf-8")
    image = PIL.Image.open(io.BytesIO(img_data))
    # Only shrink: upscaling a small image would inflate the payload this
    # function exists to reduce.
    longest_side = max(image.size)
    if longest_side > max_dim:
        scale = max_dim / longest_side
        new_size = (int(image.size[0] * scale), int(image.size[1] * scale))
        image = image.resize(new_size, PIL.Image.Resampling.LANCZOS)
    # JPEG has no alpha channel; convert so RGBA/P-mode images (e.g. most
    # PNGs) do not raise "cannot write mode RGBA as JPEG" on save.
    if image.mode != "RGB":
        image = image.convert("RGB")
    output_buffer = io.BytesIO()
    image.save(output_buffer, format="JPEG")
    output_buffer.seek(0)
    return base64.b64encode(output_buffer.read()).decode("utf-8")
def _url_to_b64_string(image_source: str) -> str:
b64_template = "data:image/png;base64,{b64_string}"
try:
@ -55,6 +85,9 @@ def _url_to_b64_string(image_source: str) -> str:
response = requests.get(image_source)
response.raise_for_status()
encoded = base64.b64encode(response.content).decode("utf-8")
if sys.getsizeof(encoded) > 200000:
## (VK) Temporary fix. NVIDIA API has a limit of 250KB for the input.
encoded = _resize_image(response.content)
return b64_template.format(b64_string=encoded)
elif _is_b64(image_source):
return image_source
@ -148,8 +181,6 @@ class ChatNVIDIA(nvidia_ai_endpoints._NVIDIAClient, SimpleChatModel):
def custom_preprocess(
self, msg_list: Sequence[BaseMessage]
) -> List[Dict[str, str]]:
# The previous author had a lot of custom preprocessing here
# but I'm just going to assume it's a list
return [self.preprocess_msg(m) for m in msg_list]
def _process_content(self, content: Union[str, List[Union[dict, str]]]) -> str:
@ -184,9 +215,6 @@ class ChatNVIDIA(nvidia_ai_endpoints._NVIDIAClient, SimpleChatModel):
return "".join(string_array)
def preprocess_msg(self, msg: BaseMessage) -> Dict[str, str]:
## (WFH): Previous author added a bunch of
# custom processing here, but I'm just going to support
# the LCEL api.
if isinstance(msg, BaseMessage):
role_convert = {"ai": "assistant", "human": "user"}
if isinstance(msg, ChatMessage):

@ -4,13 +4,13 @@ from typing import Any, List, Literal, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
import langchain_nvidia_ai_endpoints._common as nvai_common
from langchain_nvidia_ai_endpoints._common import NVEModel
class NVIDIAEmbeddings(BaseModel, Embeddings):
"""NVIDIA's AI Foundation Retriever Question-Answering Asymmetric Model."""
client: nvai_common.NVEModel = Field(nvai_common.NVEModel)
client: NVEModel = Field(NVEModel)
model: str = Field(
..., description="The embedding model to use. Example: nvolveqa_40k"
)
@ -23,14 +23,29 @@ class NVIDIAEmbeddings(BaseModel, Embeddings):
@root_validator(pre=True)
def _validate_client(cls, values: Any) -> Any:
if "client" not in values:
values["client"] = nvai_common.NVEModel()
values["client"] = NVEModel(**values)
return values
@property
def available_functions(self) -> List[dict]:
    """Functions that can be invoked, as reported by the backing client."""
    backing_client = self.client
    return backing_client.available_functions
@property
def available_models(self) -> dict:
    """Models that can be invoked, as reported by the backing client."""
    backing_client = self.client
    return backing_client.available_models
@staticmethod
def get_available_functions(**kwargs: Any) -> List[dict]:
    """List the invokable functions without an existing client instance.

    All keyword arguments are forwarded to the ``NVEModel`` constructor.
    """
    nve_client = NVEModel(**kwargs)
    return nve_client.available_functions
@staticmethod
def get_available_models(**kwargs: Any) -> dict:
    """List the invokable models without an existing client instance.

    All keyword arguments are forwarded to the ``NVEModel`` constructor.
    """
    nve_client = NVEModel(**kwargs)
    return nve_client.available_models
def _embed(
self, texts: List[str], model_type: Literal["passage", "query"]
) -> List[List[float]]:

@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
[[package]]
name = "aiohttp"
@ -454,6 +454,7 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
files = [
{file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"},
{file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"},
]
[[package]]
@ -901,6 +902,7 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
{file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@ -908,8 +910,16 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
{file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
{file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
@ -926,6 +936,7 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
{file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@ -933,6 +944,7 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
{file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@ -1046,6 +1058,17 @@ files = [
{file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]
[[package]]
name = "types-pillow"
version = "10.2.0.20240125"
description = "Typing stubs for Pillow"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-Pillow-10.2.0.20240125.tar.gz", hash = "sha256:c449b2c43b9fdbe0494a7b950e6b39a4e50516091213fec24ef3f33c1d017717"},
{file = "types_Pillow-10.2.0.20240125-py3-none-any.whl", hash = "sha256:322dbae32b4b7918da5e8a47c50ac0f24b0aa72a804a23857620f2722b03c858"},
]
[[package]]
name = "types-requests"
version = "2.31.0.10"
@ -1232,4 +1255,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "04cdee0f18ebbe7c619ec38d0b11a060a9364709576c1183ad1207b7a25306f8"
content-hash = "ab0b10dcff485da25cd4a2b8de147038237af3920362cc8c749f58409e91c9f8"

@ -48,6 +48,7 @@ ruff = "^0.1.5"
mypy = "^0.991"
langchain-core = {path = "../../core", develop = true}
types-requests = "^2.31.0.10"
types-pillow = "^10.2.0.20240125"
[tool.poetry.group.dev]
optional = true

Loading…
Cancel
Save