partners/openai: fix deprecation errors of pydantic's .dict() function (reopen #16629) (#17404)

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
Authored by Savvas Mantzouranidis, committed by GitHub
parent bebe401b1a
commit 691ff67096
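For context, the warnings this commit silences come from Pydantic v2, where BaseModel.dict() is deprecated in favour of BaseModel.model_dump(). A minimal sketch of the difference (illustrative models only, not the openai SDK's real response classes; assumes pydantic>=2 is installed):

# Minimal sketch: Pydantic v2 deprecates .dict() in favour of .model_dump().
# Choice and ChatCompletion below are illustrative stand-ins, not the openai
# SDK's real response models.
from typing import List, Optional

from pydantic import BaseModel


class Choice(BaseModel):
    finish_reason: Optional[str] = None


class ChatCompletion(BaseModel):
    choices: List[Choice] = []


response = ChatCompletion(choices=[Choice(finish_reason="stop")])

data = response.model_dump()  # preferred; returns plain nested dicts, no warning
# data = response.dict()      # still works, but emits PydanticDeprecatedSince20

print(data["choices"][0]["finish_reason"])  # -> stop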

@@ -7,7 +7,7 @@ from typing import Any, Callable, Dict, List, Optional, Union
 import openai
 from langchain_core.outputs import ChatResult
-from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
+from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
 from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
 from langchain_openai.chat_models.base import ChatOpenAI
@@ -209,9 +209,11 @@ class AzureChatOpenAI(ChatOpenAI):
             "openai_api_version": self.openai_api_version,
         }

-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(
+        self, response: Union[dict, openai.BaseModel]
+    ) -> ChatResult:
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         for res in response["choices"]:
             if res.get("finish_reason", None) == "content_filter":
                 raise ValueError(

@@ -394,7 +394,7 @@ class ChatOpenAI(BaseChatModel):
         default_chunk_class = AIMessageChunk
         for chunk in self.client.create(messages=message_dicts, **params):
             if not isinstance(chunk, dict):
-                chunk = chunk.dict()
+                chunk = chunk.model_dump()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]
@@ -449,10 +449,12 @@ class ChatOpenAI(BaseChatModel):
         message_dicts = [_convert_message_to_dict(m) for m in messages]
         return message_dicts, params

-    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
+    def _create_chat_result(
+        self, response: Union[dict, openai.BaseModel]
+    ) -> ChatResult:
         generations = []
         if not isinstance(response, dict):
-            response = response.dict()
+            response = response.model_dump()
         for res in response["choices"]:
             message = _convert_dict_to_message(res["message"])
             generation_info = dict(finish_reason=res.get("finish_reason"))
@@ -486,7 +488,7 @@ class ChatOpenAI(BaseChatModel):
             messages=message_dicts, **params
         ):
             if not isinstance(chunk, dict):
-                chunk = chunk.dict()
+                chunk = chunk.model_dump()
             if len(chunk["choices"]) == 0:
                 continue
             choice = chunk["choices"][0]

@@ -324,7 +324,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 input=tokens[i : i + _chunk_size], **self._invocation_params
             )
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             batched_embeddings.extend(r["embedding"] for r in response["data"])

         results: List[List[List[float]]] = [[] for _ in range(len(texts))]
@@ -343,7 +343,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                     input="", **self._invocation_params
                 )
                 if not isinstance(average_embedded, dict):
-                    average_embedded = average_embedded.dict()
+                    average_embedded = average_embedded.model_dump()
                 average = average_embedded["data"][0]["embedding"]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
@@ -436,7 +436,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
             )
             if not isinstance(response, dict):
-                response = response.dict()
+                response = response.model_dump()
             batched_embeddings.extend(r["embedding"] for r in response["data"])

         results: List[List[List[float]]] = [[] for _ in range(len(texts))]
@@ -453,7 +453,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                     input="", **self._invocation_params
                 )
                 if not isinstance(average_embedded, dict):
-                    average_embedded = average_embedded.dict()
+                    average_embedded = average_embedded.model_dump()
                 average = average_embedded["data"][0]["embedding"]
             else:
                 average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])

@@ -251,7 +251,7 @@ class BaseOpenAI(BaseLLM):
         self.get_sub_prompts(params, [prompt], stop)  # this mutates params
         for stream_resp in self.client.create(prompt=prompt, **params):
             if not isinstance(stream_resp, dict):
-                stream_resp = stream_resp.dict()
+                stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
             yield chunk
             if run_manager:
@@ -279,7 +279,7 @@ class BaseOpenAI(BaseLLM):
             prompt=prompt, **params
         ):
             if not isinstance(stream_resp, dict):
-                stream_resp = stream_resp.dict()
+                stream_resp = stream_resp.model_dump()
             chunk = _stream_response_to_generation_chunk(stream_resp)
             yield chunk
             if run_manager:
@@ -357,7 +357,7 @@ class BaseOpenAI(BaseLLM):
                 if not isinstance(response, dict):
                     # V1 client returns the response in an PyDantic object instead of
                     # dict. For the transition period, we deep convert it to dict.
-                    response = response.dict()
+                    response = response.model_dump()
                 choices.extend(response["choices"])
                 _update_token_usage(_keys, response, token_usage)
@@ -420,7 +420,7 @@ class BaseOpenAI(BaseLLM):
             else:
                 response = await self.async_client.create(prompt=_prompts, **params)
                 if not isinstance(response, dict):
-                    response = response.dict()
+                    response = response.model_dump()
                 choices.extend(response["choices"])
                 _update_token_usage(_keys, response, token_usage)
         return self.create_llm_result(
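
Every hunk above applies the same guard before indexing into the response: openai>=1.0 clients return Pydantic v2 models, while other code paths may still hand back plain dicts, so the response is normalised first. A standalone sketch of that pattern (the _as_dict helper is hypothetical, not part of this commit):

# Hypothetical helper illustrating the guard used throughout this commit:
# accept either a plain dict or an openai.BaseModel (a Pydantic v2 model in
# openai>=1.0) and always return a dict.
from typing import Any, Dict, Union

import openai


def _as_dict(response: Union[dict, openai.BaseModel]) -> Dict[str, Any]:
    if isinstance(response, dict):
        return response
    return response.model_dump()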
