@@ -63,10 +63,7 @@ from langchain_core.messages import (
     ToolMessageChunk,
 )
 from langchain_core.messages.ai import UsageMetadata
-from langchain_core.output_parsers import (
-    JsonOutputParser,
-    PydanticOutputParser,
-)
+from langchain_core.output_parsers import JsonOutputParser, PydanticOutputParser
 from langchain_core.output_parsers.base import OutputParserLike
 from langchain_core.output_parsers.openai_tools import (
     JsonOutputKeyToolsParser,
@@ -182,9 +179,7 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:
     Returns:
         The dictionary.
     """
-    message_dict: Dict[str, Any] = {
-        "content": _format_message_content(message.content),
-    }
+    message_dict: Dict[str, Any] = {"content": _format_message_content(message.content)}
     if (name := message.name or message.additional_kwargs.get("name")) is not None:
         message_dict["name"] = name
@@ -388,10 +383,7 @@ class BaseChatOpenAI(BaseChatModel):
             "OPENAI_API_BASE"
         )
         values["openai_proxy"] = get_from_dict_or_env(
-            values,
-            "openai_proxy",
-            "OPENAI_PROXY",
-            default="",
+            values, "openai_proxy", "OPENAI_PROXY", default=""
         )
         client_params = {
@@ -586,10 +578,7 @@ class BaseChatOpenAI(BaseChatModel):
             generation_info = dict(finish_reason=res.get("finish_reason"))
             if "logprobs" in res:
                 generation_info["logprobs"] = res["logprobs"]
-            gen = ChatGeneration(
-                message=message,
-                generation_info=generation_info,
-            )
+            gen = ChatGeneration(message=message, generation_info=generation_info)
             generations.append(gen)
         llm_output = {
             "token_usage": token_usage,
@@ -849,10 +838,7 @@ class BaseChatOpenAI(BaseChatModel):
                     f"provided function was {formatted_functions[0]['name']}."
                 )
             kwargs = {**kwargs, "function_call": function_call}
-        return super().bind(
-            functions=formatted_functions,
-            **kwargs,
-        )
+        return super().bind(functions=formatted_functions, **kwargs)

     def bind_tools(
         self,
@@ -998,15 +984,20 @@ class BaseChatOpenAI(BaseChatModel):
                 from langchain_openai import ChatOpenAI
                 from langchain_core.pydantic_v1 import BaseModel

                 class AnswerWithJustification(BaseModel):
                     '''An answer to the user question along with justification for the answer.'''
                     answer: str
                     justification: str

                 llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
                 structured_llm = llm.with_structured_output(AnswerWithJustification)

-                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+                structured_llm.invoke(
+                    "What weighs more a pound of bricks or a pound of feathers"
+                )
                 # -> AnswerWithJustification(
                 #     answer='They weigh the same',
@@ -1019,15 +1010,22 @@ class BaseChatOpenAI(BaseChatModel):
                 from langchain_openai import ChatOpenAI
                 from langchain_core.pydantic_v1 import BaseModel

                 class AnswerWithJustification(BaseModel):
                     '''An answer to the user question along with justification for the answer.'''
                     answer: str
                     justification: str

                 llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
-                structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
+                structured_llm = llm.with_structured_output(
+                    AnswerWithJustification, include_raw=True
+                )

-                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+                structured_llm.invoke(
+                    "What weighs more a pound of bricks or a pound of feathers"
+                )
                 # -> {
                 #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
                 #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
@@ -1041,16 +1039,21 @@ class BaseChatOpenAI(BaseChatModel):
                 from langchain_core.pydantic_v1 import BaseModel
                 from langchain_core.utils.function_calling import convert_to_openai_tool

                 class AnswerWithJustification(BaseModel):
                     '''An answer to the user question along with justification for the answer.'''
                     answer: str
                     justification: str

                 dict_schema = convert_to_openai_tool(AnswerWithJustification)
                 llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
                 structured_llm = llm.with_structured_output(dict_schema)

-                structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers")
+                structured_llm.invoke(
+                    "What weighs more a pound of bricks or a pound of feathers"
+                )
                 # -> {
                 #     'answer': 'They weigh the same',
                 #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
@@ -1231,14 +1234,32 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

             messages = [
-                ("system", "You are a helpful translator. Translate the user sentence to French."),
+                (
+                    "system",
+                    "You are a helpful translator. Translate the user sentence to French.",
+                ),
                 ("human", "I love programming."),
             ]
             llm.invoke(messages)

         .. code-block:: python

-            AIMessage(content="J'adore la programmation.", response_metadata={'token_usage': {'completion_tokens': 5, 'prompt_tokens': 31, 'total_tokens': 36}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_43dfabdef1', 'finish_reason': 'stop', 'logprobs': None}, id='run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0', usage_metadata={'input_tokens': 31, 'output_tokens': 5, 'total_tokens': 36})
+            AIMessage(
+                content="J'adore la programmation.",
+                response_metadata={
+                    "token_usage": {
+                        "completion_tokens": 5,
+                        "prompt_tokens": 31,
+                        "total_tokens": 36,
+                    },
+                    "model_name": "gpt-4o",
+                    "system_fingerprint": "fp_43dfabdef1",
+                    "finish_reason": "stop",
+                    "logprobs": None,
+                },
+                id="run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0",
+                usage_metadata={"input_tokens": 31, "output_tokens": 5, "total_tokens": 36},
+            )

     Stream:
         .. code-block:: python
@@ -1248,13 +1269,19 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

-            AIMessageChunk(content='', id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0')
-            AIMessageChunk(content='J', id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0')
-            AIMessageChunk(content="'adore", id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0')
-            AIMessageChunk(content=' la', id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0')
-            AIMessageChunk(content=' programmation', id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0')
-            AIMessageChunk(content='.', id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0')
-            AIMessageChunk(content='', response_metadata={'finish_reason': 'stop'}, id='run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0')
+            AIMessageChunk(content="", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
+            AIMessageChunk(content="J", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
+            AIMessageChunk(content="'adore", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
+            AIMessageChunk(content=" la", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
+            AIMessageChunk(
+                content=" programmation", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0"
+            )
+            AIMessageChunk(content=".", id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0")
+            AIMessageChunk(
+                content="",
+                response_metadata={"finish_reason": "stop"},
+                id="run-9e1517e3-12bf-48f2-bb1b-2e824f7cd7b0",
+            )

         .. code-block:: python
@@ -1266,7 +1293,11 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

-            AIMessageChunk(content="J'adore la programmation.", response_metadata={'finish_reason': 'stop'}, id='run-bf917526-7f58-4683-84f7-36a6b671d140')
+            AIMessageChunk(
+                content="J'adore la programmation.",
+                response_metadata={"finish_reason": "stop"},
+                id="run-bf917526-7f58-4683-84f7-36a6b671d140",
+            )

     Async:
         .. code-block:: python
@@ -1281,41 +1312,75 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

-            AIMessage(content="J'adore la programmation.", response_metadata={'token_usage': {'completion_tokens': 5, 'prompt_tokens': 31, 'total_tokens': 36}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_43dfabdef1', 'finish_reason': 'stop', 'logprobs': None}, id='run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0', usage_metadata={'input_tokens': 31, 'output_tokens': 5, 'total_tokens': 36})
+            AIMessage(
+                content="J'adore la programmation.",
+                response_metadata={
+                    "token_usage": {
+                        "completion_tokens": 5,
+                        "prompt_tokens": 31,
+                        "total_tokens": 36,
+                    },
+                    "model_name": "gpt-4o",
+                    "system_fingerprint": "fp_43dfabdef1",
+                    "finish_reason": "stop",
+                    "logprobs": None,
+                },
+                id="run-012cffe2-5d3d-424d-83b5-51c6d4a593d1-0",
+                usage_metadata={"input_tokens": 31, "output_tokens": 5, "total_tokens": 36},
+            )

     Tool calling:
         .. code-block:: python

             from langchain_core.pydantic_v1 import BaseModel, Field

             class GetWeather(BaseModel):
                 '''Get the current weather in a given location'''

-                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+                location: str = Field(
+                    ..., description="The city and state, e.g. San Francisco, CA"
+                )

             class GetPopulation(BaseModel):
                 '''Get the current population in a given location'''

-                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+                location: str = Field(
+                    ..., description="The city and state, e.g. San Francisco, CA"
+                )

             llm_with_tools = llm.bind_tools([GetWeather, GetPopulation])
-            ai_msg = llm_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
+            ai_msg = llm_with_tools.invoke(
+                "Which city is hotter today and which is bigger: LA or NY?"
+            )
             ai_msg.tool_calls

         .. code-block:: python

-            [{'name': 'GetWeather',
-              'args': {'location': 'Los Angeles, CA'},
-              'id': 'call_6XswGD5Pqk8Tt5atYr7tfenU'},
-             {'name': 'GetWeather',
-              'args': {'location': 'New York, NY'},
-              'id': 'call_ZVL15vA8Y7kXqOy3dtmQgeCi'},
-             {'name': 'GetPopulation',
-              'args': {'location': 'Los Angeles, CA'},
-              'id': 'call_49CFW8zqC9W7mh7hbMLSIrXw'},
-             {'name': 'GetPopulation',
-              'args': {'location': 'New York, NY'},
-              'id': 'call_6ghfKxV264jEfe1mRIkS3PE7'}]
+            [
+                {
+                    "name": "GetWeather",
+                    "args": {"location": "Los Angeles, CA"},
+                    "id": "call_6XswGD5Pqk8Tt5atYr7tfenU",
+                },
+                {
+                    "name": "GetWeather",
+                    "args": {"location": "New York, NY"},
+                    "id": "call_ZVL15vA8Y7kXqOy3dtmQgeCi",
+                },
+                {
+                    "name": "GetPopulation",
+                    "args": {"location": "Los Angeles, CA"},
+                    "id": "call_49CFW8zqC9W7mh7hbMLSIrXw",
+                },
+                {
+                    "name": "GetPopulation",
+                    "args": {"location": "New York, NY"},
+                    "id": "call_6ghfKxV264jEfe1mRIkS3PE7",
+                },
+            ]

         Note that ``openai >= 1.32`` supports a ``parallel_tool_calls`` parameter
         that defaults to ``True``. This parameter can be set to ``False`` to
@@ -1324,16 +1389,19 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

             ai_msg = llm_with_tools.invoke(
-                "What is the weather in LA and NY?",
-                parallel_tool_calls=False,
+                "What is the weather in LA and NY?", parallel_tool_calls=False
             )
             ai_msg.tool_calls

         .. code-block:: python

-            [{'name': 'GetWeather',
-              'args': {'location': 'Los Angeles, CA'},
-              'id': 'call_4OoY0ZR99iEvC7fevsH8Uhtz'}]
+            [
+                {
+                    "name": "GetWeather",
+                    "args": {"location": "Los Angeles, CA"},
+                    "id": "call_4OoY0ZR99iEvC7fevsH8Uhtz",
+                }
+            ]

         Like other runtime parameters, ``parallel_tool_calls`` can be bound to a model
         using ``llm.bind(parallel_tool_calls=False)`` or during instantiation by
@@ -1348,6 +1416,7 @@ class ChatOpenAI(BaseChatOpenAI):
             from langchain_core.pydantic_v1 import BaseModel, Field

             class Joke(BaseModel):
                 '''Joke to tell user.'''
+
@@ -1355,12 +1424,17 @@ class ChatOpenAI(BaseChatOpenAI):
                 punchline: str = Field(description="The punchline to the joke")
                 rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")

             structured_llm = llm.with_structured_output(Joke)
             structured_llm.invoke("Tell me a joke about cats")

         .. code-block:: python

-            Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)
+            Joke(
+                setup="Why was the cat sitting on the computer?",
+                punchline="To keep an eye on the mouse!",
+                rating=None,
+            )

         See ``ChatOpenAI.with_structured_output()`` for more.
@@ -1368,7 +1442,9 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

             json_llm = llm.bind(response_format={"type": "json_object"})
-            ai_msg = json_llm.invoke("Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]")
+            ai_msg = json_llm.invoke(
+                "Return a JSON object with key 'random_ints' and a value of 10 random ints in [0-99]"
+            )
             ai_msg.content

         .. code-block:: python
@@ -1391,7 +1467,7 @@ class ChatOpenAI(BaseChatOpenAI):
                         "type": "image_url",
                         "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
                     },
-                ],
+                ]
             )
             ai_msg = llm.invoke([message])
             ai_msg.content
@@ -1408,7 +1484,7 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

-            {'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}
+            {"input_tokens": 28, "output_tokens": 5, "total_tokens": 33}

         When streaming, set the ``stream_usage`` kwarg:
@@ -1422,7 +1498,7 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

-            {'input_tokens': 28, 'output_tokens': 5, 'total_tokens': 33}
+            {"input_tokens": 28, "output_tokens": 5, "total_tokens": 33}

         Alternatively, setting ``stream_usage`` when instantiating the model can be
         useful when incorporating ``ChatOpenAI`` into LCEL chains -- or when using
@@ -1431,10 +1507,7 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

-            llm = ChatOpenAI(
-                model="gpt-4o",
-                stream_usage=True,
-            )
+            llm = ChatOpenAI(model="gpt-4o", stream_usage=True)
             structured_llm = llm.with_structured_output(...)

     Logprobs:
@@ -1446,11 +1519,55 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

-            {'content': [{'token': 'J', 'bytes': [74], 'logprob': -4.9617593e-06, 'top_logprobs': []},
-             {'token': "'adore", 'bytes': [39, 97, 100, 111, 114, 101], 'logprob': -0.25202933, 'top_logprobs': []},
-             {'token': ' la', 'bytes': [32, 108, 97], 'logprob': -0.20141791, 'top_logprobs': []},
-             {'token': ' programmation', 'bytes': [32, 112, 114, 111, 103, 114, 97, 109, 109, 97, 116, 105, 111, 110], 'logprob': -1.9361265e-07, 'top_logprobs': []},
-             {'token': '.', 'bytes': [46], 'logprob': -1.2233183e-05, 'top_logprobs': []}]}
+            {
+                "content": [
+                    {
+                        "token": "J",
+                        "bytes": [74],
+                        "logprob": -4.9617593e-06,
+                        "top_logprobs": [],
+                    },
+                    {
+                        "token": "'adore",
+                        "bytes": [39, 97, 100, 111, 114, 101],
+                        "logprob": -0.25202933,
+                        "top_logprobs": [],
+                    },
+                    {
+                        "token": " la",
+                        "bytes": [32, 108, 97],
+                        "logprob": -0.20141791,
+                        "top_logprobs": [],
+                    },
+                    {
+                        "token": " programmation",
+                        "bytes": [
+                            32,
+                            112,
+                            114,
+                            111,
+                            103,
+                            114,
+                            97,
+                            109,
+                            109,
+                            97,
+                            116,
+                            105,
+                            111,
+                            110,
+                        ],
+                        "logprob": -1.9361265e-07,
+                        "top_logprobs": [],
+                    },
+                    {
+                        "token": ".",
+                        "bytes": [46],
+                        "logprob": -1.2233183e-05,
+                        "top_logprobs": [],
+                    },
+                ]
+            }

     Response metadata
         .. code-block:: python
@@ -1460,13 +1577,17 @@ class ChatOpenAI(BaseChatOpenAI):
         .. code-block:: python

-            {'token_usage': {'completion_tokens': 5,
-              'prompt_tokens': 28,
-              'total_tokens': 33},
-             'model_name': 'gpt-4o',
-             'system_fingerprint': 'fp_319be4768e',
-             'finish_reason': 'stop',
-             'logprobs': None}
+            {
+                "token_usage": {
+                    "completion_tokens": 5,
+                    "prompt_tokens": 28,
+                    "total_tokens": 33,
+                },
+                "model_name": "gpt-4o",
+                "system_fingerprint": "fp_319be4768e",
+                "finish_reason": "stop",
+                "logprobs": None,
+            }

     """  # noqa: E501