From 2f5606a31858ef6e1de5fb921e2cf74b1fb2c57b Mon Sep 17 00:00:00 2001
From: Bagatur <22008038+baskaryan@users.noreply.github.com>
Date: Fri, 29 Mar 2024 14:47:35 -0700
Subject: [PATCH] mistralai[patch]: correct integration_test (#19774)

---
 .../mistralai/tests/integration_tests/test_chat_models.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/libs/partners/mistralai/tests/integration_tests/test_chat_models.py b/libs/partners/mistralai/tests/integration_tests/test_chat_models.py
index 56646ee42f..6607531c5c 100644
--- a/libs/partners/mistralai/tests/integration_tests/test_chat_models.py
+++ b/libs/partners/mistralai/tests/integration_tests/test_chat_models.py
@@ -101,17 +101,15 @@ def test_chat_mistralai_llm_output_contains_token_usage() -> None:
     assert "total_tokens" in token_usage
 
 
-def test_chat_mistralai_streaming_llm_output_contains_token_usage() -> None:
-    """Test llm_output contains model_name."""
+def test_chat_mistralai_streaming_llm_output_not_contain_token_usage() -> None:
+    """Mistral currently doesn't return token usage when streaming."""
     chat = ChatMistralAI(max_tokens=10, streaming=True)
     message = HumanMessage(content="Hello")
     llm_result = chat.generate([[message]])
     assert llm_result.llm_output is not None
     assert "token_usage" in llm_result.llm_output
     token_usage = llm_result.llm_output["token_usage"]
-    assert "prompt_tokens" in token_usage
-    assert "completion_tokens" in token_usage
-    assert "total_tokens" in token_usage
+    assert not token_usage
 
 
 def test_structured_output() -> None:
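
For context, a minimal standalone sketch of the behavior the revised test pins down, assuming a valid MISTRAL_API_KEY is set in the environment; the import paths are an assumption based on the langchain_mistralai package layout rather than something stated in the patch itself:

    # Hypothetical repro script, not part of the patch above.
    from langchain_core.messages import HumanMessage
    from langchain_mistralai import ChatMistralAI

    chat = ChatMistralAI(max_tokens=10, streaming=True)
    result = chat.generate([[HumanMessage(content="Hello")]])

    # llm_output still carries a "token_usage" key, but the streaming
    # path returns no usage stats, so the dict is empty.
    assert result.llm_output is not None
    assert "token_usage" in result.llm_output
    assert not result.llm_output["token_usage"]

Note the patch keeps the "token_usage" key present but empty rather than dropping it, so callers can probe llm_output the same way whether or not streaming was used.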