From 8af25867cb4ba7c2fd33cc4c8466ff4963e3555a Mon Sep 17 00:00:00 2001
From: Gengliang Wang
Date: Wed, 3 May 2023 22:45:03 -0700
Subject: [PATCH] Simplify HumanMessages in the quick start guide (#4026)

In the `Get Message Completions from a Chat Model` section of the quick
start guide, the HumanMessage doesn't need to include `Translate this
sentence from English to French.` when there is a system message.
Simplifying the HumanMessages in these examples further demonstrates the
power of the LLM.
---
 docs/getting_started/getting_started.md        | 10 +++++-----
 docs/modules/models/chat/getting_started.ipynb | 14 +++++++-------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/docs/getting_started/getting_started.md b/docs/getting_started/getting_started.md
index c4a6577c..4faae9b7 100644
--- a/docs/getting_started/getting_started.md
+++ b/docs/getting_started/getting_started.md
@@ -316,7 +316,7 @@ You can also pass in multiple messages for OpenAI's gpt-3.5-turbo and gpt-4 mode
 ```python
 messages = [
     SystemMessage(content="You are a helpful assistant that translates English to French."),
-    HumanMessage(content="Translate this sentence from English to French. I love programming.")
+    HumanMessage(content="I love programming.")
 ]
 chat(messages)
 # -> AIMessage(content="J'aime programmer.", additional_kwargs={})
@@ -327,22 +327,22 @@ You can go one step further and generate completions for multiple sets of messag
 batch_messages = [
     [
         SystemMessage(content="You are a helpful assistant that translates English to French."),
-        HumanMessage(content="Translate this sentence from English to French. I love programming.")
+        HumanMessage(content="I love programming.")
     ],
     [
         SystemMessage(content="You are a helpful assistant that translates English to French."),
-        HumanMessage(content="Translate this sentence from English to French. I love artificial intelligence.")
+        HumanMessage(content="I love artificial intelligence.")
     ],
 ]
 result = chat.generate(batch_messages)
 result
-# -> LLMResult(generations=[[ChatGeneration(text="J'aime programmer.", generation_info=None, message=AIMessage(content="J'aime programmer.", additional_kwargs={}))], [ChatGeneration(text="J'aime l'intelligence artificielle.", generation_info=None, message=AIMessage(content="J'aime l'intelligence artificielle.", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 71, 'completion_tokens': 18, 'total_tokens': 89}})
+# -> LLMResult(generations=[[ChatGeneration(text="J'aime programmer.", generation_info=None, message=AIMessage(content="J'aime programmer.", additional_kwargs={}))], [ChatGeneration(text="J'aime l'intelligence artificielle.", generation_info=None, message=AIMessage(content="J'aime l'intelligence artificielle.", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 57, 'completion_tokens': 20, 'total_tokens': 77}})
 ```
 
 You can recover things like token usage from this LLMResult:
 
 ```
 result.llm_output['token_usage']
-# -> {'prompt_tokens': 71, 'completion_tokens': 18, 'total_tokens': 89}
+# -> {'prompt_tokens': 57, 'completion_tokens': 20, 'total_tokens': 77}
 ```
 
diff --git a/docs/modules/models/chat/getting_started.ipynb b/docs/modules/models/chat/getting_started.ipynb
index d98b0c93..7d5970fd 100644
--- a/docs/modules/models/chat/getting_started.ipynb
+++ b/docs/modules/models/chat/getting_started.ipynb
@@ -107,7 +107,7 @@
    "source": [
     "messages = [\n",
     "    SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
-    "    HumanMessage(content=\"Translate this sentence from English to French. I love programming.\")\n",
+    "    HumanMessage(content=\"I love programming.\")\n",
     "]\n",
     "chat(messages)"
    ]
   },
@@ -131,7 +131,7 @@
     {
      "data": {
       "text/plain": [
-       "LLMResult(generations=[[ChatGeneration(text=\"J'aime programmer.\", generation_info=None, message=AIMessage(content=\"J'aime programmer.\", additional_kwargs={}))], [ChatGeneration(text=\"J'aime l'intelligence artificielle.\", generation_info=None, message=AIMessage(content=\"J'aime l'intelligence artificielle.\", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 71, 'completion_tokens': 18, 'total_tokens': 89}})"
+       "LLMResult(generations=[[ChatGeneration(text=\"J'aime programmer.\", generation_info=None, message=AIMessage(content=\"J'aime programmer.\", additional_kwargs={}))], [ChatGeneration(text=\"J'aime l'intelligence artificielle.\", generation_info=None, message=AIMessage(content=\"J'aime l'intelligence artificielle.\", additional_kwargs={}))]], llm_output={'token_usage': {'prompt_tokens': 57, 'completion_tokens': 20, 'total_tokens': 77}})"
      ]
     },
     "execution_count": 5,
    "batch_messages = [\n",
    "    [\n",
    "        SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
-    "        HumanMessage(content=\"Translate this sentence from English to French. I love programming.\")\n",
+    "        HumanMessage(content=\"I love programming.\")\n",
    "    ],\n",
    "    [\n",
    "        SystemMessage(content=\"You are a helpful assistant that translates English to French.\"),\n",
-    "        HumanMessage(content=\"Translate this sentence from English to French. I love artificial intelligence.\")\n",
+    "        HumanMessage(content=\"I love artificial intelligence.\")\n",
    "    ],\n",
    "]\n",
    "result = chat.generate(batch_messages)\n",
    "result"
   ]
  },
@@ -171,9 +171,9 @@
     {
      "data": {
       "text/plain": [
-       "{'token_usage': {'prompt_tokens': 71,\n",
-       " 'completion_tokens': 18,\n",
-       " 'total_tokens': 89}}"
+       "{'token_usage': {'prompt_tokens': 57,\n",
+       " 'completion_tokens': 20,\n",
+       " 'total_tokens': 77}}"
      ]
     },
     "execution_count": 6,