From e9799d682160d27036eae3ea18201feba1ea6a3b Mon Sep 17 00:00:00 2001
From: Charles Frye
Date: Fri, 10 Feb 2023 17:56:15 -0800
Subject: [PATCH] improves huggingface_hub example (#988)

The provided example uses the default `max_length` of `20` tokens, which
leads to the example generation getting cut off. 20 tokens is way too short
to show CoT reasoning, so I boosted it to `64`.

Without knowing HF's API well, it can be hard to figure out just where
those `model_kwargs` come from, and `max_length` is a super critical one.
---
 docs/modules/llms/integrations/huggingface_hub.ipynb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/modules/llms/integrations/huggingface_hub.ipynb b/docs/modules/llms/integrations/huggingface_hub.ipynb
index 5e4769f75c..803f67ecba 100644
--- a/docs/modules/llms/integrations/huggingface_hub.ipynb
+++ b/docs/modules/llms/integrations/huggingface_hub.ipynb
@@ -20,7 +20,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "The Seattle Seahawks won the Super Bowl in 2010. Justin Beiber was born in 2010. The\n"
+      "The Seattle Seahawks won the Super Bowl in 2010. Justin Beiber was born in 2010. The final answer: Seattle Seahawks.\n"
      ]
     }
    ],
@@ -31,7 +31,7 @@
     "\n",
     "Answer: Let's think step by step.\"\"\"\n",
     "prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
-    "llm_chain = LLMChain(prompt=prompt, llm=HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\":1e-10}))\n",
+    "llm_chain = LLMChain(prompt=prompt, llm=HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\":0, \"max_length\":64}))\n",
     "\n",
     "question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
     "\n",
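For anyone reading this patch outside the notebook, a minimal standalone sketch of the updated cell follows. It assumes the imports the notebook already relies on (`PromptTemplate`, `LLMChain`, and `HuggingFaceHub` from the `langchain` package of that era) and that a `HUGGINGFACEHUB_API_TOKEN` environment variable is set; everything else mirrors the `+` lines above.

```python
# Sketch of the updated cell; assumes HUGGINGFACEHUB_API_TOKEN is set in the environment
# and that langchain / huggingface_hub are installed.
from langchain import LLMChain, PromptTemplate
from langchain.llms import HuggingFaceHub

template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

# model_kwargs are forwarded to the Hugging Face Inference API. Raising max_length from
# the default of 20 tokens gives the chain-of-thought answer room to finish.
llm = HuggingFaceHub(
    repo_id="google/flan-t5-xl",
    model_kwargs={"temperature": 0, "max_length": 64},
)
llm_chain = LLMChain(prompt=prompt, llm=llm)

question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
print(llm_chain.run(question))
```

With the larger `max_length`, the generation runs through to the "final answer: Seattle Seahawks." line shown in the updated output above rather than being truncated mid-sentence.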