Fixing factually incorrect example (#2810)

### https://github.com/hwchase17/langchain/issues/2802
It appears that Google's Flan model may not perform as well as other models, so I swapped in a simpler example that yields a factually correct answer.
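
For reference, here is a minimal, self-contained sketch of the updated cell as a plain Python script. The import paths and the `template` text are assumptions based on the contemporary langchain API visible in the notebook diff, and a `HUGGINGFACEHUB_API_TOKEN` is assumed to be available in the environment:

```python
# Minimal sketch of the updated notebook cell, assuming langchain ~0.0.x (April 2023).
# Requires HUGGINGFACEHUB_API_TOKEN to be set in the environment.
from langchain import HuggingFaceHub, LLMChain, PromptTemplate

# The exact template text is not part of this diff; this wording is an assumption.
template = """Question: {question}

Answer: Let's think step by step."""

prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(
    prompt=prompt,
    llm=HuggingFaceHub(
        repo_id="google/flan-t5-xl",
        model_kwargs={"temperature": 0, "max_length": 64},
    ),
)

question = "Who won the FIFA World Cup in the year 1994? "
print(llm_chain.run(question))
```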
Azam Iftikhar 2023-04-13 21:12:39 +05:30 committed by GitHub
parent a6f767ae7a
commit 2a89dc8c1c


@@ -12,7 +12,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 41,
"id": "3acf0069",
"metadata": {},
"outputs": [
@@ -20,7 +20,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"The Seattle Seahawks won the Super Bowl in 2010. Justin Beiber was born in 2010. The final answer: Seattle Seahawks.\n"
"The FIFA World Cup is a football tournament that is played every 4 years. The year 1994 was the 44th FIFA World Cup. The final answer: Brazil.\n"
]
}
],
@@ -33,7 +33,7 @@
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
"llm_chain = LLMChain(prompt=prompt, llm=HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\":0, \"max_length\":64}))\n",
"\n",
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"question = \"Who won the FIFA World Cup in the year 1994? \"\n",
"\n",
"print(llm_chain.run(question))"
]
@@ -41,7 +41,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "ae4559c7",
"id": "843a3837",
"metadata": {},
"outputs": [],
"source": []
@@ -63,7 +63,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
"version": "3.8.12"
}
},
"nbformat": 4,