{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "920a3c1a",
   "metadata": {},
   "source": [
    "# Model Laboratory\n",
    "\n",
    "This example goes over the basics of using the ModelLaboratory to test out and compare different models."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "ab9e95ad",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain import LLMChain, OpenAI, Cohere, HuggingFaceHub, PromptTemplate\n",
    "from langchain.model_laboratory import ModelLaboratory"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "32cb94e6",
   "metadata": {},
   "outputs": [],
   "source": [
    "llms = [\n",
    "    OpenAI(temperature=0),\n",
    "    Cohere(model=\"command-xlarge-20221108\", max_tokens=20, temperature=0),\n",
    "    HuggingFaceHub(repo_id=\"google/flan-t5-xl\", model_kwargs={\"temperature\": 1})\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "14cde09d",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_lab = ModelLaboratory.from_llms(llms)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "f186c741",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1mInput:\u001b[0m\n",
      "What color is a flamingo?\n",
      "\n",
      "\u001b[1mOpenAI\u001b[0m\n",
      "Params: {'model': 'text-davinci-002', 'temperature': 0.0, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}\n",
      "\u001b[36;1m\u001b[1;3m\n",
      "\n",
      "Flamingos are pink.\u001b[0m\n",
      "\n",
      "\u001b[1mCohere\u001b[0m\n",
      "Params: {'model': 'command-xlarge-20221108', 'max_tokens': 20, 'temperature': 0.0, 'k': 0, 'p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}\n",
      "\u001b[33;1m\u001b[1;3m\n",
      "\n",
      "Pink\u001b[0m\n",
      "\n",
      "\u001b[1mHuggingFaceHub\u001b[0m\n",
      "Params: {'repo_id': 'google/flan-t5-xl', 'temperature': 1}\n",
      "\u001b[38;5;200m\u001b[1;3mpink\u001b[0m\n",
      "\n"
     ]
    }
   ],
   "source": [
    "model_lab.compare(\"What color is a flamingo?\")"
   ]
  },
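  {
   "cell_type": "markdown",
   "id": "6a1d1f02",
   "metadata": {},
   "source": [
    "You can also supply a PromptTemplate when constructing the ModelLaboratory. In the cells below, the value passed to `compare` fills in the `{state}` variable of the prompt before it is sent to each model."
   ]
  },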
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "248b652a",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompt = PromptTemplate(template=\"What is the capital of {state}?\", input_variables=[\"state\"])\n",
    "model_lab_with_prompt = ModelLaboratory.from_llms(llms, prompt=prompt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "f64377ac",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1mInput:\u001b[0m\n",
      "New York\n",
      "\n",
      "\u001b[1mOpenAI\u001b[0m\n",
      "Params: {'model': 'text-davinci-002', 'temperature': 0.0, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}\n",
      "\u001b[36;1m\u001b[1;3m\n",
      "\n",
      "The capital of New York is Albany.\u001b[0m\n",
      "\n",
      "\u001b[1mCohere\u001b[0m\n",
      "Params: {'model': 'command-xlarge-20221108', 'max_tokens': 20, 'temperature': 0.0, 'k': 0, 'p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}\n",
      "\u001b[33;1m\u001b[1;3m\n",
      "\n",
      "The capital of New York is Albany.\u001b[0m\n",
      "\n",
      "\u001b[1mHuggingFaceHub\u001b[0m\n",
      "Params: {'repo_id': 'google/flan-t5-xl', 'temperature': 1}\n",
      "\u001b[38;5;200m\u001b[1;3mst john s\u001b[0m\n",
      "\n"
     ]
    }
   ],
   "source": [
    "model_lab_with_prompt.compare(\"New York\")"
   ]
  },
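  {
   "cell_type": "markdown",
   "id": "b9c6d5e8",
   "metadata": {},
   "source": [
    "ModelLaboratory is not limited to bare LLMs; it can compare chains as well. The cells below build two SelfAskWithSearchChain instances, one backed by OpenAI and one by Cohere, and run them side by side on the same question."
   ]
  },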
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "54336dbf",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain import SelfAskWithSearchChain, SerpAPIChain\n",
    "\n",
    "# A self-ask-with-search chain backed by OpenAI\n",
    "open_ai_llm = OpenAI(temperature=0)\n",
    "search = SerpAPIChain()\n",
    "self_ask_with_search_openai = SelfAskWithSearchChain(llm=open_ai_llm, search_chain=search, verbose=True)\n",
    "\n",
    "# The same chain backed by Cohere\n",
    "cohere_llm = Cohere(temperature=0, model=\"command-xlarge-20221108\")\n",
    "search = SerpAPIChain()\n",
    "self_ask_with_search_cohere = SelfAskWithSearchChain(llm=cohere_llm, search_chain=search, verbose=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "6a50a9f1",
   "metadata": {},
   "outputs": [],
   "source": [
    "chains = [self_ask_with_search_openai, self_ask_with_search_cohere]\n",
    "names = [str(open_ai_llm), str(cohere_llm)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "d3549e99",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_lab = ModelLaboratory(chains, names=names)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "362f7f57",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[1mInput:\u001b[0m\n",
      "What is the hometown of the reigning men's U.S. Open champion?\n",
      "\n",
      "\u001b[1mOpenAI\u001b[0m\n",
      "Params: {'model': 'text-davinci-002', 'temperature': 0.0, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1}\n",
      "\n",
      "\n",
      "\u001b[1m> Entering new chain...\u001b[0m\n",
      "What is the hometown of the reigning men's U.S. Open champion?\n",
      "Are follow up questions needed here:\u001b[32;1m\u001b[1;3m Yes.\n",
      "Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
      "Intermediate answer: \u001b[33;1m\u001b[1;3mCarlos Alcaraz.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
      "Follow up: Where is Carlos Alcaraz from?\u001b[0m\n",
      "Intermediate answer: \u001b[33;1m\u001b[1;3mEl Palmar, Spain.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
      "So the final answer is: El Palmar, Spain\u001b[0m\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "\u001b[36;1m\u001b[1;3m\n",
      "So the final answer is: El Palmar, Spain\u001b[0m\n",
      "\n",
      "\u001b[1mCohere\u001b[0m\n",
      "Params: {'model': 'command-xlarge-20221108', 'max_tokens': 256, 'temperature': 0.0, 'k': 0, 'p': 1, 'frequency_penalty': 0, 'presence_penalty': 0}\n",
      "\n",
      "\n",
      "\u001b[1m> Entering new chain...\u001b[0m\n",
      "What is the hometown of the reigning men's U.S. Open champion?\n",
      "Are follow up questions needed here:\u001b[32;1m\u001b[1;3m Yes.\n",
      "Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
      "Intermediate answer: \u001b[33;1m\u001b[1;3mCarlos Alcaraz.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
      "So the final answer is:\n",
      "\n",
      "Carlos Alcaraz\u001b[0m\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "\u001b[33;1m\u001b[1;3m\n",
      "So the final answer is:\n",
      "\n",
      "Carlos Alcaraz\u001b[0m\n",
      "\n"
     ]
    }
   ],
   "source": [
    "model_lab.compare(\"What is the hometown of the reigning men's U.S. Open champion?\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "94159131",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}