Compare commits

...

4 Commits

Author SHA1 Message Date
Harrison Chase 7df74a4d52 load prompt 2 years ago
Harrison Chase 0d0d3f122a cr 2 years ago
Harrison Chase bf3a9973f0 Merge branch 'master' into harrison/prompts_take_2 2 years ago
Harrison Chase c28d5ec3ba update prompts 2 years ago

@ -0,0 +1,579 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "43fb16cb",
"metadata": {},
"source": [
"# Prompt Walkthrough\n",
"\n",
"An overview of the different types of prompts in LangChain and how to use them"
]
},
{
"cell_type": "code",
"execution_count": 53,
"id": "2c8d7587",
"metadata": {},
"outputs": [],
"source": [
"import yaml\n",
"\n",
"with open(\"simple_prompt.yaml\", \"r\") as stream:\n",
" config = yaml.safe_load(stream)"
]
},
{
"cell_type": "code",
"execution_count": 54,
"id": "1ab11b59",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'input_variables': ['adjective', 'content'],\n",
" 'template': 'Tell me a {adjective} joke about {content}.\\nLike what does it mean?'}"
]
},
"execution_count": 54,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"config"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "78a2cf84",
"metadata": {},
"outputs": [],
"source": [
"from pathlib import Path\n",
"from typing import Union\n",
"import yaml\n",
"def load_file(file: Union[str, Path]):\n",
" if isinstance(file, str):\n",
" file_path = Path(file)\n",
" else:\n",
" file_path = file\n",
" if file_path.suffix == \".json\":\n",
" with open(file_path) as f:\n",
" config = json.load(f)\n",
" elif file_path.suffix == \".yaml\":\n",
" with open(file_path, \"r\") as f:\n",
" config = yaml.safe_load(f)\n",
" else:\n",
" raise ValueError\n",
" return load_prompt_from_config(config)\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 42,
"id": "6e1f9bcd",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import Prompt, DynamicPrompt\n",
"import json\n",
"def load_prompt_from_config(config):\n",
" if \"type\" in config:\n",
" prompt_type = config.pop(\"type\")\n",
" else:\n",
" prompt_type = \"prompt\"\n",
" if prompt_type == \"prompt\":\n",
" return _load_prompt(config)\n",
" elif prompt_type == \"dynamic_prompt\":\n",
" return _load_dynamic_prompt(config)\n",
" else:\n",
" raise ValueError\n",
" \n",
"def _load_template(var_name: str, config: dict) -> dict:\n",
" if f\"{var_name}_path\" in config:\n",
" if var_name in config:\n",
" raise ValueError(f\"Both `{var_name}_path` and `{var_name}` cannot be provided.\")\n",
" template_path = Path(config.pop(f\"{var_name}_path\"))\n",
" if template_path.suffix == \".txt\":\n",
" with open(template_path) as f:\n",
" template = f.read()\n",
" else:\n",
" raise ValueError\n",
" config[var_name] = template\n",
" return config\n",
" \n",
" \n",
"def _load_dynamic_prompt(config):\n",
" if \"loader\" in config:\n",
" prompt_type = config.pop(\"loader\")\n",
" else:\n",
" prompt_type = \"init\"\n",
" if prompt_type == \"init\":\n",
" config = _load_template(\"suffix\", config)\n",
" config = _load_template(\"prefix\", config)\n",
" return DynamicPrompt(**config)\n",
" elif prompt_type == \"from_structured_examples\":\n",
" config = _load_template(\"suffix\", config)\n",
" config = _load_template(\"prefix\", config)\n",
" config[\"example_prompt\"] = _load_prompt(config[\"example_prompt\"])\n",
" if isinstance(config[\"examples\"], list):\n",
" pass\n",
" elif isinstance(config[\"examples\"], str):\n",
" with open(config[\"examples\"]) as f:\n",
" examples = json.load(f)\n",
" config[\"examples\"] = examples\n",
" else:\n",
" raise ValueError\n",
" return DynamicPrompt.from_structured_examples(**config)\n",
" else:\n",
" raise ValueError\n",
"\n",
"def _load_prompt(config):\n",
" if \"loader\" in config:\n",
" prompt_type = config.pop(\"loader\")\n",
" else:\n",
" prompt_type = \"init\"\n",
" if prompt_type == \"init\":\n",
" config = _load_template(\"template\", config)\n",
" return Prompt(**config)\n",
" elif prompt_type == \"from_examples\":\n",
" config = _load_template(\"suffix\", config)\n",
" config = _load_template(\"prefix\", config)\n",
" if isinstance(config[\"examples\"], list):\n",
" pass\n",
" elif isinstance(config[\"examples\"], str):\n",
" with open(config[\"examples\"]) as f:\n",
" examples = json.load(f)\n",
" config[\"examples\"] = examples\n",
" else:\n",
" raise ValueError\n",
" return Prompt.from_examples(**config)\n",
" elif prompt_type == \"from_structured_examples\":\n",
" config = _load_template(\"suffix\", config)\n",
" config = _load_template(\"prefix\", config)\n",
" config[\"example_prompt\"] = _load_prompt(config[\"example_prompt\"])\n",
" if isinstance(config[\"examples\"], list):\n",
" pass\n",
" elif isinstance(config[\"examples\"], str):\n",
" with open(config[\"examples\"]) as f:\n",
" examples = json.load(f)\n",
" config[\"examples\"] = examples\n",
" else:\n",
" raise ValueError\n",
" return Prompt.from_structured_examples(**config)\n",
" else:\n",
" raise ValueError"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b045da0f",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "cddb465e",
"metadata": {},
"source": [
"### Basic Prompt\n",
"\n",
"The most simple type of prompt - a string template that takes any number of input variables. The template should be formatted as a Python f-string."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ab46bd2a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Tell me a joke.'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# An example prompt with no input variables\n",
"_config = {\n",
" \"input_variables\": [],\n",
" \"template\": \"Tell me a joke.\"\n",
"}\n",
"no_input_prompt = load_prompt_from_config(_config)\n",
"no_input_prompt.format()"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "c3ad0fa8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Tell me a funny joke.'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# An example prompt with one input variable\n",
"_config = {\n",
" \"input_variables\": [\"adjective\"],\n",
" \"template\": \"Tell me a {adjective} joke.\"\n",
"}\n",
"one_input_prompt = load_prompt_from_config(_config)\n",
"one_input_prompt.format(adjective=\"funny\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "ba577dcf",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Tell me a funny joke about chickens.'"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# An example prompt with multiple input variables\n",
"_config = {\n",
" \"input_variables\": [\"adjective\", \"content\"],\n",
" \"template\": \"Tell me a {adjective} joke about {content}.\"\n",
"}\n",
"multiple_input_prompt = load_prompt_from_config(_config)\n",
"multiple_input_prompt.format(adjective=\"funny\", content=\"chickens\")"
]
},
{
"cell_type": "code",
"execution_count": 43,
"id": "e0ad7fb8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Tell me a funny joke about chickens.'"
]
},
"execution_count": 43,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"multiple_input_prompt = load_file(\"simple_prompt_with_template_file.json\")\n",
"multiple_input_prompt.format(adjective=\"funny\", content=\"chickens\")"
]
},
{
"cell_type": "markdown",
"id": "d27b1824",
"metadata": {},
"source": [
"### Examples\n",
"Examples are datapoints that can be used to show the model how to produce results. They can be either strings, or dictionaries that are then turned into strings by an example prompt itself."
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "2c00e965",
"metadata": {},
"outputs": [],
"source": [
"string_examples = [\"Input: happy\\nOutput: sad\", \"Input: tall\\nOutput: short\"]\n",
"dict_examples = [{\"input\": \"happy\", \"output\": \"sad\"}, {\"input\": \"tall\", \"output\": \"short\"}]\n",
"example_prompt_config = {\"input_variables\": [\"input\",\"output\"], \"template\": \"Input: {input}\\nOutput: {output}\"}"
]
},
{
"cell_type": "markdown",
"id": "1492b49d",
"metadata": {},
"source": [
"### Simple Prompt with examples\n",
"\n",
"We can then use these examples to construct prompts."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "1a5a686d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: tall\n",
"Output: short\n",
"\n",
"Input: big\n",
"Output:\n"
]
}
],
"source": [
"_config = {\n",
" \"loader\": \"from_examples\",\n",
" \"examples\": string_examples,\n",
" \"prefix\": \"Give the antonym of every input\",\n",
" \"suffix\": \"Input: {adjective}\\nOutput:\", \n",
" \"input_variables\": [\"adjective\"],\n",
"}\n",
"prompt_from_string_examples = load_prompt_from_config(_config)\n",
"print(prompt_from_string_examples.format(adjective=\"big\"))"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "08d43717",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: tall\n",
"Output: short\n",
"\n",
"Input: big\n",
"Output:\n"
]
}
],
"source": [
"_config = {\n",
" \"loader\": \"from_examples\",\n",
" \"examples\": \"string_examples.json\",\n",
" \"prefix\": \"Give the antonym of every input\",\n",
" \"suffix\": \"Input: {adjective}\\nOutput:\", \n",
" \"input_variables\": [\"adjective\"],\n",
"}\n",
"prompt_from_string_examples = load_prompt_from_config(_config)\n",
"print(prompt_from_string_examples.format(adjective=\"big\"))"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "7931e5f2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: tall\n",
"Output: short\n",
"\n",
"Input: big\n",
"Output:\n"
]
}
],
"source": [
"_config = {\n",
" \"loader\": \"from_structured_examples\",\n",
" \"examples\": dict_examples,\n",
" \"example_prompt\": example_prompt_config,\n",
" \"prefix\": \"Give the antonym of every input\",\n",
" \"suffix\": \"Input: {adjective}\\nOutput:\", \n",
" \"input_variables\": [\"adjective\"],\n",
"}\n",
"prompt_from_structured_examples = load_prompt_from_config(_config)\n",
"print(prompt_from_structured_examples.format(adjective=\"big\"))"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "738ff0a8",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: tall\n",
"Output: short\n",
"\n",
"Input: big\n",
"Output:\n"
]
}
],
"source": [
"_config = {\n",
" \"loader\": \"from_structured_examples\",\n",
" \"examples\": \"structured_examples.json\",\n",
" \"example_prompt\": example_prompt_config,\n",
" \"prefix\": \"Give the antonym of every input\",\n",
" \"suffix\": \"Input: {adjective}\\nOutput:\", \n",
" \"input_variables\": [\"adjective\"],\n",
"}\n",
"prompt_from_structured_examples = load_prompt_from_config(_config)\n",
"print(prompt_from_structured_examples.format(adjective=\"big\"))"
]
},
{
"cell_type": "markdown",
"id": "861a4d1f",
"metadata": {},
"source": [
"### Dynamic Prompt\n",
"\n",
"We also do more clever things with prompts - for example, only select a certain number of examples in order to limit the size of the text passed in. This will vary with the input text size."
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "207e55f7",
"metadata": {},
"outputs": [],
"source": [
"_config = {\n",
" \"type\": \"dynamic_prompt\",\n",
" \"loader\": \"from_structured_examples\",\n",
" \"examples\": \"structured_examples.json\",\n",
" \"example_prompt\": example_prompt_config,\n",
" \"prefix\": \"Give the antonym of every input\",\n",
" \"suffix\": \"Input: {adjective}\\nOutput:\", \n",
" \"input_variables\": [\"adjective\"],\n",
" \"max_length\": 20,\n",
"}\n",
"dynamic_prompt = load_prompt_from_config(_config)"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "d00b4385",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: tall\n",
"Output: short\n",
"\n",
"Input: big\n",
"Output:\n"
]
}
],
"source": [
"# An example with small input, so it selects both examples.\n",
"print(dynamic_prompt.format(adjective=\"big\"))"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "878bcde9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: big and huge and massive\n",
"Output:\n"
]
}
],
"source": [
"# An example with long input, so it selects only one example.\n",
"print(dynamic_prompt.format(adjective=\"big and huge and massive\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "76a1065d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -0,0 +1,4 @@
{
"input_variables": ["adjective", "content"],
"template": "Tell me a {adjective} joke about {content}."
}

@ -0,0 +1,5 @@
input_variables:
["adjective", "content"]
template: |
Tell me a {adjective} joke about {content}.
Like what does it mean?

@ -0,0 +1,4 @@
{
"input_variables": ["adjective", "content"],
"template_path": "simple_template.txt"
}

@ -0,0 +1 @@
Tell me a {adjective} joke about {content}.

@ -0,0 +1 @@
["Input: happy\nOutput: sad", "Input: tall\nOutput: short"]

@ -0,0 +1 @@
[{"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"}]

@ -0,0 +1,410 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "43fb16cb",
"metadata": {},
"source": [
"# Prompt Walkthrough\n",
"\n",
"An overview of the different types of prompts in LangChain and how to use them"
]
},
{
"cell_type": "markdown",
"id": "cddb465e",
"metadata": {},
"source": [
"### Basic Prompt\n",
"\n",
"The most simple type of prompt - a string template that takes any number of input variables. The template should be formatted as a Python f-string."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "094229f4",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import Prompt"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "ab46bd2a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Tell me a joke.'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# An example prompt with no input variables\n",
"no_input_prompt = Prompt(input_variables=[], template=\"Tell me a joke.\")\n",
"no_input_prompt.format()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "c3ad0fa8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Tell me a funny joke.'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# An example prompt with one input variable\n",
"no_input_prompt = Prompt(input_variables=[\"adjective\"], template=\"Tell me a {adjective} joke.\")\n",
"no_input_prompt.format(adjective=\"funny\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ba577dcf",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Tell me a funny joke about chickens.'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# An example prompt with multiple input variables\n",
"no_input_prompt = Prompt(input_variables=[\"adjective\", \"content\"], template=\"Tell me a {adjective} joke about {content}.\")\n",
"no_input_prompt.format(adjective=\"funny\", content=\"chickens\")"
]
},
{
"cell_type": "markdown",
"id": "d27b1824",
"metadata": {},
"source": [
"### Examples\n",
"Examples are datapoints that can be used to show the model how to produce results. They can be either strings, or dictionaries that are then turned into strings by an example prompt itself."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "2c00e965",
"metadata": {},
"outputs": [],
"source": [
"string_examples = [\"Input: happy\\nOutput: sad\", \"Input: tall\\nOutput: short\"]\n",
"dict_examples = [{\"input\": \"happy\", \"output\": \"sad\"}, {\"input\": \"tall\", \"output\": \"short\"}]\n",
"example_prompt = Prompt(input_variables=[\"input\",\"output\"], template=\"Input: {input}\\nOutput: {output}\")"
]
},
{
"cell_type": "markdown",
"id": "1492b49d",
"metadata": {},
"source": [
"### Simple Prompt with examples\n",
"\n",
"We can then use these examples to construct prompts."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "1a5a686d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: tall\n",
"Output: short\n",
"\n",
"Input: big\n",
"Output:\n"
]
}
],
"source": [
"prompt_from_string_examples = Prompt.from_examples(\n",
" string_examples, \n",
" prefix=\"Give the antonym of every input\",\n",
" suffix=\"Input: {adjective}\\nOutput:\", \n",
" input_variables=[\"adjective\"],\n",
")\n",
"print(prompt_from_string_examples.format(adjective=\"big\"))"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "7931e5f2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: tall\n",
"Output: short\n",
"\n",
"Input: big\n",
"Output:\n"
]
}
],
"source": [
"prompt_from_string_examples = Prompt.from_structured_examples(\n",
" dict_examples,\n",
" example_prompt,\n",
" prefix=\"Give the antonym of every input\",\n",
" suffix=\"Input: {adjective}\\nOutput:\", \n",
" input_variables=[\"adjective\"],\n",
")\n",
"print(prompt_from_string_examples.format(adjective=\"big\"))"
]
},
{
"cell_type": "markdown",
"id": "861a4d1f",
"metadata": {},
"source": [
"### Dynamic Prompt\n",
"\n",
"We also do more clever things with prompts - for example, only select a certain number of examples in order to limit the size of the text passed in. This will vary with the input text size."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "7c469c95",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import DynamicPrompt"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "207e55f7",
"metadata": {},
"outputs": [],
"source": [
"dynamic_prompt = DynamicPrompt.from_structured_examples(\n",
" dict_examples,\n",
" example_prompt,\n",
" prefix=\"Give the antonym of every input\",\n",
" suffix=\"Input: {adjective}\\nOutput:\", \n",
" input_variables=[\"adjective\"],\n",
" max_length=20,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "d00b4385",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: tall\n",
"Output: short\n",
"\n",
"Input: big\n",
"Output:\n"
]
}
],
"source": [
"# An example with small input, so it selects both examples.\n",
"print(dynamic_prompt.format(adjective=\"big\"))"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "878bcde9",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: big and huge and massive\n",
"Output:\n"
]
}
],
"source": [
"# An example with long input, so it selects only one example.\n",
"print(dynamic_prompt.format(adjective=\"big and huge and massive\"))"
]
},
{
"cell_type": "markdown",
"id": "2d007b0a",
"metadata": {},
"source": [
"# Optimized Prompt\n",
"\n",
"Besides selecting a variable number of examples to show, we can also select examples that most closely match the user input. This is done by creating embeddings of the user input and comparing it embeddings of the examples."
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "241bfe80",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.optimized import OptimizedPrompt\n",
"from langchain.vectorstores import FAISS\n",
"from langchain.embeddings import OpenAIEmbeddings"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "50d0a701",
"metadata": {},
"outputs": [],
"source": [
"optimized_prompt = OptimizedPrompt.from_structured_examples(\n",
" dict_examples,\n",
" example_prompt,\n",
" prefix=\"Give the antonym of every input\",\n",
" suffix=\"Input: {adjective}\\nOutput:\", \n",
" input_variables=[\"adjective\"],\n",
" embeddings=OpenAIEmbeddings(),\n",
" vectorstore_cls=FAISS\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "4c8fdf45",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: happy\n",
"Output: sad\n",
"\n",
"Input: worried\n",
"Output:\n"
]
}
],
"source": [
"# Input is a feeling, so should select the happy/sad example\n",
"print(optimized_prompt.format(adjective=\"worried\", k=1))"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "829af21a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Give the antonym of every input\n",
"\n",
"Input: tall\n",
"Output: short\n",
"\n",
"Input: fat\n",
"Output:\n"
]
}
],
"source": [
"# Input is a measurement, so should select the tall/short example\n",
"print(optimized_prompt.format(adjective=\"fat\", k=1))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "76a1065d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -5,6 +5,7 @@ from typing import Any, Callable, Dict, List
from pydantic import BaseModel, Extra, root_validator
from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING, BasePrompt
from langchain.prompts.prompt import Prompt
class DynamicPrompt(BaseModel, BasePrompt):
@ -18,7 +19,7 @@ class DynamicPrompt(BaseModel, BasePrompt):
examples=["Say hi. Hi", "Say ho. Ho"],
example_separator="\n\n",
prefix="",
suffix="\n\nSay {foo}"
suffix="Say {foo}"
input_variables=["foo"],
max_length=200,
get_text_length=word_count
@ -110,3 +111,20 @@ class DynamicPrompt(BaseModel, BasePrompt):
except KeyError:
raise ValueError("Invalid prompt schema.")
return values
@classmethod
def from_structured_examples(
    cls, examples: List[dict], example_prompt: Prompt, **kwargs: Any
) -> "DynamicPrompt":
    """Create a dynamic prompt from structured (dict) examples.

    Each example dict is first rendered to a string with ``example_prompt``;
    the rendered strings are then handed to the regular initializer.

    Args:
        examples: List of structured examples to use in the prompt.
        example_prompt: Prompt used to format the examples.
        **kwargs: Keyword arguments to be passed through to init.

    Returns:
        The final prompt generated.
    """
    rendered = [example_prompt.format(**ex) for ex in examples]
    return cls(examples=rendered, **kwargs)

@ -0,0 +1,131 @@
from pathlib import Path
from typing import Union
import yaml
from langchain.prompts import Prompt, DynamicPrompt
import json
def load_prompt_from_config(config):
    """Get the right prompt type from the config and load it accordingly.

    Args:
        config: Dict describing the prompt. An optional ``type`` key selects
            the prompt class; all remaining keys are loader-specific.

    Returns:
        The loaded prompt object.

    Raises:
        ValueError: If ``type`` is not a recognized prompt type.
    """
    # Default to the base prompt type when no explicit type is given.
    prompt_type = config.pop("type", "prompt")
    if prompt_type == "prompt":
        return _load_prompt(config)
    elif prompt_type == "dynamic_prompt":
        return _load_dynamic_prompt(config)
    else:
        raise ValueError(f"Unsupported prompt type: {prompt_type}")
def _load_template(var_name: str, config: dict) -> dict:
"""Load template from disk if applicable."""
# Check if template_path exists in config.
if f"{var_name}_path" in config:
# If it does, make sure template variable doesn't also exist.
if var_name in config:
raise ValueError(f"Both `{var_name}_path` and `{var_name}` cannot be provided.")
# Pop the template path from the config.
template_path = Path(config.pop(f"{var_name}_path"))
# Load the template.
if template_path.suffix == ".txt":
with open(template_path) as f:
template = f.read()
else:
raise ValueError
# Set the template variable to the extracted variable.
config[var_name] = template
return config
def _load_examples(config):
"""Load examples if necessary."""
if isinstance(config["examples"], list):
pass
elif isinstance(config["examples"], str):
with open(config["examples"]) as f:
examples = json.load(f)
config["examples"] = examples
else:
raise ValueError
return config
def _load_dynamic_prompt(config):
"""Load the dynamic prompt from the config."""
# Get the loader type (init, from_examples, etc)
if "loader" in config:
prompt_type = config.pop("loader")
else:
prompt_type = "init"
# Call loading logic depending on what loader to use.
if prompt_type == "init":
# Load the suffix and prefix templates.
config = _load_template("suffix", config)
config = _load_template("prefix", config)
return DynamicPrompt(**config)
elif prompt_type == "from_structured_examples":
# Load the suffix and prefix templates.
config = _load_template("suffix", config)
config = _load_template("prefix", config)
# Load the example prompt.
config["example_prompt"] = _load_prompt(config["example_prompt"])
# Load the examples.
config = _load_examples(config)
return DynamicPrompt.from_structured_examples(**config)
else:
raise ValueError
def _load_prompt(config):
"""Load the base prompt type from config."""
# Get the loader type (init, from_examples, etc)
if "loader" in config:
prompt_type = config.pop("loader")
else:
prompt_type = "init"
# Call loading logic depending on what loader to use.
if prompt_type == "init":
# Load the template from disk.
config = _load_template("template", config)
return Prompt(**config)
elif prompt_type == "from_examples":
# Load the suffix and prefix templates.
config = _load_template("suffix", config)
config = _load_template("prefix", config)
# Load the examples.
config = _load_examples(config)
return Prompt.from_examples(**config)
elif prompt_type == "from_structured_examples":
# Load the suffix and prefix templates.
config = _load_template("suffix", config)
config = _load_template("prefix", config)
config["example_prompt"] = _load_prompt(config["example_prompt"])
# Load the examples.
config = _load_examples(config)
return Prompt.from_structured_examples(**config)
else:
raise ValueError
def load_prompt(file: Union[str, Path]):
    """Load a prompt from a JSON or YAML config file.

    Args:
        file: Path to the prompt config file (``.json`` or ``.yaml``).

    Returns:
        The loaded prompt object.

    Raises:
        ValueError: If the file extension is not supported.
    """
    # Path() accepts both str and Path, so no isinstance branch is needed.
    file_path = Path(file)
    # Load from either json or yaml.
    if file_path.suffix == ".json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix == ".yaml":
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    else:
        raise ValueError(f"Unsupported file type: {file_path.suffix}")
    # Load the prompt from the config now.
    return load_prompt_from_config(config)

@ -1,11 +1,12 @@
"""Optimized prompt schema definition."""
import re
from typing import Any, Callable, Dict, List
from typing import Any, Callable, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING
from langchain.prompts.prompt import Prompt
from langchain.vectorstores.base import VectorStore
@ -28,6 +29,9 @@ class OptimizedPrompt(BaseModel):
)
"""
vectorstore: VectorStore
"""Vectorstore to use for storing the embeddings."""
example_separator: str = "\n\n"
"""Example separator, e.g. \n\n, for the dynamic prompt creation."""
@ -49,9 +53,6 @@ class OptimizedPrompt(BaseModel):
max_length: int = 2048
"""Max length for the prompt, beyond which examples are cut."""
vectorstore: VectorStore
"""Vectorstore to use for storing the embeddings."""
class Config:
"""Configuration for this pydantic object."""
@ -154,8 +155,65 @@ class OptimizedPrompt(BaseModel):
Returns:
The OptimizedPrompt instantiated, backed by a vector store.
"""
dict_examples = [{"text": example} for example in examples]
example_prompt = Prompt(input_variables=["text"], template="{text}")
return cls.from_structured_examples(
dict_examples,
example_prompt,
suffix,
input_variables,
embeddings,
vectorstore_cls=vectorstore_cls,
example_separator=example_separator,
prefix=prefix,
**vectorstore_cls_kwargs,
)
@classmethod
def from_structured_examples(
cls,
examples: List[dict],
example_prompt: Prompt,
suffix: str,
input_variables: List[str],
embeddings: Embeddings,
vectorstore_cls: VectorStore,
example_separator: str = "\n\n",
prefix: str = "",
example_key: Optional[str] = None,
**vectorstore_cls_kwargs: Any,
) -> "OptimizedPrompt":
"""Create k-shot prompt optimizer using example list and embeddings.
Reshuffles examples for the prompt dynamically based on query similarity.
Args:
examples: List of structured examples to use in the prompt.
example_prompt: Prompt used to format the examples.
suffix: String to go after the list of examples. Should generally
set up the user's input.
input_variables: A list of variable names the final prompt template
will expect.
embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
vectorstore_cls: A vector store DB interface class, e.g. FAISS.
example_separator: The separator to use in between examples. Defaults
to two new line characters.
prefix: String that should go before any examples. Generally includes
examples. Defaults to an empty string.
example_key: Optional string pointing to the key in the example to
vectorized. If None, will format the example in the example_prompt,
and then vectorize that whole string.
vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The OptimizedPrompt instantiated, backed by a vector store.
"""
if example_key is None:
string_examples = [example_prompt.format(**example) for example in examples]
else:
string_examples = [example[example_key] for example in examples]
vectorstore = vectorstore_cls.from_texts(
examples, embeddings, **vectorstore_cls_kwargs
string_examples, embeddings, **vectorstore_cls_kwargs
)
return cls(
suffix=suffix,

@ -97,6 +97,34 @@ class Prompt(BaseModel, BasePrompt):
template = example_separator.join([prefix, *examples, suffix])
return cls(input_variables=input_variables, template=template)
@classmethod
def from_structured_examples(
    cls,
    examples: List[dict],
    example_prompt: "Prompt",
    suffix: str,
    input_variables: List[str],
    **kwargs: Any,
) -> "Prompt":
    """Build a prompt from structured (dict) examples plus prefix/suffix.

    Each example dict is rendered to a string with ``example_prompt``, and
    the rendered strings are delegated to :meth:`from_examples`.

    Args:
        examples: List of structured examples to use in the prompt.
        example_prompt: Prompt used to format each example.
        suffix: String to go after the list of examples. Should generally
            set up the user's input.
        input_variables: A list of variable names the final prompt template
            will expect.
        **kwargs: Keyword arguments to be passed through to init.

    Returns:
        The final prompt generated.
    """
    rendered = [example_prompt.format(**ex) for ex in examples]
    return cls.from_examples(rendered, suffix, input_variables, **kwargs)
@classmethod
def from_file(cls, template_file: str, input_variables: List[str]) -> "Prompt":
"""Load a prompt from a file.

@ -27,7 +27,7 @@ setup(
version=__version__,
packages=find_packages(),
description="Building applications with LLMs through composability",
install_requires=["pydantic", "sqlalchemy", "numpy", "requests"],
install_requires=["pydantic", "sqlalchemy", "numpy", "requests", "pyyaml"],
long_description=long_description,
license="MIT",
url="https://github.com/hwchase17/langchain",

Loading…
Cancel
Save