Harrison/pipeline prompt (#5540)

The idea is to make prompts more composable.
Harrison Chase committed by GitHub
parent 647210a4b9
commit b9040669a0

@@ -0,0 +1,179 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "c79d1cbf",
"metadata": {},
"source": [
"# Prompt Composition\n",
"\n",
"This notebook goes over how to compose multiple prompts together. This can be useful when you want to reuse parts of prompts. This can be done with a PipelinePrompt. A PipelinePrompt consists of two main parts:\n",
"\n",
"- final_prompt: This is the final prompt that is returned\n",
"- pipeline_prompts: This is a list of tuples, consisting of a string (`name`) and a Prompt Template. Each PromptTemplate will be formatted and then passed to future prompt templates as a variable with the same name as `name`"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "4eb8c5e6",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.pipeline import PipelinePromptTemplate\n",
"from langchain.prompts.prompt import PromptTemplate"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "67842c6e",
"metadata": {},
"outputs": [],
"source": [
"full_template = \"\"\"{introduction}\n",
"\n",
"{example}\n",
"\n",
"{start}\"\"\"\n",
"full_prompt = PromptTemplate.from_template(full_template)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "11913f4b",
"metadata": {},
"outputs": [],
"source": [
"introduction_template = \"\"\"You are impersonating {person}.\"\"\"\n",
"introduction_prompt = PromptTemplate.from_template(introduction_template)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "bc94cac0",
"metadata": {},
"outputs": [],
"source": [
"example_template = \"\"\"Here's an example of an interaction: \n",
"\n",
"Q: {example_q}\n",
"A: {example_a}\"\"\"\n",
"example_prompt = PromptTemplate.from_template(example_template)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "e89c4dd7",
"metadata": {},
"outputs": [],
"source": [
"start_template = \"\"\"Now, do this for real!\n",
"\n",
"Q: {input}\n",
"A:\"\"\"\n",
"start_prompt = PromptTemplate.from_template(start_template)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "fa029e4b",
"metadata": {},
"outputs": [],
"source": [
"input_prompts = [\n",
" (\"introduction\", introduction_prompt),\n",
" (\"example\", example_prompt),\n",
" (\"start\", start_prompt)\n",
"]\n",
"pipeline_prompt = PipelinePromptTemplate(final_prompt=full_prompt, pipeline_prompts=input_prompts)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "674ea983",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['example_a', 'person', 'example_q', 'input']"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"pipeline_prompt.input_variables"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "f1fa0925",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"You are impersonating Elon Musk.\n",
"Here's an example of an interaction: \n",
"\n",
"Q: What's your favorite car?\n",
"A: Telsa\n",
"Now, do this for real!\n",
"\n",
"Q: What's your favorite social media site?\n",
"A:\n",
"\n"
]
}
],
"source": [
"print(pipeline_prompt.format(\n",
" person=\"Elon Musk\",\n",
" example_q=\"What's your favorite car?\",\n",
" example_a=\"Telsa\",\n",
" input=\"What's your favorite social media site?\"\n",
"))"
]
},
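{
"cell_type": "markdown",
"id": "2f8a7c31",
"metadata": {},
"source": [
"Because the pipeline prompts are formatted in order, a later prompt template can also consume the variable produced by an earlier one. A minimal sketch (the `greeting` and `repeated` names here are purely illustrative):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6d1c0b9e",
"metadata": {},
"outputs": [],
"source": [
"greeting_prompt = PromptTemplate.from_template(\"Hello, {name}!\")\n",
"repeat_prompt = PromptTemplate.from_template(\"{greeting} {greeting}\")\n",
"chained_prompt = PipelinePromptTemplate(\n",
"    final_prompt=PromptTemplate.from_template(\"{repeated}\"),\n",
"    pipeline_prompts=[(\"greeting\", greeting_prompt), (\"repeated\", repeat_prompt)]\n",
")\n",
"# Only `name` is user-supplied; expected result: 'Hello, Bob! Hello, Bob!'\n",
"chained_prompt.format(name=\"Bob\")"
]
},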
{
"cell_type": "code",
"execution_count": null,
"id": "047c2b0a",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

langchain/prompts/pipeline.py
@@ -0,0 +1,56 @@
from typing import Any, Dict, List, Tuple

from pydantic import root_validator

from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.chat import BaseChatPromptTemplate
from langchain.schema import PromptValue


def _get_inputs(inputs: dict, input_variables: List[str]) -> dict:
    return {k: inputs[k] for k in input_variables}


class PipelinePromptTemplate(BasePromptTemplate):
    """A prompt template for composing multiple prompts together.

    This can be useful when you want to reuse parts of prompts.
    A PipelinePrompt consists of two main parts:
    - final_prompt: This is the final prompt that is returned
    - pipeline_prompts: This is a list of tuples, consisting
      of a string (`name`) and a prompt template.
      Each prompt template will be formatted and then passed
      to later prompt templates as a variable with
      the same name as `name`
    """

    final_prompt: BasePromptTemplate
    pipeline_prompts: List[Tuple[str, BasePromptTemplate]]

    @root_validator(pre=True)
    def get_input_variables(cls, values: Dict) -> Dict:
        """Get input variables."""
        created_variables = set()
        all_variables = set()
        for k, prompt in values["pipeline_prompts"]:
            created_variables.add(k)
            all_variables.update(prompt.input_variables)
        # Only variables that no pipeline prompt produces must be user-supplied.
        values["input_variables"] = list(all_variables.difference(created_variables))
        return values

    def format_prompt(self, **kwargs: Any) -> PromptValue:
        for k, prompt in self.pipeline_prompts:
            _inputs = _get_inputs(kwargs, prompt.input_variables)
            if isinstance(prompt, BaseChatPromptTemplate):
                # Chat prompts contribute a list of messages rather than a string.
                kwargs[k] = prompt.format_messages(**_inputs)
            else:
                kwargs[k] = prompt.format(**_inputs)
        _inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
        return self.final_prompt.format_prompt(**_inputs)

    def format(self, **kwargs: Any) -> str:
        return self.format_prompt(**kwargs).to_string()

    @property
    def _prompt_type(self) -> str:
        # Pipeline prompts have no serializable prompt type, so saving is unsupported.
        raise ValueError
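
Since `format_prompt` special-cases `BaseChatPromptTemplate`, an inner chat template contributes a list of messages rather than a string, which a `MessagesPlaceholder` in the final prompt can absorb. A minimal sketch of that path (the `history` name and the two-template setup are illustrative, not part of the source):

from langchain.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain.prompts.pipeline import PipelinePromptTemplate

# Final prompt: a placeholder that absorbs whatever messages the pipeline produces.
final_prompt = ChatPromptTemplate(
    input_variables=["history"],
    messages=[MessagesPlaceholder(variable_name="history")],
)
# Inner chat template: formatted via format_messages(), yielding a message list.
inner_prompt = ChatPromptTemplate.from_template("You said: {utterance}")

pipeline_prompt = PipelinePromptTemplate(
    final_prompt=final_prompt, pipeline_prompts=[("history", inner_prompt)]
)
assert pipeline_prompt.input_variables == ["utterance"]
# pipeline_prompt.format_prompt(utterance="hi").to_messages()
# -> [HumanMessage(content="You said: hi")]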

@@ -0,0 +1,45 @@
from langchain.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.prompts.prompt import PromptTemplate


def test_get_input_variables() -> None:
    prompt_a = PromptTemplate.from_template("{foo}")
    prompt_b = PromptTemplate.from_template("{bar}")
    pipeline_prompt = PipelinePromptTemplate(
        final_prompt=prompt_b, pipeline_prompts=[("bar", prompt_a)]
    )
    assert pipeline_prompt.input_variables == ["foo"]


def test_simple_pipeline() -> None:
    prompt_a = PromptTemplate.from_template("{foo}")
    prompt_b = PromptTemplate.from_template("{bar}")
    pipeline_prompt = PipelinePromptTemplate(
        final_prompt=prompt_b, pipeline_prompts=[("bar", prompt_a)]
    )
    output = pipeline_prompt.format(foo="jim")
    assert output == "jim"


def test_multi_variable_pipeline() -> None:
    prompt_a = PromptTemplate.from_template("{foo}")
    prompt_b = PromptTemplate.from_template("okay {bar} {baz}")
    pipeline_prompt = PipelinePromptTemplate(
        final_prompt=prompt_b, pipeline_prompts=[("bar", prompt_a)]
    )
    output = pipeline_prompt.format(foo="jim", baz="deep")
    assert output == "okay jim deep"


def test_partial_with_chat_prompts() -> None:
    prompt_a = ChatPromptTemplate(
        input_variables=["foo"], messages=[MessagesPlaceholder(variable_name="foo")]
    )
    prompt_b = ChatPromptTemplate.from_template("jim {bar}")
    pipeline_prompt = PipelinePromptTemplate(
        final_prompt=prompt_a, pipeline_prompts=[("foo", prompt_b)]
    )
    assert pipeline_prompt.input_variables == ["bar"]
    output = pipeline_prompt.format_prompt(bar="okay")
    assert output.to_messages()[0].content == "jim okay"