Commit 2a819e3196 (mirror of https://github.com/hwchase17/langchain-hub, synced 2024-11-04 06:00:17 +00:00)
Changed file (the prompt-validation script; path not shown in this render). The check no longer requires exactly two files per directory; it now validates every non-README file:

```diff
@@ -7,12 +7,11 @@ folders = BASE_FOLDER.glob("**")
 def check_files(files):
-    if len(files) != 2:
-        raise ValueError(f"Each directory should have two files, got {len(files)}")
     file_names = [f.name for f in files]
     if "README.md" not in file_names:
         raise ValueError(f"Expected to find a README.md file, but found {files}")
-    other_file = [file for file in files if file.name != "README.md"][0]
-    if other_file.suffix in (".json", ".yaml"):
-        load_prompt(other_file)
-    # TODO: testing for python files
+    other_files = [file for file in files if file.name != "README.md"]
+    for other_file in other_files:
+        if other_file.suffix in (".json", ".yaml"):
+            load_prompt(other_file)
+        # TODO: testing for python files
```
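For context, a minimal sketch of the loop that might drive `check_files`, reconstructed from the hunk context `folders = BASE_FOLDER.glob("**")`; the directory-walking details are assumptions, as the surrounding harness is not shown in this diff:

```python
# Hypothetical harness around check_files (reconstructed, not from the diff).
from pathlib import Path

from langchain.prompts.loading import load_prompt

BASE_FOLDER = Path("prompts")


def check_files(files):
    file_names = [f.name for f in files]
    if "README.md" not in file_names:
        raise ValueError(f"Expected to find a README.md file, but found {files}")
    other_files = [file for file in files if file.name != "README.md"]
    for other_file in other_files:
        if other_file.suffix in (".json", ".yaml"):
            # Loading doubles as validation: malformed prompts raise here.
            load_prompt(other_file)
        # TODO: testing for python files


folders = BASE_FOLDER.glob("**")  # yields prompts/ and every subdirectory
for folder in folders:
    files = [f for f in folder.iterdir() if f.is_file()]
    if files:  # leaf prompt directories hold a README plus prompt files
        check_files(files)
```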
prompts/api/api_response/README.md (new file, @@ -0,0 +1,35 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import APIChain

llm = ...
api_docs = ...
prompt = load_from_hub('api/api_response/<file-name>')
chain = APIChain.from_llm_and_api_docs(llm, api_docs, api_response_prompt=prompt)
```
````
prompts/api/api_response/prompt.json (new file, @@ -0,0 +1,11 @@):

```json
{
    "input_variables": [
        "api_docs",
        "question",
        "api_url",
        "api_response"
    ],
    "output_parser": null,
    "template": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url: {api_url}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:",
    "template_format": "f-string"
}
```
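A serialized prompt like this can be loaded and rendered directly, which is handy for eyeballing the final text; a small sketch (the variable values below are invented for illustration):

```python
from langchain.prompts.loading import load_prompt

prompt = load_prompt("prompts/api/api_response/prompt.json")
print(prompt.format(
    api_docs="GET /weather?city=<name> returns the current weather as JSON.",  # made-up docs
    question="What is the weather in Berlin?",
    api_url="https://api.example.com/weather?city=Berlin",  # hypothetical URL
    api_response='{"temp_c": 21, "conditions": "sunny"}',
))
```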
prompts/api/api_url/README.md (new file, @@ -0,0 +1,35 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import APIChain

llm = ...
api_docs = ...
prompt = load_from_hub('api/api_url/<file-name>')
chain = APIChain.from_llm_and_api_docs(llm, api_docs, api_url_prompt=prompt)
```
````
prompts/api/api_url/prompt.json (new file, @@ -0,0 +1,9 @@):

```json
{
    "input_variables": [
        "api_docs",
        "question"
    ],
    "output_parser": null,
    "template": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:",
    "template_format": "f-string"
}
```
prompts/conversation/basic/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import ConversationChain

llm = ...
prompt = load_from_hub('conversation/basic/<file-name>')
chain = ConversationChain(llm=llm, prompt=prompt)
```
````
prompts/conversation/basic/prompt.json (new file, @@ -0,0 +1,9 @@):

```json
{
    "input_variables": [
        "history",
        "input"
    ],
    "output_parser": null,
    "template": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:",
    "template_format": "f-string"
}
```
Changed file (the hello-world README, per the usage snippet), @@ -1,27 +1,34 @@. Before:

````markdown
# Hello World
> A simple prompt as an example

## Configuration
- input_variables: []
- There are no inputs
- output_parser: null
- There is no output parsing needed
- template: 'Say hello world.'
- Just a simple hello.
template_format: f-string
- We use standard f-string formatting here.

## Usage
Ex:
```python3
from langchain.prompts import load_from_hub
from langchain.llms import OpenAI
from langchain.prompts.loading import load_prompt

llm = OpenAI(temperature=0.9)
# prompt = load_from_hub("hello-world/prompt.yaml")
output = llm(prompt.format())
print(output)
```
````

After (rewritten onto the shared README template):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import LLMChain

llm = ...
prompt = load_from_hub('hello-world/<file-name>')
chain = LLMChain(llm=llm, prompt=prompt)
```
````
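Combining the old and new snippets into something runnable end to end; a sketch assuming an OpenAI API key is configured and that the serialized prompt file is named `prompt.yaml`, as the old snippet suggests:

```python
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import load_from_hub

llm = OpenAI(temperature=0.9)  # temperature carried over from the old snippet
prompt = load_from_hub("hello-world/prompt.yaml")  # file name assumed
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.predict())  # the prompt declares no input variables
```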
prompts/llm_bash/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import LLMBash

llm = ...
prompt = load_from_hub('llm_bash/<file-name>')
chain = LLMBash(llm=llm, prompt=prompt)
```
````
prompts/llm_bash/prompt.json (new file, @@ -0,0 +1,8 @@):

```json
{
    "input_variables": [
        "question"
    ],
    "output_parser": null,
    "template": "If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put \"#!/bin/bash\" in your answer. Make sure to reason step by step, using this format:\n\nQuestion: \"copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'\"\n\nI need to take the following actions:\n- List all files in the directory\n- Create a new directory\n- Copy the files from the first directory into the second directory\n```bash\nls\nmkdir myNewDirectory\ncp -r target/* myNewDirectory\n```\n\nThat is the format. Begin!\n\nQuestion: {question}",
    "template_format": "f-string"
}
```
prompts/llm_math/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import LLMMathChain

llm = ...
prompt = load_from_hub('llm_math/<file-name>')
chain = LLMMathChain(llm=llm, prompt=prompt)
```
````
prompts/llm_math/prompt.json (new file, @@ -0,0 +1,8 @@):

```json
{
    "input_variables": [
        "question"
    ],
    "output_parser": null,
    "template": "You are GPT-3, and you can't do math.\n\nYou can do basic math, and your memorization abilities are impressive, but you can't do any complex calculations that a human could not do in their head. You also have an annoying tendency to just make up highly specific, but wrong, answers.\n\nSo we hooked you up to a Python 3 kernel, and now you can execute code. If anyone gives you a hard math problem, just use this format and we\u2019ll take care of the rest:\n\nQuestion: ${{Question with hard calculation.}}\n```python\n${{Code that prints what you need to know}}\n```\n```output\n${{Output of your code}}\n```\nAnswer: ${{Answer}}\n\nOtherwise, use this simpler format:\n\nQuestion: ${{Question without hard calculation}}\nAnswer: ${{Answer}}\n\nBegin.\n\nQuestion: What is 37593 * 67?\n\n```python\nprint(37593 * 67)\n```\n```output\n2518731\n```\nAnswer: 2518731\n\nQuestion: {question}\n",
    "template_format": "f-string"
}
```
prompts/memory/summarize/README.md (new file, @@ -0,0 +1,36 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationSummaryMemory

llm = ...
prompt = load_from_hub('memory/summarize/<file-name>')
memory = ConversationSummaryMemory(llm=llm, prompt=prompt)
chain = ConversationChain(llm=llm, memory=memory)
```
````
prompts/memory/summarize/prompt.json (new file, @@ -0,0 +1,9 @@):

```json
{
    "input_variables": [
        "summary",
        "new_lines"
    ],
    "output_parser": null,
    "template": "Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial intelligence is a force for good?\nAI: Because artificial intelligence will help humans reach their full potential.\n\nNew summary:\nThe human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.\nEND OF EXAMPLE\n\nCurrent summary:\n{summary}\n\nNew lines of conversation:\n{new_lines}\n\nNew summary:",
    "template_format": "f-string"
}
```
prompts/pal/README.md (new file, @@ -0,0 +1,36 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import PALChain

llm = ...
stop = ...
get_answer_expr = ...
prompt = load_from_hub('pal/<file-name>')
chain = PALChain(llm=llm, prompt=prompt, stop=stop, get_answer_expr=get_answer_expr)
```
````
prompts/pal/colored_objects.json (new file, @@ -0,0 +1,8 @@):

```json
{
    "input_variables": [
        "question"
    ],
    "output_parser": null,
    "template": "# Generate Python3 Code to solve problems\n# Q: On the nightstand, there is a red pencil, a purple mug, a burgundy keychain, a fuchsia teddy bear, a black plate, and a blue stress ball. What color is the stress ball?\n# Put objects into a dictionary for quick look up\nobjects = dict()\nobjects['pencil'] = 'red'\nobjects['mug'] = 'purple'\nobjects['keychain'] = 'burgundy'\nobjects['teddy bear'] = 'fuchsia'\nobjects['plate'] = 'black'\nobjects['stress ball'] = 'blue'\n\n# Look up the color of stress ball\nstress_ball_color = objects['stress ball']\nanswer = stress_ball_color\n\n\n# Q: On the table, you see a bunch of objects arranged in a row: a purple paperclip, a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve fidget spinner, and a burgundy pen. What is the color of the object directly to the right of the stress ball?\n# Put objects into a list to record ordering\nobjects = []\nobjects += [('paperclip', 'purple')] * 1\nobjects += [('stress ball', 'pink')] * 1\nobjects += [('keychain', 'brown')] * 1\nobjects += [('scrunchiephone charger', 'green')] * 1\nobjects += [('fidget spinner', 'mauve')] * 1\nobjects += [('pen', 'burgundy')] * 1\n\n# Find the index of the stress ball\nstress_ball_idx = None\nfor i, object in enumerate(objects):\n    if object[0] == 'stress ball':\n        stress_ball_idx = i\n        break\n\n# Find the directly right object\ndirect_right = objects[i+1]\n\n# Check the directly right object's color\ndirect_right_color = direct_right[1]\nanswer = direct_right_color\n\n\n# Q: On the nightstand, you see the following items arranged in a row: a teal plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug, a pink notebook, and a grey cup. How many non-orange items do you see to the left of the teal item?\n# Put objects into a list to record ordering\nobjects = []\nobjects += [('plate', 'teal')] * 1\nobjects += [('keychain', 'burgundy')] * 1\nobjects += [('scrunchiephone charger', 'yellow')] * 1\nobjects += [('mug', 'orange')] * 1\nobjects += [('notebook', 'pink')] * 1\nobjects += [('cup', 'grey')] * 1\n\n# Find the index of the teal item\nteal_idx = None\nfor i, object in enumerate(objects):\n    if object[1] == 'teal':\n        teal_idx = i\n        break\n\n# Find non-orange items to the left of the teal item\nnon_orange = [object for object in objects[:i] if object[1] != 'orange']\n\n# Count number of non-orange objects\nnum_non_orange = len(non_orange)\nanswer = num_non_orange\n\n\n# Q: {question}\n",
    "template_format": "f-string"
}
```
prompts/pal/math.json (new file, @@ -0,0 +1,8 @@):

```json
{
    "input_variables": [
        "question"
    ],
    "output_parser": null,
    "template": "Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\n\n# solution in Python:\n\n\ndef solution():\n    \"\"\"Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\"\"\"\n    money_initial = 23\n    bagels = 5\n    bagel_cost = 3\n    money_spent = bagels * bagel_cost\n    money_left = money_initial - money_spent\n    result = money_left\n    return result\n\n\n\n\n\nQ: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\n\n# solution in Python:\n\n\ndef solution():\n    \"\"\"Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\"\"\"\n    golf_balls_initial = 58\n    golf_balls_lost_tuesday = 23\n    golf_balls_lost_wednesday = 2\n    golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday\n    result = golf_balls_left\n    return result\n\n\n\n\n\nQ: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\n\n# solution in Python:\n\n\ndef solution():\n    \"\"\"There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\"\"\"\n    computers_initial = 9\n    computers_per_day = 5\n    num_days = 4  # 4 days between monday and thursday\n    computers_added = computers_per_day * num_days\n    computers_total = computers_initial + computers_added\n    result = computers_total\n    return result\n\n\n\n\n\nQ: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\n\n# solution in Python:\n\n\ndef solution():\n    \"\"\"Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\"\"\"\n    toys_initial = 5\n    mom_toys = 2\n    dad_toys = 2\n    total_received = mom_toys + dad_toys\n    total_toys = toys_initial + total_received\n    result = total_toys\n    return result\n\n\n\n\n\nQ: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\n\n# solution in Python:\n\n\ndef solution():\n    \"\"\"Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\"\"\"\n    jason_lollipops_initial = 20\n    jason_lollipops_after = 12\n    denny_lollipops = jason_lollipops_initial - jason_lollipops_after\n    result = denny_lollipops\n    return result\n\n\n\n\n\nQ: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\n\n# solution in Python:\n\n\ndef solution():\n    \"\"\"Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\"\"\"\n    leah_chocolates = 32\n    sister_chocolates = 42\n    total_chocolates = leah_chocolates + sister_chocolates\n    chocolates_eaten = 35\n    chocolates_left = total_chocolates - chocolates_eaten\n    result = chocolates_left\n    return result\n\n\n\n\n\nQ: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\n\n# solution in Python:\n\n\ndef solution():\n    \"\"\"If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\"\"\"\n    cars_initial = 3\n    cars_arrived = 2\n    total_cars = cars_initial + cars_arrived\n    result = total_cars\n    return result\n\n\n\n\n\nQ: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\n\n# solution in Python:\n\n\ndef solution():\n    \"\"\"There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\"\"\"\n    trees_initial = 15\n    trees_after = 21\n    trees_added = trees_after - trees_initial\n    result = trees_added\n    return result\n\n\n\n\n\nQ: {question}\n\n# solution in Python:\n\n\n",
    "template_format": "f-string"
}
```
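The two PAL prompts expect different completion shapes: math.json has the model emit a `solution()` function, while colored_objects.json has it assign to an `answer` variable. Below is a sketch of wiring up the colored-objects prompt; the `stop` and `get_answer_expr` values are assumptions inferred from those shapes, not taken from this diff:

```python
from langchain.chains import PALChain
from langchain.prompts.loading import load_prompt

llm = ...  # any LLM
prompt = load_prompt("prompts/pal/colored_objects.json")
chain = PALChain(
    llm=llm,
    prompt=prompt,
    stop="\n\n\n",                    # assumed: few-shot examples are separated by blank lines
    get_answer_expr="print(answer)",  # assumed: the generated code assigns to `answer`
)
```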
prompts/qa/map_reduce/question/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains.question_answering import load_qa_chain

llm = ...
prompt = load_from_hub('qa/map_reduce/question/<file-name>')
chain = load_qa_chain(llm, chain_type="map_reduce", question_prompt=prompt)
```
````
Deleted file (path not shown in this render), @@ -1,3 +0,0 @@:

```diff
-This prompt is a basic implementation of a question answering prompt for a map-reduce chain.
-Specifically, it is the "map" prompt that gets applied to all input documents.
-It takes in a single variable for the document (`context`) and then a variable for the question (`question`)
```
prompts/qa/map_reduce/reduce/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains.question_answering import load_qa_chain

llm = ...
prompt = load_from_hub('qa/map_reduce/reduce/<file-name>')
chain = load_qa_chain(llm, chain_type="map_reduce", combine_prompt=prompt)
```
````
prompts/qa/map_reduce/reduce/basic.json (new file, +9 lines): file diff suppressed because one or more lines are too long.
Deleted file (path not shown in this render), @@ -1,3 +0,0 @@:

```diff
-This prompt is a basic implementation of a question answering prompt for a map-reduce chain.
-Specifically, it is the "reduce" prompt that gets applied documents after they have initially been asked for an answer.
-It takes in a single variable for the initial responses (`summaries`) and then a variable for the question (`question`)
```
Deleted file (the old map-reduce combine prompt defined in Python; path not shown in this render), @@ -1,35 +0,0 @@:

```python
from langchain.prompts import PromptTemplate


combine_prompt_template = """Given the following extracted parts of a long document and a question, create a final answer.
If you don't know the answer, just say that you don't know. Don't try to make up an answer.

QUESTION: Which state/country's law governs the interpretation of the contract?
=========
Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.

Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.

Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,
=========
FINAL ANSWER: This Agreement is governed by English law.

QUESTION: What did the president say about Michael Jackson?
=========
Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.

Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.

Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.

Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.
=========
FINAL ANSWER: The president did not mention Michael Jackson.

QUESTION: {question}
=========
{summaries}
=========
FINAL ANSWER:"""
PROMPT = PromptTemplate(
    template=combine_prompt_template, input_variables=["summaries", "question"]
)
```
prompts/qa/refine/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains.question_answering import load_qa_chain

llm = ...
prompt = load_from_hub('qa/refine/<file-name>')
chain = load_qa_chain(llm, chain_type="refine", refine_prompt=prompt)
```
````
prompts/qa/refine/basic.json (new file, @@ -0,0 +1,10 @@):

```json
{
    "input_variables": [
        "question",
        "existing_answer",
        "context_str"
    ],
    "output_parser": null,
    "template": "The original question is as follows: {question}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer(only if needed) with some more context below.\n------------\n{context_str}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.",
    "template_format": "f-string"
}
```
prompts/qa/stuff/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains.question_answering import load_qa_chain

llm = ...
prompt = load_from_hub('qa/stuff/<file-name>')
chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)
```
````
Deleted file (path not shown in this render), @@ -1,2 +0,0 @@:

```diff
-This prompt is a basic implementation of a question answering prompt for a "stuff" chain.
-It takes in a single variable for all the documents (`context`) and then a variable for the question (`question`)
```
prompts/qa_with_sources/map_reduce/reduce/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains.qa_with_sources import load_qa_with_sources_chain

llm = ...
prompt = load_from_hub('qa_with_sources/map_reduce/reduce/<file-name>')
chain = load_qa_with_sources_chain(llm, chain_type="map_reduce", combine_prompt=prompt)
```
````
New file (path not shown in this render): file diff suppressed because one or more lines are too long.
prompts/qa_with_sources/refine/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains.qa_with_sources import load_qa_with_sources_chain

llm = ...
prompt = load_from_hub('qa_with_sources/refine/<file-name>')
chain = load_qa_with_sources_chain(llm, chain_type="refine", refine_prompt=prompt)
```
````
prompts/qa_with_sources/refine/with_sources.json (new file, @@ -0,0 +1,10 @@):

```json
{
    "input_variables": [
        "question",
        "existing_answer",
        "context_str"
    ],
    "output_parser": null,
    "template": "The original question is as follows: {question}\nWe have provided an existing answer, including sources: {existing_answer}\nWe have the opportunity to refine the existing answer(only if needed) with some more context below.\n------------\n{context_str}\n------------\nGiven the new context, refine the original answer to better answer the question. If you do update it, please update the sources as well. If the context isn't useful, return the original answer.",
    "template_format": "f-string"
}
```
prompts/qa_with_sources/stuff/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains.qa_with_sources import load_qa_with_sources_chain

llm = ...
prompt = load_from_hub('qa_with_sources/stuff/<file-name>')
chain = load_qa_with_sources_chain(llm, chain_type="stuff", prompt=prompt)
```
````
prompts/qa_with_sources/stuff/with_sources.json (new file, +9 lines): file diff suppressed because one or more lines are too long.
prompts/sql_query/language_to_sql_output/README.md (new file, @@ -0,0 +1,35 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import SQLDatabaseChain

llm = ...
database = ...
prompt = load_from_hub('sql_query/language_to_sql_output/<file-name>')
chain = SQLDatabaseChain(llm=llm, database=database, prompt=prompt)
```
````
prompts/sql_query/language_to_sql_output/prompt.json (new file, @@ -0,0 +1,11 @@):

```json
{
    "input_variables": [
        "input",
        "table_info",
        "dialect",
        "top_k"
    ],
    "output_parser": null,
    "template": "Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results using the LIMIT clause. You can order the results by a relevant column to return the most interesting examples in the database.\nUse the following format:\n\nQuestion: \"Question here\"\nSQLQuery: \"SQL Query to run\"\nSQLResult: \"Result of the SQLQuery\"\nAnswer: \"Final answer here\"\n\nOnly use the following tables:\n\n{table_info}\n\nQuestion: {input}",
    "template_format": "f-string"
}
```
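To illustrate how the four input variables get filled, here is a sketch that formats this prompt directly; the schema and values are invented, and in a real run SQLDatabaseChain supplies the database-derived fields:

```python
from langchain.prompts.loading import load_prompt

prompt = load_prompt("prompts/sql_query/language_to_sql_output/prompt.json")
print(prompt.format(
    input="How many employees are there?",                        # the user question
    table_info="CREATE TABLE employees (id INT, name TEXT)",      # hypothetical schema
    dialect="sqlite",
    top_k=5,
))
```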
prompts/sql_query/relevant_tables/README.md (new file, @@ -0,0 +1,35 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import SQLDatabaseSequentialChain

llm = ...
database = ...
prompt = load_from_hub('sql_query/relevant_tables/<file-name>')
chain = SQLDatabaseSequentialChain.from_llm(llm, database, decider_prompt=prompt)
```
````
prompts/sql_query/relevant_tables/relevant_tables.py (new file, @@ -0,0 +1,16 @@):

```python
from langchain.prompts.base import CommaSeparatedListOutputParser
from langchain.prompts.prompt import PromptTemplate


_DECIDER_TEMPLATE = """Given the below input question and list of potential tables, output a comma separated list of the table names that may be neccessary to answer this question.

Question: {query}

Table Names: {table_names}

Relevant Table Names:"""
DECIDER_PROMPT = PromptTemplate(
    input_variables=["query", "table_names"],
    template=_DECIDER_TEMPLATE,
    output_parser=CommaSeparatedListOutputParser(),
)
```
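A quick sketch of what this prompt plus its attached output parser produce; the question and table names below are invented for illustration:

```python
# Hypothetical use of DECIDER_PROMPT (values are made up).
text = DECIDER_PROMPT.format(
    query="How many employees are there?",
    table_names="employees, departments, salaries",
)
# An LLM completion such as "employees, salaries" is then turned into a
# Python list by the attached parser:
tables = DECIDER_PROMPT.output_parser.parse("employees, salaries")
# -> ["employees", "salaries"]
```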
Deleted file (the old top-level summarize README; path not shown in this render), @@ -1,9 +0,0 @@:

```diff
-# Summarize
-This is a type of chain to distill large amounts of information into a small amount.
-
-There are three types of summarize chains:
-1. Stuff: This is a simple chain to stuff all the text from each document into one propmt. This is limited by token window so this approach only works for smaller amounts of data.
-2. Map Reduce: This maps a summarize prompt onto each data chunk and then combines all the outputs to finally reduce using a summarization prompt.
-3. Refine: This iteratively passes in each chunk of data and update a continously update an evolving summary to be more accurate based on the new chunk of data given.
-
-## Usage
```
Deleted file (path not shown in this render), @@ -1,2 +0,0 @@:

```diff
-# Map Reduce
-This maps a summarize prompt onto each data chunk and then combines all the outputs to finally reduce using a summarization prompt.
```
prompts/summarize/map_reduce/map/README.md (new file, @@ -0,0 +1,34 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains.summarize import load_summarize_chain

llm = ...
prompt = load_from_hub('summarize/map_reduce/map/<file-name>')
chain = load_summarize_chain(llm, chain_type="map_reduce", map_prompt=prompt)
```
````
Changed file (the summarize/refine README, per the usage snippet), @@ -1,2 +1,34 @@. Before:

````markdown
# Refine
This iteratively passes in each chunk of data and update a continously update an evolving summary to be more accurate based on the new chunk of data given.
````

After (rewritten onto the shared README template):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains.summarize import load_summarize_chain

llm = ...
prompt = load_from_hub('summarize/refine/<file-name>')
chain = load_summarize_chain(llm, chain_type="refine", refine_prompt=prompt)
```
````
Deleted file (a speculative prompt-config sketch; path not shown in this render), @@ -1,13 +0,0 @@:

```diff
-# Prompt config for a chain?
-
-input_variables: ['text']
-output_parser: null
-# what primitives do we allow?
-template: [
-    map:
-        input: 'text'
-        prompt: 'prompt.yaml',
-    reduce:
-        prompt: 'promptSummarize.yaml'
-]
-template_format: f-string
```
Changed file (the refine summarize prompt in YAML; path not shown in this render). The template scalar switches from single to double quotes, presumably because the apostrophe in "isn't" would prematurely terminate a single-quoted YAML scalar:

```diff
@@ -1,6 +1,6 @@
 input_variables: [existing_answer, text]
 output_parser: null
-template: '
+template: "
 Your job is to produce a final summary\n
 We have provided an existing summary up to a certain point: {existing_answer}\n
 We have the opportunity to refine the existing summary
@@ -9,5 +9,5 @@ template: '
 {text}\n
 ------------\n
 Given the new context, refine the original summary
-If the context isn't useful, return the original summary.'
+If the context isn't useful, return the original summary."
 template_format: f-string
```
Deleted file (the old stuff summarize prompt in YAML; path not shown in this render), @@ -1,10 +0,0 @@:

```diff
-input_variables: [text]
-output_parser: null
-template: 'Write a concise summary of the following:
-
-
-{text}
-
-
-CONCISE SUMMARY:'
-template_format: f-string
```
Changed file (the summarize/stuff README, per the usage snippet), @@ -1,2 +1,34 @@. Before:

````markdown
# Stuff
This is a simple chain to stuff all the text from each document into one propmt. This is limited by token window so this approach only works for smaller amounts of data.
````

After (rewritten onto the shared README template):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains.summarize import load_summarize_chain

llm = ...
prompt = load_from_hub('summarize/stuff/<file-name>')
chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt)
```
````
prompts/vector_db_qa/README.md (new file, @@ -0,0 +1,35 @@):

````markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

```python
from langchain.prompts import load_from_hub
from langchain.chains import VectorDBQA

llm = ...
vectorstore = ...
prompt = load_from_hub('vector_db_qa/<file-name>')
chain = VectorDBQA.from_llm(llm, prompt=prompt, vectorstore=vectorstore)
```
````
prompts/vector_db_qa/prompt.json (new file, @@ -0,0 +1,9 @@):

```json
{
    "input_variables": [
        "context",
        "question"
    ],
    "output_parser": null,
    "template": "Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:",
    "template_format": "f-string"
}
```
readme_template.md (new file, @@ -0,0 +1,27 @@):

```markdown
<!-- Add a template for READMEs that capture the utility of prompts -->

# Description of {{prompt}}

{{High level text description of the prompt, including use cases.}}

## Compatible Chains

Below is a list of chains we expect this prompt to be compatible with.

1. {{Chain Name}}: {{Path to chain in module}}
2. ...

## Inputs

This is a description of the inputs that the prompt expects.

1. {{input_var}}: {{Description}}
2. ...

## Usage

Below is a code snippet for how to use the prompt.

{{Code snippet}}
```