diff --git a/img/introduction.png b/img/introduction.png
new file mode 100644
index 0000000..6022d8a
Binary files /dev/null and b/img/introduction.png differ
diff --git a/pages/_meta.json b/pages/_meta.json
index f71b694..f4f2c4f 100644
--- a/pages/_meta.json
+++ b/pages/_meta.json
@@ -1,5 +1,5 @@
 {
-  "index": "Preface",
+  "index": "Prompt Engineering",
   "introduction": "Introduction",
   "techniques": "Techniques",
   "applications": "Applications",
@@ -16,7 +16,7 @@
   "contact": {
     "title": "Contact ↗",
     "type": "page",
-    "href": "https://twitter.com/shuding_",
+    "href": "https://twitter.com/dair_ai",
     "newWindow": true
   }
 }
diff --git a/pages/about.mdx b/pages/about.mdx
index ec6faed..e946860 100644
--- a/pages/about.mdx
+++ b/pages/about.mdx
@@ -1,3 +1,3 @@
 # About
 
-This is the about page! This page is shown on the navbar.
+The Prompt Engineering Guide is a project by DAIR.AI. It aims to educate researchers and practitioners about prompt engineering.
diff --git a/pages/applications.mdx b/pages/applications.mdx
new file mode 100644
index 0000000..ab686fd
--- /dev/null
+++ b/pages/applications.mdx
@@ -0,0 +1,21 @@
+# Prompting Applications
+
+In this guide, we will cover some advanced and interesting ways we can use prompt engineering to perform useful and more advanced tasks.
+
+**Note that this section is under heavy development.**
+
+import { Card, Cards } from 'nextra-theme-docs'
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/pages/applications/_meta.json b/pages/applications/_meta.json
index b1a9993..8b6a913 100644
--- a/pages/applications/_meta.json
+++ b/pages/applications/_meta.json
@@ -1,5 +1,4 @@
 {
-  "introduction": "Prompting Applications",
   "generating": "Generating Data",
   "pal": "Program-Aided Language Models"
 }
\ No newline at end of file
diff --git a/pages/applications/introduction.mdx b/pages/applications/introduction.mdx
deleted file mode 100644
index 656d95d..0000000
--- a/pages/applications/introduction.mdx
+++ /dev/null
@@ -1,3 +0,0 @@
-In this guide we will cover some advanced and interesting ways we can use prompt engineering to perform useful and more advanced tasks.
-
-**Note that this section is under heavy development.**
\ No newline at end of file
diff --git a/pages/applications/pal.mdx b/pages/applications/pal.mdx
index e69de29..479b957 100644
--- a/pages/applications/pal.mdx
+++ b/pages/applications/pal.mdx
@@ -0,0 +1,104 @@
+# PAL (Program-Aided Language Models)
+
+import { Callout, FileTree } from 'nextra-theme-docs'
+import {Screenshot} from 'components/screenshot'
+import PAL from '../../img/pal.png'
+
+[Gao et al. (2022)](https://arxiv.org/abs/2211.10435) presents a method that uses LLMs to read natural language problems and generate programs as the intermediate reasoning steps. Coined program-aided language models (PAL), it differs from chain-of-thought prompting in that, instead of using free-form text to obtain a solution, it offloads the solution step to a programmatic runtime such as a Python interpreter.
+
+
+
+Let's look at an example using LangChain and OpenAI GPT-3. We are interested in developing a simple application that's able to interpret the question being asked and provide an answer by leveraging the Python interpreter.
+
+Specifically, we are interested in creating a function that allows us to use the LLM to answer questions that require date understanding. We will provide the LLM with a prompt that includes a few exemplars adopted from [here](https://github.com/reasoning-machines/pal/blob/main/pal/prompt/date_understanding_prompt.py).
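+
+Before running the example, you will need a few libraries and an OpenAI API key that `python-dotenv` can load. The following is a typical setup we assume for this walkthrough (the package list and `.env` layout are assumptions, not something prescribed by the original prompt):
+
+```python
+# Assumed setup for this walkthrough:
+#   pip install openai langchain python-dotenv python-dateutil
+# and a .env file in the project root containing your API key:
+#   OPENAI_API_KEY=<your-openai-api-key>
+```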
+
+These are the imports we need:
+
+```python
+import openai
+from datetime import datetime
+from dateutil.relativedelta import relativedelta
+import os
+from langchain.llms import OpenAI
+from dotenv import load_dotenv
+```
+
+Let's first configure a few things:
+
+```python
+load_dotenv()
+
+# API configuration
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+# for LangChain
+os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
+```
+
+Set up the model instance:
+
+```python
+llm = OpenAI(model_name='text-davinci-003', temperature=0)
+```
+
+Set up the prompt + question:
+
+```python
+question = "Today is 27 February 2023. I was born exactly 25 years ago. What is the date I was born in MM/DD/YYYY?"
+
+DATE_UNDERSTANDING_PROMPT = """
+# Q: 2015 is coming in 36 hours. What is the date one week from today in MM/DD/YYYY?
+# If 2015 is coming in 36 hours, then today is 36 hours before.
+today = datetime(2015, 1, 1) - relativedelta(hours=36)
+# One week from today,
+one_week_from_today = today + relativedelta(weeks=1)
+# The answer formatted with %m/%d/%Y is
+one_week_from_today.strftime('%m/%d/%Y')
+# Q: The first day of 2019 is a Tuesday, and today is the first Monday of 2019. What is the date today in MM/DD/YYYY?
+# If the first day of 2019 is a Tuesday, and today is the first Monday of 2019, then today is 6 days later.
+today = datetime(2019, 1, 1) + relativedelta(days=6)
+# The answer formatted with %m/%d/%Y is
+today.strftime('%m/%d/%Y')
+# Q: The concert was scheduled to be on 06/01/1943, but was delayed by one day to today. What is the date 10 days ago in MM/DD/YYYY?
+# If the concert was scheduled to be on 06/01/1943, but was delayed by one day to today, then today is one day later.
+today = datetime(1943, 6, 1) + relativedelta(days=1)
+# 10 days ago,
+ten_days_ago = today - relativedelta(days=10)
+# The answer formatted with %m/%d/%Y is
+ten_days_ago.strftime('%m/%d/%Y')
+# Q: It is 4/19/1969 today. What is the date 24 hours later in MM/DD/YYYY?
+# It is 4/19/1969 today.
+today = datetime(1969, 4, 19)
+# 24 hours later,
+later = today + relativedelta(hours=24)
+# The answer formatted with %m/%d/%Y is
+later.strftime('%m/%d/%Y')
+# Q: Jane thought today is 3/11/2002, but today is in fact Mar 12, which is 1 day later. What is the date 24 hours later in MM/DD/YYYY?
+# If Jane thought today is 3/11/2002, but today is in fact Mar 12, then today is 3/12/2002.
+today = datetime(2002, 3, 12)
+# 24 hours later,
+later = today + relativedelta(hours=24)
+# The answer formatted with %m/%d/%Y is
+later.strftime('%m/%d/%Y')
+# Q: Jane was born on the last day of February in 2001. Today is her 16-year-old birthday. What is the date yesterday in MM/DD/YYYY?
+# If Jane was born on the last day of February in 2001 and today is her 16-year-old birthday, then today is 16 years later.
+today = datetime(2001, 2, 28) + relativedelta(years=16)
+# Yesterday,
+yesterday = today - relativedelta(days=1)
+# The answer formatted with %m/%d/%Y is
+yesterday.strftime('%m/%d/%Y')
+# Q: {question}
+""".strip() + '\n'
+```
+
+```python
+llm_out = llm(DATE_UNDERSTANDING_PROMPT.format(question=question))
+print(llm_out)
+```
+
+```python
+exec(llm_out)
+print(born)
+```
+
+This will output the following: `02/27/1998`
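+
+Note that `exec(llm_out)` simply executes whatever program the model generated, and `print(born)` assumes that program assigns the answer to a variable named `born`. If you want to be a bit more defensive about the variable name and the output format, a sketch like the following (not part of the original example) runs the generated code in an explicit namespace and formats the result itself:
+
+```python
+# Run the generated program in its own namespace (a sketch; it still assumes
+# the model assigns the final date to a variable named `born`):
+namespace = {"datetime": datetime, "relativedelta": relativedelta}
+exec(llm_out, namespace)
+answer = namespace.get("born")
+if answer is not None:
+    # Format explicitly in case the generated code leaves a datetime object
+    print(answer.strftime('%m/%d/%Y') if isinstance(answer, datetime) else answer)
+```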
\ No newline at end of file
diff --git a/pages/index.mdx b/pages/index.mdx
index e8f41af..887c2f9 100644
--- a/pages/index.mdx
+++ b/pages/index.mdx
@@ -1,5 +1,55 @@
-# Preface
+# Prompt Engineering Guide
 
 Prompt engineering is a relatively new discipline for developing and optimizing prompts to efficiently use language models (LMs) for a wide variety of applications and research topics. Prompt engineering skills help to better understand the capabilities and limitations of large language models (LLMs). Researchers use prompt engineering to improve the capacity of LLMs on a wide range of common and complex tasks such as question answering and arithmetic reasoning. Developers use prompt engineering to design robust and effective prompting techniques that interface with LLMs and other tools.
 
 Motivated by the high interest in developing with LLMs, we have created this new prompt engineering guide that contains all the latest papers, learning guides, lectures, references, and tools related to prompt engineering.
+
+import { Card, Cards } from 'nextra-theme-docs'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/pages/introduction/introduction.mdx b/pages/introduction.mdx
similarity index 56%
rename from pages/introduction/introduction.mdx
rename to pages/introduction.mdx
index 4715934..cc9eb40 100644
--- a/pages/introduction/introduction.mdx
+++ b/pages/introduction.mdx
@@ -4,4 +4,39 @@ Prompt engineering is a relatively new discipline for developing and optimizing
 
 This guide covers the basics of standard prompts to provide a rough idea on how to use prompts to interact and instruct large language models (LLMs).
 
-All examples are tested with `text-davinci-003` (using OpenAI's playground) unless otherwise specified. It uses the default configurations, e.g., `temperature=0.7` and `top-p=1`.
\ No newline at end of file
+All examples are tested with `text-davinci-003` (using OpenAI's playground) unless otherwise specified. It uses the default configurations, e.g., `temperature=0.7` and `top-p=1`.
+
+import { Card, Cards } from 'nextra-theme-docs'
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/pages/introduction/_meta.json b/pages/introduction/_meta.json
index 2e37cb3..738e37f 100644
--- a/pages/introduction/_meta.json
+++ b/pages/introduction/_meta.json
@@ -1,7 +1,6 @@
 {
-  "introduction": "Introduction",
   "basics": "Basic Prompts",
-  "settings": "A Word on LLM Settings",
+  "settings": "LLM Settings",
   "standard": "Standard Prompts",
   "elements": "Prompt Elements",
   "tips": "General Tips for Designing Prompts",
diff --git a/pages/introduction/settings.mdx b/pages/introduction/settings.mdx
index 16f73a8..83d4e50 100644
--- a/pages/introduction/settings.mdx
+++ b/pages/introduction/settings.mdx
@@ -1,4 +1,4 @@
-# A Word on LLM Settings
+# LLM Settings
 
 When working with prompts, you will be interacting with the LLM via an API or directly. You can configure a few parameters to get different results for your prompts.
diff --git a/pages/models.mdx b/pages/models.mdx
new file mode 100644
index 0000000..c495239
--- /dev/null
+++ b/pages/models.mdx
@@ -0,0 +1,13 @@
+# Models
+
+In this section, we will cover some of the capabilities of language models by applying the latest and most advanced prompt engineering techniques.
+
+import { Card, Cards } from 'nextra-theme-docs'
+
+
+
+
+
\ No newline at end of file
diff --git a/pages/models/_meta.json b/pages/models/_meta.json
index ca2d8bd..6d981f4 100644
--- a/pages/models/_meta.json
+++ b/pages/models/_meta.json
@@ -1,5 +1,4 @@
 {
-  "introduction": "Introduction",
   "chatgpt": "ChatGPT"
 }
\ No newline at end of file
diff --git a/pages/models/introduction.mdx b/pages/models/introduction.mdx
deleted file mode 100644
index 3d8b183..0000000
--- a/pages/models/introduction.mdx
+++ /dev/null
@@ -1,3 +0,0 @@
-# Models
-
-In this section, we will cover some of the capabilities of language models by applying the latest and most advanced prompting engineering techniques.
\ No newline at end of file
diff --git a/pages/risks/introduction.mdx b/pages/risks.mdx
similarity index 66%
rename from pages/risks/introduction.mdx
rename to pages/risks.mdx
index 7769f4f..ba12eda 100644
--- a/pages/risks/introduction.mdx
+++ b/pages/risks.mdx
@@ -1,3 +1,23 @@
 # Risks & Misuses
 
 We have seen already how effective well-crafted prompts can be for various tasks using techniques like few-shot learning. As we think about building real-world applications on top of LLMs, it becomes crucial to think about the misuses, risks, and safety involved with language models. This section focuses on highlighting some of the risks and misuses of LLMs via techniques like prompt injections. It also highlights harmful behaviors including how to mitigate via effective prompting techniques. Other topics of interest include generalizability, calibration, biases, social biases, and factuality to name a few.
+
+import { Card, Cards } from 'nextra-theme-docs'
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/pages/risks/_meta.json b/pages/risks/_meta.json
index 287594d..29a5ff4 100644
--- a/pages/risks/_meta.json
+++ b/pages/risks/_meta.json
@@ -1,5 +1,4 @@
 {
-  "introduction": "Introduction",
   "adversarial": "Adversarial Prompting",
   "factuality": "Factuality",
   "biases": "Biases"
 }
diff --git a/pages/techniques.mdx b/pages/techniques.mdx
new file mode 100644
index 0000000..36876c9
--- /dev/null
+++ b/pages/techniques.mdx
@@ -0,0 +1,70 @@
+# Prompting Techniques
+
+By this point, it should be obvious that it helps to improve prompts to get better results on different tasks. That's the whole idea behind prompt engineering.
+
+While those examples were fun, let's cover a few concepts more formally before we jump into more advanced concepts.
+
+import { Card, Cards } from 'nextra-theme-docs'
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/pages/techniques/_meta.json b/pages/techniques/_meta.json
index a5b343e..411c6f5 100644
--- a/pages/techniques/_meta.json
+++ b/pages/techniques/_meta.json
@@ -1,5 +1,4 @@
 {
-  "introduction": "Introduction",
   "zeroshot": "Zero-shot Prompting",
   "fewshot": "Few-shot Prompting",
   "cot": "Chain-of-Thought Prompting",
diff --git a/pages/techniques/introduction.mdx b/pages/techniques/introduction.mdx
deleted file mode 100644
index ae8c2ac..0000000
--- a/pages/techniques/introduction.mdx
+++ /dev/null
@@ -1,5 +0,0 @@
-# Prompting Techniques
-
-By this point, it should be obvious that it helps to improve prompts to get better results on different tasks. That's the whole idea behind prompt engineering.
-
-While those examples were fun, let's cover a few concepts more formally before we jump into more advanced concepts.
\ No newline at end of file
diff --git a/theme.config.tsx b/theme.config.tsx
index 268e1cc..a936f5d 100644
--- a/theme.config.tsx
+++ b/theme.config.tsx
@@ -2,16 +2,27 @@ import React from 'react'
 import { DocsThemeConfig } from 'nextra-theme-docs'
 
 const config: DocsThemeConfig = {
-  logo: Prompt Engineering Guide,
+  logo: (
+    <>
+
+
+
+
+
+
+      Prompt Engineering Guide
+
+
+  ),
   project: {
-    link: 'https://github.com/shuding/nextra-docs-template',
+    link: 'https://github.com/dair-ai/Prompt-Engineering-Guide',
  },
   chat: {
-    link: 'https://discord.com',
+    link: 'https://discord.gg/SKgkVT8BGJ',
   },
-  docsRepositoryBase: 'https://github.com/shuding/nextra-docs-template',
+  docsRepositoryBase: 'https://github.com/dair-ai/Prompt-Engineering-Guide/tree/main/',
   footer: {
-    text: 'Nextra Docs Template',
+    text: 'Copyright © 2023 DAIR.AI',
   },
 }