From 3561f18e662418a440d94e61b3cc9381d2be345a Mon Sep 17 00:00:00 2001
From: Harrison Chase
Date: Sun, 13 Nov 2022 20:03:32 -0800
Subject: [PATCH] ape example

---
 examples/ape.ipynb               | 111 +++++++++++++++++++++++++++++++
 langchain/chains/ape/__init__.py |   0
 langchain/chains/ape/base.py     |  47 +++++++++++++
 langchain/chains/ape/prompt.py   |   9 +++
 4 files changed, 167 insertions(+)
 create mode 100644 examples/ape.ipynb
 create mode 100644 langchain/chains/ape/__init__.py
 create mode 100644 langchain/chains/ape/base.py
 create mode 100644 langchain/chains/ape/prompt.py

diff --git a/examples/ape.ipynb b/examples/ape.ipynb
new file mode 100644
index 00000000..982572cb
--- /dev/null
+++ b/examples/ape.ipynb
@@ -0,0 +1,111 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "081420ea",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "words = [\"sane\", \"direct\", \"informally\", \"unpopular\", \"subtractive\", \"nonresidential\",\n",
+    "         \"inexact\", \"uptown\", \"incomparable\", \"powerful\", \"gaseous\", \"evenly\", \"formality\",\n",
+    "         \"deliberately\", \"off\"]\n",
+    "antonyms = [\"insane\", \"indirect\", \"formally\", \"popular\", \"additive\", \"residential\",\n",
+    "            \"exact\", \"downtown\", \"comparable\", \"powerless\", \"solid\", \"unevenly\", \"informality\",\n",
+    "            \"accidentally\", \"on\"]\n",
+    "data = (words, antonyms)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "6c7c00b7",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "examples = [f\"INPUT: {i}\\nOUTPUT: {j}\" for i, j in zip(words, antonyms)]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "70d4ea31",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.chains.ape.base import APEChain\n",
+    "from langchain.llms import OpenAI"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "f64829c4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm = OpenAI()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "0663e721",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "chain = APEChain(llm=llm)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "4d65f1ee",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "' reverse the input.'"
+      ]
+     },
+     "execution_count": 7,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "chain.ape(examples)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3bd0bd06",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/langchain/chains/ape/__init__.py b/langchain/chains/ape/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/langchain/chains/ape/base.py b/langchain/chains/ape/base.py
new file mode 100644
index 00000000..b776e9e0
--- /dev/null
+++ b/langchain/chains/ape/base.py
@@ -0,0 +1,47 @@
+from typing import Dict, List
+
+from langchain.chains.base import Chain
+from pydantic import BaseModel, Extra
+from langchain.llms.base import LLM
+from langchain.chains.llm import LLMChain
+from langchain.chains.ape.prompt import PROMPT
+
+
+class APEChain(Chain, BaseModel):
+
+    llm: LLM
+
+    input_key: str = "code"  #: :meta private:
+    output_key: str = "output"  #: :meta private:
+
+    class Config:
+        """Configuration for this pydantic object."""
+
+        extra = Extra.forbid
+        arbitrary_types_allowed = True
+
+    @property
+    def input_keys(self) -> List[str]:
+        """Expect input key.
+
+        :meta private:
+        """
+        return [self.input_key]
+
+    @property
+    def output_keys(self) -> List[str]:
+        """Return output key.
+
+        :meta private:
+        """
+        return [self.output_key]
+
+    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
+        chain = LLMChain(llm=self.llm, prompt=PROMPT)
+        output = chain.run(inputs[self.input_key])
+        return {self.output_key: output}
+
+    def ape(self, examples: List[str]) -> str:
+        combined_examples = "\n\n".join(examples)
+        return self.run(combined_examples)
+
diff --git a/langchain/chains/ape/prompt.py b/langchain/chains/ape/prompt.py
new file mode 100644
index 00000000..778b425c
--- /dev/null
+++ b/langchain/chains/ape/prompt.py
@@ -0,0 +1,9 @@
+# flake8: noqa
+from langchain.prompts.prompt import Prompt
+_TEMPLATE = """I gave a friend an instruction. Based on the instruction they produced the following input-output pairs:
+
+{examples}
+
+The instruction was to"""
+
+PROMPT = Prompt(input_variables=["examples"], template=_TEMPLATE)
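
For reference, APEChain.ape joins the examples with blank lines and substitutes them into _TEMPLATE from prompt.py, so the completion returned by the LLM (' reverse the input.' in the notebook above) is the model finishing the sentence "The instruction was to". A minimal sketch of the rendered prompt, not part of the patch itself, assuming Prompt.format performs plain {examples} substitution:

# Sketch only: render the APE prompt for two of the notebook's example pairs,
# assuming Prompt.format does simple {examples} substitution.
from langchain.prompts.prompt import Prompt

_TEMPLATE = """I gave a friend an instruction. Based on the instruction they produced the following input-output pairs:

{examples}

The instruction was to"""

prompt = Prompt(input_variables=["examples"], template=_TEMPLATE)
examples = ["INPUT: sane\nOUTPUT: insane", "INPUT: direct\nOUTPUT: indirect"]
print(prompt.format(examples="\n\n".join(examples)))
# I gave a friend an instruction. Based on the instruction they produced the following input-output pairs:
#
# INPUT: sane
# OUTPUT: insane
#
# INPUT: direct
# OUTPUT: indirect
#
# The instruction was to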