From 641b71e2cd6706e90c727dcef1edff88b5992ced Mon Sep 17 00:00:00 2001 From: Zizhong Zhang Date: Thu, 31 Aug 2023 12:21:24 -0700 Subject: [PATCH] refactor: rename to OpaquePrompts (#10013) Renamed to OpaquePrompts cc @baskaryan Thanks in advance! --- ...{promptguard.ipynb => opaqueprompts.ipynb} | 36 +++++++-------- libs/langchain/langchain/llms/__init__.py | 6 +-- .../llms/{promptguard.py => opaqueprompts.py} | 46 +++++++++---------- .../{promptguard.py => opaqueprompts.py} | 18 ++++---- ...t_promptguard.py => test_opaqueprompts.py} | 14 +++--- 5 files changed, 60 insertions(+), 60 deletions(-) rename docs/extras/integrations/llms/{promptguard.ipynb => opaqueprompts.ipynb} (82%) rename libs/langchain/langchain/llms/{promptguard.py => opaqueprompts.py} (65%) rename libs/langchain/langchain/utilities/{promptguard.py => opaqueprompts.py} (83%) rename libs/langchain/tests/integration_tests/llms/{test_promptguard.py => test_opaqueprompts.py} (91%) diff --git a/docs/extras/integrations/llms/promptguard.ipynb b/docs/extras/integrations/llms/opaqueprompts.ipynb similarity index 82% rename from docs/extras/integrations/llms/promptguard.ipynb rename to docs/extras/integrations/llms/opaqueprompts.ipynb index f93244ecad..132b37e8ce 100644 --- a/docs/extras/integrations/llms/promptguard.ipynb +++ b/docs/extras/integrations/llms/opaqueprompts.ipynb @@ -4,12 +4,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# PromptGuard\n", + "# OpaquePrompts\n", "\n", - "[PromptGuard](https://promptguard.readthedocs.io/en/latest/) is a service that enables applications to leverage the power of language models without compromising user privacy. Designed for composability and ease of integration into existing applications and services, PromptGuard is consumable via a simple Python library as well as through LangChain. 
Perhaps more importantly, PromptGuard leverages the power of [confidential computing](https://en.wikipedia.org/wiki/Confidential_computing) to ensure that even the PromptGuard service itself cannot access the data it is protecting.\n", + "[OpaquePrompts](https://opaqueprompts.readthedocs.io/en/latest/) is a service that enables applications to leverage the power of language models without compromising user privacy. Designed for composability and ease of integration into existing applications and services, OpaquePrompts is consumable via a simple Python library as well as through LangChain. Perhaps more importantly, OpaquePrompts leverages the power of [confidential computing](https://en.wikipedia.org/wiki/Confidential_computing) to ensure that even the OpaquePrompts service itself cannot access the data it is protecting.\n", " \n", "\n", - "This notebook goes over how to use LangChain to interact with `PromptGuard`." + "This notebook goes over how to use LangChain to interact with `OpaquePrompts`." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# install the promptguard and langchain packages\n", - "! pip install promptguard langchain" + "# install the opaqueprompts and langchain packages\n", + "! pip install opaqueprompts langchain" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Accessing the PromptGuard API requires an API key, which you can get by creating an account on [the PromptGuard website](https://promptguard.opaque.co/). Once you have an account, you can find your API key on [the API Keys page](https://promptguard.opaque.co/api-keys)." + "Accessing the OpaquePrompts API requires an API key, which you can get by creating an account on [the OpaquePrompts website](https://opaqueprompts.opaque.co/). Once you have an account, you can find your API key on [the API Keys page](https://opaqueprompts.opaque.co/api-keys)." 
] }, { @@ -39,7 +39,7 @@ "\n", "# Set API keys\n", "\n", - "os.environ['PROMPTGUARD_API_KEY'] = \"\"\n", + "os.environ['OPAQUEPROMPTS_API_KEY'] = \"\"\n", "os.environ['OPENAI_API_KEY'] = \"\"" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# Use PromptGuard LLM Wrapper\n", + "# Use OpaquePrompts LLM Wrapper\n", "\n", - "Applying promptguard to your application could be as simple as wrapping your LLM using the PromptGuard class by replace `llm=OpenAI()` with `llm=PromptGuard(base_llm=OpenAI())`." + "Applying OpaquePrompts to your application could be as simple as wrapping your LLM using the OpaquePrompts class by replacing `llm=OpenAI()` with `llm=OpaquePrompts(base_llm=OpenAI())`." ] }, { @@ -64,7 +64,7 @@ "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferWindowMemory\n", "\n", - "from langchain.llms import PromptGuard\n", + "from langchain.llms import OpaquePrompts\n", "\n", "langchain.verbose = True\n", "langchain.debug = True\n", @@ -106,7 +106,7 @@ "\n", "chain = LLMChain(\n", " prompt=PromptTemplate.from_template(prompt_template),\n", - " llm=PromptGuard(base_llm=OpenAI()),\n", + " llm=OpaquePrompts(base_llm=OpenAI()),\n", " memory=ConversationBufferWindowMemory(k=2),\n", " verbose=True,\n", ")\n", @@ -132,10 +132,10 @@ "During our recent meeting on February 23, 2023, at 10:30 AM, John Doe provided me with his personal details. His email is johndoe@example.com and his contact number is 650-456-7890. He lives in New York City, USA, and belongs to the American nationality with Christian beliefs and a leaning towards the Democratic party. He mentioned that he recently made a transaction using his credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he noted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided his website as https://johndoeportfolio.com. 
John also discussed some of his US-specific details. He said his bank account number is 1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321, and he recently renewed his passport, the number for which is 123456789. He emphasized not to share his SSN, which is 669-45-6789. Furthermore, he mentioned that he accesses his work files remotely through the IP 192.168.1.1 and has a medical license number MED-123456.\n", "```\n", "\n", - "PromptGuard will automatically detect the sensitive data and replace it with a placeholder. \n", + "OpaquePrompts will automatically detect the sensitive data and replace it with a placeholder. \n", "\n", "```\n", - "# Context after PromptGuard\n", + "# Context after OpaquePrompts\n", "\n", "During our recent meeting on DATE_TIME_3, at DATE_TIME_2, PERSON_3 provided me with his personal details. His email is EMAIL_ADDRESS_1 and his contact number is PHONE_NUMBER_1. He lives in LOCATION_3, LOCATION_2, and belongs to the NRP_3 nationality with NRP_2 beliefs and a leaning towards the Democratic party. He mentioned that he recently made a transaction using his credit card CREDIT_CARD_1 and transferred bitcoins to the wallet address CRYPTO_1. While discussing his NRP_1 travels, he noted down his IBAN as IBAN_CODE_1. Additionally, he provided his website as URL_1. PERSON_2 also discussed some of his LOCATION_1-specific details. He said his bank account number is US_BANK_NUMBER_1 and his drivers license is US_DRIVER_LICENSE_2. His ITIN is US_ITIN_1, and he recently renewed his passport, the number for which is DATE_TIME_1. He emphasized not to share his SSN, which is US_SSN_1. 
Furthermore, he mentioned that he accesses his work files remotely through the IP IP_ADDRESS_1 and has a medical license number MED-US_DRIVER_LICENSE_1.\n", "```\n", @@ -151,7 +151,7 @@ "Response is desanitized by replacing the placeholder with the original sensitive data.\n", "\n", "```\n", - "# desanitized LLM response from PromptGuard\n", + "# desanitized LLM response from OpaquePrompts\n", "\n", "Hey John, just wanted to remind you to do a password reset for your website https://johndoeportfolio.com through your email johndoe@example.com. It's important to stay secure online, so don't forget to do it!\n", "```" @@ -161,7 +161,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Use PromptGuard in LangChain expression\n", + "# Use OpaquePrompts in LangChain expression\n", "\n", "There are functions that can be used with LangChain expression as well if a drop-in replacement doesn't offer the flexibility you need. " ] @@ -172,7 +172,7 @@ "metadata": {}, "outputs": [], "source": [ - "import langchain.utilities.promptguard as pgf\n", + "import langchain.utilities.opaqueprompts as op\n", "from langchain.schema.runnable import RunnableMap\n", "from langchain.schema.output_parser import StrOutputParser\n", "\n", @@ -180,7 +180,7 @@ "prompt=PromptTemplate.from_template(prompt_template), \n", "llm = OpenAI()\n", "pg_chain = (\n", - " pgf.sanitize\n", + " op.sanitize\n", " | RunnableMap(\n", " {\n", " \"response\": (lambda x: x[\"sanitized_input\"])\n", @@ -190,7 +190,7 @@ " \"secure_context\": lambda x: x[\"secure_context\"],\n", " }\n", " )\n", - " | (lambda x: pgf.desanitize(x[\"response\"], x[\"secure_context\"]))\n", + " | (lambda x: op.desanitize(x[\"response\"], x[\"secure_context\"]))\n", ")\n", "\n", "pg_chain.invoke({\"question\": \"Write a text message to remind John to do password reset for his website through his email to stay secure.\", \"history\": \"\"})" diff --git a/libs/langchain/langchain/llms/__init__.py 
b/libs/langchain/langchain/llms/__init__.py index d46ce1ab0c..a454e10afd 100644 --- a/libs/langchain/langchain/llms/__init__.py +++ b/libs/langchain/langchain/llms/__init__.py @@ -62,6 +62,7 @@ from langchain.llms.mosaicml import MosaicML from langchain.llms.nlpcloud import NLPCloud from langchain.llms.octoai_endpoint import OctoAIEndpoint from langchain.llms.ollama import Ollama +from langchain.llms.opaqueprompts import OpaquePrompts from langchain.llms.openai import AzureOpenAI, OpenAI, OpenAIChat from langchain.llms.openllm import OpenLLM from langchain.llms.openlm import OpenLM @@ -69,7 +70,6 @@ from langchain.llms.petals import Petals from langchain.llms.pipelineai import PipelineAI from langchain.llms.predibase import Predibase from langchain.llms.predictionguard import PredictionGuard -from langchain.llms.promptguard import PromptGuard from langchain.llms.promptlayer_openai import PromptLayerOpenAI, PromptLayerOpenAIChat from langchain.llms.replicate import Replicate from langchain.llms.rwkv import RWKV @@ -142,7 +142,7 @@ __all__ = [ "PredictionGuard", "PromptLayerOpenAI", "PromptLayerOpenAIChat", - "PromptGuard", + "OpaquePrompts", "RWKV", "Replicate", "SagemakerEndpoint", @@ -207,7 +207,7 @@ type_to_cls_dict: Dict[str, Type[BaseLLM]] = { "petals": Petals, "pipelineai": PipelineAI, "predibase": Predibase, - "promptguard": PromptGuard, + "opaqueprompts": OpaquePrompts, "replicate": Replicate, "rwkv": RWKV, "sagemaker_endpoint": SagemakerEndpoint, diff --git a/libs/langchain/langchain/llms/promptguard.py b/libs/langchain/langchain/llms/opaqueprompts.py similarity index 65% rename from libs/langchain/langchain/llms/promptguard.py rename to libs/langchain/langchain/llms/opaqueprompts.py index 9dcdfcb6af..af3ccc9672 100644 --- a/libs/langchain/langchain/llms/promptguard.py +++ b/libs/langchain/langchain/llms/opaqueprompts.py @@ -10,23 +10,23 @@ from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) -class PromptGuard(LLM): - """An 
LLM wrapper that uses PromptGuard to sanitize prompts. +class OpaquePrompts(LLM): + """An LLM wrapper that uses OpaquePrompts to sanitize prompts. Wraps another LLM and sanitizes prompts before passing it to the LLM, then de-sanitizes the response. - To use, you should have the ``promptguard`` python package installed, - and the environment variable ``PROMPTGUARD_API_KEY`` set with + To use, you should have the ``opaqueprompts`` python package installed, + and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python - from langchain.llms import PromptGuard + from langchain.llms import OpaquePrompts from langchain.chat_models import ChatOpenAI - prompt_guard_llm = PromptGuard(base_llm=ChatOpenAI()) + op_llm = OpaquePrompts(base_llm=ChatOpenAI()) """ base_llm: BaseLanguageModel @@ -39,29 +39,29 @@ class PromptGuard(LLM): @root_validator() def validate_environment(cls, values: Dict) -> Dict: - """Validates that the PromptGuard API key and the Python package exist.""" + """Validates that the OpaquePrompts API key and the Python package exist.""" try: - import promptguard as pg + import opaqueprompts as op except ImportError: raise ImportError( - "Could not import the `promptguard` Python package, " - "please install it with `pip install promptguard`." + "Could not import the `opaqueprompts` Python package, " + "please install it with `pip install opaqueprompts`." ) - if pg.__package__ is None: + if op.__package__ is None: raise ValueError( - "Could not properly import `promptguard`, " - "promptguard.__package__ is None." + "Could not properly import `opaqueprompts`, " + "opaqueprompts.__package__ is None." ) api_key = get_from_dict_or_env( - values, "promptguard_api_key", "PROMPTGUARD_API_KEY", default="" + values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default="" ) if not api_key: raise ValueError( - "Could not find PROMPTGUARD_API_KEY in the environment. 
" - "Please set it to your PromptGuard API key." - "You can get it by creating an account on the PromptGuard website: " - "https://promptguard.opaque.co/ ." + "Could not find OPAQUEPROMPTS_API_KEY in the environment. " + "Please set it to your OpaquePrompts API key." + "You can get it by creating an account on the OpaquePrompts website: " + "https://opaqueprompts.opaque.co/ ." ) return values @@ -83,14 +83,14 @@ class PromptGuard(LLM): Example: .. code-block:: python - response = prompt_guard_llm("Tell me a joke.") + response = op_llm("Tell me a joke.") """ - import promptguard as pg + import opaqueprompts as op _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager() # sanitize the prompt by replacing the sensitive information with a placeholder - sanitize_response: pg.SanitizeResponse = pg.sanitize([prompt]) + sanitize_response: op.SanitizeResponse = op.sanitize([prompt]) sanitized_prompt_value_str = sanitize_response.sanitized_texts[0] # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith. @@ -101,7 +101,7 @@ class PromptGuard(LLM): ) # desanitize the response by restoring the original sensitive information - desanitize_response: pg.DesanitizeResponse = pg.desanitize( + desanitize_response: op.DesanitizeResponse = op.desanitize( llm_response, secure_context=sanitize_response.secure_context, ) @@ -113,4 +113,4 @@ class PromptGuard(LLM): This is an override of the base class method. 
""" - return "promptguard" + return "opaqueprompts" diff --git a/libs/langchain/langchain/utilities/promptguard.py b/libs/langchain/langchain/utilities/opaqueprompts.py similarity index 83% rename from libs/langchain/langchain/utilities/promptguard.py rename to libs/langchain/langchain/utilities/opaqueprompts.py index df29cafa4c..23b02fdf2b 100644 --- a/libs/langchain/langchain/utilities/promptguard.py +++ b/libs/langchain/langchain/utilities/opaqueprompts.py @@ -31,16 +31,16 @@ def sanitize( The `secure_context` needs to be passed to the `desanitize` function. """ try: - import promptguard as pg + import opaqueprompts as op except ImportError: raise ImportError( - "Could not import the `promptguard` Python package, " - "please install it with `pip install promptguard`." + "Could not import the `opaqueprompts` Python package, " + "please install it with `pip install opaqueprompts`." ) if isinstance(input, str): # the input could be a string, so we sanitize the string - sanitize_response: pg.SanitizeResponse = pg.sanitize([input]) + sanitize_response: op.SanitizeResponse = op.sanitize([input]) return { "sanitized_input": sanitize_response.sanitized_texts[0], "secure_context": sanitize_response.secure_context, @@ -55,7 +55,7 @@ def sanitize( values.append(input[key]) # sanitize the values - sanitize_values_response: pg.SanitizeResponse = pg.sanitize(values) + sanitize_values_response: op.SanitizeResponse = op.sanitize(values) # reconstruct the dict with the sanitized values sanitized_input_values = sanitize_values_response.sanitized_texts @@ -85,13 +85,13 @@ def desanitize(sanitized_text: str, secure_context: bytes) -> str: De-sanitized text. """ try: - import promptguard as pg + import opaqueprompts as op except ImportError: raise ImportError( - "Could not import the `promptguard` Python package, " - "please install it with `pip install promptguard`." + "Could not import the `opaqueprompts` Python package, " + "please install it with `pip install opaqueprompts`." 
) - desanitize_response: pg.DesanitizeResponse = pg.desanitize( + desanitize_response: op.DesanitizeResponse = op.desanitize( sanitized_text, secure_context ) return desanitize_response.desanitized_text diff --git a/libs/langchain/tests/integration_tests/llms/test_promptguard.py b/libs/langchain/tests/integration_tests/llms/test_opaqueprompts.py similarity index 91% rename from libs/langchain/tests/integration_tests/llms/test_promptguard.py rename to libs/langchain/tests/integration_tests/llms/test_opaqueprompts.py index 599df595a0..1a2fb604bf 100644 --- a/libs/langchain/tests/integration_tests/llms/test_promptguard.py +++ b/libs/langchain/tests/integration_tests/llms/test_opaqueprompts.py @@ -1,7 +1,7 @@ -import langchain.utilities.promptguard as pgf +import langchain.utilities.opaqueprompts as op from langchain import LLMChain, PromptTemplate from langchain.llms import OpenAI -from langchain.llms.promptguard import PromptGuard +from langchain.llms.opaqueprompts import OpaquePrompts from langchain.memory import ConversationBufferWindowMemory from langchain.schema.output_parser import StrOutputParser from langchain.schema.runnable import RunnableMap @@ -42,10 +42,10 @@ Question: ```{question}``` """ -def test_promptguard() -> None: +def test_opaqueprompts() -> None: chain = LLMChain( prompt=PromptTemplate.from_template(prompt_template), - llm=PromptGuard(llm=OpenAI()), + llm=OpaquePrompts(llm=OpenAI()), memory=ConversationBufferWindowMemory(k=2), ) @@ -58,11 +58,11 @@ def test_promptguard() -> None: assert isinstance(output, str) -def test_promptguard_functions() -> None: +def test_opaqueprompts_functions() -> None: prompt = (PromptTemplate.from_template(prompt_template),) llm = OpenAI() pg_chain = ( - pgf.sanitize + op.sanitize | RunnableMap( { "response": (lambda x: x["sanitized_input"]) # type: ignore @@ -72,7 +72,7 @@ def test_promptguard_functions() -> None: "secure_context": lambda x: x["secure_context"], } ) - | (lambda x: pgf.desanitize(x["response"], 
x["secure_context"])) + | (lambda x: op.desanitize(x["response"], x["secure_context"])) ) pg_chain.invoke(