From 8bd7a9d18eeb23de9d70bf34971f33e5a3529a77 Mon Sep 17 00:00:00 2001
From: Zizhong Zhang
Date: Tue, 29 Aug 2023 12:22:30 -0700
Subject: [PATCH] feat: PromptGuard takes a list of str (#9948)

We recently decided that PromptGuard should take a list of strings instead
of a single string. @ggroode implemented the integration change.

---------

Co-authored-by: ggroode
Co-authored-by: ggroode <46691276+ggroode@users.noreply.github.com>
---
 libs/langchain/langchain/llms/promptguard.py      |  4 ++--
 libs/langchain/langchain/utilities/promptguard.py | 10 ++++------
 2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/libs/langchain/langchain/llms/promptguard.py b/libs/langchain/langchain/llms/promptguard.py
index 9f99ff4019..9dcdfcb6af 100644
--- a/libs/langchain/langchain/llms/promptguard.py
+++ b/libs/langchain/langchain/llms/promptguard.py
@@ -90,8 +90,8 @@ class PromptGuard(LLM):
         _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
 
         # sanitize the prompt by replacing the sensitive information with a placeholder
-        sanitize_response: pg.SanitizeResponse = pg.sanitize(prompt)
-        sanitized_prompt_value_str = sanitize_response.sanitized_text
+        sanitize_response: pg.SanitizeResponse = pg.sanitize([prompt])
+        sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]
 
         # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
         # call the LLM with the sanitized prompt and get the response
diff --git a/libs/langchain/langchain/utilities/promptguard.py b/libs/langchain/langchain/utilities/promptguard.py
index e89d3b690e..df29cafa4c 100644
--- a/libs/langchain/langchain/utilities/promptguard.py
+++ b/libs/langchain/langchain/utilities/promptguard.py
@@ -1,4 +1,3 @@
-import json
 from typing import Dict, Union
 
 
@@ -41,9 +40,9 @@ def sanitize(
 
     if isinstance(input, str):
         # the input could be a string, so we sanitize the string
-        sanitize_response: pg.SanitizeResponse = pg.sanitize(input)
+        sanitize_response: pg.SanitizeResponse = pg.sanitize([input])
         return {
-            "sanitized_input": sanitize_response.sanitized_text,
+            "sanitized_input": sanitize_response.sanitized_texts[0],
             "secure_context": sanitize_response.secure_context,
         }
 
@@ -54,13 +53,12 @@
         # get the values from the dict
         for key in input:
             values.append(input[key])
-        input_value_str = json.dumps(values)
 
         # sanitize the values
-        sanitize_values_response: pg.SanitizeResponse = pg.sanitize(input_value_str)
+        sanitize_values_response: pg.SanitizeResponse = pg.sanitize(values)
 
         # reconstruct the dict with the sanitized values
-        sanitized_input_values = json.loads(sanitize_values_response.sanitized_text)
+        sanitized_input_values = sanitize_values_response.sanitized_texts
         idx = 0
         sanitized_input = dict()
         for key in input:
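
Usage note (not part of the patch): the hunks above switch pg.sanitize from
a single string to a batched list-of-strings call. Below is a minimal sketch
of the new call shape, assuming the promptguard package exposes sanitize and
SanitizeResponse with sanitized_texts and secure_context exactly as the hunks
use them; the sample inputs are made up for illustration.

    import promptguard as pg

    # Hypothetical inputs; any list of strings works here.
    prompts = [
        "My phone number is 555-0100.",
        "Reach me at alice@example.com.",
    ]

    # One call now sanitizes the whole batch; sanitized_texts lines up
    # index-for-index with the input list (hence sanitized_texts[0] for
    # the single-string paths in the hunks above).
    response: pg.SanitizeResponse = pg.sanitize(prompts)
    for raw, clean in zip(prompts, response.sanitized_texts):
        print(f"{raw!r} -> {clean!r}")

    # secure_context is still returned once per call and is what the LLM
    # wrapper keeps around to de-sanitize the model's response later.
    assert response.secure_context is not None

Batching the dict branch this way also drops the json.dumps/json.loads
round-trip, since the values list maps directly onto sanitized_texts.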