d57d08fd01
This PR implements a custom chain that wraps Amazon Comprehend API calls. The custom chain is intended to be used with LLM chains to provide a moderation capability that lets you detect and redact PII, toxicity, and intent content in the LLM prompt or the LLM response. The implementation accepts a configuration object that controls which checks are performed on an LLM prompt, and it can be used in a variety of setups with the LangChain Expression Language (LCEL), not only to detect the configured content in chains but also in other constructs such as a retriever (a sketch follows the usage sample below). The included sample notebook goes over the different configuration options and how to use this chain with other chains.

### Usage sample

```python
import boto3

from langchain.chains import LLMChain
from langchain.llms.fake import FakeListLLM
from langchain.prompts import PromptTemplate
from langchain_experimental.comprehend_moderation import (
    AmazonComprehendModerationChain,
    BaseModerationActions,
    BaseModerationFilters,
)

# Boto3 Comprehend client; the region here is only an example.
comprehend_client = boto3.client("comprehend", region_name="us-east-1")

moderation_config = {
    "filters": [
        BaseModerationFilters.PII,
        BaseModerationFilters.TOXICITY,
        BaseModerationFilters.INTENT,
    ],
    "pii": {
        "action": BaseModerationActions.ALLOW,
        "threshold": 0.5,
        "labels": ["SSN"],
        "mask_character": "X",
    },
    "toxicity": {"action": BaseModerationActions.STOP, "threshold": 0.5},
    "intent": {"action": BaseModerationActions.STOP, "threshold": 0.5},
}

comp_moderation_with_config = AmazonComprehendModerationChain(
    moderation_config=moderation_config,  # specify the configuration
    client=comprehend_client,  # optionally pass the Boto3 client
    verbose=True,
)

template = """Question: {question}

Answer:"""

prompt = PromptTemplate(template=template, input_variables=["question"])

responses = [
    "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
    "Final Answer: This is a really shitty way of constructing a birdhouse. This is fucking insane to think that any birds would actually create their motherfucking nests here.",
]

llm = FakeListLLM(responses=responses)
llm_chain = LLMChain(prompt=prompt, llm=llm)

chain = (
    prompt
    | comp_moderation_with_config
    | {llm_chain.input_keys[0]: lambda x: x["output"]}
    | llm_chain
    | {"input": lambda x: x["text"]}
    | comp_moderation_with_config
)

response = chain.invoke(
    {
        "question": "A sample SSN number looks like this 123-456-7890. "
        "Can you give me some more samples?"
    }
)
print(response["output"])
```

### Output

```
> Entering new AmazonComprehendModerationChain chain...
Running AmazonComprehendModerationChain...
Running pii validation...
Found PII content..stopping..
The prompt contains PII entities and cannot be processed
```

---------

Co-authored-by: Piyush Jain <piyushjain@duck.com>
Co-authored-by: Anjan Biswas <anjanavb@amazon.com>
Co-authored-by: Jha <nikjha@amazon.com>
Co-authored-by: Bagatur <baskaryan@gmail.com>
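As noted in the description, the moderation chain can also sit behind other LCEL constructs such as a retriever. The following is a hypothetical sketch only, not part of this PR: `retriever` is assumed to be any LangChain retriever, and `format_docs` is an assumed helper that joins retrieved documents into a single string.

```python
# Hypothetical sketch: moderate retrieved context before it reaches an LLM.
# `retriever` is an assumed LangChain retriever; `format_docs` is an assumed helper.
def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

moderated_retrieval = (
    # Retrieved text becomes the "input" key the moderation chain expects.
    {"input": lambda query: format_docs(retriever.get_relevant_documents(query))}
    | comp_moderation_with_config  # stops or redacts per moderation_config
)

# Invoked with a query string; raises if the retrieved context trips a filter.
safe_context = moderated_retrieval.invoke("What is John Doe's SSN?")
```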
174 lines · 6.6 KiB · Python
import asyncio
from typing import Any, Dict, Optional

from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
    ModerationPiiError,
)


class ComprehendPII:
    def __init__(
        self,
        client: Any,
        callback: Optional[Any] = None,
        unique_id: Optional[str] = None,
        chain_id: Optional[str] = None,
    ) -> None:
        self.client = client
        # Payload reported to the moderation callback, if one is registered.
        self.moderation_beacon = {
            "moderation_chain_id": chain_id,
            "moderation_type": "PII",
            "moderation_status": "LABELS_NOT_FOUND",
        }
        self.callback = callback
        self.unique_id = unique_id

    def validate(
        self, prompt_value: str, config: Optional[Dict[str, Any]] = None
    ) -> str:
        # Local import of the action enum used in the moderation config.
        from langchain_experimental.comprehend_moderation.base_moderation_enums import (
            BaseModerationActions,
        )

        if config:
            action = config.get("action", BaseModerationActions.STOP)
            if action not in [BaseModerationActions.STOP, BaseModerationActions.ALLOW]:
                raise ValueError("Action can either be stop or allow")

            # STOP runs a presence check and raises on a match; ALLOW runs
            # full entity detection and redacts matches instead.
            return (
                self._contains_pii(prompt_value=prompt_value, config=config)
                if action == BaseModerationActions.STOP
                else self._detect_pii(prompt_value=prompt_value, config=config)
            )
        else:
            return self._contains_pii(prompt_value=prompt_value)

    def _contains_pii(
        self, prompt_value: str, config: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Checks for Personally Identifiable Information (PII) labels above a
        specified threshold.

        Args:
            prompt_value (str): The input text to be checked for PII labels.
            config (Dict[str, Any]): Configuration for PII check and actions.

        Returns:
            str: the original prompt

        Raises:
            ModerationPiiError: If PII labels are found at or above the
                configured threshold.

        Note:
            - The provided client should be initialized with valid AWS credentials.
        """
        pii_identified = self.client.contains_pii_entities(
            Text=prompt_value, LanguageCode="en"
        )

        if self.callback and self.callback.pii_callback:
            self.moderation_beacon["moderation_input"] = prompt_value
            self.moderation_beacon["moderation_output"] = pii_identified

        threshold = config.get("threshold", 0.5) if config else 0.5
        pii_labels = config.get("labels", []) if config else []
        pii_found = False
        for entity in pii_identified["Labels"]:
            # A label counts as a hit if it clears the threshold and either
            # matches a configured label or no labels were configured.
            if entity["Score"] >= threshold and (
                not pii_labels or entity["Name"] in pii_labels
            ):
                pii_found = True
                break

        if self.callback and self.callback.pii_callback:
            if pii_found:
                self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
            asyncio.create_task(
                self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
            )
        if pii_found:
            raise ModerationPiiError
        return prompt_value

    def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
        """
        Detects and handles Personally Identifiable Information (PII) entities in
        the given prompt text using Amazon Comprehend's detect_pii_entities API.
        The function provides options to redact or stop processing based on the
        identified PII entities and a provided configuration.

        Args:
            prompt_value (str): The input text to be checked for PII entities.
            config (Dict[str, Any]): A configuration specifying how to handle
                PII entities.

        Returns:
            str: The prompt text with PII entities redacted, or the original
                prompt if none are found.

        Raises:
            ModerationPiiError: If PII entities are found and no redaction
                configuration is provided.

        Note:
            - If PII is not found in the prompt, the original prompt is returned.
            - The client should be initialized with valid AWS credentials.
        """
        pii_identified = self.client.detect_pii_entities(
            Text=prompt_value, LanguageCode="en"
        )

        if self.callback and self.callback.pii_callback:
            self.moderation_beacon["moderation_input"] = prompt_value
            self.moderation_beacon["moderation_output"] = pii_identified

        if not pii_identified["Entities"]:
            if self.callback and self.callback.pii_callback:
                asyncio.create_task(
                    self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
                )
            return prompt_value

        pii_found = False
        if not config and pii_identified["Entities"]:
            for entity in pii_identified["Entities"]:
                if entity["Score"] >= 0.5:
                    pii_found = True
                    break

            if self.callback and self.callback.pii_callback:
                if pii_found:
                    self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
                asyncio.create_task(
                    self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
                )
            if pii_found:
                raise ModerationPiiError
        else:
            threshold = config.get("threshold", 0.5)  # type: ignore
            pii_labels = config.get("labels", [])  # type: ignore
            mask_marker = config.get("mask_character", "*")  # type: ignore
            pii_found = False

            for entity in pii_identified["Entities"]:
                if (
                    pii_labels
                    and entity["Type"] in pii_labels
                    and entity["Score"] >= threshold
                ) or (not pii_labels and entity["Score"] >= threshold):
                    pii_found = True
                    char_offset_begin = entity["BeginOffset"]
                    char_offset_end = entity["EndOffset"]
                    # Mask the matched span in place; the replacement has the
                    # same length, so later entity offsets remain valid.
                    prompt_value = (
                        prompt_value[:char_offset_begin]
                        + mask_marker * (char_offset_end - char_offset_begin)
                        + prompt_value[char_offset_end:]
                    )

            if self.callback and self.callback.pii_callback:
                if pii_found:
                    self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
                asyncio.create_task(
                    self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
                )

        return prompt_value
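For reference, here is a minimal sketch of exercising `ComprehendPII` directly, outside the chain. The import path for this file and the region are assumptions (the module path is not shown on this page), and AWS credentials are presumed to be configured.

```python
import boto3

# Assumed module path for the file above; adjust to where it actually lives.
from langchain_experimental.comprehend_moderation.pii import ComprehendPII
from langchain_experimental.comprehend_moderation import BaseModerationActions
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
    ModerationPiiError,
)

# Example region; Comprehend must be available in the chosen region.
client = boto3.client("comprehend", region_name="us-east-1")
pii_check = ComprehendPII(client=client)

# Without a config, validate() runs the presence check and raises on a hit.
try:
    pii_check.validate("John Doe's SSN is 323-22-9980.")
except ModerationPiiError:
    print("PII detected; prompt rejected")

# With an ALLOW action, matching entities are masked instead of rejected.
redacted = pii_check.validate(
    "John Doe's SSN is 323-22-9980.",
    config={
        "action": BaseModerationActions.ALLOW,
        "threshold": 0.5,
        "mask_character": "X",
    },
)
print(redacted)  # detected entity spans replaced with "X"
```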