diff --git a/docs/docs/guides/safety/index.mdx b/docs/docs/guides/safety/index.mdx
index b5d047d771..644c2cc4ca 100644
--- a/docs/docs/guides/safety/index.mdx
+++ b/docs/docs/guides/safety/index.mdx
@@ -5,5 +5,6 @@ One of the key concerns with using LLMs is that they may generate harmful or une
 - [Amazon Comprehend moderation chain](/docs/guides/safety/amazon_comprehend_chain): Use [Amazon Comprehend](https://aws.amazon.com/comprehend/) to detect and handle Personally Identifiable Information (PII) and toxicity.
 - [Constitutional chain](/docs/guides/safety/constitutional_chain): Prompt the model with a set of principles which should guide the model behavior.
 - [Hugging Face prompt injection identification](/docs/guides/safety/hugging_face_prompt_injection): Detect and handle prompt injection attacks.
+- [Layerup Security](/docs/guides/safety/layerup_security): Easily mask PII & sensitive data, and detect and mitigate 10+ LLM-based threat vectors, including prompt injection, hallucination, abuse, and more.
 - [Logical Fallacy chain](/docs/guides/safety/logical_fallacy_chain): Checks the model output against logical fallacies to correct any deviation.
 - [Moderation chain](/docs/guides/safety/moderation): Check if any output text is harmful and flag it.
diff --git a/docs/docs/guides/safety/layerup_security.mdx b/docs/docs/guides/safety/layerup_security.mdx
new file mode 100644
index 0000000000..6beee53209
--- /dev/null
+++ b/docs/docs/guides/safety/layerup_security.mdx
@@ -0,0 +1,86 @@
+# Layerup Security
+
+The [Layerup Security](https://uselayerup.com) integration allows you to secure your calls to any LangChain LLM, LLM chain, or LLM agent. The Layerup Security object wraps around any existing LLM object, adding a secure layer between your users and your LLMs.
+
+While the Layerup Security object is designed as an LLM, it is not actually an LLM itself; it simply wraps around an LLM, so it provides the same functionality as the underlying LLM.
+
+## Setup
+First, you'll need a Layerup Security account from the Layerup [website](https://uselayerup.com).
+
+Next, create a project via the [dashboard](https://dashboard.uselayerup.com), and copy your API key. We recommend storing your API key in an environment variable in your project.
+
+Install the Layerup Security SDK:
+```bash
+pip install LayerupSecurity
+```
+
+And install LangChain Community:
+```bash
+pip install langchain-community
+```
+
+And now you're ready to start protecting your LLM calls with Layerup Security!
+
+```python
+from datetime import datetime
+from langchain_community.llms.layerup_security import LayerupSecurity
+from langchain_openai import OpenAI
+
+# Create an instance of your favorite LLM
+openai = OpenAI(
+    model_name="gpt-3.5-turbo",
+    openai_api_key="OPENAI_API_KEY",
+)
+
+# Configure Layerup Security
+layerup_security = LayerupSecurity(
+    # Specify an LLM that Layerup Security will wrap around
+    llm=openai,
+
+    # Layerup API key, from the Layerup dashboard
+    layerup_api_key="LAYERUP_API_KEY",
+
+    # Custom base URL, if self-hosting
+    layerup_api_base_url="https://api.uselayerup.com/v1",
+
+    # List of guardrails to run on prompts before the LLM is invoked
+    prompt_guardrails=[],
+
+    # List of guardrails to run on responses from the LLM
+    response_guardrails=["layerup.hallucination"],
+
+    # Whether or not to mask the prompt for PII & sensitive data before it is sent to the LLM
+    mask=False,
+
+    # Metadata for abuse tracking, customer tracking, and scope tracking.
+    metadata={"customer": "example@uselayerup.com"},
+
+    # Handler for guardrail violations on the prompt guardrails
+    handle_prompt_guardrail_violation=(
+        lambda violation: {
+            "role": "assistant",
+            "content": (
+                "There was sensitive data! I cannot respond. "
+                "Here's a dynamic canned response. Current date: {}"
+            ).format(datetime.now())
+        }
+        if violation["offending_guardrail"] == "layerup.sensitive_data"
+        else None
+    ),
+
+    # Handler for guardrail violations on the response guardrails
+    handle_response_guardrail_violation=(
+        lambda violation: {
+            "role": "assistant",
+            "content": (
+                "Custom canned response with dynamic data! "
+                "The violation rule was {}."
+            ).format(violation["offending_guardrail"])
+        }
+    ),
+)
+
+response = layerup_security.invoke(
+    "Summarize this message: my name is Bob Dylan. My SSN is 123-45-6789."
+)
+```
\ No newline at end of file
diff --git a/docs/docs/integrations/llms/layerup_security.mdx b/docs/docs/integrations/llms/layerup_security.mdx
new file mode 100644
index 0000000000..6beee53209
--- /dev/null
+++ b/docs/docs/integrations/llms/layerup_security.mdx
@@ -0,0 +1,86 @@
+# Layerup Security
+
+The [Layerup Security](https://uselayerup.com) integration allows you to secure your calls to any LangChain LLM, LLM chain, or LLM agent. The Layerup Security object wraps around any existing LLM object, adding a secure layer between your users and your LLMs.
+
+While the Layerup Security object is designed as an LLM, it is not actually an LLM itself; it simply wraps around an LLM, so it provides the same functionality as the underlying LLM.
+
+## Setup
+First, you'll need a Layerup Security account from the Layerup [website](https://uselayerup.com).
+
+Next, create a project via the [dashboard](https://dashboard.uselayerup.com), and copy your API key. We recommend storing your API key in an environment variable in your project.
+
+Install the Layerup Security SDK:
+```bash
+pip install LayerupSecurity
+```
+
+And install LangChain Community:
+```bash
+pip install langchain-community
+```
+
+And now you're ready to start protecting your LLM calls with Layerup Security!
+
+```python
+from datetime import datetime
+from langchain_community.llms.layerup_security import LayerupSecurity
+from langchain_openai import OpenAI
+
+# Create an instance of your favorite LLM
+openai = OpenAI(
+    model_name="gpt-3.5-turbo",
+    openai_api_key="OPENAI_API_KEY",
+)
+
+# Configure Layerup Security
+layerup_security = LayerupSecurity(
+    # Specify an LLM that Layerup Security will wrap around
+    llm=openai,
+
+    # Layerup API key, from the Layerup dashboard
+    layerup_api_key="LAYERUP_API_KEY",
+
+    # Custom base URL, if self-hosting
+    layerup_api_base_url="https://api.uselayerup.com/v1",
+
+    # List of guardrails to run on prompts before the LLM is invoked
+    prompt_guardrails=[],
+
+    # List of guardrails to run on responses from the LLM
+    response_guardrails=["layerup.hallucination"],
+
+    # Whether or not to mask the prompt for PII & sensitive data before it is sent to the LLM
+    mask=False,
+
+    # Metadata for abuse tracking, customer tracking, and scope tracking.
+    metadata={"customer": "example@uselayerup.com"},
+
+    # Handler for guardrail violations on the prompt guardrails
+    handle_prompt_guardrail_violation=(
+        lambda violation: {
+            "role": "assistant",
+            "content": (
+                "There was sensitive data! I cannot respond. "
+                "Here's a dynamic canned response. Current date: {}"
+            ).format(datetime.now())
+        }
+        if violation["offending_guardrail"] == "layerup.sensitive_data"
+        else None
+    ),
+
+    # Handler for guardrail violations on the response guardrails
+    handle_response_guardrail_violation=(
+        lambda violation: {
+            "role": "assistant",
+            "content": (
+                "Custom canned response with dynamic data! "
+                "The violation rule was {}."
+            ).format(violation["offending_guardrail"])
+        }
+    ),
+)
+
+response = layerup_security.invoke(
+    "Summarize this message: my name is Bob Dylan. My SSN is 123-45-6789."
+)
+```
\ No newline at end of file
diff --git a/libs/community/langchain_community/llms/layerup_security.py b/libs/community/langchain_community/llms/layerup_security.py
new file mode 100644
index 0000000000..6faf14e1e3
--- /dev/null
+++ b/libs/community/langchain_community/llms/layerup_security.py
@@ -0,0 +1,96 @@
+import logging
+from typing import Any, Callable, Dict, List, Optional
+
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.language_models.llms import LLM
+from langchain_core.pydantic_v1 import root_validator
+
+logger = logging.getLogger(__name__)
+
+
+def default_guardrail_violation_handler(violation: dict) -> str:
+    if violation.get("canned_response"):
+        return violation["canned_response"]
+    guardrail_name = (
+        f"Guardrail {violation.get('offending_guardrail')}"
+        if violation.get("offending_guardrail")
+        else "A guardrail"
+    )
+    raise ValueError(
+        f"{guardrail_name} was violated without a proper guardrail violation handler."
+    )
+
+
+class LayerupSecurity(LLM):
+    llm: LLM
+    layerup_api_key: str
+    layerup_api_base_url: str = "https://api.uselayerup.com/v1"
+    prompt_guardrails: Optional[List[str]] = []
+    response_guardrails: Optional[List[str]] = []
+    mask: bool = False
+    metadata: Optional[Dict[str, Any]] = {}
+    handle_prompt_guardrail_violation: Callable[
+        [dict], str
+    ] = default_guardrail_violation_handler
+    handle_response_guardrail_violation: Callable[
+        [dict], str
+    ] = default_guardrail_violation_handler
+    client: Any  #: :meta private:
+
+    @root_validator(pre=True)
+    def validate_layerup_sdk(cls, values: Dict[str, Any]) -> Dict[str, Any]:
+        try:
+            from layerup_security import LayerupSecurity as LayerupSecuritySDK
+
+            values["client"] = LayerupSecuritySDK(
+                api_key=values["layerup_api_key"],
+                base_url=values["layerup_api_base_url"],
+            )
+        except ImportError:
+            raise ImportError(
+                "Could not import LayerupSecurity SDK. "
+                "Please install it with `pip install LayerupSecurity`."
+            )
+        return values
+
+    @property
+    def _llm_type(self) -> str:
+        return "layerup_security"
+
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> str:
+        messages = [{"role": "user", "content": prompt}]
+        unmask_response = None
+
+        if self.mask:
+            messages, unmask_response = self.client.mask_prompt(messages, self.metadata)
+
+        if self.prompt_guardrails:
+            security_response = self.client.execute_guardrails(
+                self.prompt_guardrails, messages, self.metadata
+            )
+            if not security_response["all_safe"]:
+                return self.handle_prompt_guardrail_violation(security_response)
+
+        result = self.llm._call(
+            messages[0]["content"], run_manager=run_manager, **kwargs
+        )
+
+        if self.mask and unmask_response:
+            result = unmask_response(result)
+
+        messages.append({"role": "assistant", "content": result})
+
+        if self.response_guardrails:
+            security_response = self.client.execute_guardrails(
+                self.response_guardrails, messages, self.metadata
+            )
+            if not security_response["all_safe"]:
+                return self.handle_response_guardrail_violation(security_response)
+
+        return result
diff --git a/libs/community/tests/integration_tests/llms/test_layerup_security.py b/libs/community/tests/integration_tests/llms/test_layerup_security.py
new file mode 100644
index 0000000000..91176a13a8
--- /dev/null
+++ b/libs/community/tests/integration_tests/llms/test_layerup_security.py
@@ -0,0 +1,44 @@
+from typing import Any, List, Optional
+
+import pytest
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.language_models.llms import LLM
+
+from langchain_community.llms.layerup_security import LayerupSecurity
+
+
+class MockLLM(LLM):
+    @property
+    def _llm_type(self) -> str:
+        return "mock_llm"
+
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> str:
+        return "Hi Bob! How are you?"
+
+
+def test_layerup_security_with_invalid_api_key() -> None:
+    mock_llm = MockLLM()
+    layerup_security = LayerupSecurity(
+        llm=mock_llm,
+        layerup_api_key="-- invalid API key --",
+        layerup_api_base_url="https://api.uselayerup.com/v1",
+        prompt_guardrails=[],
+        response_guardrails=["layerup.hallucination"],
+        mask=False,
+        metadata={"customer": "example@uselayerup.com"},
+        handle_response_guardrail_violation=(
+            lambda violation: (
+                "Custom canned response with dynamic data! "
+                "The violation rule was {offending_guardrail}."
+            ).format(offending_guardrail=violation["offending_guardrail"])
+        ),
+    )
+
+    with pytest.raises(Exception):
+        layerup_security.invoke("My name is Bob Dylan. My SSN is 123-45-6789.")
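As a quick illustration of the new wrapper in use (a minimal sketch, not part of the diff above): because `LayerupSecurity` implements the standard LangChain `LLM` interface, it can be composed into an LCEL chain like any other LLM. The snippet assumes the `layerup_security` object configured in the documentation example, plus valid OpenAI and Layerup API keys; the prompt template text is illustrative only.

```python
from langchain_core.prompts import PromptTemplate

# Hypothetical chain built on top of the `layerup_security` wrapper from the docs
# example above. Guardrails (and masking, if enabled) run on every call the chain
# makes through the wrapped LLM.
prompt = PromptTemplate.from_template("Summarize this message: {message}")
chain = prompt | layerup_security

result = chain.invoke({"message": "My name is Bob Dylan. My SSN is 123-45-6789."})
print(result)
```

The same pattern extends to chains and agents that accept an LLM, since the wrapper only adds the guardrail and masking steps around the underlying model's `_call`.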