langchain/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation_exceptions.py

class ModerationPiiError(Exception):
    """Exception raised if PII entities are detected.

    Attributes:
        message -- explanation of the error
    """

    def __init__(
        self, message: str = "The prompt contains PII entities and cannot be processed"
    ):
        self.message = message
        super().__init__(self.message)


class ModerationToxicityError(Exception):
    """Exception raised if toxic content is detected.

    Attributes:
        message -- explanation of the error
    """

    def __init__(
        self, message: str = "The prompt contains toxic content and cannot be processed"
    ):
        self.message = message
        super().__init__(self.message)


class ModerationPromptSafetyError(Exception):
    """Exception raised if an unsafe prompt is detected.

    Attributes:
        message -- explanation of the error
    """

    def __init__(
        self,
        message: str = "The prompt is unsafe and cannot be processed",
    ):
        self.message = message
        super().__init__(self.message)
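

if __name__ == "__main__":
    # Usage sketch (illustrative only, not part of the library's API): how a
    # caller might handle these moderation errors. The _moderate helper below
    # is hypothetical; in practice these exceptions are raised by the Amazon
    # Comprehend moderation chain in this package.
    def _moderate(prompt: str) -> str:
        """Hypothetical moderation step that raises on problematic prompts."""
        if "123-45-6789" in prompt:
            raise ModerationPiiError()
        return prompt

    try:
        _moderate("My SSN is 123-45-6789")
    except (
        ModerationPiiError,
        ModerationToxicityError,
        ModerationPromptSafetyError,
    ) as err:
        # Each exception stores its explanation on the `message` attribute.
        print(f"Prompt blocked: {err.message}")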