class ModerationPiiError(Exception):
    """Exception raised if PII entities are detected.

    Attributes:
        message -- explanation of the error
    """

    def __init__(
        self, message: str = "The prompt contains PII entities and cannot be processed"
    ):
        self.message = message
        super().__init__(self.message)


class ModerationToxicityError(Exception):
    """Exception raised if Toxic entities are detected.

    Attributes:
        message -- explanation of the error
    """

    def __init__(
        self, message: str = "The prompt contains toxic content and cannot be processed"
    ):
        self.message = message
        super().__init__(self.message)


class ModerationIntentionError(Exception):
    """Exception raised if Intention entities are detected.

    Attributes:
        message -- explanation of the error
    """

    def __init__(
        self,
        message: str = (
            "The prompt indicates an un-desired intent and cannot be processed"
        ),
    ):
        self.message = message
        super().__init__(self.message)
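

if __name__ == "__main__":
    # Minimal illustrative sketch, not part of the module's public surface:
    # a moderation step that detects PII would raise ModerationPiiError, and a
    # caller can catch it (or the other exception types above) to stop
    # processing the prompt. The `message` attribute carries the
    # human-readable explanation.
    try:
        raise ModerationPiiError()
    except ModerationPiiError as exc:
        print(exc.message)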