mirror of
https://github.com/hwchase17/langchain
synced 2024-11-10 01:10:59 +00:00
a0c2281540
```python """python scripts/update_mypy_ruff.py""" import glob import tomllib from pathlib import Path import toml import subprocess import re ROOT_DIR = Path(__file__).parents[1] def main(): for path in glob.glob(str(ROOT_DIR / "libs/**/pyproject.toml"), recursive=True): print(path) with open(path, "rb") as f: pyproject = tomllib.load(f) try: pyproject["tool"]["poetry"]["group"]["typing"]["dependencies"]["mypy"] = ( "^1.10" ) pyproject["tool"]["poetry"]["group"]["lint"]["dependencies"]["ruff"] = ( "^0.5" ) except KeyError: continue with open(path, "w") as f: toml.dump(pyproject, f) cwd = "/".join(path.split("/")[:-1]) completed = subprocess.run( "poetry lock --no-update; poetry install --with typing; poetry run mypy . --no-color", cwd=cwd, shell=True, capture_output=True, text=True, ) logs = completed.stdout.split("\n") to_ignore = {} for l in logs: if re.match("^(.*)\:(\d+)\: error:.*\[(.*)\]", l): path, line_no, error_type = re.match( "^(.*)\:(\d+)\: error:.*\[(.*)\]", l ).groups() if (path, line_no) in to_ignore: to_ignore[(path, line_no)].append(error_type) else: to_ignore[(path, line_no)] = [error_type] print(len(to_ignore)) for (error_path, line_no), error_types in to_ignore.items(): all_errors = ", ".join(error_types) full_path = f"{cwd}/{error_path}" try: with open(full_path, "r") as f: file_lines = f.readlines() except FileNotFoundError: continue file_lines[int(line_no) - 1] = ( file_lines[int(line_no) - 1][:-1] + f" # type: ignore[{all_errors}]\n" ) with open(full_path, "w") as f: f.write("".join(file_lines)) subprocess.run( "poetry run ruff format .; poetry run ruff --select I --fix .", cwd=cwd, shell=True, capture_output=True, text=True, ) if __name__ == "__main__": main() ```
107 lines
3.4 KiB
Python
107 lines
3.4 KiB
Python
import logging
|
|
from typing import Any, Callable, Dict, List, Optional
|
|
|
|
from langchain_core.callbacks import CallbackManagerForLLMRun
|
|
from langchain_core.language_models.llms import LLM
|
|
from langchain_core.pydantic_v1 import root_validator
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
def default_guardrail_violation_handler(violation: dict) -> str:
    """Fallback handler invoked when a guardrail is violated.

    Returns the violation's ``canned_response`` verbatim when one is
    present (and non-empty); otherwise raises, naming the offending
    guardrail when the payload identifies it.

    Args:
        violation (dict): The violation dictionary.

    Returns:
        str: The canned response.

    Raises:
        ValueError: If the violation carries no canned response.
    """
    canned = violation.get("canned_response")
    if canned:
        return canned
    offender = violation.get("offending_guardrail")
    guardrail_name = f"Guardrail {offender}" if offender else "A guardrail"
    raise ValueError(
        f"{guardrail_name} was violated without a proper guardrail violation handler."
    )
|
|
|
|
|
|
class LayerupSecurity(LLM):
    """Layerup Security LLM service.

    Wraps another :class:`LLM` and screens traffic through Layerup
    guardrails: the prompt is checked (and optionally masked) before it
    reaches the wrapped model, and the response is checked before it is
    returned to the caller. A guardrail violation is routed to the
    configured handler instead of the model output.
    """

    # The wrapped LLM that actually generates completions.
    llm: LLM
    # Credentials and endpoint for the Layerup Security API.
    layerup_api_key: str
    layerup_api_base_url: str = "https://api.uselayerup.com/v1"
    # Guardrails run against the prompt before calling the wrapped LLM.
    prompt_guardrails: Optional[List[str]] = []
    # Guardrails run against the LLM response before returning it.
    response_guardrails: Optional[List[str]] = []
    # When True, sensitive content is masked in the prompt and unmasked
    # in the response via the SDK's mask_prompt round-trip.
    mask: bool = False
    # Arbitrary metadata forwarded with every Layerup API call.
    metadata: Optional[Dict[str, Any]] = {}
    # Called with the violation payload when a prompt guardrail fails;
    # its return value is surfaced to the caller in place of an LLM reply.
    handle_prompt_guardrail_violation: Callable[[dict], str] = (
        default_guardrail_violation_handler
    )
    # Same contract as above, for response guardrail failures.
    handle_response_guardrail_violation: Callable[[dict], str] = (
        default_guardrail_violation_handler
    )
    client: Any  #: :meta private:

    @root_validator(pre=True)
    def validate_layerup_sdk(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Instantiate the Layerup SDK client from the configured credentials.

        Raises:
            ImportError: If the ``layerup_security`` package is not installed.
        """
        try:
            from layerup_security import LayerupSecurity as LayerupSecuritySDK

            values["client"] = LayerupSecuritySDK(
                api_key=values["layerup_api_key"],
                base_url=values["layerup_api_base_url"],
            )
        except ImportError:
            raise ImportError(
                "Could not import LayerupSecurity SDK. "
                "Please install it with `pip install LayerupSecurity`."
            )
        return values

    @property
    def _llm_type(self) -> str:
        """Return the identifier used by LangChain for this LLM type."""
        return "layerup_security"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Run guardrail-screened generation through the wrapped LLM.

        Args:
            prompt: The user prompt to send to the wrapped LLM.
            stop: Optional stop sequences, forwarded to the wrapped LLM.
            run_manager: Callback manager forwarded to the wrapped LLM.
            **kwargs: Extra keyword arguments forwarded to the wrapped LLM.

        Returns:
            The (possibly unmasked) model response, or the violation
            handler's output if a guardrail was triggered.
        """
        messages = [{"role": "user", "content": prompt}]
        unmask_response = None

        if self.mask:
            # mask_prompt returns the masked messages plus a callable that
            # restores masked tokens in the eventual response.
            messages, unmask_response = self.client.mask_prompt(messages, self.metadata)

        if self.prompt_guardrails:
            security_response = self.client.execute_guardrails(
                self.prompt_guardrails, messages, prompt, self.metadata
            )
            if not security_response["all_safe"]:
                return self.handle_prompt_guardrail_violation(security_response)

        # Use the (possibly masked) message content, not the raw prompt.
        # Fix: `stop` was previously accepted but never forwarded, so stop
        # sequences were silently ignored by the wrapped LLM.
        result = self.llm._call(
            messages[0]["content"], stop=stop, run_manager=run_manager, **kwargs
        )

        if self.mask and unmask_response:
            result = unmask_response(result)

        messages.append({"role": "assistant", "content": result})

        if self.response_guardrails:
            security_response = self.client.execute_guardrails(
                self.response_guardrails, messages, result, self.metadata
            )
            if not security_response["all_safe"]:
                return self.handle_response_guardrail_violation(security_response)

        return result
|