langchain/libs/community/langchain_community/llms/opaqueprompts.py
Eugene Yurtsev bf5193bb99
community[patch]: Upgrade pydantic extra (#25185)
Upgrade to using a literal for specifying `extra`, which is the recommended approach in pydantic 2.

This also works correctly in pydantic v1.

```python
from pydantic.v1 import BaseModel

class Foo(BaseModel, extra="forbid"):
    x: int

Foo(x=5, y=1)
```

And the equivalent `Config`-class form:

```python
from pydantic.v1 import BaseModel

class Foo(BaseModel):
    x: int

    class Config:
      extra = "forbid"

Foo(x=5, y=1)
```
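
For reference, the literal form is also the native pydantic v2 spelling. A minimal check (assuming pydantic >= 2 is installed), where the extra field `y` triggers a `ValidationError`:

```python
from pydantic import BaseModel, ValidationError


class Foo(BaseModel, extra="forbid"):
    x: int


try:
    Foo(x=5, y=1)  # the extra field `y` is rejected
except ValidationError as e:
    print(e)
```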


## Enum -> literal using grit pattern:

```
engine marzano(0.1)
language python
or {
    `extra=Extra.allow` => `extra="allow"`,
    `extra=Extra.forbid` => `extra="forbid"`,
    `extra=Extra.ignore` => `extra="ignore"`
}
```
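
Illustrative before/after of what this rewrite does to a typical `Config` class (the model names below are made up for the sketch, not taken from this file):

```python
from pydantic.v1 import BaseModel, Extra


# before the migration: Enum-based spelling
class Before(BaseModel):
    x: int

    class Config:
        extra = Extra.forbid


# after the migration: same behavior, string literal instead of the Extra enum
class After(BaseModel):
    x: int

    class Config:
        extra = "forbid"
```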

Re-sorted the attributes in `Config` and removed the doc-string, in case we need to go back and forth between pydantic v1 and v2 during the 0.3 release. (This will reduce merge conflicts.)


## Sort attributes in Config:

```
engine marzano(0.1)
language python


function sort($values) js {
    return $values.text.split(',').sort().join("\n");
}


class_definition($name, $body) as $C where {
    $name <: `Config`,
    $body <: block($statements),
    $values = [],
    $statements <: some bubble($values) assignment() as $A where {
        $values += $A
    },
    $body => sort($values),
}

```
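
A sketch of the effect (the models and config attributes below are hypothetical examples): the assignments inside `Config` end up in alphabetical order.

```python
from pydantic.v1 import BaseModel


# before sorting: Config assignments in arbitrary order
class FooBefore(BaseModel):
    x: int

    class Config:
        extra = "forbid"
        arbitrary_types_allowed = True
        allow_population_by_field_name = True


# after sorting: the same assignments, ordered alphabetically
class FooAfter(BaseModel):
    x: int

    class Config:
        allow_population_by_field_name = True
        arbitrary_types_allowed = True
        extra = "forbid"
```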
2024-08-08 17:20:39 +00:00


import logging
from typing import Any, Dict, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import LLM
from langchain_core.messages import AIMessage
from langchain_core.utils import get_from_dict_or_env, pre_init

logger = logging.getLogger(__name__)


class OpaquePrompts(LLM):
    """LLM that uses OpaquePrompts to sanitize prompts.

    Wraps another LLM and sanitizes prompts before passing it to the LLM, then
    de-sanitizes the response.

    To use, you should have the ``opaqueprompts`` python package installed,
    and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with
    your API key, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.llms import OpaquePrompts
            from langchain_community.chat_models import ChatOpenAI

            op_llm = OpaquePrompts(base_llm=ChatOpenAI())
    """

    base_llm: BaseLanguageModel
    """The base LLM to use."""

    class Config:
        extra = "forbid"

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validates that the OpaquePrompts API key and the Python package exist."""
        try:
            import opaqueprompts as op
        except ImportError:
            raise ImportError(
                "Could not import the `opaqueprompts` Python package, "
                "please install it with `pip install opaqueprompts`."
            )

        if op.__package__ is None:
            raise ValueError(
                "Could not properly import `opaqueprompts`, "
                "opaqueprompts.__package__ is None."
            )

        api_key = get_from_dict_or_env(
            values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default=""
        )
        if not api_key:
            raise ValueError(
                "Could not find OPAQUEPROMPTS_API_KEY in the environment. "
                "Please set it to your OpaquePrompts API key."
                "You can get it by creating an account on the OpaquePrompts website: "
                "https://opaqueprompts.opaque.co/ ."
            )
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call base LLM with sanitization before and de-sanitization after.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = op_llm.invoke("Tell me a joke.")
        """
        import opaqueprompts as op

        _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()

        # sanitize the prompt by replacing the sensitive information with a placeholder
        sanitize_response: op.SanitizeResponse = op.sanitize([prompt])
        sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]

        # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
        # call the LLM with the sanitized prompt and get the response
        llm_response = self.base_llm.bind(stop=stop).invoke(
            sanitized_prompt_value_str,
        )
        if isinstance(llm_response, AIMessage):
            llm_response = llm_response.content

        # desanitize the response by restoring the original sensitive information
        desanitize_response: op.DesanitizeResponse = op.desanitize(
            llm_response,
            secure_context=sanitize_response.secure_context,
        )
        return desanitize_response.desanitized_text

    @property
    def _llm_type(self) -> str:
        """Return type of LLM.

        This is an override of the base class method.
        """
        return "opaqueprompts"