mirror of
https://github.com/hwchase17/langchain
synced 2024-11-04 06:00:26 +00:00
ed58eeb9c5
Moved the following modules to new package langchain-community in a backwards compatible fashion: ``` mv langchain/langchain/adapters community/langchain_community mv langchain/langchain/callbacks community/langchain_community/callbacks mv langchain/langchain/chat_loaders community/langchain_community mv langchain/langchain/chat_models community/langchain_community mv langchain/langchain/document_loaders community/langchain_community mv langchain/langchain/docstore community/langchain_community mv langchain/langchain/document_transformers community/langchain_community mv langchain/langchain/embeddings community/langchain_community mv langchain/langchain/graphs community/langchain_community mv langchain/langchain/llms community/langchain_community mv langchain/langchain/memory/chat_message_histories community/langchain_community mv langchain/langchain/retrievers community/langchain_community mv langchain/langchain/storage community/langchain_community mv langchain/langchain/tools community/langchain_community mv langchain/langchain/utilities community/langchain_community mv langchain/langchain/vectorstores community/langchain_community mv langchain/langchain/agents/agent_toolkits community/langchain_community mv langchain/langchain/cache.py community/langchain_community mv langchain/langchain/adapters community/langchain_community mv langchain/langchain/callbacks community/langchain_community/callbacks mv langchain/langchain/chat_loaders community/langchain_community mv langchain/langchain/chat_models community/langchain_community mv langchain/langchain/document_loaders community/langchain_community mv langchain/langchain/docstore community/langchain_community mv langchain/langchain/document_transformers community/langchain_community mv langchain/langchain/embeddings community/langchain_community mv langchain/langchain/graphs community/langchain_community mv langchain/langchain/llms community/langchain_community mv langchain/langchain/memory/chat_message_histories 
community/langchain_community mv langchain/langchain/retrievers community/langchain_community mv langchain/langchain/storage community/langchain_community mv langchain/langchain/tools community/langchain_community mv langchain/langchain/utilities community/langchain_community mv langchain/langchain/vectorstores community/langchain_community mv langchain/langchain/agents/agent_toolkits community/langchain_community mv langchain/langchain/cache.py community/langchain_community ``` Moved the following to core ``` mv langchain/langchain/utils/json_schema.py core/langchain_core/utils mv langchain/langchain/utils/html.py core/langchain_core/utils mv langchain/langchain/utils/strings.py core/langchain_core/utils cat langchain/langchain/utils/env.py >> core/langchain_core/utils/env.py rm langchain/langchain/utils/env.py ``` See .scripts/community_split/script_integrations.sh for all changes
74 lines
2.4 KiB
Python
74 lines
2.4 KiB
Python
from __future__ import annotations
|
|
|
|
import logging
|
|
from typing import Optional
|
|
|
|
from langchain_core.callbacks import CallbackManagerForToolRun
|
|
|
|
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
|
|
|
|
# Module-level logger named after this module, per stdlib logging convention.
# (Not referenced in the visible code; presumably used by related tooling or
# kept for parity with sibling EdenAI tool modules — TODO confirm.)
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class EdenAiTextModerationTool(EdenaiTool):
    """Tool that queries the Eden AI Explicit text detection.

    for api reference check edenai documentation:
    https://docs.edenai.co/reference/image_explicit_content_create.

    NOTE(review): the link above points at the *image* explicit-content
    endpoint although this tool submits text (feature="text",
    subfeature="moderation") — confirm the intended reference is the
    text moderation endpoint.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    # Stable identifier the agent uses to select this tool; changing it would
    # break existing prompts/agents, so it must stay as-is.
    name = "edenai_explicit_content_detection_text"

    # Natural-language contract shown to the LLM so it knows when and how to
    # invoke the tool. Built from implicitly concatenated string literals; the
    # embedded line breaks and spacing are part of the runtime prompt text —
    # do not reflow or "fix" the wording casually.
    description = (
        "A wrapper around edenai Services explicit content detection for text. "
        """Useful for when you have to scan text for offensive,
        sexually explicit or suggestive content,
        it checks also if there is any content of self-harm,
        violence, racist or hate speech."""
        """the structure of the output is :
        'the type of the explicit content : the likelihood of it being explicit'
        the likelihood is a number
        between 1 and 5, 1 being the lowest and 5 the highest.
        something is explicit if the likelihood is equal or higher than 3.
        for example :
        nsfw_likelihood: 1
        this is not explicit.
        for example :
        nsfw_likelihood: 3
        this is explicit.
        """
        "Input should be a string."
    )

    # Language of the text to analyze; forwarded verbatim as the "language"
    # request parameter in _run. Presumably an ISO-style language code such as
    # "en" — confirm against the Eden AI API.
    language: str

    # Eden AI routing selectors consumed by the EdenaiTool base class
    # (presumably to build the request endpoint — base class not visible here).
    feature: str = "text"
    subfeature: str = "moderation"

    def _parse_response(self, response: list) -> str:
        """Flatten the raw provider response into a human-readable report.

        Each element of ``response`` is a dict-like result. When a result
        carries an overall ``nsfw_likelihood`` score, that line is emitted
        first; then one ``"label": likelihood`` line is emitted per detected
        category. Lines from all results are joined with newlines.

        NOTE(review): ``result["label"]`` and ``result["likelihood"]`` are
        read unconditionally for every result — this assumes the API always
        returns both keys; confirm against the Eden AI response schema.
        """
        formatted_result = []
        for result in response:
            if "nsfw_likelihood" in result.keys():
                formatted_result.append(
                    "nsfw_likelihood: " + str(result["nsfw_likelihood"])
                )

            # Pairs the per-category labels with their likelihood scores.
            for label, likelihood in zip(result["label"], result["likelihood"]):
                formatted_result.append(f'"{label}": {str(likelihood)}')

        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool.

        Sends ``query`` together with the configured ``language`` to Eden AI
        via the base class's ``_call_eden_ai`` and returns its result
        (presumably formatted through ``_parse_response`` — base class not
        visible here; confirm in EdenaiTool).

        ``run_manager`` is accepted for the standard tool callback interface
        but is not used in this implementation.
        """
        query_params = {"text": query, "language": self.language}
        return self._call_eden_ai(query_params)
|