diff --git a/libs/experimental/langchain_experimental/sql/base.py b/libs/experimental/langchain_experimental/sql/base.py
index fcc664a75d..5773e45561 100644
--- a/libs/experimental/langchain_experimental/sql/base.py
+++ b/libs/experimental/langchain_experimental/sql/base.py
@@ -29,6 +29,15 @@ class SQLDatabaseChain(Chain):
             from langchain import OpenAI, SQLDatabase
             db = SQLDatabase(...)
             db_chain = SQLDatabaseChain.from_llm(OpenAI(), db)
+
+    *Security note*: Make sure that the database connection uses credentials
+    that are narrowly-scoped to only include the permissions this chain needs.
+    Failure to do so may result in data corruption or loss, since this chain may
+    attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted.
+    The best way to guard against such negative outcomes is to (as appropriate)
+    limit the permissions granted to the credentials used with this chain.
+    This issue shows an example negative outcome if these steps are not taken:
+    https://github.com/langchain-ai/langchain/issues/5923
     """

     llm_chain: LLMChain
@@ -49,7 +58,7 @@ class SQLDatabaseChain(Chain):
     return_direct: bool = False
     """Whether or not to return the result of querying the SQL table directly."""
     use_query_checker: bool = False
-    """Whether or not the query checker tool should be used to attempt 
+    """Whether or not the query checker tool should be used to attempt
     to fix the initial SQL from the LLM."""
     query_checker_prompt: Optional[BasePromptTemplate] = None
     """The prompt template that should be used by the query checker"""
@@ -197,6 +206,17 @@ class SQLDatabaseChain(Chain):
         prompt: Optional[BasePromptTemplate] = None,
         **kwargs: Any,
     ) -> SQLDatabaseChain:
+        """Create a SQLDatabaseChain from an LLM and a database connection.
+
+        *Security note*: Make sure that the database connection uses credentials
+        that are narrowly-scoped to only include the permissions this chain needs.
+        Failure to do so may result in data corruption or loss, since this chain may
+        attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted.
+        The best way to guard against such negative outcomes is to (as appropriate)
+        limit the permissions granted to the credentials used with this chain.
+        This issue shows an example negative outcome if these steps are not taken:
+        https://github.com/langchain-ai/langchain/issues/5923
+        """
         prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT)
         llm_chain = LLMChain(llm=llm, prompt=prompt)
         return cls(llm_chain=llm_chain, database=db, **kwargs)
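
For context, one concrete way to follow the security note added above is to hand the chain a connection string for a read-only database role rather than an admin account. The sketch below is illustrative only and is not part of this change: the role name, password placeholder, URI, and GRANT statements are assumptions (PostgreSQL-style syntax).

    # Run once as a DBA (PostgreSQL syntax; "sql_chain_reader" is an assumed role name):
    #   CREATE ROLE sql_chain_reader LOGIN PASSWORD '...';
    #   GRANT SELECT ON ALL TABLES IN SCHEMA public TO sql_chain_reader;
    from langchain import OpenAI, SQLDatabase
    from langchain_experimental.sql import SQLDatabaseChain

    # Connect with the narrowly-scoped role so prompted DROP/INSERT statements are rejected
    # by the database itself, regardless of what SQL the LLM produces.
    db = SQLDatabase.from_uri("postgresql://sql_chain_reader:...@localhost:5432/mydb")
    db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db)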