From 065cde69b162539a6765a86c435113d4e6841e78 Mon Sep 17 00:00:00 2001
From: Erick Friis
Date: Tue, 13 Feb 2024 10:01:25 -0800
Subject: [PATCH] google-genai[patch]: release 0.0.9, safety settings docs (#17432)

---
 .../chat/google_generative_ai.ipynb           | 35 +++++++++++++++++--
 docs/docs/integrations/llms/google_ai.ipynb   | 32 +++++++++++++++--
 .../text_embedding/google_generative_ai.ipynb |  2 +-
 libs/partners/google-genai/pyproject.toml     |  8 ++---
 4 files changed, 68 insertions(+), 9 deletions(-)

diff --git a/docs/docs/integrations/chat/google_generative_ai.ipynb b/docs/docs/integrations/chat/google_generative_ai.ipynb
index c8544163a4..f06b250755 100644
--- a/docs/docs/integrations/chat/google_generative_ai.ipynb
+++ b/docs/docs/integrations/chat/google_generative_ai.ipynb
@@ -320,20 +320,51 @@
     "4. Message may be blocked if they violate the safety checks of the LLM. In this case, the model will return an empty response."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "54793b9e",
+   "metadata": {},
+   "source": [
+    "### Safety Settings\n",
+    "\n",
+    "Gemini models have default safety settings that can be overridden. If you are receiving lots of \"Safety Warnings\" from your models, you can try tweaking the `safety_settings` attribute of the model. For example, to turn off safety blocking for dangerous content, you can construct your LLM as follows:"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "id": "75fdfad6",
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "from langchain_google_genai import (\n",
+    "    ChatGoogleGenerativeAI,\n",
+    "    HarmBlockThreshold,\n",
+    "    HarmCategory,\n",
+    ")\n",
+    "\n",
+    "llm = ChatGoogleGenerativeAI(\n",
+    "    model=\"gemini-pro\",\n",
+    "    safety_settings={\n",
+    "        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,\n",
+    "    },\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e68e203d",
+   "metadata": {},
+   "source": [
+    "For an enumeration of the categories and thresholds available, see Google's [safety setting types](https://ai.google.dev/api/python/google/generativeai/types/SafetySettingDict)."
+   ]
   },
   {
    "cell_type": "markdown",
    "id": "92b5aca5",
    "metadata": {},
    "source": [
-    "## Additional Configuraation\n",
+    "## Additional Configuration\n",
     "\n",
     "You can pass the following parameters to ChatGoogleGenerativeAI in order to customize the SDK's behavior:\n",
     "\n",
diff --git a/docs/docs/integrations/llms/google_ai.ipynb b/docs/docs/integrations/llms/google_ai.ipynb
index d5bd980980..540cfca998 100644
--- a/docs/docs/integrations/llms/google_ai.ipynb
+++ b/docs/docs/integrations/llms/google_ai.ipynb
@@ -264,13 +264,41 @@
     "    sys.stdout.flush()"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "aefe6df7",
+   "metadata": {},
+   "source": [
+    "### Safety Settings\n",
+    "\n",
+    "Gemini models have default safety settings that can be overridden. If you are receiving lots of \"Safety Warnings\" from your models, you can try tweaking the `safety_settings` attribute of the model. For example, to turn off safety blocking for dangerous content, you can construct your LLM as follows:"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "aefe6df7",
+   "id": "7e2682e6",
    "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "from langchain_google_genai import GoogleGenerativeAI, HarmBlockThreshold, HarmCategory\n",
+    "\n",
+    "llm = GoogleGenerativeAI(\n",
+    "    model=\"gemini-pro\",\n",
+    "    google_api_key=api_key,\n",
+    "    safety_settings={\n",
+    "        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,\n",
+    "    },\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e8d0ee0b",
+   "metadata": {},
+   "source": [
+    "For an enumeration of the categories and thresholds available, see Google's [safety setting types](https://ai.google.dev/api/python/google/generativeai/types/SafetySettingDict)."
+   ]
   }
  ],
  "metadata": {
diff --git a/docs/docs/integrations/text_embedding/google_generative_ai.ipynb b/docs/docs/integrations/text_embedding/google_generative_ai.ipynb
index 7cac7e42b8..7afdf8b34f 100644
--- a/docs/docs/integrations/text_embedding/google_generative_ai.ipynb
+++ b/docs/docs/integrations/text_embedding/google_generative_ai.ipynb
@@ -200,7 +200,7 @@
    "id": "2e7857e5",
    "metadata": {},
    "source": [
-    "## Additional Configuraation\n",
+    "## Additional Configuration\n",
     "\n",
     "You can pass the following parameters to ChatGoogleGenerativeAI in order to customize the SDK's behavior:\n",
     "\n",
diff --git a/libs/partners/google-genai/pyproject.toml b/libs/partners/google-genai/pyproject.toml
index deee61e7f7..43248562bc 100644
--- a/libs/partners/google-genai/pyproject.toml
+++ b/libs/partners/google-genai/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-google-genai"
-version = "0.0.8"
+version = "0.0.9"
 description = "An integration package connecting Google's genai package and LangChain"
 authors = []
 readme = "README.md"
@@ -70,9 +70,9 @@ types-google-cloud-ndb = "^2.2.0.1"
 
 [tool.ruff.lint]
 select = [
-  "E", # pycodestyle
-  "F", # pyflakes
-  "I", # isort
+  "E",    # pycodestyle
+  "F",    # pyflakes
+  "I",    # isort
   "T201", # print
 ]
 
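
As a usage sketch (illustrative only, not part of the patch): the safety settings documented above slot into an ordinary model call roughly as follows, assuming `langchain-google-genai` 0.0.9 is installed and `GOOGLE_API_KEY` is set in the environment; the prompt string is just an example.

from langchain_google_genai import (
    ChatGoogleGenerativeAI,
    HarmBlockThreshold,
    HarmCategory,
)

# Relax blocking for the "dangerous content" category, as documented above.
llm = ChatGoogleGenerativeAI(
    model="gemini-pro",
    safety_settings={
        HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
    },
)

# invoke() returns an AIMessage; the generated text is in .content.
response = llm.invoke("Describe how to store lithium-ion batteries safely.")
print(response.content)

With these settings, responses are no longer blocked on dangerous-content grounds, which would otherwise surface as the empty response noted in the chat docs above.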