diff --git a/docs/modules/models/chat/integrations/anthropic.ipynb b/docs/modules/models/chat/integrations/anthropic.ipynb
new file mode 100644
index 00000000..57cbf488
--- /dev/null
+++ b/docs/modules/models/chat/integrations/anthropic.ipynb
@@ -0,0 +1,171 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "bf733a38-db84-4363-89e2-de6735c37230",
+   "metadata": {},
+   "source": [
+    "# Anthropic\n",
+    "\n",
+    "This notebook covers how to get started with Anthropic chat models."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "from langchain.chat_models import ChatAnthropic\n",
+    "from langchain.prompts.chat import (\n",
+    "    ChatPromptTemplate,\n",
+    "    SystemMessagePromptTemplate,\n",
+    "    AIMessagePromptTemplate,\n",
+    "    HumanMessagePromptTemplate,\n",
+    ")\n",
+    "from langchain.schema import (\n",
+    "    AIMessage,\n",
+    "    HumanMessage,\n",
+    "    SystemMessage\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "chat = ChatAnthropic()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "AIMessage(content=\" J'adore programmer.\", additional_kwargs={})"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "messages = [\n",
+    "    HumanMessage(content=\"Translate this sentence from English to French. I love programming.\")\n",
+    "]\n",
+    "chat(messages)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c361ab1e-8c0c-4206-9e3c-9d1424a12b9c",
+   "metadata": {},
+   "source": [
+    "## `ChatAnthropic` also supports async and streaming functionality:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "93a21c5c-6ef9-4688-be60-b2e1f94842fb",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "from langchain.callbacks.base import CallbackManager\n",
+    "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "c5fac0e9-05a4-4fc1-a3b3-e5bbb24b971b",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "LLMResult(generations=[[ChatGeneration(text=\" J'aime programmer.\", generation_info=None, message=AIMessage(content=\" J'aime programmer.\", additional_kwargs={}))]], llm_output={})"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "await chat.agenerate([messages])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "025be980-e50d-4a68-93dc-c9c7b500ce34",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      " J'aime la programmation."
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "AIMessage(content=\" J'aime la programmation.\", additional_kwargs={})"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "chat = ChatAnthropic(streaming=True, verbose=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))\n",
+    "chat(messages)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.9"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/langchain/chat_models/__init__.py b/langchain/chat_models/__init__.py
index 88bafc7e..fdfe5e7d 100644
--- a/langchain/chat_models/__init__.py
+++ b/langchain/chat_models/__init__.py
@@ -1,5 +1,6 @@
+from langchain.chat_models.anthropic import ChatAnthropic
 from langchain.chat_models.azure_openai import AzureChatOpenAI
 from langchain.chat_models.openai import ChatOpenAI
 from langchain.chat_models.promptlayer_openai import PromptLayerChatOpenAI
 
-__all__ = ["ChatOpenAI", "AzureChatOpenAI", "PromptLayerChatOpenAI"]
+__all__ = ["ChatOpenAI", "AzureChatOpenAI", "PromptLayerChatOpenAI", "ChatAnthropic"]
diff --git a/langchain/chat_models/anthropic.py b/langchain/chat_models/anthropic.py
index b63fbf05..f56c6063 100644
--- a/langchain/chat_models/anthropic.py
+++ b/langchain/chat_models/anthropic.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import Any, Dict, List, Optional
 
 from pydantic import Extra
 
@@ -26,17 +26,7 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
         .. code-block:: python
             import anthropic
             from langchain.llms import Anthropic
-            model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")
-
-            # Simplest invocation, automatically wrapped with HUMAN_PROMPT
-            # and AI_PROMPT.
-            response = model("What are the biggest risks facing humanity?")
-
-            # Or if you want to use the chat mode, build a few-shot-prompt, or
-            # put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
-            raw_prompt = "What are the biggest risks facing humanity?"
-            prompt = f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}"
-            response = model(prompt)
+            model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
     """

     class Config:
@@ -98,7 +88,9 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
         self, messages: List[BaseMessage], stop: Optional[List[str]] = None
     ) -> ChatResult:
         prompt = self._convert_messages_to_prompt(messages)
-        params = {"prompt": prompt, "stop_sequences": stop, **self._default_params}
+        params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
+        if stop:
+            params["stop_sequences"] = stop
 
         if self.streaming:
             completion = ""
@@ -120,7 +112,9 @@ class ChatAnthropic(BaseChatModel, _AnthropicCommon):
         self, messages: List[BaseMessage], stop: Optional[List[str]] = None
     ) -> ChatResult:
         prompt = self._convert_messages_to_prompt(messages)
-        params = {"prompt": prompt, "stop_sequences": stop, **self._default_params}
+        params: Dict[str, Any] = {"prompt": prompt, **self._default_params}
+        if stop:
+            params["stop_sequences"] = stop
 
         if self.streaming:
             completion = ""
diff --git a/langchain/llms/anthropic.py b/langchain/llms/anthropic.py
index 24d9def1..e6096279 100644
--- a/langchain/llms/anthropic.py
+++ b/langchain/llms/anthropic.py
@@ -10,7 +10,7 @@ from langchain.utils import get_from_dict_or_env
 class _AnthropicCommon(BaseModel):
     client: Any = None  #: :meta private:
 
-    model: str = "claude-latest"
+    model: str = "claude-v1"
     """Model name to use."""
 
     max_tokens_to_sample: int = 256
diff --git a/poetry.lock b/poetry.lock
index 7a88ee2a..6b0e37aa 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -9035,4 +9035,4 @@ qdrant = ["qdrant-client"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8.1,<4.0"
-content-hash = "373f68ef16e7f3d5d9cde8b81c5f261096cc537ddca4f6a36711d7215b63f226"
+content-hash = "7e343fa8e31d8fcf1023cbda592f64c05e80015c4e0e23c1d387d2e9671ce995"
diff --git a/pyproject.toml b/pyproject.toml
index 3cc2d497..351d6e43 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,7 +36,7 @@ pinecone-text = {version = "^0.4.2", optional = true}
 weaviate-client = {version = "^3", optional = true}
 google-api-python-client = {version = "2.70.0", optional = true}
 wolframalpha = {version = "5.0.0", optional = true}
-anthropic = {version = "^0.2.4", optional = true}
+anthropic = {version = "^0.2.6", optional = true}
 qdrant-client = {version = "^1.1.2", optional = true, python = ">=3.8.1,<3.12"}
 dataclasses-json = "^0.5.7"
 tensorflow-text = {version = "^2.11.0", optional = true, python = "^3.10, <3.12"}
diff --git a/tests/integration_tests/chat_models/test_anthropic.py b/tests/integration_tests/chat_models/test_anthropic.py
index f04b30e2..60fe58f3 100644
--- a/tests/integration_tests/chat_models/test_anthropic.py
+++ b/tests/integration_tests/chat_models/test_anthropic.py
@@ -17,7 +17,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
 
 def test_anthropic_call() -> None:
     """Test valid call to anthropic."""
-    chat = ChatAnthropic(model="bare-nano-0")
+    chat = ChatAnthropic(model="test")
     message = HumanMessage(content="Hello")
     response = chat([message])
     assert isinstance(response, AIMessage)
@@ -26,7 +26,7 @@ def test_anthropic_streaming() -> None:
     """Test streaming tokens from anthropic."""
-    chat = ChatAnthropic(model="bare-nano-0", streaming=True)
+    chat = ChatAnthropic(model="test", streaming=True)
     message = HumanMessage(content="Hello")
     response = chat([message])
     assert isinstance(response, AIMessage)
@@ -38,11 +38,12 @@ def test_anthropic_streaming_callback() -> None:
     callback_handler = FakeCallbackHandler()
     callback_manager = CallbackManager([callback_handler])
     chat = ChatAnthropic(
+        model="test",
         streaming=True,
         callback_manager=callback_manager,
         verbose=True,
     )
-    message = HumanMessage(content="Write me a sentence with 100 words.")
+    message = HumanMessage(content="Write me a sentence with 10 words.")
     chat([message])
     assert callback_handler.llm_streams > 1
@@ -53,6 +54,7 @@ async def test_anthropic_async_streaming_callback() -> None:
     callback_handler = FakeCallbackHandler()
     callback_manager = CallbackManager([callback_handler])
     chat = ChatAnthropic(
+        model="test",
         streaming=True,
         callback_manager=callback_manager,
         verbose=True,
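
A minimal sketch of the behavior these changes enable, assuming `ANTHROPIC_API_KEY` is set in the environment; the model name and stop string below are illustrative, and `generate` is assumed to forward `stop` to `_generate` as in the other chat model wrappers. Note the effect of the new conditional guard: a bare call sends no `stop_sequences` key at all, where the old code passed `stop_sequences=None` through to the client.

```python
# Usage sketch for the new ChatAnthropic class (illustrative values only).
# Assumes ANTHROPIC_API_KEY is exported in the environment.
from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage

chat = ChatAnthropic(model="claude-v1")  # "claude-v1" is the new default

messages = [HumanMessage(content="Count from 1 to 10, one number per line.")]

# Without a stop list, the request params carry no "stop_sequences" key,
# rather than the stop_sequences=None the previous code sent.
print(chat(messages).content)

# With a stop list, `stop` is forwarded as `stop_sequences`, so generation
# should halt at the first occurrence of "5".
result = chat.generate([messages], stop=["5"])
print(result.generations[0][0].message.content)
```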