{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "5125a1e3",
   "metadata": {},
   "source": [
    "# Anthropic Functions\n",
    "\n",
    "This notebook shows how to use an experimental wrapper around Anthropic that gives it the same API as OpenAI Functions."
   ]
  },
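  {
   "cell_type": "markdown",
   "id": "a1f45b01",
   "metadata": {},
   "source": [
    "## Setup\n",
    "\n",
    "A minimal setup sketch (not part of the original run): it assumes the `langchain`, `langchain-experimental`, and `anthropic` packages are installed and that an `ANTHROPIC_API_KEY` environment variable is set. Adjust package names and versions to your environment."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1f45b02",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Setup sketch (assumed, not from the original run): install the packages\n",
    "# this notebook imports and make sure an Anthropic API key is available\n",
    "# before initializing the model below.\n",
    "# %pip install -U langchain langchain-experimental anthropic\n",
    "\n",
    "import os\n",
    "\n",
    "assert os.environ.get(\"ANTHROPIC_API_KEY\"), \"Set ANTHROPIC_API_KEY before running this notebook\""
   ]
  },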
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "378be79b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/harrisonchase/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.6.14) is available. It's recommended that you update to the latest version using `pip install -U deeplake`.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "from langchain_experimental.llms.anthropic_functions import AnthropicFunctions"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "65499965",
   "metadata": {},
   "source": [
    "## Initialize Model\n",
    "\n",
    "You can initialize this wrapper the same way you'd initialize `ChatAnthropic`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e1d535f6",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = AnthropicFunctions(model='claude-2')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fcc9eaf4",
   "metadata": {},
   "source": [
    "## Passing in functions\n",
    "\n",
    "You can now pass in functions using the same format as OpenAI functions."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "0779c320",
   "metadata": {},
   "outputs": [],
   "source": [
    "functions = [\n",
    "    {\n",
    "        \"name\": \"get_current_weather\",\n",
    "        \"description\": \"Get the current weather in a given location\",\n",
    "        \"parameters\": {\n",
    "            \"type\": \"object\",\n",
    "            \"properties\": {\n",
    "                \"location\": {\n",
    "                    \"type\": \"string\",\n",
    "                    \"description\": \"The city and state, e.g. San Francisco, CA\"\n",
    "                },\n",
    "                \"unit\": {\n",
    "                    \"type\": \"string\",\n",
    "                    \"enum\": [\"celsius\", \"fahrenheit\"]\n",
    "                }\n",
    "            },\n",
    "            \"required\": [\"location\"]\n",
    "        }\n",
    "    }\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "ad75a933",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.schema import HumanMessage"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "fc703085",
   "metadata": {},
   "outputs": [],
   "source": [
    "response = model.predict_messages(\n",
    "    [HumanMessage(content=\"what's the weather in Boston?\")],\n",
    "    functions=functions\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "04d7936a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content=' ', additional_kwargs={'function_call': {'name': 'get_current_weather', 'arguments': '{\"location\": \"Boston, MA\", \"unit\": \"fahrenheit\"}'}}, example=False)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "response"
   ]
  },
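  {
   "cell_type": "markdown",
   "id": "a1f45b03",
   "metadata": {},
   "source": [
    "The function call the model chose comes back in `additional_kwargs` on the message. Below is a minimal sketch of pulling it out and decoding its JSON-encoded arguments; the variable names are just for illustration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1f45b04",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal sketch: read the requested function call off the response and\n",
    "# decode its JSON-encoded `arguments` string into a Python dict.\n",
    "import json\n",
    "\n",
    "function_call = response.additional_kwargs.get(\"function_call\")\n",
    "if function_call is not None:\n",
    "    args = json.loads(function_call[\"arguments\"])\n",
    "    print(function_call[\"name\"], args)"
   ]
  },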
  {
   "cell_type": "markdown",
   "id": "0072fdba",
   "metadata": {},
   "source": [
    "## Using for extraction\n",
    "\n",
    "You can now use this for extraction."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "7af5c567",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.chains import create_extraction_chain\n",
    "\n",
    "schema = {\n",
    "    \"properties\": {\n",
    "        \"name\": {\"type\": \"string\"},\n",
    "        \"height\": {\"type\": \"integer\"},\n",
    "        \"hair_color\": {\"type\": \"string\"},\n",
    "    },\n",
    "    \"required\": [\"name\", \"height\"],\n",
    "}\n",
    "inp = \"\"\"\n",
    "Alex is 5 feet tall. Claudia is 1 foot taller than Alex and jumps higher than him. Claudia is a brunette and Alex is blonde.\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "bd01082a",
   "metadata": {},
   "outputs": [],
   "source": [
    "chain = create_extraction_chain(schema, model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "b5a23e9f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'name': 'Alex', 'height': '5', 'hair_color': 'blonde'},\n",
       " {'name': 'Claudia', 'height': '6', 'hair_color': 'brunette'}]"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "chain.run(inp)"
   ]
  },
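  {
   "cell_type": "markdown",
   "id": "a1f45b05",
   "metadata": {},
   "source": [
    "Note that in the result above the extracted values come back as strings, even though the schema declares `height` as an integer. The cell below is a small, unexecuted post-processing sketch (it re-runs the chain; the variable names are just for illustration)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1f45b06",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Post-processing sketch: the extracted values arrive as strings,\n",
    "# so coerce `height` to an int before using it downstream.\n",
    "people = [{**person, \"height\": int(person[\"height\"])} for person in chain.run(inp)]\n",
    "people"
   ]
  },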
  {
   "cell_type": "markdown",
   "id": "90ec959e",
   "metadata": {},
   "source": [
    "## Using for tagging\n",
    "\n",
    "You can now use this for tagging."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "03c1eb0d",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.chains import create_tagging_chain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "581c0ece",
   "metadata": {},
   "outputs": [],
   "source": [
    "schema = {\n",
    "    \"properties\": {\n",
    "        \"sentiment\": {\"type\": \"string\"},\n",
    "        \"aggressiveness\": {\"type\": \"integer\"},\n",
    "        \"language\": {\"type\": \"string\"},\n",
    "    }\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "d9a8570e",
   "metadata": {},
   "outputs": [],
   "source": [
    "chain = create_tagging_chain(schema, model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "cf37d679",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'sentiment': 'positive', 'aggressiveness': '0', 'language': 'english'}"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "chain.run(\"this is really cool\")"
   ]
  },
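  {
   "cell_type": "markdown",
   "id": "a1f45b07",
   "metadata": {},
   "source": [
    "Since the schema above also has a `language` property, a further (unexecuted) sketch is to tag a non-English input and inspect what the model fills in. The example sentence below is just an illustration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1f45b08",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Unexecuted sketch: tag a Spanish sentence to exercise the `language`\n",
    "# and `sentiment` properties of the schema above.\n",
    "chain.run(\"Estoy muy contento de conocerte\")"
   ]
  }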
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}