{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "700a516b",
   "metadata": {},
   "source": [
    "# OpenAI Adapter\n",
    "\n",
    "A lot of people get started with OpenAI but want to explore other models. LangChain's integrations with many model providers make this easy to do. While LangChain has its own message and model APIs, we've also made it as easy as possible to explore other models by exposing an adapter that adapts LangChain models to the OpenAI API.\n",
    "\n",
    "At the moment this adapter only deals with output and does not return other information (token counts, stop reasons, etc.)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6017f26a",
   "metadata": {},
   "outputs": [],
   "source": [
    "import openai\n",
    "from langchain.adapters import openai as lc_openai"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b522ceda",
   "metadata": {},
   "source": [
    "## ChatCompletion.create"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "1d22eb61",
   "metadata": {},
   "outputs": [],
   "source": [
    "messages = [{\"role\": \"user\", \"content\": \"hi\"}]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d550d3ad",
   "metadata": {},
   "source": [
    "Original OpenAI call"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "e1d27dfa",
   "metadata": {},
   "outputs": [],
   "source": [
    "result = openai.ChatCompletion.create(\n",
    "    messages=messages,\n",
    "    model=\"gpt-3.5-turbo\",\n",
    "    temperature=0\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "012d81ae",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'role': 'assistant', 'content': 'Hello! How can I assist you today?'}"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "result[\"choices\"][0][\"message\"].to_dict_recursive()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "db5b5500",
   "metadata": {},
   "source": [
    "LangChain OpenAI wrapper call"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "87c2d515",
   "metadata": {},
   "outputs": [],
   "source": [
    "lc_result = lc_openai.ChatCompletion.create(\n",
    "    messages=messages,\n",
    "    model=\"gpt-3.5-turbo\",\n",
    "    temperature=0\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "c67a5ac8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'role': 'assistant', 'content': 'Hello! How can I assist you today?'}"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lc_result[\"choices\"][0][\"message\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "034ba845",
   "metadata": {},
   "source": [
    "Swapping out model providers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "7a2c011c",
   "metadata": {},
   "outputs": [],
   "source": [
    "lc_result = lc_openai.ChatCompletion.create(\n",
    "    messages=messages,\n",
    "    model=\"claude-2\",\n",
    "    temperature=0,\n",
    "    provider=\"ChatAnthropic\"\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "f7c94827",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'role': 'assistant', 'content': ' Hello!'}"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lc_result[\"choices\"][0][\"message\"]"
   ]
  },
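  {
   "cell_type": "markdown",
   "id": "1c5f9a2e",
   "metadata": {},
   "source": [
    "A minimal sketch (not run above): it assumes other LangChain chat model class names, such as `ChatOpenAI`, are accepted by the `provider` argument in the same way as `ChatAnthropic`, so the provider can also be named explicitly for OpenAI itself."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d4e6b3a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical sketch: assumes provider=\"ChatOpenAI\" resolves to LangChain's ChatOpenAI\n",
    "# chat model, mirroring the ChatAnthropic example above. Reuses `messages` from earlier.\n",
    "lc_result = lc_openai.ChatCompletion.create(\n",
    "    messages=messages,\n",
    "    model=\"gpt-3.5-turbo\",\n",
    "    temperature=0,\n",
    "    provider=\"ChatOpenAI\"\n",
    ")\n",
    "lc_result[\"choices\"][0][\"message\"]"
   ]
  },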
  {
   "cell_type": "markdown",
   "id": "cb3f181d",
   "metadata": {},
   "source": [
    "## ChatCompletion.stream"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f7b8cd18",
   "metadata": {},
   "source": [
    "Original OpenAI call"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "fd8cb1ea",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'role': 'assistant', 'content': ''}\n",
      "{'content': 'Hello'}\n",
      "{'content': '!'}\n",
      "{'content': ' How'}\n",
      "{'content': ' can'}\n",
      "{'content': ' I'}\n",
      "{'content': ' assist'}\n",
      "{'content': ' you'}\n",
      "{'content': ' today'}\n",
      "{'content': '?'}\n",
      "{}\n"
     ]
    }
   ],
   "source": [
    "for c in openai.ChatCompletion.create(\n",
    "    messages=messages,\n",
    "    model=\"gpt-3.5-turbo\",\n",
    "    temperature=0,\n",
    "    stream=True\n",
    "):\n",
    "    print(c[\"choices\"][0][\"delta\"].to_dict_recursive())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0b2a076b",
   "metadata": {},
   "source": [
    "LangChain OpenAI wrapper call"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "9521218c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'role': 'assistant', 'content': ''}\n",
      "{'content': 'Hello'}\n",
      "{'content': '!'}\n",
      "{'content': ' How'}\n",
      "{'content': ' can'}\n",
      "{'content': ' I'}\n",
      "{'content': ' assist'}\n",
      "{'content': ' you'}\n",
      "{'content': ' today'}\n",
      "{'content': '?'}\n",
      "{}\n"
     ]
    }
   ],
   "source": [
    "for c in lc_openai.ChatCompletion.create(\n",
    "    messages=messages,\n",
    "    model=\"gpt-3.5-turbo\",\n",
    "    temperature=0,\n",
    "    stream=True\n",
    "):\n",
    "    print(c[\"choices\"][0][\"delta\"])"
   ]
  },
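  {
   "cell_type": "markdown",
   "id": "5b7d0c91",
   "metadata": {},
   "source": [
    "A minimal sketch of consuming the stream (not part of the original notebook): since each chunk's `delta` is a plain dict here, the `content` pieces can be concatenated into the full assistant reply. The `.get(\"content\", \"\")` fallback is an assumption made to handle the role-only first chunk and the empty final chunk."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e9a4f57",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Accumulate streamed deltas into one message string (sketch; reuses `messages`).\n",
    "full_content = \"\"\n",
    "for c in lc_openai.ChatCompletion.create(\n",
    "    messages=messages,\n",
    "    model=\"gpt-3.5-turbo\",\n",
    "    temperature=0,\n",
    "    stream=True\n",
    "):\n",
    "    delta = c[\"choices\"][0][\"delta\"]\n",
    "    full_content += delta.get(\"content\", \"\")\n",
    "full_content"
   ]
  },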
  {
   "cell_type": "markdown",
   "id": "0fc39750",
   "metadata": {},
   "source": [
    "Swapping out model providers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "68f0214e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'role': 'assistant', 'content': ' Hello'}\n",
      "{'content': '!'}\n",
      "{}\n"
     ]
    }
   ],
   "source": [
    "for c in lc_openai.ChatCompletion.create(\n",
    "    messages=messages,\n",
    "    model=\"claude-2\",\n",
    "    temperature=0,\n",
    "    stream=True,\n",
    "    provider=\"ChatAnthropic\",\n",
    "):\n",
    "    print(c[\"choices\"][0][\"delta\"])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}