# How to track token usage

This notebook goes over how to track your token usage for specific calls. It is currently only implemented for the OpenAI API.

Let's first look at an extremely simple example of tracking token usage for a single LLM call.
```python
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
```
```python
llm = OpenAI(model_name="text-davinci-002", n=2, best_of=2)
```
```python
with get_openai_callback() as cb:
    result = llm("Tell me a joke")
    print(f"Total Tokens: {cb.total_tokens}")
    print(f"Prompt Tokens: {cb.prompt_tokens}")
    print(f"Completion Tokens: {cb.completion_tokens}")
```

```
Total Tokens: 42
Prompt Tokens: 4
Completion Tokens: 38
```
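Depending on your installed version of LangChain, the callback object may also expose a few extra fields beyond the token counts shown above, such as `successful_requests` and `total_cost` (an estimated USD cost for OpenAI models). A minimal sketch, assuming those attributes are available in your version:

```python
# A minimal sketch, assuming your LangChain version exposes these additional
# attributes on the OpenAI callback handler (they are not shown in the output above).
with get_openai_callback() as cb:
    result = llm("Tell me a joke")
    print(f"Successful Requests: {cb.successful_requests}")
    print(f"Total Cost (USD): {cb.total_cost}")
```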
Anything inside the context manager will get tracked. Here's an example of using it to track multiple calls in sequence.
```python
with get_openai_callback() as cb:
    result = llm("Tell me a joke")
    result2 = llm("Tell me a joke")
    print(cb.total_tokens)
```

```
83
```
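Only calls made inside the `with` block are counted. If you need a running total across separate blocks, one option is to sum the per-block counts yourself; a small sketch of that pattern:

```python
# Each context manager tracks only the calls made inside its own block,
# so totals from separate blocks can be combined manually.
with get_openai_callback() as cb_first:
    llm("Tell me a joke")

with get_openai_callback() as cb_second:
    llm("Tell me another joke")

combined_total = cb_first.total_tokens + cb_second.total_tokens
print(combined_total)
```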
If a chain or agent with multiple steps is used, the callback will track all of those steps.
```python
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
```
```python
with get_openai_callback() as cb:
    response = agent.run("Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?")
    print(cb.total_tokens)
```

```
> Entering new AgentExecutor chain...
I need to find out who Olivia Wilde's boyfriend is and then calculate his age raised to the 0.23 power.
Action: Search
Action Input: "Olivia Wilde boyfriend"
Observation: Jason Sudeikis
Thought: I need to find out Jason Sudeikis' age
Action: Search
Action Input: "Jason Sudeikis age"
Observation: 47 years
Thought: I need to calculate 47 raised to the 0.23 power
Action: Calculator
Action Input: 47^0.23
Observation: Answer: 2.4242784855673896
Thought: I now know the final answer
Final Answer: Jason Sudeikis, Olivia Wilde's boyfriend, is 47 years old and his age raised to the 0.23 power is 2.4242784855673896.

> Finished chain.
1465
```
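The same per-field breakdown from the single-call example applies to agent runs as well. For instance, to see how the agent's usage splits between prompt and completion tokens, a sketch reusing the attributes shown earlier:

```python
# Reuses the prompt/completion attributes from the single-call example
# to break an agent run's total usage down by token type.
with get_openai_callback() as cb:
    response = agent.run(
        "Who is Olivia Wilde's boyfriend? "
        "What is his current age raised to the 0.23 power?"
    )
    print(f"Prompt Tokens: {cb.prompt_tokens}")
    print(f"Completion Tokens: {cb.completion_tokens}")
```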