Harrison/fake llm (#990)

Co-authored-by: Stefan Keselj <skeselj@princeton.edu>
Co-authored-by: Harrison Chase <harrisonchase@Harrisons-MBP.attlocal.net>
makefile-update-1
Harrison Chase 1 year ago committed by GitHub
parent e51fad1488
commit 10e7297306

@@ -166,7 +166,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
"version": "3.9.1"
}
},
"nbformat": 4,

@@ -0,0 +1,138 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "052dfe58",
"metadata": {},
"source": [
"# Fake LLM\n",
"We expose a fake LLM class that can be used for testing. This allows you to mock out calls to the LLM and simulate what would happen if the LLM responded in a certain way.\n",
"\n",
"In this notebook we go over how to use this.\n",
"\n",
"We start this with using the FakeLLM in an agent."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "ef97ac4d",
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms.fake import FakeListLLM"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "9a0a160f",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import load_tools\n",
"from langchain.agents import initialize_agent"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "b272258c",
"metadata": {},
"outputs": [],
"source": [
"tools = load_tools([\"python_repl\"])"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "94096c4c",
"metadata": {},
"outputs": [],
"source": [
"responses=[\n",
" \"Action: Python REPL\\nAction Input: print(2 + 2)\",\n",
" \"Final Answer: 4\"\n",
"]\n",
"llm = FakeListLLM(responses=responses)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "da226d02",
"metadata": {},
"outputs": [],
"source": [
"agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "44c13426",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mAction: Python REPL\n",
"Action Input: print(2 + 2)\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m4\n",
"\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3mFinal Answer: 4\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'4'"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent.run(\"whats 2 + 2\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "814c2858",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
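
The notebook above drives the fake LLM through an agent. For plain unit tests, the same class can stand in for a real model inside a simple chain. A minimal sketch, assuming the LLMChain and PromptTemplate APIs from the same library version (this snippet is illustrative and not part of this commit):

# Sketch only: unit-testing prompt plumbing with FakeListLLM instead of a real model.
from langchain.chains import LLMChain
from langchain.llms.fake import FakeListLLM
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(input_variables=["question"], template="Q: {question}\nA:")
llm = FakeListLLM(responses=["4"])
chain = LLMChain(llm=llm, prompt=prompt)

# The chain returns the canned response without making any real LLM call.
assert chain.run(question="what's 2 + 2") == "4"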

@@ -11,6 +11,8 @@ The examples here all address certain "how-to" guides for working with LLMs.
`Token Usage Tracking <./examples/token_usage_tracking.html>`_: How to track the token usage of various chains/agents/LLM calls.

`Fake LLM <./examples/fake_llm.html>`_: How to create and use a fake LLM for testing and debugging purposes.

.. toctree::
   :maxdepth: 1

@@ -0,0 +1,28 @@
"""Fake LLM wrapper for testing purposes."""
from typing import Any, List, Mapping, Optional
from pydantic import BaseModel
from langchain.llms.base import LLM
class FakeListLLM(LLM, BaseModel):
"""Fake LLM wrapper for testing purposes."""
responses: List
i: int = 0
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake-list"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""First try to lookup in queries, else return 'foo' or 'bar'."""
response = self.responses[self.i]
self.i += 1
return response
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {}
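
For reference, a hedged sketch of how FakeListLLM might be exercised directly in a test. The assertions are illustrative and not part of this commit; they rely on the base LLM's __call__ interface:

# Sketch only: FakeListLLM returns its canned responses in order, one per call.
from langchain.llms.fake import FakeListLLM

llm = FakeListLLM(responses=["first canned answer", "second canned answer"])
assert llm("any prompt") == "first canned answer"
assert llm("another prompt") == "second canned answer"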