From 2aa3754024c21630720916a56f5f6e51a3183f19 Mon Sep 17 00:00:00 2001
From: Marcus Winter <60154509+mwinterde@users.noreply.github.com>
Date: Sat, 20 May 2023 01:54:26 +0200
Subject: [PATCH] Check for single prompt in __call__ method of the BaseLLM
 class (#4892)

# Ensuring that users pass a single prompt when calling an LLM

- This PR adds a check to the `__call__` method of the `BaseLLM` class to ensure that it is called with a single prompt
- Raises a `ValueError` if users try to call an LLM with a list of prompts and instructs them to use the `generate` method instead

## Why this could be useful

I stumbled across this by accident. I accidentally called the OpenAI LLM with a list of prompts instead of a single string and still got a result:

```
>>> from langchain.llms import OpenAI
>>> llm = OpenAI()
>>> llm(["Tell a joke"]*2)
"\n\nQ: Why don't scientists trust atoms?\nA: Because they make up everything!"
```

It might be better to catch such a scenario, preventing unnecessary costs and irritation for the user.

## Proposed behaviour

```
>>> from langchain.llms import OpenAI
>>> llm = OpenAI()
>>> llm(["Tell a joke"]*2)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/Users/marcus/Projects/langchain/langchain/llms/base.py", line 291, in __call__
    raise ValueError(
ValueError: Argument `prompt` is expected to be a single string, not a list. If you want to run the LLM on multiple prompts, use `generate` instead.
```
---
 langchain/llms/base.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/langchain/llms/base.py b/langchain/llms/base.py
index 9fbf983e01..2e92c15dc9 100644
--- a/langchain/llms/base.py
+++ b/langchain/llms/base.py
@@ -287,6 +287,12 @@ class BaseLLM(BaseLanguageModel, ABC):
         self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
     ) -> str:
         """Check Cache and run the LLM on the given prompt and input."""
+        if not isinstance(prompt, str):
+            raise ValueError(
+                "Argument `prompt` is expected to be a string. Instead found "
+                f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
+                "`generate` instead."
+            )
         return (
             self.generate([prompt], stop=stop, callbacks=callbacks)
             .generations[0][0]
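
For reference, a minimal sketch of the multi-prompt path that the new error message directs users toward. This is illustrative usage, not part of the patch; it assumes the `langchain.llms.OpenAI` wrapper as of this commit and an `OPENAI_API_KEY` set in the environment.

```
# Illustrative only (not part of the patch). Assumes OPENAI_API_KEY is set.
from langchain.llms import OpenAI

llm = OpenAI()

# __call__ keeps its single-prompt contract and returns a plain string.
joke = llm("Tell a joke")
print(joke)

# For multiple prompts, generate returns an LLMResult whose .generations
# field holds one list of Generation objects per input prompt.
result = llm.generate(["Tell a joke"] * 2)
for gens in result.generations:
    print(gens[0].text)
```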