diff --git a/cookbook/Multi_modal_RAG_google.ipynb b/cookbook/Multi_modal_RAG_google.ipynb index 6df5b20cda..3ba989edcf 100644 --- a/cookbook/Multi_modal_RAG_google.ipynb +++ b/cookbook/Multi_modal_RAG_google.ipynb @@ -256,7 +256,7 @@ " \"\"\"Make image summary\"\"\"\n", " model = ChatVertexAI(model_name=\"gemini-pro-vision\", max_output_tokens=1024)\n", "\n", - " msg = model(\n", + " msg = model.invoke(\n", " [\n", " HumanMessage(\n", " content=[\n", diff --git a/cookbook/camel_role_playing.ipynb b/cookbook/camel_role_playing.ipynb index ab8f44adf9..afa2215c15 100644 --- a/cookbook/camel_role_playing.ipynb +++ b/cookbook/camel_role_playing.ipynb @@ -90,7 +90,7 @@ " ) -> AIMessage:\n", " messages = self.update_messages(input_message)\n", "\n", - " output_message = self.model(messages)\n", + " output_message = self.model.invoke(messages)\n", " self.update_messages(output_message)\n", "\n", " return output_message" diff --git a/cookbook/forward_looking_retrieval_augmented_generation.ipynb b/cookbook/forward_looking_retrieval_augmented_generation.ipynb index 4406c1812d..46200f04f5 100644 --- a/cookbook/forward_looking_retrieval_augmented_generation.ipynb +++ b/cookbook/forward_looking_retrieval_augmented_generation.ipynb @@ -362,7 +362,7 @@ ], "source": [ "llm = OpenAI()\n", - "llm(query)" + "llm.invoke(query)" ] }, { diff --git a/cookbook/gymnasium_agent_simulation.ipynb b/cookbook/gymnasium_agent_simulation.ipynb index 9990afd4d8..3997a644e2 100644 --- a/cookbook/gymnasium_agent_simulation.ipynb +++ b/cookbook/gymnasium_agent_simulation.ipynb @@ -108,7 +108,7 @@ " return obs_message\n", "\n", " def _act(self):\n", - " act_message = self.model(self.message_history)\n", + " act_message = self.model.invoke(self.message_history)\n", " self.message_history.append(act_message)\n", " action = int(self.action_parser.parse(act_message.content)[\"action\"])\n", " return action\n", diff --git a/cookbook/multi_player_dnd.ipynb b/cookbook/multi_player_dnd.ipynb index 05c4d45914..9bb3489c53 100644 --- a/cookbook/multi_player_dnd.ipynb +++ b/cookbook/multi_player_dnd.ipynb @@ -74,7 +74,7 @@ " Applies the chatmodel to the message history\n", " and returns the message string\n", " \"\"\"\n", - " message = self.model(\n", + " message = self.model.invoke(\n", " [\n", " self.system_message,\n", " HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n", diff --git a/cookbook/multiagent_authoritarian.ipynb b/cookbook/multiagent_authoritarian.ipynb index 893b35f7c7..fd557083e2 100644 --- a/cookbook/multiagent_authoritarian.ipynb +++ b/cookbook/multiagent_authoritarian.ipynb @@ -79,7 +79,7 @@ " Applies the chatmodel to the message history\n", " and returns the message string\n", " \"\"\"\n", - " message = self.model(\n", + " message = self.model.invoke(\n", " [\n", " self.system_message,\n", " HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n", @@ -234,7 +234,7 @@ " termination_clause=self.termination_clause if self.stop else \"\",\n", " )\n", "\n", - " self.response = self.model(\n", + " self.response = self.model.invoke(\n", " [\n", " self.system_message,\n", " HumanMessage(content=response_prompt),\n", @@ -263,7 +263,7 @@ " speaker_names=speaker_names,\n", " )\n", "\n", - " choice_string = self.model(\n", + " choice_string = self.model.invoke(\n", " [\n", " self.system_message,\n", " HumanMessage(content=choice_prompt),\n", @@ -299,7 +299,7 @@ " ),\n", " next_speaker=self.next_speaker,\n", " )\n", - " message = self.model(\n", + " message = self.model.invoke(\n", " 
[\n", " self.system_message,\n", " HumanMessage(content=next_prompt),\n", diff --git a/cookbook/multiagent_bidding.ipynb b/cookbook/multiagent_bidding.ipynb index fbb9f03f53..886c472899 100644 --- a/cookbook/multiagent_bidding.ipynb +++ b/cookbook/multiagent_bidding.ipynb @@ -71,7 +71,7 @@ " Applies the chatmodel to the message history\n", " and returns the message string\n", " \"\"\"\n", - " message = self.model(\n", + " message = self.model.invoke(\n", " [\n", " self.system_message,\n", " HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n", @@ -164,7 +164,7 @@ " message_history=\"\\n\".join(self.message_history),\n", " recent_message=self.message_history[-1],\n", " )\n", - " bid_string = self.model([SystemMessage(content=prompt)]).content\n", + " bid_string = self.model.invoke([SystemMessage(content=prompt)]).content\n", " return bid_string" ] }, diff --git a/cookbook/petting_zoo.ipynb b/cookbook/petting_zoo.ipynb index c0db7653b0..14d6435b47 100644 --- a/cookbook/petting_zoo.ipynb +++ b/cookbook/petting_zoo.ipynb @@ -129,7 +129,7 @@ " return obs_message\n", "\n", " def _act(self):\n", - " act_message = self.model(self.message_history)\n", + " act_message = self.model.invoke(self.message_history)\n", " self.message_history.append(act_message)\n", " action = int(self.action_parser.parse(act_message.content)[\"action\"])\n", " return action\n", diff --git a/cookbook/two_agent_debate_tools.ipynb b/cookbook/two_agent_debate_tools.ipynb index 3815889ff2..78c2469c6e 100644 --- a/cookbook/two_agent_debate_tools.ipynb +++ b/cookbook/two_agent_debate_tools.ipynb @@ -84,7 +84,7 @@ " Applies the chatmodel to the message history\n", " and returns the message string\n", " \"\"\"\n", - " message = self.model(\n", + " message = self.model.invoke(\n", " [\n", " self.system_message,\n", " HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n", diff --git a/cookbook/two_player_dnd.ipynb b/cookbook/two_player_dnd.ipynb index d90e4f9365..74f3b0c566 100644 --- a/cookbook/two_player_dnd.ipynb +++ b/cookbook/two_player_dnd.ipynb @@ -70,7 +70,7 @@ " Applies the chatmodel to the message history\n", " and returns the message string\n", " \"\"\"\n", - " message = self.model(\n", + " message = self.model.invoke(\n", " [\n", " self.system_message,\n", " HumanMessage(content=\"\\n\".join(self.message_history + [self.prefix])),\n", diff --git a/docs/docs/integrations/callbacks/labelstudio.ipynb b/docs/docs/integrations/callbacks/labelstudio.ipynb index 68e17408c5..e6841025c9 100644 --- a/docs/docs/integrations/callbacks/labelstudio.ipynb +++ b/docs/docs/integrations/callbacks/labelstudio.ipynb @@ -194,7 +194,7 @@ "llm = OpenAI(\n", " temperature=0, callbacks=[LabelStudioCallbackHandler(project_name=\"My Project\")]\n", ")\n", - "print(llm(\"Tell me a joke\"))" + "print(llm.invoke(\"Tell me a joke\"))" ] }, { @@ -270,7 +270,7 @@ " )\n", " ]\n", ")\n", - "llm_results = chat_llm(\n", + "llm_results = chat_llm.invoke(\n", " [\n", " SystemMessage(content=\"Always use a lot of emojis\"),\n", " HumanMessage(content=\"Tell me a joke\"),\n", diff --git a/docs/docs/integrations/callbacks/llmonitor.md b/docs/docs/integrations/callbacks/llmonitor.md index a90e606e7b..d27a698c78 100644 --- a/docs/docs/integrations/callbacks/llmonitor.md +++ b/docs/docs/integrations/callbacks/llmonitor.md @@ -107,7 +107,7 @@ User tracking allows you to identify your users, track their cost, conversations from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler, identify 
with identify("user-123"): - llm("Tell me a joke") + llm.invoke("Tell me a joke") with identify("user-456", user_props={"email": "user456@test.com"}): agen.run("Who is Leo DiCaprio's girlfriend?") diff --git a/docs/docs/integrations/callbacks/promptlayer.ipynb b/docs/docs/integrations/callbacks/promptlayer.ipynb index 8deba48be5..acbe00324a 100644 --- a/docs/docs/integrations/callbacks/promptlayer.ipynb +++ b/docs/docs/integrations/callbacks/promptlayer.ipynb @@ -103,7 +103,7 @@ " temperature=0,\n", " callbacks=[PromptLayerCallbackHandler(pl_tags=[\"chatopenai\"])],\n", ")\n", - "llm_results = chat_llm(\n", + "llm_results = chat_llm.invoke(\n", " [\n", " HumanMessage(content=\"What comes after 1,2,3 ?\"),\n", " HumanMessage(content=\"Tell me another joke?\"),\n", @@ -129,10 +129,11 @@ "from langchain_community.llms import GPT4All\n", "\n", "model = GPT4All(model=\"./models/gpt4all-model.bin\", n_ctx=512, n_threads=8)\n", + "callbacks = [PromptLayerCallbackHandler(pl_tags=[\"langchain\", \"gpt4all\"])]\n", "\n", - "response = model(\n", + "response = model.invoke(\n", " \"Once upon a time, \",\n", - " callbacks=[PromptLayerCallbackHandler(pl_tags=[\"langchain\", \"gpt4all\"])],\n", + " config={\"callbacks\": callbacks},\n", ")" ] }, @@ -181,7 +182,7 @@ ")\n", "\n", "example_prompt = promptlayer.prompts.get(\"example\", version=1, langchain=True)\n", - "openai_llm(example_prompt.format(product=\"toasters\"))" + "openai_llm.invoke(example_prompt.format(product=\"toasters\"))" ] }, { diff --git a/docs/docs/integrations/callbacks/trubrics.ipynb b/docs/docs/integrations/callbacks/trubrics.ipynb index 89feb2c1d0..4e3771c0a9 100644 --- a/docs/docs/integrations/callbacks/trubrics.ipynb +++ b/docs/docs/integrations/callbacks/trubrics.ipynb @@ -315,7 +315,7 @@ } ], "source": [ - "chat_res = chat_llm(\n", + "chat_res = chat_llm.invoke(\n", " [\n", " SystemMessage(content=\"Every answer of yours must be about OpenAI.\"),\n", " HumanMessage(content=\"Tell me a joke\"),\n", diff --git a/docs/docs/integrations/chat/alibaba_cloud_pai_eas.ipynb b/docs/docs/integrations/chat/alibaba_cloud_pai_eas.ipynb index c2eb767547..342835629d 100644 --- a/docs/docs/integrations/chat/alibaba_cloud_pai_eas.ipynb +++ b/docs/docs/integrations/chat/alibaba_cloud_pai_eas.ipynb @@ -72,7 +72,7 @@ "metadata": {}, "outputs": [], "source": [ - "output = chat([HumanMessage(content=\"write a funny joke\")])\n", + "output = chat.invoke([HumanMessage(content=\"write a funny joke\")])\n", "print(\"output:\", output)" ] }, @@ -90,7 +90,7 @@ "outputs": [], "source": [ "kwargs = {\"temperature\": 0.8, \"top_p\": 0.8, \"top_k\": 5}\n", - "output = chat([HumanMessage(content=\"write a funny joke\")], **kwargs)\n", + "output = chat.invoke([HumanMessage(content=\"write a funny joke\")], **kwargs)\n", "print(\"output:\", output)" ] }, diff --git a/docs/docs/integrations/chat/llama_edge.ipynb b/docs/docs/integrations/chat/llama_edge.ipynb index 003439844a..16fb92b0b0 100644 --- a/docs/docs/integrations/chat/llama_edge.ipynb +++ b/docs/docs/integrations/chat/llama_edge.ipynb @@ -62,7 +62,7 @@ "messages = [system_message, user_message]\n", "\n", "# chat with wasm-chat service\n", - "response = chat(messages)\n", + "response = chat.invoke(messages)\n", "\n", "print(f\"[Bot] {response.content}\")" ] diff --git a/docs/docs/integrations/chat/zhipuai.ipynb b/docs/docs/integrations/chat/zhipuai.ipynb index 7d7c0777f6..7dd4ae931c 100644 --- a/docs/docs/integrations/chat/zhipuai.ipynb +++ b/docs/docs/integrations/chat/zhipuai.ipynb @@ -119,7 +119,7 @@ 
"metadata": {}, "outputs": [], "source": [ - "response = chat(messages)\n", + "response = chat.invoke(messages)\n", "print(response.content) # Displays the AI-generated poem" ] }, diff --git a/docs/docs/integrations/llms/anyscale.ipynb b/docs/docs/integrations/llms/anyscale.ipynb index 105746779c..5bba14d951 100644 --- a/docs/docs/integrations/llms/anyscale.ipynb +++ b/docs/docs/integrations/llms/anyscale.ipynb @@ -147,7 +147,7 @@ "\n", "@ray.remote(num_cpus=0.1)\n", "def send_query(llm, prompt):\n", - " resp = llm(prompt)\n", + " resp = llm.invoke(prompt)\n", " return resp\n", "\n", "\n", diff --git a/docs/docs/integrations/llms/aphrodite.ipynb b/docs/docs/integrations/llms/aphrodite.ipynb index f90c4fae25..724f271ac5 100644 --- a/docs/docs/integrations/llms/aphrodite.ipynb +++ b/docs/docs/integrations/llms/aphrodite.ipynb @@ -96,7 +96,7 @@ ")\n", "\n", "print(\n", - " llm(\n", + " llm.invoke(\n", " '<|system|>Enter RP mode. You are Ayumu \"Osaka\" Kasuga.<|user|>Hey Osaka. Tell me about yourself.<|model|>'\n", " )\n", ")" diff --git a/docs/docs/integrations/llms/baichuan.ipynb b/docs/docs/integrations/llms/baichuan.ipynb index 7c92d17717..ec55b305bd 100644 --- a/docs/docs/integrations/llms/baichuan.ipynb +++ b/docs/docs/integrations/llms/baichuan.ipynb @@ -45,7 +45,7 @@ "# Load the model\n", "llm = BaichuanLLM()\n", "\n", - "res = llm(\"What's your name?\")\n", + "res = llm.invoke(\"What's your name?\")\n", "print(res)" ] }, diff --git a/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb b/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb index cff3fc145c..aaedf7891c 100644 --- a/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb +++ b/docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb @@ -80,7 +80,7 @@ "os.environ[\"QIANFAN_SK\"] = \"your_sk\"\n", "\n", "llm = QianfanLLMEndpoint(streaming=True)\n", - "res = llm(\"hi\")\n", + "res = llm.invoke(\"hi\")\n", "print(res)" ] }, @@ -185,7 +185,7 @@ " model=\"ERNIE-Bot-turbo\",\n", " endpoint=\"eb-instant\",\n", ")\n", - "res = llm(\"hi\")" + "res = llm.invoke(\"hi\")" ] }, { diff --git a/docs/docs/integrations/llms/bittensor.ipynb b/docs/docs/integrations/llms/bittensor.ipynb index 1af3584fae..89981589c7 100644 --- a/docs/docs/integrations/llms/bittensor.ipynb +++ b/docs/docs/integrations/llms/bittensor.ipynb @@ -62,7 +62,7 @@ " } \"\"\"\n", "\n", "multi_response_llm = NIBittensorLLM(top_responses=10)\n", - "multi_resp = multi_response_llm(\"What is Neural Network Feeding Mechanism?\")\n", + "multi_resp = multi_response_llm.invoke(\"What is Neural Network Feeding Mechanism?\")\n", "json_multi_resp = json.loads(multi_resp)\n", "pprint(json_multi_resp)" ] diff --git a/docs/docs/integrations/llms/ctransformers.ipynb b/docs/docs/integrations/llms/ctransformers.ipynb index 7c6248c013..ce7bbce11f 100644 --- a/docs/docs/integrations/llms/ctransformers.ipynb +++ b/docs/docs/integrations/llms/ctransformers.ipynb @@ -62,7 +62,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(llm(\"AI is going to\"))" + "print(llm.invoke(\"AI is going to\"))" ] }, { @@ -85,7 +85,7 @@ " model=\"marella/gpt-2-ggml\", callbacks=[StreamingStdOutCallbackHandler()]\n", ")\n", "\n", - "response = llm(\"AI is going to\")" + "response = llm.invoke(\"AI is going to\")" ] }, { diff --git a/docs/docs/integrations/llms/ctranslate2.ipynb b/docs/docs/integrations/llms/ctranslate2.ipynb index c13a4d7625..3f177af52c 100644 --- a/docs/docs/integrations/llms/ctranslate2.ipynb +++ b/docs/docs/integrations/llms/ctranslate2.ipynb @@ -97,7 +97,7 @@ ], "source": 
[ "print(\n", - " llm(\n", + " llm.invoke(\n", " \"He presented me with plausible evidence for the existence of unicorns: \",\n", " max_length=256,\n", " sampling_topk=50,\n", diff --git a/docs/docs/integrations/llms/deepsparse.ipynb b/docs/docs/integrations/llms/deepsparse.ipynb index bdec459e73..138663b69c 100644 --- a/docs/docs/integrations/llms/deepsparse.ipynb +++ b/docs/docs/integrations/llms/deepsparse.ipynb @@ -32,7 +32,7 @@ " model=\"zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none\"\n", ")\n", "\n", - "print(llm(\"def fib():\"))" + "print(llm.invoke(\"def fib():\"))" ] }, { diff --git a/docs/docs/integrations/llms/edenai.ipynb b/docs/docs/integrations/llms/edenai.ipynb index b6231654df..be68b1275d 100644 --- a/docs/docs/integrations/llms/edenai.ipynb +++ b/docs/docs/integrations/llms/edenai.ipynb @@ -203,7 +203,7 @@ "User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?\n", "Assistant:\n", "\"\"\"\n", - "print(llm(prompt))" + "print(llm.invoke(prompt))" ] }, { diff --git a/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb b/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb index 425b5bb3f5..65ae7900f7 100644 --- a/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb +++ b/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb @@ -359,7 +359,7 @@ "}\n", "message = HumanMessage(content=[text_message, image_message])\n", "\n", - "output = llm([message])\n", + "output = llm.invoke([message])\n", "print(output.content)" ] }, @@ -432,7 +432,7 @@ "}\n", "message = HumanMessage(content=[text_message, image_message])\n", "\n", - "output = llm([message])\n", + "output = llm.invoke([message])\n", "print(output.content)" ] }, @@ -457,7 +457,7 @@ "outputs": [], "source": [ "message2 = HumanMessage(content=\"And where the image is taken?\")\n", - "output2 = llm([message, output, message2])\n", + "output2 = llm.invoke([message, output, message2])\n", "print(output2.content)" ] }, @@ -486,7 +486,7 @@ "}\n", "message = HumanMessage(content=[text_message, image_message])\n", "\n", - "output = llm([message])\n", + "output = llm.invoke([message])\n", "print(output.content)" ] }, diff --git a/docs/docs/integrations/llms/koboldai.ipynb b/docs/docs/integrations/llms/koboldai.ipynb index d55a73efc2..de6502c0a8 100644 --- a/docs/docs/integrations/llms/koboldai.ipynb +++ b/docs/docs/integrations/llms/koboldai.ipynb @@ -57,7 +57,9 @@ }, "outputs": [], "source": [ - "response = llm(\"### Instruction:\\nWhat is the first book of the bible?\\n### Response:\")" + "response = llm.invoke(\n", + " \"### Instruction:\\nWhat is the first book of the bible?\\n### Response:\"\n", + ")" ] } ], diff --git a/docs/docs/integrations/llms/konko.ipynb b/docs/docs/integrations/llms/konko.ipynb index ef1ffc15d8..8592a09d83 100644 --- a/docs/docs/integrations/llms/konko.ipynb +++ b/docs/docs/integrations/llms/konko.ipynb @@ -90,7 +90,7 @@ "llm = Konko(model=\"mistralai/mistral-7b-v0.1\", temperature=0.1, max_tokens=128)\n", "\n", "input_ = \"\"\"You are a helpful assistant. 
Explain Big Bang Theory briefly.\"\"\"\n", - "print(llm(input_))" + "print(llm.invoke(input_))" ] }, { diff --git a/docs/docs/integrations/llms/llm_caching.ipynb b/docs/docs/integrations/llms/llm_caching.ipynb index 65e55ee1da..504a771ea2 100644 --- a/docs/docs/integrations/llms/llm_caching.ipynb +++ b/docs/docs/integrations/llms/llm_caching.ipynb @@ -1020,7 +1020,7 @@ "source": [ "%%time\n", "\n", - "print(llm(\"Why is the Moon always showing the same side?\"))" + "print(llm.invoke(\"Why is the Moon always showing the same side?\"))" ] }, { @@ -1044,7 +1044,7 @@ "source": [ "%%time\n", "\n", - "print(llm(\"Why is the Moon always showing the same side?\"))" + "print(llm.invoke(\"Why is the Moon always showing the same side?\"))" ] }, { @@ -1109,7 +1109,7 @@ "source": [ "%%time\n", "\n", - "print(llm(\"Why is the Moon always showing the same side?\"))" + "print(llm.invoke(\"Why is the Moon always showing the same side?\"))" ] }, { @@ -1133,7 +1133,7 @@ "source": [ "%%time\n", "\n", - "print(llm(\"How come we always see one face of the moon?\"))" + "print(llm.invoke(\"How come we always see one face of the moon?\"))" ] }, { @@ -1238,7 +1238,7 @@ "source": [ "%%time\n", "\n", - "print(llm(\"Is a true fakery the same as a fake truth?\"))" + "print(llm.invoke(\"Is a true fakery the same as a fake truth?\"))" ] }, { @@ -1262,7 +1262,7 @@ "source": [ "%%time\n", "\n", - "print(llm(\"Is a true fakery the same as a fake truth?\"))" + "print(llm.invoke(\"Is a true fakery the same as a fake truth?\"))" ] }, { @@ -1327,7 +1327,7 @@ "source": [ "%%time\n", "\n", - "print(llm(\"Are there truths that are false?\"))" + "print(llm.invoke(\"Are there truths that are false?\"))" ] }, { @@ -1351,7 +1351,7 @@ "source": [ "%%time\n", "\n", - "print(llm(\"Is is possible that something false can be also true?\"))" + "print(llm.invoke(\"Is is possible that something false can be also true?\"))" ] }, { diff --git a/docs/docs/integrations/llms/predibase.ipynb b/docs/docs/integrations/llms/predibase.ipynb index fc5e43bd46..5052309469 100644 --- a/docs/docs/integrations/llms/predibase.ipynb +++ b/docs/docs/integrations/llms/predibase.ipynb @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "response = model(\"Can you recommend me a nice dry wine?\")\n", + "response = model.invoke(\"Can you recommend me a nice dry wine?\")\n", "print(response)" ] }, @@ -269,7 +269,7 @@ "metadata": {}, "outputs": [], "source": [ - "# response = model(\"Can you help categorize the following emails into positive, negative, and neutral?\")" + "# response = model.invoke(\"Can you help categorize the following emails into positive, negative, and neutral?\")" ] } ], diff --git a/docs/docs/integrations/llms/replicate.ipynb b/docs/docs/integrations/llms/replicate.ipynb index 95260e4096..569d33155e 100644 --- a/docs/docs/integrations/llms/replicate.ipynb +++ b/docs/docs/integrations/llms/replicate.ipynb @@ -323,7 +323,7 @@ "User: Answer the following yes/no question by reasoning step by step. 
Can a dog drive a car?\n", "Assistant:\n", "\"\"\"\n", - "_ = llm(prompt)" + "_ = llm.invoke(prompt)" ] }, { @@ -376,13 +376,13 @@ "Assistant:\n", "\"\"\"\n", "start_time = time.perf_counter()\n", - "raw_output = llm(prompt) # raw output, no stop\n", + "raw_output = llm.invoke(prompt) # raw output, no stop\n", "end_time = time.perf_counter()\n", "print(f\"Raw output:\\n {raw_output}\")\n", "print(f\"Raw output runtime: {end_time - start_time} seconds\")\n", "\n", "start_time = time.perf_counter()\n", - "stopped_output = llm(prompt, stop=[\"\\n\\n\"]) # stop on double newlines\n", + "stopped_output = llm.invoke(prompt, stop=[\"\\n\\n\"]) # stop on double newlines\n", "end_time = time.perf_counter()\n", "print(f\"Stopped output:\\n {stopped_output}\")\n", "print(f\"Stopped output runtime: {end_time - start_time} seconds\")" diff --git a/docs/docs/integrations/llms/sparkllm.ipynb b/docs/docs/integrations/llms/sparkllm.ipynb index f17c33a36d..63642a81c7 100644 --- a/docs/docs/integrations/llms/sparkllm.ipynb +++ b/docs/docs/integrations/llms/sparkllm.ipynb @@ -65,7 +65,7 @@ "# Load the model\n", "llm = SparkLLM()\n", "\n", - "res = llm(\"What's your name?\")\n", + "res = llm.invoke(\"What's your name?\")\n", "print(res)" ] }, diff --git a/docs/docs/integrations/providers/ctransformers.mdx b/docs/docs/integrations/providers/ctransformers.mdx index a787a7ec2f..09414a8fe7 100644 --- a/docs/docs/integrations/providers/ctransformers.mdx +++ b/docs/docs/integrations/providers/ctransformers.mdx @@ -23,7 +23,7 @@ It provides a unified interface for all models: ```python llm = CTransformers(model='/path/to/ggml-gpt-2.bin', model_type='gpt2') -print(llm('AI is going to')) +print(llm.invoke('AI is going to')) ``` If you are getting `illegal instruction` error, try using `lib='avx'` or `lib='basic'`: diff --git a/docs/docs/integrations/providers/deepsparse.mdx b/docs/docs/integrations/providers/deepsparse.mdx index 879b07c55c..562d9e3e76 100644 --- a/docs/docs/integrations/providers/deepsparse.mdx +++ b/docs/docs/integrations/providers/deepsparse.mdx @@ -22,7 +22,7 @@ It provides a unified interface for all models: ```python llm = DeepSparse(model='zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none') -print(llm('def fib():')) +print(llm.invoke('def fib():')) ``` Additional parameters can be passed using the `config` parameter: diff --git a/docs/docs/integrations/providers/flyte.mdx b/docs/docs/integrations/providers/flyte.mdx index c37fe96c65..5fe20d8965 100644 --- a/docs/docs/integrations/providers/flyte.mdx +++ b/docs/docs/integrations/providers/flyte.mdx @@ -83,7 +83,7 @@ def langchain_llm() -> str: temperature=0.2, callbacks=[FlyteCallbackHandler()], ) - return llm([HumanMessage(content="Tell me a joke")]).content + return llm.invoke([HumanMessage(content="Tell me a joke")]).content ``` ### Chain diff --git a/docs/docs/integrations/providers/gpt4all.mdx b/docs/docs/integrations/providers/gpt4all.mdx index 1050015961..231fae5f67 100644 --- a/docs/docs/integrations/providers/gpt4all.mdx +++ b/docs/docs/integrations/providers/gpt4all.mdx @@ -27,7 +27,7 @@ from langchain_community.llms import GPT4All model = GPT4All(model="./models/mistral-7b-openorca.Q4_0.gguf", n_threads=8) # Generate text -response = model("Once upon a time, ") +response = model.invoke("Once upon a time, ") ``` You can also customize the generation parameters, such as n_predict, temp, top_p, top_k, and others. 
diff --git a/docs/docs/integrations/providers/helicone.mdx b/docs/docs/integrations/providers/helicone.mdx index 548088d079..9f2898870b 100644 --- a/docs/docs/integrations/providers/helicone.mdx +++ b/docs/docs/integrations/providers/helicone.mdx @@ -29,7 +29,7 @@ openai.api_base = "https://oai.hconeai.com/v1" llm = OpenAI(temperature=0.9, headers={"Helicone-Cache-Enabled": "true"}) text = "What is a helicone?" -print(llm(text)) +print(llm.invoke(text)) ``` [Helicone caching docs](https://docs.helicone.ai/advanced-usage/caching) @@ -47,7 +47,7 @@ llm = OpenAI(temperature=0.9, headers={ "Helicone-Property-App": "mobile", }) text = "What is a helicone?" -print(llm(text)) +print(llm.invoke(text)) ``` [Helicone property docs](https://docs.helicone.ai/advanced-usage/custom-properties) diff --git a/docs/docs/integrations/providers/konko.mdx b/docs/docs/integrations/providers/konko.mdx index 47f3f088f8..88b612dbc0 100644 --- a/docs/docs/integrations/providers/konko.mdx +++ b/docs/docs/integrations/providers/konko.mdx @@ -44,7 +44,7 @@ See a usage [example](/docs/integrations/llms/konko). from langchain.llms import Konko llm = Konko(max_tokens=800, model='mistralai/Mistral-7B-v0.1') prompt = "Generate a Product Description for Apple Iphone 15" - response = llm(prompt) + response = llm.invoke(prompt) ``` ## Chat Models diff --git a/docs/docs/integrations/providers/predibase.md b/docs/docs/integrations/providers/predibase.md index 8f27b818f4..7ba380d133 100644 --- a/docs/docs/integrations/providers/predibase.md +++ b/docs/docs/integrations/providers/predibase.md @@ -23,7 +23,7 @@ model = Predibase( predibase_sdk_version=None, # optional parameter (defaults to the latest Predibase SDK version if omitted) ) -response = model("Can you recommend me a nice dry wine?") +response = model.invoke("Can you recommend me a nice dry wine?") print(response) ``` @@ -44,7 +44,7 @@ model = Predibase( adapter_version=1, ) -response = model("Can you recommend me a nice dry wine?") +response = model.invoke("Can you recommend me a nice dry wine?") print(response) ``` @@ -64,6 +64,6 @@ model = Predibase( adapter_id="predibase/e2e_nlg", ) -response = model("Can you recommend me a nice dry wine?") +response = model.invoke("Can you recommend me a nice dry wine?") print(response) ``` diff --git a/docs/docs/integrations/providers/rwkv.mdx b/docs/docs/integrations/providers/rwkv.mdx index 2b5f827aa2..90a795a420 100644 --- a/docs/docs/integrations/providers/rwkv.mdx +++ b/docs/docs/integrations/providers/rwkv.mdx @@ -44,7 +44,7 @@ def generate_prompt(instruction, input=None): model = RWKV(model="./models/RWKV-4-Raven-3B-v7-Eng-20230404-ctx4096.pth", strategy="cpu fp32", tokens_path="./rwkv/20B_tokenizer.json") -response = model(generate_prompt("Once upon a time, ")) +response = model.invoke(generate_prompt("Once upon a time, ")) ``` ## Model File diff --git a/docs/docs/use_cases/data_generation.ipynb b/docs/docs/use_cases/data_generation.ipynb index f96737b2de..208af24bd0 100644 --- a/docs/docs/use_cases/data_generation.ipynb +++ b/docs/docs/use_cases/data_generation.ipynb @@ -545,7 +545,7 @@ ")\n", "\n", "_input = prompt.format_prompt(text=dataset[0][\"text\"])\n", - "output = llm(_input.to_string())\n", + "output = llm.invoke(_input.to_string())\n", "\n", "parsed = parser.parse(output)\n", "parsed" diff --git a/libs/community/langchain_community/cache.py b/libs/community/langchain_community/cache.py index 7002fead09..9343b8cc57 100644 --- a/libs/community/langchain_community/cache.py +++ 
b/libs/community/langchain_community/cache.py @@ -1115,7 +1115,8 @@ class CassandraCache(BaseCache): ) -> None: """ A wrapper around `delete` with the LLM being passed. - In case the llm(prompt) calls have a `stop` param, you should pass it here + In case the llm.invoke(prompt) calls have a `stop` param, you should + pass it here """ llm_string = get_prompts( {**llm.dict(), **{"stop": stop}}, @@ -1505,7 +1506,8 @@ class AstraDBCache(BaseCache): ) -> None: """ A wrapper around `delete` with the LLM being passed. - In case the llm(prompt) calls have a `stop` param, you should pass it here + In case the llm.invoke(prompt) calls have a `stop` param, you should + pass it here """ llm_string = get_prompts( {**llm.dict(), **{"stop": stop}}, @@ -1518,7 +1520,8 @@ class AstraDBCache(BaseCache): ) -> None: """ A wrapper around `adelete` with the LLM being passed. - In case the llm(prompt) calls have a `stop` param, you should pass it here + In case the llm.invoke(prompt) calls have a `stop` param, you should + pass it here """ llm_string = ( await aget_prompts( diff --git a/libs/community/langchain_community/callbacks/context_callback.py b/libs/community/langchain_community/callbacks/context_callback.py index 8514976687..be01cb3dbb 100644 --- a/libs/community/langchain_community/callbacks/context_callback.py +++ b/libs/community/langchain_community/callbacks/context_callback.py @@ -58,7 +58,7 @@ class ContextCallbackHandler(BaseCallbackHandler): ... SystemMessage(content="You translate English to French."), ... HumanMessage(content="I love programming with LangChain."), ... ] - >>> chat(messages) + >>> chat.invoke(messages) Chain Example: >>> from langchain.chains import LLMChain diff --git a/libs/community/langchain_community/chat_models/azureml_endpoint.py b/libs/community/langchain_community/chat_models/azureml_endpoint.py index e2ea9d775c..91947f97ff 100644 --- a/libs/community/langchain_community/chat_models/azureml_endpoint.py +++ b/libs/community/langchain_community/chat_models/azureml_endpoint.py @@ -263,7 +263,7 @@ class AzureMLChatOnlineEndpoint(BaseChatModel, AzureMLBaseEndpoint): The string generated by the model. Example: .. code-block:: python - response = azureml_model("Tell me a joke.") + response = azureml_model.invoke("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} _model_kwargs.update(kwargs) diff --git a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py index aa80fd7fe0..8a7afa4ff2 100644 --- a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py +++ b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py @@ -250,7 +250,7 @@ class QianfanChatEndpoint(BaseChatModel): Example: .. 
code-block:: python - response = qianfan_model("Tell me a joke.") + response = qianfan_model.invoke("Tell me a joke.") """ if self.streaming: completion = "" diff --git a/libs/community/langchain_community/llms/aleph_alpha.py b/libs/community/langchain_community/llms/aleph_alpha.py index be24b919a9..48ea552c66 100644 --- a/libs/community/langchain_community/llms/aleph_alpha.py +++ b/libs/community/langchain_community/llms/aleph_alpha.py @@ -284,4 +284,4 @@ class AlephAlpha(LLM): if __name__ == "__main__": aa = AlephAlpha() - print(aa("How are you?")) # noqa: T201 + print(aa.invoke("How are you?")) # noqa: T201 diff --git a/libs/community/langchain_community/llms/anthropic.py b/libs/community/langchain_community/llms/anthropic.py index 13d8ff81e7..05a2582f1c 100644 --- a/libs/community/langchain_community/llms/anthropic.py +++ b/libs/community/langchain_community/llms/anthropic.py @@ -170,13 +170,13 @@ class Anthropic(LLM, _AnthropicCommon): # Simplest invocation, automatically wrapped with HUMAN_PROMPT # and AI_PROMPT. - response = model("What are the biggest risks facing humanity?") + response = model.invoke("What are the biggest risks facing humanity?") # Or if you want to use the chat mode, build a few-shot-prompt, or # put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT: raw_prompt = "What are the biggest risks facing humanity?" prompt = f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}" - response = model(prompt) + response = model.invoke(prompt) """ class Config: @@ -236,7 +236,7 @@ class Anthropic(LLM, _AnthropicCommon): prompt = "What are the biggest risks facing humanity?" prompt = f"\n\nHuman: {prompt}\n\nAssistant:" - response = model(prompt) + response = model.invoke(prompt) """ if self.streaming: diff --git a/libs/community/langchain_community/llms/anyscale.py b/libs/community/langchain_community/llms/anyscale.py index 1d0dd45a4a..fc9a4b0650 100644 --- a/libs/community/langchain_community/llms/anyscale.py +++ b/libs/community/langchain_community/llms/anyscale.py @@ -75,7 +75,7 @@ class Anyscale(BaseOpenAI): # To leverage Ray for parallel processing @ray.remote(num_cpus=1) def send_query(llm, text): - resp = llm(text) + resp = llm.invoke(text) return resp futures = [send_query.remote(anyscalellm, text) for text in texts] results = ray.get(futures) diff --git a/libs/community/langchain_community/llms/azureml_endpoint.py b/libs/community/langchain_community/llms/azureml_endpoint.py index 8b9dcc43fe..925c43661d 100644 --- a/libs/community/langchain_community/llms/azureml_endpoint.py +++ b/libs/community/langchain_community/llms/azureml_endpoint.py @@ -528,7 +528,7 @@ class AzureMLOnlineEndpoint(BaseLLM, AzureMLBaseEndpoint): The string generated by the model. Example: .. code-block:: python - response = azureml_model("Tell me a joke.") + response = azureml_model.invoke("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} _model_kwargs.update(kwargs) diff --git a/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py b/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py index 1a611d2656..b92b0cd5f6 100644 --- a/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py +++ b/libs/community/langchain_community/llms/baidu_qianfan_endpoint.py @@ -172,7 +172,7 @@ class QianfanLLMEndpoint(LLM): Example: .. 
code-block:: python - response = qianfan_model("Tell me a joke.") + response = qianfan_model.invoke("Tell me a joke.") """ if self.streaming: completion = "" diff --git a/libs/community/langchain_community/llms/bedrock.py b/libs/community/langchain_community/llms/bedrock.py index cfbc393b00..796169e108 100644 --- a/libs/community/langchain_community/llms/bedrock.py +++ b/libs/community/langchain_community/llms/bedrock.py @@ -829,7 +829,7 @@ class Bedrock(LLM, BedrockBase): Example: .. code-block:: python - response = llm("Tell me a joke.") + response = llm.invoke("Tell me a joke.") """ if self.streaming: diff --git a/libs/community/langchain_community/llms/chatglm.py b/libs/community/langchain_community/llms/chatglm.py index bdd5594660..c98ea1c2b1 100644 --- a/libs/community/langchain_community/llms/chatglm.py +++ b/libs/community/langchain_community/llms/chatglm.py @@ -72,7 +72,7 @@ class ChatGLM(LLM): Example: .. code-block:: python - response = chatglm_llm("Who are you?") + response = chatglm_llm.invoke("Who are you?") """ _model_kwargs = self.model_kwargs or {} diff --git a/libs/community/langchain_community/llms/chatglm3.py b/libs/community/langchain_community/llms/chatglm3.py index 0582fc58f0..aa3255a50f 100644 --- a/libs/community/langchain_community/llms/chatglm3.py +++ b/libs/community/langchain_community/llms/chatglm3.py @@ -106,7 +106,7 @@ class ChatGLM3(LLM): Example: .. code-block:: python - response = chatglm_llm("Who are you?") + response = chatglm_llm.invoke("Who are you?") """ import httpx diff --git a/libs/community/langchain_community/llms/clarifai.py b/libs/community/langchain_community/llms/clarifai.py index 78a9115d6b..8a3c9b0421 100644 --- a/libs/community/langchain_community/llms/clarifai.py +++ b/libs/community/langchain_community/llms/clarifai.py @@ -128,7 +128,7 @@ class Clarifai(LLM): Example: .. code-block:: python - response = clarifai_llm("Tell me a joke.") + response = clarifai_llm.invoke("Tell me a joke.") """ try: diff --git a/libs/community/langchain_community/llms/ctransformers.py b/libs/community/langchain_community/llms/ctransformers.py index b532b1585c..95c13ae487 100644 --- a/libs/community/langchain_community/llms/ctransformers.py +++ b/libs/community/langchain_community/llms/ctransformers.py @@ -97,7 +97,7 @@ class CTransformers(LLM): Example: .. code-block:: python - response = llm("Tell me a joke.") + response = llm.invoke("Tell me a joke.") """ text = [] _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager() @@ -125,7 +125,7 @@ class CTransformers(LLM): Example: .. code-block:: python - response = llm("Once upon a time, ") + response = llm.invoke("Once upon a time, ") """ text_callback = None if run_manager: diff --git a/libs/community/langchain_community/llms/deepsparse.py b/libs/community/langchain_community/llms/deepsparse.py index c82f022e27..5560edafd4 100644 --- a/libs/community/langchain_community/llms/deepsparse.py +++ b/libs/community/langchain_community/llms/deepsparse.py @@ -92,7 +92,7 @@ class DeepSparse(LLM): .. code-block:: python from langchain_community.llms import DeepSparse llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none") - llm("Tell me a joke.") + llm.invoke("Tell me a joke.") """ if self.streaming: combined_output = "" @@ -130,7 +130,7 @@ class DeepSparse(LLM): .. 
code-block:: python from langchain_community.llms import DeepSparse llm = DeepSparse(model="zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base_quant-none") - llm("Tell me a joke.") + llm.invoke("Tell me a joke.") """ if self.streaming: combined_output = "" diff --git a/libs/community/langchain_community/llms/gpt4all.py b/libs/community/langchain_community/llms/gpt4all.py index 8b347ceb5c..7824b00aa5 100644 --- a/libs/community/langchain_community/llms/gpt4all.py +++ b/libs/community/langchain_community/llms/gpt4all.py @@ -21,7 +21,7 @@ class GPT4All(LLM): model = GPT4All(model="./models/gpt4all-model.bin", n_threads=8) # Simplest invocation - response = model("Once upon a time, ") + response = model.invoke("Once upon a time, ") """ model: str @@ -197,7 +197,7 @@ class GPT4All(LLM): .. code-block:: python prompt = "Once upon a time, " - response = model(prompt, n_predict=55) + response = model.invoke(prompt, n_predict=55) """ text_callback = None if run_manager: diff --git a/libs/community/langchain_community/llms/huggingface_endpoint.py b/libs/community/langchain_community/llms/huggingface_endpoint.py index 290b2da963..3239414bc4 100644 --- a/libs/community/langchain_community/llms/huggingface_endpoint.py +++ b/libs/community/langchain_community/llms/huggingface_endpoint.py @@ -43,7 +43,7 @@ class HuggingFaceEndpoint(LLM): repetition_penalty=1.03, huggingfacehub_api_token="my-api-key" ) - print(llm("What is Deep Learning?")) + print(llm.invoke("What is Deep Learning?")) # Streaming response example from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler @@ -61,7 +61,7 @@ class HuggingFaceEndpoint(LLM): streaming=True, huggingfacehub_api_token="my-api-key" ) - print(llm("What is Deep Learning?")) + print(llm.invoke("What is Deep Learning?")) """ # noqa: E501 diff --git a/libs/community/langchain_community/llms/huggingface_text_gen_inference.py b/libs/community/langchain_community/llms/huggingface_text_gen_inference.py index e053d17278..17aadec2df 100644 --- a/libs/community/langchain_community/llms/huggingface_text_gen_inference.py +++ b/libs/community/langchain_community/llms/huggingface_text_gen_inference.py @@ -36,7 +36,7 @@ class HuggingFaceTextGenInference(LLM): temperature=0.01, repetition_penalty=1.03, ) - print(llm("What is Deep Learning?")) # noqa: T201 + print(llm.invoke("What is Deep Learning?")) # noqa: T201 # Streaming response example from langchain_community.callbacks import streaming_stdout @@ -53,7 +53,7 @@ class HuggingFaceTextGenInference(LLM): callbacks=callbacks, streaming=True ) - print(llm("What is Deep Learning?")) # noqa: T201 + print(llm.invoke("What is Deep Learning?")) # noqa: T201 """ diff --git a/libs/community/langchain_community/llms/koboldai.py b/libs/community/langchain_community/llms/koboldai.py index ad12175538..837dd306ca 100644 --- a/libs/community/langchain_community/llms/koboldai.py +++ b/libs/community/langchain_community/llms/koboldai.py @@ -147,7 +147,7 @@ class KoboldApiLLM(LLM): from langchain_community.llms import KoboldApiLLM llm = KoboldApiLLM(endpoint="http://localhost:5000") - llm("Write a story about dragons.") + llm.invoke("Write a story about dragons.") """ data: Dict[str, Any] = { "prompt": prompt, diff --git a/libs/community/langchain_community/llms/llamacpp.py b/libs/community/langchain_community/llms/llamacpp.py index b06e6d8cf7..39e58093a6 100644 --- a/libs/community/langchain_community/llms/llamacpp.py +++ b/libs/community/langchain_community/llms/llamacpp.py @@ 
-278,7 +278,7 @@ class LlamaCpp(LLM): from langchain_community.llms import LlamaCpp llm = LlamaCpp(model_path="/path/to/local/llama/model.bin") - llm("This is a prompt.") + llm.invoke("This is a prompt.") """ if self.streaming: # If streaming is enabled, we use the stream diff --git a/libs/community/langchain_community/llms/mosaicml.py b/libs/community/langchain_community/llms/mosaicml.py index b73e0d5e21..f7fcba0c7e 100644 --- a/libs/community/langchain_community/llms/mosaicml.py +++ b/libs/community/langchain_community/llms/mosaicml.py @@ -115,7 +115,7 @@ class MosaicML(LLM): Example: .. code-block:: python - response = mosaic_llm("Tell me a joke.") + response = mosaic_llm.invoke("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} diff --git a/libs/community/langchain_community/llms/opaqueprompts.py b/libs/community/langchain_community/llms/opaqueprompts.py index 0fbc801aae..67bd5b6272 100644 --- a/libs/community/langchain_community/llms/opaqueprompts.py +++ b/libs/community/langchain_community/llms/opaqueprompts.py @@ -84,7 +84,7 @@ class OpaquePrompts(LLM): Example: .. code-block:: python - response = op_llm("Tell me a joke.") + response = op_llm.invoke("Tell me a joke.") """ import opaqueprompts as op diff --git a/libs/community/langchain_community/llms/openllm.py b/libs/community/langchain_community/llms/openllm.py index 5d43e7ea26..746e12b64e 100644 --- a/libs/community/langchain_community/llms/openllm.py +++ b/libs/community/langchain_community/llms/openllm.py @@ -63,7 +63,7 @@ class OpenLLM(LLM): model_name='flan-t5', model_id='google/flan-t5-large', ) - llm("What is the difference between a duck and a goose?") + llm.invoke("What is the difference between a duck and a goose?") For all available supported models, you can run 'openllm models'. diff --git a/libs/community/langchain_community/llms/predictionguard.py b/libs/community/langchain_community/llms/predictionguard.py index 51291500ca..62115509cb 100644 --- a/libs/community/langchain_community/llms/predictionguard.py +++ b/libs/community/langchain_community/llms/predictionguard.py @@ -100,7 +100,7 @@ class PredictionGuard(LLM): The string generated by the model. Example: .. code-block:: python - response = pgllm("Tell me a joke.") + response = pgllm.invoke("Tell me a joke.") """ import predictionguard as pg diff --git a/libs/community/langchain_community/llms/rwkv.py b/libs/community/langchain_community/llms/rwkv.py index 470c200537..681fc02c6d 100644 --- a/libs/community/langchain_community/llms/rwkv.py +++ b/libs/community/langchain_community/llms/rwkv.py @@ -25,7 +25,7 @@ class RWKV(LLM, BaseModel): model = RWKV(model="./models/rwkv-3b-fp16.bin", strategy="cpu fp32") # Simplest invocation - response = model("Once upon a time, ") + response = model.invoke("Once upon a time, ") """ model: str @@ -225,7 +225,7 @@ class RWKV(LLM, BaseModel): .. 
code-block:: python prompt = "Once upon a time, " - response = model(prompt, n_predict=55) + response = model.invoke(prompt, n_predict=55) """ text = self.rwkv_generate(prompt) diff --git a/libs/community/langchain_community/llms/textgen.py b/libs/community/langchain_community/llms/textgen.py index 9d1ce10833..9a752f844a 100644 --- a/libs/community/langchain_community/llms/textgen.py +++ b/libs/community/langchain_community/llms/textgen.py @@ -199,7 +199,7 @@ class TextGen(LLM): from langchain_community.llms import TextGen llm = TextGen(model_url="http://localhost:5000") - llm("Write a story about llamas.") + llm.invoke("Write a story about llamas.") """ if self.streaming: combined_text_output = "" @@ -245,7 +245,7 @@ class TextGen(LLM): from langchain_community.llms import TextGen llm = TextGen(model_url="http://localhost:5000") - llm("Write a story about llamas.") + llm.invoke("Write a story about llamas.") """ if self.streaming: combined_text_output = "" diff --git a/libs/community/langchain_community/llms/watsonxllm.py b/libs/community/langchain_community/llms/watsonxllm.py index 191574e094..b18f224cd9 100644 --- a/libs/community/langchain_community/llms/watsonxllm.py +++ b/libs/community/langchain_community/llms/watsonxllm.py @@ -320,7 +320,7 @@ class WatsonxLLM(BaseLLM): Example: .. code-block:: python - response = watsonx_llm("What is a molecule") + response = watsonx_llm.invoke("What is a molecule") """ result = self._generate( prompts=[prompt], stop=stop, run_manager=run_manager, **kwargs diff --git a/libs/community/langchain_community/llms/weight_only_quantization.py b/libs/community/langchain_community/llms/weight_only_quantization.py index 5480fbe036..bf5e967fb2 100644 --- a/libs/community/langchain_community/llms/weight_only_quantization.py +++ b/libs/community/langchain_community/llms/weight_only_quantization.py @@ -222,7 +222,7 @@ class WeightOnlyQuantPipeline(LLM): model_id="google/flan-t5-large", task="text2text-generation", ) - llm("This is a prompt.") + llm.invoke("This is a prompt.") """ response = self.pipeline(prompt) if self.pipeline.task == "text-generation": diff --git a/libs/community/langchain_community/llms/xinference.py b/libs/community/langchain_community/llms/xinference.py index cc18bf31a9..4828132e6a 100644 --- a/libs/community/langchain_community/llms/xinference.py +++ b/libs/community/langchain_community/llms/xinference.py @@ -62,7 +62,7 @@ class Xinference(LLM): model_uid = {model_uid} # replace model_uid with the model UID return from launching the model ) - llm( + llm.invoke( prompt="Q: where can we visit in the capital of France? A:", generate_config={"max_tokens": 1024, "stream": True}, ) diff --git a/libs/community/langchain_community/llms/yuan2.py b/libs/community/langchain_community/llms/yuan2.py index 0c345f9299..4373ba622e 100644 --- a/libs/community/langchain_community/llms/yuan2.py +++ b/libs/community/langchain_community/llms/yuan2.py @@ -26,7 +26,7 @@ class Yuan2(LLM): top_k=40, ) print(yuan_llm) - print(yuan_llm("你是谁?")) + print(yuan_llm.invoke("你是谁?")) """ infer_api: str = "http://127.0.0.1:8000/yuan" @@ -137,7 +137,7 @@ class Yuan2(LLM): Example: .. 
code-block:: python - response = yuan_llm("你能做什么?") + response = yuan_llm.invoke("你能做什么?") """ if self.use_history: diff --git a/libs/community/tests/integration_tests/callbacks/test_openai_callback.py b/libs/community/tests/integration_tests/callbacks/test_openai_callback.py index 100021e746..77e18281ce 100644 --- a/libs/community/tests/integration_tests/callbacks/test_openai_callback.py +++ b/libs/community/tests/integration_tests/callbacks/test_openai_callback.py @@ -9,14 +9,14 @@ from langchain_community.llms import OpenAI async def test_openai_callback() -> None: llm = OpenAI(temperature=0) with get_openai_callback() as cb: - llm("What is the square root of 4?") + llm.invoke("What is the square root of 4?") total_tokens = cb.total_tokens assert total_tokens > 0 with get_openai_callback() as cb: - llm("What is the square root of 4?") - llm("What is the square root of 4?") + llm.invoke("What is the square root of 4?") + llm.invoke("What is the square root of 4?") assert cb.total_tokens == total_tokens * 2 @@ -44,8 +44,8 @@ def test_openai_callback_batch_llm() -> None: total_tokens = cb.total_tokens with get_openai_callback() as cb: - llm("What is the square root of 4?") - llm("What is the square root of 4?") + llm.invoke("What is the square root of 4?") + llm.invoke("What is the square root of 4?") assert cb.total_tokens == total_tokens diff --git a/libs/community/tests/integration_tests/chat_models/test_anthropic.py b/libs/community/tests/integration_tests/chat_models/test_anthropic.py index a1ddc14ead..c01b5ff298 100644 --- a/libs/community/tests/integration_tests/chat_models/test_anthropic.py +++ b/libs/community/tests/integration_tests/chat_models/test_anthropic.py @@ -17,7 +17,7 @@ def test_anthropic_call() -> None: """Test valid call to anthropic.""" chat = ChatAnthropic(model="test") message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -44,7 +44,7 @@ def test_anthropic_streaming() -> None: """Test streaming tokens from anthropic.""" chat = ChatAnthropic(model="test", streaming=True) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -61,7 +61,7 @@ def test_anthropic_streaming_callback() -> None: verbose=True, ) message = HumanMessage(content="Write me a sentence with 10 words.") - chat([message]) + chat.invoke([message]) assert callback_handler.llm_streams > 1 diff --git a/libs/community/tests/integration_tests/chat_models/test_azure_openai.py b/libs/community/tests/integration_tests/chat_models/test_azure_openai.py index 23812775c0..7d3c3cfe8c 100644 --- a/libs/community/tests/integration_tests/chat_models/test_azure_openai.py +++ b/libs/community/tests/integration_tests/chat_models/test_azure_openai.py @@ -40,7 +40,7 @@ def llm() -> AzureChatOpenAI: def test_chat_openai(llm: AzureChatOpenAI) -> None: """Test AzureChatOpenAI wrapper.""" message = HumanMessage(content="Hello") - response = llm([message]) + response = llm.invoke([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -87,7 +87,7 @@ def test_chat_openai_streaming() -> None: verbose=True, ) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage) diff --git 
a/libs/community/tests/integration_tests/chat_models/test_baichuan.py b/libs/community/tests/integration_tests/chat_models/test_baichuan.py index b1322435f3..a44e0c94db 100644 --- a/libs/community/tests/integration_tests/chat_models/test_baichuan.py +++ b/libs/community/tests/integration_tests/chat_models/test_baichuan.py @@ -9,7 +9,7 @@ from langchain_community.chat_models.baichuan import ChatBaichuan def test_chat_baichuan_default() -> None: chat = ChatBaichuan(streaming=True) message = HumanMessage(content="请完整背诵将进酒,背诵5遍") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -17,7 +17,7 @@ def test_chat_baichuan_default() -> None: def test_chat_baichuan_default_non_streaming() -> None: chat = ChatBaichuan() message = HumanMessage(content="请完整背诵将进酒,背诵5遍") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -25,7 +25,7 @@ def test_chat_baichuan_default_non_streaming() -> None: def test_chat_baichuan_turbo() -> None: chat = ChatBaichuan(model="Baichuan2-Turbo", streaming=True) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -33,7 +33,7 @@ def test_chat_baichuan_turbo() -> None: def test_chat_baichuan_turbo_non_streaming() -> None: chat = ChatBaichuan(model="Baichuan2-Turbo") message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -41,7 +41,7 @@ def test_chat_baichuan_turbo_non_streaming() -> None: def test_chat_baichuan_with_temperature() -> None: chat = ChatBaichuan(temperature=1.0) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -49,7 +49,9 @@ def test_chat_baichuan_with_temperature() -> None: def test_chat_baichuan_with_kwargs() -> None: chat = ChatBaichuan() message = HumanMessage(content="百川192K API是什么时候上线的?") - response = chat([message], temperature=0.88, top_p=0.7, with_search_enhance=True) + response = chat.invoke( + [message], temperature=0.88, top_p=0.7, with_search_enhance=True + ) print(response) # noqa: T201 assert isinstance(response, AIMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_bedrock.py b/libs/community/tests/integration_tests/chat_models/test_bedrock.py index f90ef8937f..d1d5399bb8 100644 --- a/libs/community/tests/integration_tests/chat_models/test_bedrock.py +++ b/libs/community/tests/integration_tests/chat_models/test_bedrock.py @@ -25,7 +25,7 @@ def test_chat_bedrock(chat: BedrockChat) -> None: """Test BedrockChat wrapper.""" system = SystemMessage(content="You are a helpful assistant.") human = HumanMessage(content="Hello") - response = chat([system, human]) + response = chat.invoke([system, human]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -70,7 +70,7 @@ def test_chat_bedrock_streaming() -> None: verbose=True, ) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage) diff --git 
a/libs/community/tests/integration_tests/chat_models/test_dappier.py b/libs/community/tests/integration_tests/chat_models/test_dappier.py index 2d5cd0f674..2fcfe1f68e 100644 --- a/libs/community/tests/integration_tests/chat_models/test_dappier.py +++ b/libs/community/tests/integration_tests/chat_models/test_dappier.py @@ -17,7 +17,7 @@ def test_dappier_chat() -> None: dappier_model="dm_01hpsxyfm2fwdt2zet9cg6fdxt", ) message = HumanMessage(content="Who are you ?") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_edenai.py b/libs/community/tests/integration_tests/chat_models/test_edenai.py index 37ddac803f..e699da6ef2 100644 --- a/libs/community/tests/integration_tests/chat_models/test_edenai.py +++ b/libs/community/tests/integration_tests/chat_models/test_edenai.py @@ -17,7 +17,7 @@ def test_chat_edenai() -> None: provider="openai", model="gpt-3.5-turbo", temperature=0, max_tokens=1000 ) message = HumanMessage(content="Who are you ?") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_ernie.py b/libs/community/tests/integration_tests/chat_models/test_ernie.py index 4d472fdd9e..892042f584 100644 --- a/libs/community/tests/integration_tests/chat_models/test_ernie.py +++ b/libs/community/tests/integration_tests/chat_models/test_ernie.py @@ -7,7 +7,7 @@ from langchain_community.chat_models.ernie import ErnieBotChat def test_chat_ernie_bot() -> None: chat = ErnieBotChat() message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -15,7 +15,7 @@ def test_chat_ernie_bot() -> None: def test_chat_ernie_bot_with_model_name() -> None: chat = ErnieBotChat(model_name="ERNIE-Bot") message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -23,7 +23,7 @@ def test_chat_ernie_bot_with_model_name() -> None: def test_chat_ernie_bot_with_temperature() -> None: chat = ErnieBotChat(model_name="ERNIE-Bot", temperature=1.0) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -31,7 +31,7 @@ def test_chat_ernie_bot_with_temperature() -> None: def test_chat_ernie_bot_with_kwargs() -> None: chat = ErnieBotChat() message = HumanMessage(content="Hello") - response = chat([message], temperature=0.88, top_p=0.7) + response = chat.invoke([message], temperature=0.88, top_p=0.7) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -46,7 +46,7 @@ def test_wrong_temperature_1() -> None: chat = ErnieBotChat() message = HumanMessage(content="Hello") with pytest.raises(ValueError) as e: - chat([message], temperature=1.2) + chat.invoke([message], temperature=1.2) assert "parameter check failed, temperature range is (0, 1.0]" in str(e) @@ -54,5 +54,5 @@ def test_wrong_temperature_2() -> None: chat = ErnieBotChat() message = HumanMessage(content="Hello") with pytest.raises(ValueError) as e: - chat([message], temperature=0) + chat.invoke([message], temperature=0) assert "parameter check failed, 
temperature range is (0, 1.0]" in str(e) diff --git a/libs/community/tests/integration_tests/chat_models/test_fireworks.py b/libs/community/tests/integration_tests/chat_models/test_fireworks.py index 9ab943be87..e753e30d4e 100644 --- a/libs/community/tests/integration_tests/chat_models/test_fireworks.py +++ b/libs/community/tests/integration_tests/chat_models/test_fireworks.py @@ -21,7 +21,7 @@ def chat() -> ChatFireworks: def test_chat_fireworks(chat: ChatFireworks) -> None: """Test ChatFireworks wrapper.""" message = HumanMessage(content="What is the weather in Redwood City, CA today") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -38,7 +38,7 @@ def test_chat_fireworks_system_message(chat: ChatFireworks) -> None: """Test ChatFireworks wrapper with system message.""" system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") - response = chat([system_message, human_message]) + response = chat.invoke([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_friendli.py b/libs/community/tests/integration_tests/chat_models/test_friendli.py index ab9646c1eb..c9e5bd0d38 100644 --- a/libs/community/tests/integration_tests/chat_models/test_friendli.py +++ b/libs/community/tests/integration_tests/chat_models/test_friendli.py @@ -15,14 +15,6 @@ def friendli_chat() -> ChatFriendli: return ChatFriendli(temperature=0, max_tokens=10) -def test_friendli_call(friendli_chat: ChatFriendli) -> None: - """Test call.""" - message = HumanMessage(content="What is generative AI?") - output = friendli_chat([message]) - assert isinstance(output, AIMessage) - assert isinstance(output.content, str) - - def test_friendli_invoke(friendli_chat: ChatFriendli) -> None: """Test invoke.""" output = friendli_chat.invoke("What is generative AI?") diff --git a/libs/community/tests/integration_tests/chat_models/test_google_palm.py b/libs/community/tests/integration_tests/chat_models/test_google_palm.py index 6bb0b4db47..11cf35877c 100644 --- a/libs/community/tests/integration_tests/chat_models/test_google_palm.py +++ b/libs/community/tests/integration_tests/chat_models/test_google_palm.py @@ -14,7 +14,7 @@ def test_chat_google_palm() -> None: """Test Google PaLM Chat API wrapper.""" chat = ChatGooglePalm() message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -24,7 +24,7 @@ def test_chat_google_palm_system_message() -> None: chat = ChatGooglePalm() system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") - response = chat([system_message, human_message]) + response = chat.invoke([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_gpt_router.py b/libs/community/tests/integration_tests/chat_models/test_gpt_router.py index f6cc3b6cb7..d98ba6fe4e 100644 --- a/libs/community/tests/integration_tests/chat_models/test_gpt_router.py +++ b/libs/community/tests/integration_tests/chat_models/test_gpt_router.py @@ -43,7 +43,7 @@ def test_gpt_router_call() -> None: ) chat = GPTRouter(models_priority_list=[anthropic_claude]) message 
= HumanMessage(content="Hello World") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -56,7 +56,7 @@ def test_gpt_router_call_incorrect_model() -> None: chat = GPTRouter(models_priority_list=[anthropic_claude]) message = HumanMessage(content="Hello World") with pytest.raises(Exception): - chat([message]) + chat.invoke([message]) def test_gpt_router_generate() -> None: @@ -85,7 +85,7 @@ def test_gpt_router_streaming() -> None: ) chat = GPTRouter(models_priority_list=[anthropic_claude], streaming=True) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -104,5 +104,5 @@ def test_gpt_router_streaming_callback() -> None: verbose=True, ) message = HumanMessage(content="Write me a 5 line poem.") - chat([message]) + chat.invoke([message]) assert callback_handler.llm_streams > 1 diff --git a/libs/community/tests/integration_tests/chat_models/test_hunyuan.py b/libs/community/tests/integration_tests/chat_models/test_hunyuan.py index 0901cc4cf6..f3972391a4 100644 --- a/libs/community/tests/integration_tests/chat_models/test_hunyuan.py +++ b/libs/community/tests/integration_tests/chat_models/test_hunyuan.py @@ -6,7 +6,7 @@ from langchain_community.chat_models.hunyuan import ChatHunyuan def test_chat_hunyuan() -> None: chat = ChatHunyuan() message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -14,7 +14,7 @@ def test_chat_hunyuan() -> None: def test_chat_hunyuan_with_temperature() -> None: chat = ChatHunyuan(temperature=0.6) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_jinachat.py b/libs/community/tests/integration_tests/chat_models/test_jinachat.py index 3a62cbc05f..0865cfb0f1 100644 --- a/libs/community/tests/integration_tests/chat_models/test_jinachat.py +++ b/libs/community/tests/integration_tests/chat_models/test_jinachat.py @@ -51,7 +51,7 @@ def test_jinachat() -> None: """Test JinaChat wrapper.""" chat = JinaChat(max_tokens=10) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -61,7 +61,7 @@ def test_jinachat_system_message() -> None: chat = JinaChat(max_tokens=10) system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") - response = chat([system_message, human_message]) + response = chat.invoke([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -93,7 +93,7 @@ def test_jinachat_streaming() -> None: verbose=True, ) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage) diff --git a/libs/community/tests/integration_tests/chat_models/test_konko.py b/libs/community/tests/integration_tests/chat_models/test_konko.py index 79feaba4a3..94f1b652b3 100644 --- a/libs/community/tests/integration_tests/chat_models/test_konko.py +++ 
b/libs/community/tests/integration_tests/chat_models/test_konko.py @@ -57,7 +57,7 @@ def test_konko_chat_test() -> None: """Evaluate basic ChatKonko functionality.""" chat_instance = ChatKonko(max_tokens=10) msg = HumanMessage(content="Hi") - chat_response = chat_instance([msg]) + chat_response = chat_instance.invoke([msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str) @@ -66,7 +66,7 @@ def test_konko_chat_test_openai() -> None: """Evaluate basic ChatKonko functionality.""" chat_instance = ChatKonko(max_tokens=10, model="meta-llama/llama-2-70b-chat") msg = HumanMessage(content="Hi") - chat_response = chat_instance([msg]) + chat_response = chat_instance.invoke([msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str) @@ -91,7 +91,7 @@ def test_konko_system_msg_test() -> None: chat_instance = ChatKonko(max_tokens=10) sys_msg = SystemMessage(content="Initiate user chat.") user_msg = HumanMessage(content="Hi there") - chat_response = chat_instance([sys_msg, user_msg]) + chat_response = chat_instance.invoke([sys_msg, user_msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str) @@ -135,7 +135,7 @@ def test_konko_streaming_callback_test() -> None: verbose=True, ) msg = HumanMessage(content="Hi") - chat_response = chat_instance([msg]) + chat_response = chat_instance.invoke([msg]) assert callback_instance.llm_streams > 0 assert isinstance(chat_response, BaseMessage) diff --git a/libs/community/tests/integration_tests/chat_models/test_litellm.py b/libs/community/tests/integration_tests/chat_models/test_litellm.py index c71d0d3ac1..1d29c5c7ed 100644 --- a/libs/community/tests/integration_tests/chat_models/test_litellm.py +++ b/libs/community/tests/integration_tests/chat_models/test_litellm.py @@ -17,7 +17,7 @@ def test_litellm_call() -> None: model="test", ) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -42,7 +42,7 @@ def test_litellm_streaming() -> None: """Test streaming tokens from anthropic.""" chat = ChatLiteLLM(model="test", streaming=True) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -58,5 +58,5 @@ def test_litellm_streaming_callback() -> None: verbose=True, ) message = HumanMessage(content="Write me a sentence with 10 words.") - chat([message]) + chat.invoke([message]) assert callback_handler.llm_streams > 1 diff --git a/libs/community/tests/integration_tests/chat_models/test_litellm_router.py b/libs/community/tests/integration_tests/chat_models/test_litellm_router.py index b7bfd79406..163818f41d 100644 --- a/libs/community/tests/integration_tests/chat_models/test_litellm_router.py +++ b/libs/community/tests/integration_tests/chat_models/test_litellm_router.py @@ -184,7 +184,7 @@ def test_litellm_router_call( chat = ChatLiteLLMRouter(router=litellm_router) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -232,7 +232,7 @@ def test_litellm_router_streaming( chat = ChatLiteLLMRouter(router=litellm_router, streaming=True) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, 
AIMessage) assert isinstance(response.content, str) @@ -255,7 +255,7 @@ def test_litellm_router_streaming_callback( ) message = HumanMessage(content="Write me a sentence with 10 words.") - response = chat([message]) + response = chat.invoke([message]) assert callback_handler.llm_streams > 1 assert isinstance(response, AIMessage) diff --git a/libs/community/tests/integration_tests/chat_models/test_llama_edge.py b/libs/community/tests/integration_tests/chat_models/test_llama_edge.py index 08867d5374..7420adad9f 100644 --- a/libs/community/tests/integration_tests/chat_models/test_llama_edge.py +++ b/libs/community/tests/integration_tests/chat_models/test_llama_edge.py @@ -20,7 +20,7 @@ def test_chat_wasm_service() -> None: messages = [system_message, user_message] # chat with wasm-chat service - response = chat(messages) + response = chat.invoke(messages) # check response assert isinstance(response, AIMessage) diff --git a/libs/community/tests/integration_tests/chat_models/test_octoai.py b/libs/community/tests/integration_tests/chat_models/test_octoai.py index 274cb7008a..cb9bea63b0 100644 --- a/libs/community/tests/integration_tests/chat_models/test_octoai.py +++ b/libs/community/tests/integration_tests/chat_models/test_octoai.py @@ -6,6 +6,6 @@ from langchain_community.chat_models.octoai import ChatOctoAI def test_chat_octoai() -> None: chat = ChatOctoAI() message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_openai.py b/libs/community/tests/integration_tests/chat_models/test_openai.py index 9274ad8e83..dc5c6639ee 100644 --- a/libs/community/tests/integration_tests/chat_models/test_openai.py +++ b/libs/community/tests/integration_tests/chat_models/test_openai.py @@ -33,7 +33,7 @@ def test_chat_openai() -> None: default_query=None, ) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -51,7 +51,7 @@ def test_chat_openai_system_message() -> None: chat = ChatOpenAI(max_tokens=10) system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") - response = chat([system_message, human_message]) + response = chat.invoke([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -99,7 +99,7 @@ def test_chat_openai_streaming() -> None: verbose=True, ) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage) diff --git a/libs/community/tests/integration_tests/chat_models/test_pai_eas_chat_endpoint.py b/libs/community/tests/integration_tests/chat_models/test_pai_eas_chat_endpoint.py index 136153e585..dc114b6be7 100644 --- a/libs/community/tests/integration_tests/chat_models/test_pai_eas_chat_endpoint.py +++ b/libs/community/tests/integration_tests/chat_models/test_pai_eas_chat_endpoint.py @@ -14,7 +14,7 @@ def test_pai_eas_call() -> None: eas_service_url=os.getenv("EAS_SERVICE_URL"), eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), ) - response = chat(messages=[HumanMessage(content="Say foo:")]) + response = chat.invoke([HumanMessage(content="Say foo:")]) assert isinstance(response, BaseMessage) assert 
isinstance(response.content, str) @@ -26,8 +26,8 @@ def test_multiple_history() -> None: eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), ) - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="How are you doing?"), @@ -46,14 +46,14 @@ def test_stream() -> None: ) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="Who are you?"), ], stream=True, - callbacks=callback_manager, + config={"callbacks": callback_manager}, ) assert callback_handler.llm_streams > 0 assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_premai.py b/libs/community/tests/integration_tests/chat_models/test_premai.py index fae9b4135f..36b96dd8d5 100644 --- a/libs/community/tests/integration_tests/chat_models/test_premai.py +++ b/libs/community/tests/integration_tests/chat_models/test_premai.py @@ -21,7 +21,7 @@ def test_chat_premai() -> None: """Test ChatPremAI wrapper.""" chat = ChatPremAI(project_id=8) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -31,7 +31,7 @@ def test_chat_prem_system_message() -> None: chat = ChatPremAI(project_id=8) system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") - response = chat([system_message, human_message]) + response = chat.invoke([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_promptlayer_openai.py b/libs/community/tests/integration_tests/chat_models/test_promptlayer_openai.py index d037056d72..455c2876dd 100644 --- a/libs/community/tests/integration_tests/chat_models/test_promptlayer_openai.py +++ b/libs/community/tests/integration_tests/chat_models/test_promptlayer_openai.py @@ -13,7 +13,7 @@ def test_promptlayer_chat_openai() -> None: """Test PromptLayerChatOpenAI wrapper.""" chat = PromptLayerChatOpenAI(max_tokens=10) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -23,7 +23,7 @@ def test_promptlayer_chat_openai_system_message() -> None: chat = PromptLayerChatOpenAI(max_tokens=10) system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") - response = chat([system_message, human_message]) + response = chat.invoke([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -67,7 +67,7 @@ def test_promptlayer_chat_openai_streaming() -> None: verbose=True, ) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage) diff --git a/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py b/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py index 407f0cd67e..c57a77495e 100644 --- a/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py +++ 
b/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py @@ -98,9 +98,9 @@ def test_initialization() -> None: def test_default_call() -> None: - """Test default model(`ERNIE-Bot`) call.""" + """Test invoking the default model (`ERNIE-Bot`).""" chat = QianfanChatEndpoint() - response = chat(messages=[HumanMessage(content="Hello")]) + response = chat.invoke([HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -108,7 +108,7 @@ def test_default_call() -> None: def test_model() -> None: """Test model kwarg works.""" chat = QianfanChatEndpoint(model="BLOOMZ-7B") - response = chat(messages=[HumanMessage(content="Hello")]) + response = chat.invoke([HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -116,7 +116,7 @@ def test_model() -> None: def test_model_param() -> None: """Test model params works.""" chat = QianfanChatEndpoint() - response = chat(model="BLOOMZ-7B", messages=[HumanMessage(content="Hello")]) + response = chat.invoke([HumanMessage(content="Hello")], model="BLOOMZ-7B") assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -124,7 +124,7 @@ def test_model_param() -> None: def test_endpoint() -> None: """Test user custom model deployments like some open source models.""" chat = QianfanChatEndpoint(endpoint="qianfan_bloomz_7b_compressed") - response = chat(messages=[HumanMessage(content="Hello")]) + response = chat.invoke([HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -132,10 +132,8 @@ def test_endpoint() -> None: def test_endpoint_param() -> None: """Test user custom model deployments like some open source models.""" chat = QianfanChatEndpoint() - response = chat( - messages=[ - HumanMessage(endpoint="qianfan_bloomz_7b_compressed", content="Hello") - ] + response = chat.invoke( + [HumanMessage(endpoint="qianfan_bloomz_7b_compressed", content="Hello")] ) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -145,8 +143,8 @@ def test_multiple_history() -> None: """Tests multiple history works.""" chat = QianfanChatEndpoint() - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="How are you doing?"), @@ -180,14 +178,14 @@ def test_stream() -> None: chat = QianfanChatEndpoint(streaming=True) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="Who are you?"), ], stream=True, - callbacks=callback_manager, + config={"callbacks": callback_manager}, ) assert callback_handler.llm_streams > 0 assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_sparkllm.py b/libs/community/tests/integration_tests/chat_models/test_sparkllm.py index 65fc38712c..df5357d8e7 100644 --- a/libs/community/tests/integration_tests/chat_models/test_sparkllm.py +++ b/libs/community/tests/integration_tests/chat_models/test_sparkllm.py @@ -15,7 +15,7 @@ def test_initialization() -> None: def test_chat_spark_llm() -> None: chat = ChatSparkLLM() message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert
isinstance(response.content, str) @@ -30,7 +30,7 @@ def test_chat_spark_llm_streaming() -> None: def test_chat_spark_llm_with_domain() -> None: chat = ChatSparkLLM(spark_llm_domain="generalv3") message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) print(response) # noqa: T201 assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -39,7 +39,7 @@ def test_chat_spark_llm_with_domain() -> None: def test_chat_spark_llm_with_temperature() -> None: chat = ChatSparkLLM(temperature=0.9, top_k=2) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) print(response) # noqa: T201 assert isinstance(response, AIMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_tongyi.py b/libs/community/tests/integration_tests/chat_models/test_tongyi.py index 73591bb4e3..3e0a8f9442 100644 --- a/libs/community/tests/integration_tests/chat_models/test_tongyi.py +++ b/libs/community/tests/integration_tests/chat_models/test_tongyi.py @@ -61,7 +61,7 @@ def test_api_key_masked_when_passed_via_constructor( def test_default_call() -> None: """Test default model call.""" chat = ChatTongyi() - response = chat(messages=[HumanMessage(content="Hello")]) + response = chat.invoke([HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -69,7 +69,7 @@ def test_default_call() -> None: def test_model() -> None: """Test model kwarg works.""" chat = ChatTongyi(model="qwen-plus") - response = chat(messages=[HumanMessage(content="Hello")]) + response = chat.invoke([HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -95,8 +95,8 @@ def test_multiple_history() -> None: """Tests multiple history works.""" chat = ChatTongyi() - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="How are you doing?"), @@ -111,14 +111,14 @@ def test_stream() -> None: chat = ChatTongyi(streaming=True) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="Who are you?"), ], stream=True, - callbacks=callback_manager, + config={"callbacks": callback_manager}, ) assert callback_handler.llm_streams > 0 assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_vertexai.py b/libs/community/tests/integration_tests/chat_models/test_vertexai.py index 5311f46daf..235d3cfa8f 100644 --- a/libs/community/tests/integration_tests/chat_models/test_vertexai.py +++ b/libs/community/tests/integration_tests/chat_models/test_vertexai.py @@ -50,7 +50,7 @@ def test_vertexai_single_call(model_name: str) -> None: else: model = ChatVertexAI() message = HumanMessage(content="Hello") - response = model([message]) + response = model.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -104,7 +104,7 @@ def test_vertexai_single_call_with_context() -> None: ) context = SystemMessage(content=raw_context) message = HumanMessage(content=question) - response = model([context, message]) + response = model.invoke([context, message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ 
-124,7 +124,7 @@ def test_multimodal() -> None: "text": "What is shown in this image?", } message = HumanMessage(content=[text_message, image_message]) - output = llm([message]) + output = llm.invoke([message]) assert isinstance(output.content, str) @@ -151,7 +151,7 @@ def test_multimodal_history() -> None: ) ) message3 = HumanMessage(content="What time of day is it?") - response = llm([message1, message2, message3]) + response = llm.invoke([message1, message2, message3]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -166,7 +166,7 @@ def test_vertexai_single_call_with_examples() -> None: output = AIMessage(content=text_answer) context = SystemMessage(content=raw_context) message = HumanMessage(content=question) - response = model([context, message], examples=[inp, output]) + response = model.invoke([context, message], examples=[inp, output]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -183,7 +183,7 @@ def test_vertexai_single_call_with_history(model_name: str) -> None: message1 = HumanMessage(content=text_question1) message2 = AIMessage(content=text_answer1) message3 = HumanMessage(content=text_question2) - response = model([message1, message2, message3]) + response = model.invoke([message1, message2, message3]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -219,7 +219,7 @@ def test_parse_chat_history_correct() -> None: def test_vertexai_single_call_fails_no_message() -> None: chat = ChatVertexAI() with pytest.raises(ValueError) as exc_info: - _ = chat([]) + _ = chat.invoke([]) assert ( str(exc_info.value) == "You should provide at least one message to start the chat!" @@ -251,9 +251,9 @@ def test_vertexai_args_passed(stop: Optional[str]) -> None: model = ChatVertexAI(**prompt_params) message = HumanMessage(content=user_prompt) if stop: - response = model([message], stop=[stop]) + response = model.invoke([message], stop=[stop]) else: - response = model([message]) + response = model.invoke([message]) assert response.content == response_text mock_send_message.assert_called_once_with(user_prompt, candidate_count=1) diff --git a/libs/community/tests/integration_tests/chat_models/test_volcengine_maas.py b/libs/community/tests/integration_tests/chat_models/test_volcengine_maas.py index 4701c504e7..24002ec617 100644 --- a/libs/community/tests/integration_tests/chat_models/test_volcengine_maas.py +++ b/libs/community/tests/integration_tests/chat_models/test_volcengine_maas.py @@ -11,7 +11,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_default_call() -> None: """Test valid chat call to volc engine.""" chat = VolcEngineMaasChat() - response = chat(messages=[HumanMessage(content="Hello")]) + response = chat.invoke([HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -20,8 +20,8 @@ def test_multiple_history() -> None: """Tests multiple history works.""" chat = VolcEngineMaasChat() - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="Hello"), AIMessage(content="Hello!"), HumanMessage(content="How are you?"), @@ -36,14 +36,14 @@ def test_stream() -> None: chat = VolcEngineMaasChat(streaming=True) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="Hello"), AIMessage(content="Hello!"), HumanMessage(content="How are you?"), ], 
stream=True, - callbacks=callback_manager, + config={"callbacks": callback_manager}, ) assert callback_handler.llm_streams > 0 assert isinstance(response.content, str) @@ -56,14 +56,14 @@ def test_stop() -> None: ) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="repeat: hello world"), AIMessage(content="hello world"), HumanMessage(content="repeat: hello world"), ], stream=True, - callbacks=callback_manager, + config={"callbacks": callback_manager}, stop=["world"], ) assert callback_handler.llm_streams > 0 diff --git a/libs/community/tests/integration_tests/chat_models/test_zhipuai.py b/libs/community/tests/integration_tests/chat_models/test_zhipuai.py index 0c110d1c9a..1f6c4e9af8 100644 --- a/libs/community/tests/integration_tests/chat_models/test_zhipuai.py +++ b/libs/community/tests/integration_tests/chat_models/test_zhipuai.py @@ -11,7 +11,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_default_call() -> None: """Test default model call.""" chat = ChatZhipuAI() - response = chat(messages=[HumanMessage(content="Hello")]) + response = chat.invoke([HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -19,7 +19,7 @@ def test_default_call() -> None: def test_model() -> None: """Test model kwarg works.""" chat = ChatZhipuAI(model="glm-4") - response = chat(messages=[HumanMessage(content="Hello")]) + response = chat.invoke([HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -28,8 +28,8 @@ def test_multiple_history() -> None: """Tests multiple history works.""" chat = ChatZhipuAI() - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="How are you doing?"), @@ -44,14 +44,14 @@ def test_stream() -> None: chat = ChatZhipuAI(streaming=True) callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - response = chat( - messages=[ + response = chat.invoke( + [ HumanMessage(content="Hello."), AIMessage(content="Hello!"), HumanMessage(content="Who are you?"), ], stream=True, - callbacks=callback_manager, + config={"callbacks": callback_manager}, ) assert callback_handler.llm_streams > 0 assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/llms/test_ai21.py b/libs/community/tests/integration_tests/llms/test_ai21.py index 1f4398984b..8b9a02841d 100644 --- a/libs/community/tests/integration_tests/llms/test_ai21.py +++ b/libs/community/tests/integration_tests/llms/test_ai21.py @@ -9,14 +9,14 @@ from langchain_community.llms.loading import load_llm def test_ai21_call() -> None: """Test valid call to ai21.""" llm = AI21(maxTokens=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) def test_ai21_call_experimental() -> None: """Test valid call to ai21 with an experimental model.""" llm = AI21(maxTokens=10, model="j1-grande-instruct") - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_aleph_alpha.py b/libs/community/tests/integration_tests/llms/test_aleph_alpha.py index 20f9014eb2..56d6aad2af 100644 --- a/libs/community/tests/integration_tests/llms/test_aleph_alpha.py +++ 
b/libs/community/tests/integration_tests/llms/test_aleph_alpha.py @@ -6,5 +6,5 @@ from langchain_community.llms.aleph_alpha import AlephAlpha def test_aleph_alpha_call() -> None: """Test valid call to cohere.""" llm = AlephAlpha(maximum_tokens=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_anthropic.py b/libs/community/tests/integration_tests/llms/test_anthropic.py index c0c10335bd..d0e42eec5e 100644 --- a/libs/community/tests/integration_tests/llms/test_anthropic.py +++ b/libs/community/tests/integration_tests/llms/test_anthropic.py @@ -24,7 +24,7 @@ def test_anthropic_model_param() -> None: def test_anthropic_call() -> None: """Test valid call to anthropic.""" llm = Anthropic(model="claude-instant-1") - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -48,7 +48,7 @@ def test_anthropic_streaming_callback() -> None: callback_manager=callback_manager, verbose=True, ) - llm("Write me a sentence with 100 words.") + llm.invoke("Write me a sentence with 100 words.") assert callback_handler.llm_streams > 1 diff --git a/libs/community/tests/integration_tests/llms/test_anyscale.py b/libs/community/tests/integration_tests/llms/test_anyscale.py index 686918880c..7cb48b8050 100644 --- a/libs/community/tests/integration_tests/llms/test_anyscale.py +++ b/libs/community/tests/integration_tests/llms/test_anyscale.py @@ -6,5 +6,5 @@ from langchain_community.llms.anyscale import Anyscale def test_anyscale_call() -> None: """Test valid call to Anyscale.""" llm = Anyscale() - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_aviary.py b/libs/community/tests/integration_tests/llms/test_aviary.py index 2c8fd96f52..e3bd407bba 100644 --- a/libs/community/tests/integration_tests/llms/test_aviary.py +++ b/libs/community/tests/integration_tests/llms/test_aviary.py @@ -6,6 +6,6 @@ from langchain_community.llms.aviary import Aviary def test_aviary_call() -> None: """Test valid call to Anyscale.""" llm = Aviary() - output = llm("Say bar:") + output = llm.invoke("Say bar:") print(f"llm answer:\n{output}") # noqa: T201 assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_azure_openai.py b/libs/community/tests/integration_tests/llms/test_azure_openai.py index ee5030f101..1ed40125fc 100644 --- a/libs/community/tests/integration_tests/llms/test_azure_openai.py +++ b/libs/community/tests/integration_tests/llms/test_azure_openai.py @@ -38,7 +38,7 @@ def llm() -> AzureOpenAI: @pytest.mark.scheduled def test_openai_call(llm: AzureOpenAI) -> None: """Test valid call to openai.""" - output = llm("Say something nice:") + output = llm.invoke("Say something nice:") assert isinstance(output, str) @@ -133,7 +133,7 @@ def test_openai_streaming_multiple_prompts_error() -> None: def test_openai_streaming_call() -> None: """Test valid call to openai.""" llm = _get_llm(max_tokens=10, streaming=True) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -148,7 +148,7 @@ def test_openai_streaming_callback() -> None: callback_manager=callback_manager, verbose=True, ) - llm("Write me a sentence with 100 words.") + llm.invoke("Write me a sentence with 100 words.") assert callback_handler.llm_streams == 11 diff --git a/libs/community/tests/integration_tests/llms/test_baichuan.py 
b/libs/community/tests/integration_tests/llms/test_baichuan.py index 330e9fe829..000229aff0 100644 --- a/libs/community/tests/integration_tests/llms/test_baichuan.py +++ b/libs/community/tests/integration_tests/llms/test_baichuan.py @@ -7,7 +7,7 @@ from langchain_community.llms.baichuan import BaichuanLLM def test_call() -> None: """Test valid call to baichuan.""" llm = BaichuanLLM() - output = llm("Who won the second world war?") + output = llm.invoke("Who won the second world war?") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_banana.py b/libs/community/tests/integration_tests/llms/test_banana.py index fa68114c30..866f958053 100644 --- a/libs/community/tests/integration_tests/llms/test_banana.py +++ b/libs/community/tests/integration_tests/llms/test_banana.py @@ -6,5 +6,5 @@ from langchain_community.llms.bananadev import Banana def test_banana_call() -> None: """Test valid call to BananaDev.""" llm = Banana() - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_baseten.py b/libs/community/tests/integration_tests/llms/test_baseten.py index d0db0ef40d..692e34790e 100644 --- a/libs/community/tests/integration_tests/llms/test_baseten.py +++ b/libs/community/tests/integration_tests/llms/test_baseten.py @@ -9,5 +9,5 @@ from langchain_community.llms.baseten import Baseten def test_baseten_call() -> None: """Test valid call to Baseten.""" llm = Baseten(model=os.environ["BASETEN_MODEL_ID"]) - output = llm("Test prompt, please respond.") + output = llm.invoke("Test prompt, please respond.") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_bedrock.py b/libs/community/tests/integration_tests/llms/test_bedrock.py index 97dbf4f4e8..98d02ad3a3 100644 --- a/libs/community/tests/integration_tests/llms/test_bedrock.py +++ b/libs/community/tests/integration_tests/llms/test_bedrock.py @@ -86,7 +86,7 @@ def test_claude_instant_v1(bedrock_runtime_client, bedrock_models): # type: ign client=bedrock_runtime_client, model_kwargs={}, ) - output = llm("Say something positive:") + output = llm.invoke("Say something positive:") assert isinstance(output, str) except Exception as e: pytest.fail(f"can not instantiate claude-instant-v1: {e}", pytrace=False) @@ -106,7 +106,7 @@ def test_amazon_bedrock_guardrails_no_intervention_for_valid_query( # type: ign "trace": False, }, ) - output = llm("Say something positive:") + output = llm.invoke("Say something positive:") assert isinstance(output, str) except Exception as e: pytest.fail(f"can not instantiate claude-instant-v1: {e}", pytrace=False) @@ -131,6 +131,6 @@ def test_amazon_bedrock_guardrails_intervention_for_invalid_query( # type: igno except Exception as e: pytest.fail(f"can not instantiate claude-instant-v1: {e}", pytrace=False) else: - llm(GUARDRAILS_TRIGGER) + llm.invoke(GUARDRAILS_TRIGGER) guardrails_intervened = handler.get_response() assert guardrails_intervened is True diff --git a/libs/community/tests/integration_tests/llms/test_bigdl_llm.py b/libs/community/tests/integration_tests/llms/test_bigdl_llm.py index 1967ca07a8..d214df8429 100644 --- a/libs/community/tests/integration_tests/llms/test_bigdl_llm.py +++ b/libs/community/tests/integration_tests/llms/test_bigdl_llm.py @@ -10,7 +10,7 @@ def test_call() -> None: model_id="lmsys/vicuna-7b-v1.5", model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True}, ) - output = llm("Hello!") + output = 
llm.invoke("Hello!") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_bittensor.py b/libs/community/tests/integration_tests/llms/test_bittensor.py index 16d60ce445..22106f054c 100644 --- a/libs/community/tests/integration_tests/llms/test_bittensor.py +++ b/libs/community/tests/integration_tests/llms/test_bittensor.py @@ -6,5 +6,5 @@ from langchain_community.llms import NIBittensorLLM def test_bittensor_call() -> None: """Test valid call to validator endpoint.""" llm = NIBittensorLLM(system_prompt="Your task is to answer user prompt.") - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_cerebriumai.py b/libs/community/tests/integration_tests/llms/test_cerebriumai.py index 757611306c..5a33141c0a 100644 --- a/libs/community/tests/integration_tests/llms/test_cerebriumai.py +++ b/libs/community/tests/integration_tests/llms/test_cerebriumai.py @@ -6,5 +6,5 @@ from langchain_community.llms.cerebriumai import CerebriumAI def test_cerebriumai_call() -> None: """Test valid call to cerebriumai.""" llm = CerebriumAI(max_length=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_chatglm.py b/libs/community/tests/integration_tests/llms/test_chatglm.py index f29bf5375f..9731794bff 100644 --- a/libs/community/tests/integration_tests/llms/test_chatglm.py +++ b/libs/community/tests/integration_tests/llms/test_chatglm.py @@ -7,7 +7,7 @@ from langchain_community.llms.chatglm import ChatGLM def test_chatglm_call() -> None: """Test valid call to chatglm.""" llm = ChatGLM() - output = llm("北京和上海这两座城市有什么不同?") + output = llm.invoke("北京和上海这两座城市有什么不同?") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_clarifai.py b/libs/community/tests/integration_tests/llms/test_clarifai.py index a062e956f8..700017c9c4 100644 --- a/libs/community/tests/integration_tests/llms/test_clarifai.py +++ b/libs/community/tests/integration_tests/llms/test_clarifai.py @@ -16,7 +16,7 @@ def test_clarifai_call() -> None: app_id="summarization", model_id="text-summarization-english-pegasus", ) - output = llm( + output = llm.invoke( "A chain is a serial assembly of connected pieces, called links, \ typically made of metal, with an overall character similar to that\ of a rope in that it is flexible and curved in compression but \ diff --git a/libs/community/tests/integration_tests/llms/test_cloudflare_workersai.py b/libs/community/tests/integration_tests/llms/test_cloudflare_workersai.py index 2e5101ae71..36daa87816 100644 --- a/libs/community/tests/integration_tests/llms/test_cloudflare_workersai.py +++ b/libs/community/tests/integration_tests/llms/test_cloudflare_workersai.py @@ -17,7 +17,7 @@ def test_cloudflare_workersai_call() -> None: api_token="my_api_token", model="@cf/meta/llama-2-7b-chat-int8", ) - output = llm("What is 2 + 2?") + output = llm.invoke("What is 2 + 2?") assert output == "4" diff --git a/libs/community/tests/integration_tests/llms/test_cohere.py b/libs/community/tests/integration_tests/llms/test_cohere.py index f6aef917ec..2ccb86e770 100644 --- a/libs/community/tests/integration_tests/llms/test_cohere.py +++ b/libs/community/tests/integration_tests/llms/test_cohere.py @@ -13,7 +13,7 @@ from tests.integration_tests.llms.utils import assert_llm_equality def test_cohere_call() -> None: """Test valid call to cohere.""" llm = 
Cohere(max_tokens=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_ctransformers.py b/libs/community/tests/integration_tests/llms/test_ctransformers.py index 39db685229..6c0a0b8ae0 100644 --- a/libs/community/tests/integration_tests/llms/test_ctransformers.py +++ b/libs/community/tests/integration_tests/llms/test_ctransformers.py @@ -15,7 +15,7 @@ def test_ctransformers_call() -> None: callbacks=[callback_handler], ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) assert len(output) > 1 assert 0 < callback_handler.llm_streams <= config["max_new_tokens"] diff --git a/libs/community/tests/integration_tests/llms/test_deepsparse.py b/libs/community/tests/integration_tests/llms/test_deepsparse.py index f49a0e769f..890cef383a 100644 --- a/libs/community/tests/integration_tests/llms/test_deepsparse.py +++ b/libs/community/tests/integration_tests/llms/test_deepsparse.py @@ -11,7 +11,7 @@ def test_deepsparse_call() -> None: config=config, ) - output = llm("def ") + output = llm.invoke("def ") assert isinstance(output, str) assert len(output) > 1 assert output == "ids_to_names" diff --git a/libs/community/tests/integration_tests/llms/test_edenai.py b/libs/community/tests/integration_tests/llms/test_edenai.py index 575578b845..f7e94dea04 100644 --- a/libs/community/tests/integration_tests/llms/test_edenai.py +++ b/libs/community/tests/integration_tests/llms/test_edenai.py @@ -14,7 +14,7 @@ from langchain_community.llms import EdenAI def test_edenai_call() -> None: """Test simple call to edenai.""" llm = EdenAI(provider="openai", temperature=0.2, max_tokens=250) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert llm._llm_type == "edenai" assert llm.feature == "text" @@ -38,7 +38,7 @@ def test_edenai_call_with_old_params() -> None: to pass optional parameters to api """ llm = EdenAI(provider="openai", params={"temperature": 0.2, "max_tokens": 250}) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert llm._llm_type == "edenai" assert llm.feature == "text" diff --git a/libs/community/tests/integration_tests/llms/test_fireworks.py b/libs/community/tests/integration_tests/llms/test_fireworks.py index ca61010f7d..f6f6a66843 100644 --- a/libs/community/tests/integration_tests/llms/test_fireworks.py +++ b/libs/community/tests/integration_tests/llms/test_fireworks.py @@ -15,7 +15,7 @@ def llm() -> Fireworks: @pytest.mark.scheduled def test_fireworks_call(llm: Fireworks) -> None: """Test valid call to fireworks.""" - output = llm("How is the weather in New York today?") + output = llm.invoke("How is the weather in New York today?") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_forefrontai.py b/libs/community/tests/integration_tests/llms/test_forefrontai.py index 54f37b5c9b..a71c726b2c 100644 --- a/libs/community/tests/integration_tests/llms/test_forefrontai.py +++ b/libs/community/tests/integration_tests/llms/test_forefrontai.py @@ -6,5 +6,5 @@ from langchain_community.llms.forefrontai import ForefrontAI def test_forefrontai_call() -> None: """Test valid call to forefrontai.""" llm = ForefrontAI(length=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_friendli.py b/libs/community/tests/integration_tests/llms/test_friendli.py index 38c6fd0646..e1c730d5c6 100644 --- 
a/libs/community/tests/integration_tests/llms/test_friendli.py +++ b/libs/community/tests/integration_tests/llms/test_friendli.py @@ -13,12 +13,6 @@ def friendli_llm() -> Friendli: return Friendli(temperature=0, max_tokens=10) -def test_friendli_call(friendli_llm: Friendli) -> None: - """Test call.""" - output = friendli_llm("Say hello world.") - assert isinstance(output, str) - - def test_friendli_invoke(friendli_llm: Friendli) -> None: """Test invoke.""" output = friendli_llm.invoke("Say hello world.") diff --git a/libs/community/tests/integration_tests/llms/test_google_palm.py b/libs/community/tests/integration_tests/llms/test_google_palm.py index 5c586c451d..bd052d91ac 100644 --- a/libs/community/tests/integration_tests/llms/test_google_palm.py +++ b/libs/community/tests/integration_tests/llms/test_google_palm.py @@ -25,7 +25,7 @@ def test_google_generativeai_call(model_name: str) -> None: llm = GooglePalm(max_output_tokens=10, model_name=model_name) else: llm = GooglePalm(max_output_tokens=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) assert llm._llm_type == "google_palm" if model_name and "gemini" in model_name: diff --git a/libs/community/tests/integration_tests/llms/test_gooseai.py b/libs/community/tests/integration_tests/llms/test_gooseai.py index dccde414f3..7c890f459a 100644 --- a/libs/community/tests/integration_tests/llms/test_gooseai.py +++ b/libs/community/tests/integration_tests/llms/test_gooseai.py @@ -6,14 +6,14 @@ from langchain_community.llms.gooseai import GooseAI def test_gooseai_call() -> None: """Test valid call to gooseai.""" llm = GooseAI(max_tokens=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) def test_gooseai_call_fairseq() -> None: """Test valid call to gooseai with fairseq model.""" llm = GooseAI(model_name="fairseq-1-3b", max_tokens=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -21,8 +21,8 @@ def test_gooseai_stop_valid() -> None: """Test gooseai stop logic on valid configuration.""" query = "write an ordered list of five items" first_llm = GooseAI(stop="3", temperature=0) - first_output = first_llm(query) + first_output = first_llm.invoke(query) second_llm = GooseAI(temperature=0) - second_output = second_llm(query, stop=["3"]) + second_output = second_llm.invoke(query, stop=["3"]) # Because it stops on new lines, shouldn't return anything assert first_output == second_output diff --git a/libs/community/tests/integration_tests/llms/test_gpt4all.py b/libs/community/tests/integration_tests/llms/test_gpt4all.py index 038c50209b..5848e5f45b 100644 --- a/libs/community/tests/integration_tests/llms/test_gpt4all.py +++ b/libs/community/tests/integration_tests/llms/test_gpt4all.py @@ -21,5 +21,5 @@ def test_gpt4all_inference() -> None: """Test valid gpt4all inference.""" model_path = _download_model() llm = GPT4All(model=model_path) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_gradient_ai.py b/libs/community/tests/integration_tests/llms/test_gradient_ai.py index 7fe55b7284..c399299c12 100644 --- a/libs/community/tests/integration_tests/llms/test_gradient_ai.py +++ b/libs/community/tests/integration_tests/llms/test_gradient_ai.py @@ -23,7 +23,7 @@ def test_gradient_acall() -> None: gradient_access_token=gradient_access_token, gradient_workspace_id=gradient_workspace_id, ) - output = llm("Say hello:", 
temperature=0.2, max_tokens=250) + output = llm.invoke("Say hello:", temperature=0.2, max_tokens=250) assert llm._llm_type == "gradient" diff --git a/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py b/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py index 11af7df374..1945c271c8 100644 --- a/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py +++ b/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py @@ -13,7 +13,7 @@ def test_huggingface_endpoint_call_error() -> None: """Test valid call to HuggingFace that errors.""" llm = HuggingFaceEndpoint(endpoint_url="", model_kwargs={"max_new_tokens": -1}) with pytest.raises(ValueError): - llm("Say foo:") + llm.invoke("Say foo:") def test_saving_loading_endpoint_llm(tmp_path: Path) -> None: @@ -29,7 +29,7 @@ def test_saving_loading_endpoint_llm(tmp_path: Path) -> None: def test_huggingface_text_generation() -> None: """Test valid call to HuggingFace text generation model.""" llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": 10}) - output = llm("Say foo:") + output = llm.invoke("Say foo:") print(output) # noqa: T201 assert isinstance(output, str) @@ -37,14 +37,14 @@ def test_huggingface_text_generation() -> None: def test_huggingface_text2text_generation() -> None: """Test valid call to HuggingFace text2text model.""" llm = HuggingFaceEndpoint(repo_id="google/flan-t5-xl") - output = llm("The capital of New York is") + output = llm.invoke("The capital of New York is") assert output == "Albany" def test_huggingface_summarization() -> None: """Test valid call to HuggingFace summarization model.""" llm = HuggingFaceEndpoint(repo_id="facebook/bart-large-cnn") - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -52,7 +52,7 @@ def test_huggingface_call_error() -> None: """Test valid call to HuggingFace that errors.""" llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": -1}) with pytest.raises(ValueError): - llm("Say foo:") + llm.invoke("Say foo:") def test_saving_loading_llm(tmp_path: Path) -> None: diff --git a/libs/community/tests/integration_tests/llms/test_huggingface_hub.py b/libs/community/tests/integration_tests/llms/test_huggingface_hub.py index a770be7f96..999f92ad41 100644 --- a/libs/community/tests/integration_tests/llms/test_huggingface_hub.py +++ b/libs/community/tests/integration_tests/llms/test_huggingface_hub.py @@ -12,21 +12,21 @@ from tests.integration_tests.llms.utils import assert_llm_equality def test_huggingface_text_generation() -> None: """Test valid call to HuggingFace text generation model.""" llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10}) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) def test_huggingface_text2text_generation() -> None: """Test valid call to HuggingFace text2text model.""" llm = HuggingFaceHub(repo_id="google/flan-t5-xl") - output = llm("The capital of New York is") + output = llm.invoke("The capital of New York is") assert output == "Albany" def test_huggingface_summarization() -> None: """Test valid call to HuggingFace summarization model.""" llm = HuggingFaceHub(repo_id="facebook/bart-large-cnn") - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -34,7 +34,7 @@ def test_huggingface_call_error() -> None: """Test valid call to HuggingFace that errors.""" llm = HuggingFaceHub(model_kwargs={"max_new_tokens": -1}) with 
pytest.raises(ValueError): - llm("Say foo:") + llm.invoke("Say foo:") def test_saving_loading_llm(tmp_path: Path) -> None: diff --git a/libs/community/tests/integration_tests/llms/test_huggingface_pipeline.py b/libs/community/tests/integration_tests/llms/test_huggingface_pipeline.py index 3928046913..73dbdd0e7a 100755 --- a/libs/community/tests/integration_tests/llms/test_huggingface_pipeline.py +++ b/libs/community/tests/integration_tests/llms/test_huggingface_pipeline.py @@ -12,7 +12,7 @@ def test_huggingface_pipeline_text_generation() -> None: llm = HuggingFacePipeline.from_model_id( model_id="gpt2", task="text-generation", pipeline_kwargs={"max_new_tokens": 10} ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -21,7 +21,7 @@ def test_huggingface_pipeline_text2text_generation() -> None: llm = HuggingFacePipeline.from_model_id( model_id="google/flan-t5-small", task="text2text-generation" ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -33,7 +33,7 @@ def test_huggingface_pipeline_device_map() -> None: device_map="auto", pipeline_kwargs={"max_new_tokens": 10}, ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -42,7 +42,7 @@ def text_huggingface_pipeline_summarization() -> None: llm = HuggingFacePipeline.from_model_id( model_id="facebook/bart-large-cnn", task="summarization" ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -67,7 +67,7 @@ def test_init_with_pipeline() -> None: "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) llm = HuggingFacePipeline(pipeline=pipe) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -78,7 +78,7 @@ def test_huggingface_pipeline_runtime_kwargs() -> None: task="text-generation", ) prompt = "Say foo:" - output = llm(prompt, pipeline_kwargs={"max_new_tokens": 2}) + output = llm.invoke(prompt, pipeline_kwargs={"max_new_tokens": 2}) assert len(output) < 10 @@ -94,7 +94,7 @@ def test_huggingface_pipeline_text_generation_ov() -> None: model_kwargs={"device": "CPU", "ov_config": ov_config}, pipeline_kwargs={"max_new_tokens": 64}, ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -107,7 +107,7 @@ def test_huggingface_pipeline_text2text_generation_ov() -> None: model_kwargs={"device": "CPU", "ov_config": ov_config}, pipeline_kwargs={"max_new_tokens": 64}, ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -120,5 +120,5 @@ def text_huggingface_pipeline_summarization_ov() -> None: model_kwargs={"device": "CPU", "ov_config": ov_config}, pipeline_kwargs={"max_new_tokens": 64}, ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_ipex_llm.py b/libs/community/tests/integration_tests/llms/test_ipex_llm.py index a56a5e8365..a98bbf14be 100644 --- a/libs/community/tests/integration_tests/llms/test_ipex_llm.py +++ b/libs/community/tests/integration_tests/llms/test_ipex_llm.py @@ -10,7 +10,7 @@ def test_call() -> None: model_id="lmsys/vicuna-7b-v1.5", model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True}, ) - output = llm("Hello!") + output = llm.invoke("Hello!") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_konko.py 
b/libs/community/tests/integration_tests/llms/test_konko.py index 3e0fe0f31b..5aa399e91d 100644 --- a/libs/community/tests/integration_tests/llms/test_konko.py +++ b/libs/community/tests/integration_tests/llms/test_konko.py @@ -15,7 +15,7 @@ def test_konko_call() -> None: temperature=0.2, max_tokens=250, ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert llm._llm_type == "konko" assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_llamacpp.py b/libs/community/tests/integration_tests/llms/test_llamacpp.py index 1328e80fd8..59d17e73a2 100644 --- a/libs/community/tests/integration_tests/llms/test_llamacpp.py +++ b/libs/community/tests/integration_tests/llms/test_llamacpp.py @@ -35,7 +35,7 @@ def test_llamacpp_inference() -> None: """Test valid llama.cpp inference.""" model_path = get_model() llm = LlamaCpp(model_path=model_path) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) assert len(output) > 1 @@ -68,7 +68,7 @@ def test_llamacpp_streaming_callback() -> None: verbose=True, max_tokens=MAX_TOKENS, ) - llm("Q: Can you count to 10? A:'1, ") + llm.invoke("Q: Can you count to 10? A:'1, ") assert callback_handler.llm_streams <= MAX_TOKENS + OFF_BY_ONE diff --git a/libs/community/tests/integration_tests/llms/test_manifest.py b/libs/community/tests/integration_tests/llms/test_manifest.py index 3c2538a3b7..db5e6ea56e 100644 --- a/libs/community/tests/integration_tests/llms/test_manifest.py +++ b/libs/community/tests/integration_tests/llms/test_manifest.py @@ -8,5 +8,5 @@ def test_manifest_wrapper() -> None: manifest = Manifest(client_name="openai") llm = ManifestWrapper(client=manifest, llm_kwargs={"temperature": 0}) - output = llm("The capital of New York is:") + output = llm.invoke("The capital of New York is:") assert output == "Albany" diff --git a/libs/community/tests/integration_tests/llms/test_minimax.py b/libs/community/tests/integration_tests/llms/test_minimax.py index f42cad2954..cc7a33fb17 100644 --- a/libs/community/tests/integration_tests/llms/test_minimax.py +++ b/libs/community/tests/integration_tests/llms/test_minimax.py @@ -5,14 +5,14 @@ from langchain_community.llms.minimax import Minimax def test_minimax_call() -> None: """Test valid call to minimax.""" llm = Minimax(max_tokens=10) - output = llm("Hello world!") + output = llm.invoke("Hello world!") assert isinstance(output, str) def test_minimax_call_successful() -> None: """Test valid call to minimax.""" llm = Minimax() - output = llm( + output = llm.invoke( "A chain is a serial assembly of connected pieces, called links, \ typically made of metal, with an overall character similar to that\ of a rope in that it is flexible and curved in compression but \ diff --git a/libs/community/tests/integration_tests/llms/test_modal.py b/libs/community/tests/integration_tests/llms/test_modal.py index e79032bbd5..adddd53b90 100644 --- a/libs/community/tests/integration_tests/llms/test_modal.py +++ b/libs/community/tests/integration_tests/llms/test_modal.py @@ -6,5 +6,5 @@ from langchain_community.llms.modal import Modal def test_modal_call() -> None: """Test valid call to Modal.""" llm = Modal() - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_mosaicml.py b/libs/community/tests/integration_tests/llms/test_mosaicml.py index f42eed131e..9fb0e54d40 100644 --- a/libs/community/tests/integration_tests/llms/test_mosaicml.py +++ 
b/libs/community/tests/integration_tests/llms/test_mosaicml.py @@ -9,7 +9,7 @@ from langchain_community.llms.mosaicml import PROMPT_FOR_GENERATION_FORMAT, Mosa def test_mosaicml_llm_call() -> None: """Test valid call to MosaicML.""" llm = MosaicML(model_kwargs={}) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -18,7 +18,7 @@ def test_mosaicml_endpoint_change() -> None: new_url = "https://models.hosted-on.mosaicml.hosting/mpt-30b-instruct/v1/predict" llm = MosaicML(endpoint_url=new_url) assert llm.endpoint_url == new_url - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -26,7 +26,7 @@ def test_mosaicml_extra_kwargs() -> None: llm = MosaicML(model_kwargs={"max_new_tokens": 1}) assert llm.model_kwargs == {"max_new_tokens": 1} - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -41,7 +41,7 @@ def test_instruct_prompt() -> None: prompt = llm._transform_prompt(instruction) expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction) assert prompt == expected_prompt - output = llm(prompt) + output = llm.invoke(prompt) assert isinstance(output, str) @@ -52,9 +52,9 @@ def test_retry_logic() -> None: prompt = llm._transform_prompt(instruction) expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction) assert prompt == expected_prompt - output = llm(prompt) + output = llm.invoke(prompt) assert isinstance(output, str) - output = llm(prompt) + output = llm.invoke(prompt) assert isinstance(output, str) @@ -78,5 +78,5 @@ def test_short_retry_does_not_loop() -> None: ), ): for _ in range(10): - output = llm(prompt) + output = llm.invoke(prompt) assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_nlpcloud.py b/libs/community/tests/integration_tests/llms/test_nlpcloud.py index 5806f4d21a..6a4635b254 100644 --- a/libs/community/tests/integration_tests/llms/test_nlpcloud.py +++ b/libs/community/tests/integration_tests/llms/test_nlpcloud.py @@ -14,7 +14,7 @@ from tests.integration_tests.llms.utils import assert_llm_equality def test_nlpcloud_call() -> None: """Test valid call to nlpcloud.""" llm = NLPCloud(max_length=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_octoai_endpoint.py b/libs/community/tests/integration_tests/llms/test_octoai_endpoint.py index f3070199fe..fb9c07e49a 100644 --- a/libs/community/tests/integration_tests/llms/test_octoai_endpoint.py +++ b/libs/community/tests/integration_tests/llms/test_octoai_endpoint.py @@ -6,6 +6,6 @@ from langchain_community.llms.octoai_endpoint import OctoAIEndpoint def test_octoai_endpoint_call() -> None: """Test valid call to OctoAI endpoint.""" llm = OctoAIEndpoint() - output = llm("Which state is Los Angeles in?") + output = llm.invoke("Which state is Los Angeles in?") print(output) # noqa: T201 assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_openai.py b/libs/community/tests/integration_tests/llms/test_openai.py index c1ec3c5d0f..1c8d1c37a4 100644 --- a/libs/community/tests/integration_tests/llms/test_openai.py +++ b/libs/community/tests/integration_tests/llms/test_openai.py @@ -18,7 +18,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import ( def test_openai_call() -> None: """Test valid call to openai.""" llm = OpenAI() - output = llm("Say something nice:") + output = 
llm.invoke("Say something nice:") assert isinstance(output, str) @@ -34,9 +34,9 @@ def test_openai_stop_valid() -> None: """Test openai stop logic on valid configuration.""" query = "write an ordered list of five items" first_llm = OpenAI(stop="3", temperature=0) - first_output = first_llm(query) + first_output = first_llm.invoke(query) second_llm = OpenAI(temperature=0) - second_output = second_llm(query, stop=["3"]) + second_output = second_llm.invoke(query, stop=["3"]) # Because it stops on new lines, shouldn't return anything assert first_output == second_output @@ -45,7 +45,7 @@ def test_openai_stop_error() -> None: """Test openai stop logic on bad configuration.""" llm = OpenAI(stop="3", temperature=0) with pytest.raises(ValueError): - llm("write an ordered list of five items", stop=["\n"]) + llm.invoke("write an ordered list of five items", stop=["\n"]) def test_saving_loading_llm(tmp_path: Path) -> None: @@ -158,7 +158,7 @@ def test_openai_streaming_multiple_prompts_error() -> None: def test_openai_streaming_call() -> None: """Test valid call to openai.""" llm = OpenAI(max_tokens=10, streaming=True) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -173,7 +173,7 @@ def test_openai_streaming_callback() -> None: callback_manager=callback_manager, verbose=True, ) - llm("Write me a sentence with 100 words.") + llm.invoke("Write me a sentence with 100 words.") assert callback_handler.llm_streams == 10 diff --git a/libs/community/tests/integration_tests/llms/test_openllm.py b/libs/community/tests/integration_tests/llms/test_openllm.py index acc61a8bcd..ac5d7e3013 100644 --- a/libs/community/tests/integration_tests/llms/test_openllm.py +++ b/libs/community/tests/integration_tests/llms/test_openllm.py @@ -4,7 +4,7 @@ from langchain_community.llms.openllm import OpenLLM def test_openllm_llm_local() -> None: llm = OpenLLM(model_name="flan-t5", model_id="google/flan-t5-small") - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -12,5 +12,5 @@ def test_openllm_with_kwargs() -> None: llm = OpenLLM( model_name="flan-t5", model_id="google/flan-t5-small", temperature=0.84 ) - output = llm("Say bar:") + output = llm.invoke("Say bar:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_openlm.py b/libs/community/tests/integration_tests/llms/test_openlm.py index 5c93081027..a567a37440 100644 --- a/libs/community/tests/integration_tests/llms/test_openlm.py +++ b/libs/community/tests/integration_tests/llms/test_openlm.py @@ -4,5 +4,5 @@ from langchain_community.llms.openlm import OpenLM def test_openlm_call() -> None: """Test valid call to openlm.""" llm = OpenLM(model_name="dolly-v2-7b", max_tokens=10) - output = llm(prompt="Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_pai_eas_endpoint.py b/libs/community/tests/integration_tests/llms/test_pai_eas_endpoint.py index 80043c1953..e8476bd2a1 100644 --- a/libs/community/tests/integration_tests/llms/test_pai_eas_endpoint.py +++ b/libs/community/tests/integration_tests/llms/test_pai_eas_endpoint.py @@ -12,7 +12,7 @@ def test_pai_eas_v1_call() -> None: eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), version="1.0", ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -22,7 +22,7 @@ def test_pai_eas_v2_call() -> None: eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), version="2.0", ) - output = 
llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_petals.py b/libs/community/tests/integration_tests/llms/test_petals.py index 4fd76753a0..d1af2aaed0 100644 --- a/libs/community/tests/integration_tests/llms/test_petals.py +++ b/libs/community/tests/integration_tests/llms/test_petals.py @@ -24,5 +24,5 @@ def test_api_key_masked_when_passed_via_constructor( def test_gooseai_call() -> None: """Test valid call to gooseai.""" llm = Petals(max_new_tokens=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_pipelineai.py b/libs/community/tests/integration_tests/llms/test_pipelineai.py index cefd9b3564..895bbbe27a 100644 --- a/libs/community/tests/integration_tests/llms/test_pipelineai.py +++ b/libs/community/tests/integration_tests/llms/test_pipelineai.py @@ -6,5 +6,5 @@ from langchain_community.llms.pipelineai import PipelineAI def test_pipelineai_call() -> None: """Test valid call to Pipeline Cloud.""" llm = PipelineAI() - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_predictionguard.py b/libs/community/tests/integration_tests/llms/test_predictionguard.py index 3a210ce763..fbe47e7499 100644 --- a/libs/community/tests/integration_tests/llms/test_predictionguard.py +++ b/libs/community/tests/integration_tests/llms/test_predictionguard.py @@ -6,5 +6,5 @@ from langchain_community.llms.predictionguard import PredictionGuard def test_predictionguard_call() -> None: """Test valid call to prediction guard.""" llm = PredictionGuard(model="OpenAI-text-davinci-003") - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_promptlayer_openai.py b/libs/community/tests/integration_tests/llms/test_promptlayer_openai.py index bf9bc7db44..430a899643 100644 --- a/libs/community/tests/integration_tests/llms/test_promptlayer_openai.py +++ b/libs/community/tests/integration_tests/llms/test_promptlayer_openai.py @@ -12,7 +12,7 @@ from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI def test_promptlayer_openai_call() -> None: """Test valid call to promptlayer openai.""" llm = PromptLayerOpenAI(max_tokens=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -36,9 +36,9 @@ def test_promptlayer_openai_stop_valid() -> None: """Test promptlayer openai stop logic on valid configuration.""" query = "write an ordered list of five items" first_llm = PromptLayerOpenAI(stop="3", temperature=0) - first_output = first_llm(query) + first_output = first_llm.invoke(query) second_llm = PromptLayerOpenAI(temperature=0) - second_output = second_llm(query, stop=["3"]) + second_output = second_llm.invoke(query, stop=["3"]) # Because it stops on new lines, shouldn't return anything assert first_output == second_output @@ -47,7 +47,7 @@ def test_promptlayer_openai_stop_error() -> None: """Test promptlayer openai stop logic on bad configuration.""" llm = PromptLayerOpenAI(stop="3", temperature=0) with pytest.raises(ValueError): - llm("write an ordered list of five items", stop=["\n"]) + llm.invoke("write an ordered list of five items", stop=["\n"]) def test_saving_loading_llm(tmp_path: Path) -> None: diff --git 
a/libs/community/tests/integration_tests/llms/test_propmptlayer_openai_chat.py b/libs/community/tests/integration_tests/llms/test_propmptlayer_openai_chat.py index a27e011450..4a1c91d276 100644 --- a/libs/community/tests/integration_tests/llms/test_propmptlayer_openai_chat.py +++ b/libs/community/tests/integration_tests/llms/test_propmptlayer_openai_chat.py @@ -11,7 +11,7 @@ from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat def test_promptlayer_openai_chat_call() -> None: """Test valid call to promptlayer openai.""" llm = PromptLayerOpenAIChat(max_tokens=10) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -19,9 +19,9 @@ def test_promptlayer_openai_chat_stop_valid() -> None: """Test promptlayer openai stop logic on valid configuration.""" query = "write an ordered list of five items" first_llm = PromptLayerOpenAIChat(stop="3", temperature=0) - first_output = first_llm(query) + first_output = first_llm.invoke(query) second_llm = PromptLayerOpenAIChat(temperature=0) - second_output = second_llm(query, stop=["3"]) + second_output = second_llm.invoke(query, stop=["3"]) # Because it stops on new lines, shouldn't return anything assert first_output == second_output @@ -30,7 +30,7 @@ def test_promptlayer_openai_chat_stop_error() -> None: """Test promptlayer openai stop logic on bad configuration.""" llm = PromptLayerOpenAIChat(stop="3", temperature=0) with pytest.raises(ValueError): - llm("write an ordered list of five items", stop=["\n"]) + llm.invoke("write an ordered list of five items", stop=["\n"]) def test_saving_loading_llm(tmp_path: Path) -> None: diff --git a/libs/community/tests/integration_tests/llms/test_qianfan_endpoint.py b/libs/community/tests/integration_tests/llms/test_qianfan_endpoint.py index 30a9e135e1..576c6ab9e4 100644 --- a/libs/community/tests/integration_tests/llms/test_qianfan_endpoint.py +++ b/libs/community/tests/integration_tests/llms/test_qianfan_endpoint.py @@ -9,7 +9,7 @@ from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint def test_call() -> None: """Test valid call to qianfan.""" llm = QianfanLLMEndpoint() - output = llm("write a joke") + output = llm.invoke("write a joke") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_replicate.py b/libs/community/tests/integration_tests/llms/test_replicate.py index a5de0889b4..13a20ae9ee 100644 --- a/libs/community/tests/integration_tests/llms/test_replicate.py +++ b/libs/community/tests/integration_tests/llms/test_replicate.py @@ -11,7 +11,7 @@ TEST_MODEL = "replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899 def test_replicate_call() -> None: """Test simple non-streaming call to Replicate.""" llm = Replicate(model=TEST_MODEL) - output = llm("What is LangChain") + output = llm.invoke("What is LangChain") assert output assert isinstance(output, str) @@ -22,7 +22,7 @@ def test_replicate_streaming_call() -> None: callback_manager = CallbackManager([callback_handler]) llm = Replicate(streaming=True, callback_manager=callback_manager, model=TEST_MODEL) - output = llm("What is LangChain") + output = llm.invoke("What is LangChain") assert output assert isinstance(output, str) @@ -32,11 +32,11 @@ def test_replicate_model_kwargs() -> None: llm = Replicate( model=TEST_MODEL, model_kwargs={"max_length": 100, "temperature": 0.01} ) - long_output = llm("What is LangChain") + long_output = llm.invoke("What is LangChain") llm = Replicate( model=TEST_MODEL, 
model_kwargs={"max_length": 10, "temperature": 0.01} ) - short_output = llm("What is LangChain") + short_output = llm.invoke("What is LangChain") assert len(short_output) < len(long_output) assert llm.model_kwargs == {"max_length": 10, "temperature": 0.01} diff --git a/libs/community/tests/integration_tests/llms/test_rwkv.py b/libs/community/tests/integration_tests/llms/test_rwkv.py index 05d85d263e..8f18f80d97 100644 --- a/libs/community/tests/integration_tests/llms/test_rwkv.py +++ b/libs/community/tests/integration_tests/llms/test_rwkv.py @@ -31,5 +31,5 @@ def test_rwkv_inference() -> None: """Test valid gpt4all inference.""" model_path = _download_model() llm = RWKV(model=model_path, tokens_path="20B_tokenizer.json", strategy="cpu fp32") - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_self_hosted_llm.py b/libs/community/tests/integration_tests/llms/test_self_hosted_llm.py index be6221c104..ea250c3142 100644 --- a/libs/community/tests/integration_tests/llms/test_self_hosted_llm.py +++ b/libs/community/tests/integration_tests/llms/test_self_hosted_llm.py @@ -24,7 +24,7 @@ def test_self_hosted_huggingface_pipeline_text_generation() -> None: hardware=gpu, model_reqs=model_reqs, ) - output = llm("Say foo:") # type: ignore + output = llm.invoke("Say foo:") # type: ignore assert isinstance(output, str) @@ -37,7 +37,7 @@ def test_self_hosted_huggingface_pipeline_text2text_generation() -> None: hardware=gpu, model_reqs=model_reqs, ) - output = llm("Say foo:") # type: ignore + output = llm.invoke("Say foo:") # type: ignore assert isinstance(output, str) @@ -50,7 +50,7 @@ def test_self_hosted_huggingface_pipeline_summarization() -> None: hardware=gpu, model_reqs=model_reqs, ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -82,7 +82,7 @@ def test_init_with_local_pipeline() -> None: model_reqs=model_reqs, inference_fn=inference_fn, ) - output = llm("Say foo:") # type: ignore + output = llm.invoke("Say foo:") # type: ignore assert isinstance(output, str) @@ -101,7 +101,7 @@ def test_init_with_pipeline_path() -> None: model_reqs=model_reqs, inference_fn=inference_fn, ) - output = llm("Say foo:") # type: ignore + output = llm.invoke("Say foo:") # type: ignore assert isinstance(output, str) @@ -114,5 +114,5 @@ def test_init_with_pipeline_fn() -> None: model_reqs=model_reqs, inference_fn=inference_fn, ) - output = llm("Say foo:") # type: ignore + output = llm.invoke("Say foo:") # type: ignore assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_sparkllm.py b/libs/community/tests/integration_tests/llms/test_sparkllm.py index 6df9bf7c36..0d6c591439 100644 --- a/libs/community/tests/integration_tests/llms/test_sparkllm.py +++ b/libs/community/tests/integration_tests/llms/test_sparkllm.py @@ -7,7 +7,7 @@ from langchain_community.llms.sparkllm import SparkLLM def test_call() -> None: """Test valid call to sparkllm.""" llm = SparkLLM() - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_stochasticai.py b/libs/community/tests/integration_tests/llms/test_stochasticai.py index 31da7fc268..827c7295ef 100644 --- a/libs/community/tests/integration_tests/llms/test_stochasticai.py +++ b/libs/community/tests/integration_tests/llms/test_stochasticai.py @@ -6,5 +6,5 @@ from langchain_community.llms.stochasticai import 
StochasticAI def test_stochasticai_call() -> None: """Test valid call to StochasticAI.""" llm = StochasticAI() - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_titan_takeoff.py b/libs/community/tests/integration_tests/llms/test_titan_takeoff.py index a573bb55e5..f31c3124e4 100644 --- a/libs/community/tests/integration_tests/llms/test_titan_takeoff.py +++ b/libs/community/tests/integration_tests/llms/test_titan_takeoff.py @@ -85,7 +85,7 @@ def test_titan_takeoff_bad_call( llm = takeoff_object(streaming=streaming) with pytest.raises(TakeoffException): - llm("What is 2 + 2?") + llm.invoke("What is 2 + 2?") assert len(httpx_mock.get_requests()) == 1 assert httpx_mock.get_requests()[0].url == url assert json.loads(httpx_mock.get_requests()[0].content)["text"] == "What is 2 + 2?" @@ -124,7 +124,7 @@ def test_titan_takeoff_model_initialisation( llm = takeoff_object( port=inf_port, mgmt_port=mgnt_port, models=[reader_1, reader_2] ) - output = llm("What is 2 + 2?") + output = llm.invoke("What is 2 + 2?") assert isinstance(output, str) # Ensure the management api was called to create the reader diff --git a/libs/community/tests/integration_tests/llms/test_together.py b/libs/community/tests/integration_tests/llms/test_together.py index 98a7659874..1eff409b19 100644 --- a/libs/community/tests/integration_tests/llms/test_together.py +++ b/libs/community/tests/integration_tests/llms/test_together.py @@ -18,7 +18,7 @@ def test_together_call() -> None: temperature=0.2, max_tokens=250, ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert llm._llm_type == "together" assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_tongyi.py b/libs/community/tests/integration_tests/llms/test_tongyi.py index 7e918d87be..04c6e2e997 100644 --- a/libs/community/tests/integration_tests/llms/test_tongyi.py +++ b/libs/community/tests/integration_tests/llms/test_tongyi.py @@ -8,7 +8,7 @@ from langchain_community.llms.tongyi import Tongyi def test_tongyi_call() -> None: """Test valid call to tongyi.""" llm = Tongyi() - output = llm("who are you") + output = llm.invoke("who are you") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_vertexai.py b/libs/community/tests/integration_tests/llms/test_vertexai.py index f9547dea4c..8d50bc4f37 100644 --- a/libs/community/tests/integration_tests/llms/test_vertexai.py +++ b/libs/community/tests/integration_tests/llms/test_vertexai.py @@ -40,7 +40,7 @@ def test_vertex_call(model_name: str) -> None: if model_name else VertexAI(temperature=0.0) ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -116,7 +116,7 @@ def test_model_garden( result_arg=result_arg, location=location, ) - output = llm("What is the meaning of life?") + output = llm.invoke("What is the meaning of life?") assert isinstance(output, str) assert llm._llm_type == "vertexai_model_garden" diff --git a/libs/community/tests/integration_tests/llms/test_volcengine_maas.py b/libs/community/tests/integration_tests/llms/test_volcengine_maas.py index f6bef52690..b2af4e1d6f 100644 --- a/libs/community/tests/integration_tests/llms/test_volcengine_maas.py +++ b/libs/community/tests/integration_tests/llms/test_volcengine_maas.py @@ -37,7 +37,7 @@ def test_api_key_masked_when_passed_via_constructor( def test_default_call() -> None: """Test valid call to volc engine.""" llm = VolcEngineMaasLLM() - 
output = llm("tell me a joke") + output = llm.invoke("tell me a joke") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_watsonxllm.py b/libs/community/tests/integration_tests/llms/test_watsonxllm.py index 315d31df7a..a9c3d0fc92 100644 --- a/libs/community/tests/integration_tests/llms/test_watsonxllm.py +++ b/libs/community/tests/integration_tests/llms/test_watsonxllm.py @@ -10,5 +10,5 @@ def test_watsonxllm_call() -> None: apikey="***", project_id="***", ) - response = watsonxllm("What color sunflower is?") + response = watsonxllm.invoke("What color sunflower is?") assert isinstance(response, str) diff --git a/libs/community/tests/integration_tests/llms/test_weight_only_quantization.py b/libs/community/tests/integration_tests/llms/test_weight_only_quantization.py index 4fa6971eb3..628079b481 100644 --- a/libs/community/tests/integration_tests/llms/test_weight_only_quantization.py +++ b/libs/community/tests/integration_tests/llms/test_weight_only_quantization.py @@ -13,7 +13,7 @@ def test_weight_only_quantization_with_config() -> None: llm = WeightOnlyQuantPipeline.from_model_id( model_id=model_id, task="text2text-generation", quantization_config=conf ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -22,7 +22,7 @@ def test_weight_only_quantization_4bit() -> None: llm = WeightOnlyQuantPipeline.from_model_id( model_id=model_id, task="text2text-generation", load_in_4bit=True ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -31,7 +31,7 @@ def test_weight_only_quantization_8bit() -> None: llm = WeightOnlyQuantPipeline.from_model_id( model_id=model_id, task="text2text-generation", load_in_8bit=True ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -46,7 +46,7 @@ def test_init_with_pipeline() -> None: ) pipe = pipeline("text2text-generation", model=model, tokenizer=tokenizer) llm = WeightOnlyQuantPipeline(pipeline=pipe) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -58,5 +58,5 @@ def text_weight_only_pipeline_summarization() -> None: llm = WeightOnlyQuantPipeline.from_model_id( model_id=model_id, task="summarization", quantization_config=conf ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_writer.py b/libs/community/tests/integration_tests/llms/test_writer.py index b6957580d2..db8ad80914 100644 --- a/libs/community/tests/integration_tests/llms/test_writer.py +++ b/libs/community/tests/integration_tests/llms/test_writer.py @@ -6,5 +6,5 @@ from langchain_community.llms.writer import Writer def test_writer_call() -> None: """Test valid call to Writer.""" llm = Writer() - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_xinference.py b/libs/community/tests/integration_tests/llms/test_xinference.py index 3be912b4a4..31c64f9ed5 100644 --- a/libs/community/tests/integration_tests/llms/test_xinference.py +++ b/libs/community/tests/integration_tests/llms/test_xinference.py @@ -46,12 +46,12 @@ def test_xinference_llm_(setup: Tuple[str, str]) -> None: llm = Xinference(server_url=endpoint, model_uid=model_uid) - answer = llm(prompt="Q: What food can we try in the capital of France? A:") + answer = llm.invoke("Q: What food can we try in the capital of France? 
A:") assert isinstance(answer, str) - answer = llm( - prompt="Q: where can we visit in the capital of France? A:", + answer = llm.invoke( + "Q: where can we visit in the capital of France? A:", generate_config={"max_tokens": 1024, "stream": True}, ) diff --git a/libs/community/tests/integration_tests/llms/test_yuan2.py b/libs/community/tests/integration_tests/llms/test_yuan2.py index 2660a2af58..a269829a1f 100644 --- a/libs/community/tests/integration_tests/llms/test_yuan2.py +++ b/libs/community/tests/integration_tests/llms/test_yuan2.py @@ -13,7 +13,7 @@ def test_yuan2_call_method() -> None: top_p=0.9, use_history=False, ) - output = llm("写一段快速排序算法。") + output = llm.invoke("写一段快速排序算法。") assert isinstance(output, str) diff --git a/libs/community/tests/unit_tests/chat_models/konko.py b/libs/community/tests/unit_tests/chat_models/konko.py index 2fca6e67cb..20b81d7527 100644 --- a/libs/community/tests/unit_tests/chat_models/konko.py +++ b/libs/community/tests/unit_tests/chat_models/konko.py @@ -14,7 +14,7 @@ def test_konko_chat_test() -> None: """Evaluate basic ChatKonko functionality.""" chat_instance = ChatKonko(max_tokens=10) msg = HumanMessage(content="Hi") - chat_response = chat_instance([msg]) + chat_response = chat_instance.invoke([msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str) @@ -23,7 +23,7 @@ def test_konko_chat_test_openai() -> None: """Evaluate basic ChatKonko functionality.""" chat_instance = ChatKonko(max_tokens=10, model="meta-llama/llama-2-70b-chat") msg = HumanMessage(content="Hi") - chat_response = chat_instance([msg]) + chat_response = chat_instance.invoke([msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str) @@ -48,7 +48,7 @@ def test_konko_system_msg_test() -> None: chat_instance = ChatKonko(max_tokens=10) sys_msg = SystemMessage(content="Initiate user chat.") user_msg = HumanMessage(content="Hi there") - chat_response = chat_instance([sys_msg, user_msg]) + chat_response = chat_instance.invoke([sys_msg, user_msg]) assert isinstance(chat_response, BaseMessage) assert isinstance(chat_response.content, str) @@ -92,7 +92,7 @@ def test_konko_streaming_callback_test() -> None: verbose=True, ) msg = HumanMessage(content="Hi") - chat_response = chat_instance([msg]) + chat_response = chat_instance.invoke([msg]) assert callback_instance.llm_streams > 0 assert isinstance(chat_response, BaseMessage) diff --git a/libs/community/tests/unit_tests/chat_models/test_llama_edge.py b/libs/community/tests/unit_tests/chat_models/test_llama_edge.py index 4bcf0fde6e..7d11123b32 100644 --- a/libs/community/tests/unit_tests/chat_models/test_llama_edge.py +++ b/libs/community/tests/unit_tests/chat_models/test_llama_edge.py @@ -72,7 +72,7 @@ def test_wasm_chat_without_service_url() -> None: messages = [system_message, user_message] with pytest.raises(ValueError) as e: - chat(messages) + chat.invoke(messages) assert "Error code: 503" in str(e) assert "reason: The IP address or port of the chat service is incorrect." 
in str(e) diff --git a/libs/community/tests/unit_tests/llms/konko.py b/libs/community/tests/unit_tests/llms/konko.py index 3e0fe0f31b..5aa399e91d 100644 --- a/libs/community/tests/unit_tests/llms/konko.py +++ b/libs/community/tests/unit_tests/llms/konko.py @@ -15,7 +15,7 @@ def test_konko_call() -> None: temperature=0.2, max_tokens=250, ) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert llm._llm_type == "konko" assert isinstance(output, str) diff --git a/libs/community/tests/unit_tests/llms/test_callbacks.py b/libs/community/tests/unit_tests/llms/test_callbacks.py index def9aa66a4..d20b45ceb7 100644 --- a/libs/community/tests/unit_tests/llms/test_callbacks.py +++ b/libs/community/tests/unit_tests/llms/test_callbacks.py @@ -13,7 +13,7 @@ def test_llm_with_callbacks() -> None: """Test LLM callbacks.""" handler = FakeCallbackHandler() llm = FakeListLLM(callbacks=[handler], verbose=True, responses=["foo"]) - output = llm("foo") + output = llm.invoke("foo") assert output == "foo" assert handler.starts == 1 assert handler.ends == 1 @@ -26,7 +26,7 @@ def test_chat_model_with_v1_callbacks() -> None: llm = FakeListChatModel( callbacks=[handler], verbose=True, responses=["fake response"] ) - output = llm([HumanMessage(content="foo")]) + output = llm.invoke([HumanMessage(content="foo")]) assert output.content == "fake response" assert handler.starts == 1 assert handler.ends == 1 @@ -41,7 +41,7 @@ def test_chat_model_with_v2_callbacks() -> None: llm = FakeListChatModel( callbacks=[handler], verbose=True, responses=["fake response"] ) - output = llm([HumanMessage(content="foo")]) + output = llm.invoke([HumanMessage(content="foo")]) assert output.content == "fake response" assert handler.starts == 1 assert handler.ends == 1 diff --git a/libs/community/tests/unit_tests/llms/test_gradient_ai.py b/libs/community/tests/unit_tests/llms/test_gradient_ai.py index 308b27eea5..ceeb650f9f 100644 --- a/libs/community/tests/unit_tests/llms/test_gradient_ai.py +++ b/libs/community/tests/unit_tests/llms/test_gradient_ai.py @@ -64,7 +64,7 @@ def test_gradient_llm_sync(mocker: MockerFixture, setup: dict) -> None: assert llm.gradient_workspace_id == _GRADIENT_WORKSPACE_ID assert llm.model_id == _MODEL_ID - response = llm("Say foo:") + response = llm.invoke("Say foo:") want = "bar" assert response == want diff --git a/libs/community/tests/unit_tests/llms/test_ollama.py b/libs/community/tests/unit_tests/llms/test_ollama.py index 1a332d3237..0c8c4e9ace 100644 --- a/libs/community/tests/unit_tests/llms/test_ollama.py +++ b/libs/community/tests/unit_tests/llms/test_ollama.py @@ -46,7 +46,7 @@ def test_pass_headers_if_provided(monkeypatch: MonkeyPatch) -> None: monkeypatch.setattr(requests, "post", mock_post) - llm("Test prompt") + llm.invoke("Test prompt") def test_handle_if_headers_not_provided(monkeypatch: MonkeyPatch) -> None: @@ -65,7 +65,7 @@ def test_handle_if_headers_not_provided(monkeypatch: MonkeyPatch) -> None: monkeypatch.setattr(requests, "post", mock_post) - llm("Test prompt") + llm.invoke("Test prompt") def test_handle_kwargs_top_level_parameters(monkeypatch: MonkeyPatch) -> None: @@ -109,7 +109,7 @@ def test_handle_kwargs_top_level_parameters(monkeypatch: MonkeyPatch) -> None: monkeypatch.setattr(requests, "post", mock_post) - llm("Test prompt", model="test-model", system="Test system prompt") + llm.invoke("Test prompt", model="test-model", system="Test system prompt") def test_handle_kwargs_with_unknown_param(monkeypatch: MonkeyPatch) -> None: @@ -157,7 +157,7 @@ def 
test_handle_kwargs_with_unknown_param(monkeypatch: MonkeyPatch) -> None: monkeypatch.setattr(requests, "post", mock_post) - llm("Test prompt", unknown="Unknown parameter value", temperature=0.8) + llm.invoke("Test prompt", unknown="Unknown parameter value", temperature=0.8) def test_handle_kwargs_with_options(monkeypatch: MonkeyPatch) -> None: @@ -189,7 +189,7 @@ def test_handle_kwargs_with_options(monkeypatch: MonkeyPatch) -> None: monkeypatch.setattr(requests, "post", mock_post) - llm( + llm.invoke( "Test prompt", model="test-another-model", options={"unknown_option": "Unknown option value"}, diff --git a/libs/langchain/tests/unit_tests/test_cache.py b/libs/langchain/tests/unit_tests/test_cache.py index 13e27c8e02..70bbaaf89d 100644 --- a/libs/langchain/tests/unit_tests/test_cache.py +++ b/libs/langchain/tests/unit_tests/test_cache.py @@ -98,7 +98,7 @@ def test_old_sqlite_llm_caching() -> None: with Session(llm_cache.engine) as session, session.begin(): for item in items: session.merge(item) - assert llm(prompt) == cached_response + assert llm.invoke(prompt) == cached_response async def test_chat_model_caching() -> None: @@ -114,7 +114,7 @@ async def test_chat_model_caching() -> None: llm_string=llm._get_llm_string(), return_val=[ChatGeneration(message=cached_message)], ) - result = llm(prompt) + result = llm.invoke(prompt) assert isinstance(result, AIMessage) assert result.content == cached_response @@ -147,8 +147,8 @@ async def test_chat_model_caching_params() -> None: llm_string=llm._get_llm_string(functions=[]), return_val=[ChatGeneration(message=cached_message)], ) - result = llm(prompt, functions=[]) - result_no_params = llm(prompt) + result = llm.invoke(prompt, functions=[]) + result_no_params = llm.invoke(prompt) assert isinstance(result, AIMessage) assert result.content == cached_response assert isinstance(result_no_params, AIMessage) @@ -186,7 +186,7 @@ async def test_llm_cache_clear() -> None: return_val=[Generation(text=cached_response)], ) llm_cache.clear() - response = llm(prompt) + response = llm.invoke(prompt) assert response == expected_response # async test diff --git a/libs/partners/anthropic/langchain_anthropic/llms.py b/libs/partners/anthropic/langchain_anthropic/llms.py index 2ab9ab04c5..e8fac8fa11 100644 --- a/libs/partners/anthropic/langchain_anthropic/llms.py +++ b/libs/partners/anthropic/langchain_anthropic/llms.py @@ -232,7 +232,7 @@ class AnthropicLLM(LLM, _AnthropicCommon): prompt = "What are the biggest risks facing humanity?" 
prompt = f"\n\nHuman: {prompt}\n\nAssistant:" - response = model(prompt) + response = model.invoke(prompt) """ if self.streaming: diff --git a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py index 94f773975e..459d542aa7 100644 --- a/libs/partners/anthropic/tests/integration_tests/test_chat_models.py +++ b/libs/partners/anthropic/tests/integration_tests/test_chat_models.py @@ -108,7 +108,7 @@ def test_anthropic_call() -> None: """Test valid call to anthropic.""" chat = ChatAnthropic(model="test") message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str) diff --git a/libs/partners/anthropic/tests/integration_tests/test_llms.py b/libs/partners/anthropic/tests/integration_tests/test_llms.py index b0c5e4f782..35bbf89377 100644 --- a/libs/partners/anthropic/tests/integration_tests/test_llms.py +++ b/libs/partners/anthropic/tests/integration_tests/test_llms.py @@ -25,7 +25,7 @@ def test_anthropic_model_param() -> None: def test_anthropic_call() -> None: """Test valid call to anthropic.""" llm = Anthropic(model="claude-instant-1") - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -49,7 +49,7 @@ def test_anthropic_streaming_callback() -> None: callback_manager=callback_manager, verbose=True, ) - llm("Write me a sentence with 100 words.") + llm.invoke("Write me a sentence with 100 words.") assert callback_handler.llm_streams > 1 diff --git a/libs/partners/ibm/langchain_ibm/llms.py b/libs/partners/ibm/langchain_ibm/llms.py index bdf51f9b99..43d8b61942 100644 --- a/libs/partners/ibm/langchain_ibm/llms.py +++ b/libs/partners/ibm/langchain_ibm/llms.py @@ -333,7 +333,7 @@ class WatsonxLLM(BaseLLM): Example: .. 
code-block:: python - response = watsonx_llm("What is a molecule") + response = watsonx_llm.invoke("What is a molecule") """ result = self._generate( prompts=[prompt], stop=stop, run_manager=run_manager, **kwargs diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_azure.py b/libs/partners/openai/tests/integration_tests/chat_models/test_azure.py index 001b5296e3..2cd97fd0cb 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_azure.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_azure.py @@ -42,7 +42,7 @@ def llm() -> AzureChatOpenAI: def test_chat_openai(llm: AzureChatOpenAI) -> None: """Test AzureChatOpenAI wrapper.""" message = HumanMessage(content="Hello") - response = llm([message]) + response = llm.invoke([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -89,7 +89,7 @@ def test_chat_openai_streaming() -> None: verbose=True, ) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage) diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py index e1f15ec9c5..ace749d5e4 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py @@ -42,7 +42,7 @@ def test_chat_openai() -> None: default_query=None, ) message = HumanMessage(content="Hello") - response = chat([message]) + response = chat.invoke([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -60,7 +60,7 @@ def test_chat_openai_system_message() -> None: chat = ChatOpenAI(max_tokens=10) system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") - response = chat([system_message, human_message]) + response = chat.invoke([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) diff --git a/libs/partners/openai/tests/integration_tests/llms/test_azure.py b/libs/partners/openai/tests/integration_tests/llms/test_azure.py index c00e5afbd1..019d7146b7 100644 --- a/libs/partners/openai/tests/integration_tests/llms/test_azure.py +++ b/libs/partners/openai/tests/integration_tests/llms/test_azure.py @@ -38,7 +38,7 @@ def llm() -> AzureOpenAI: @pytest.mark.scheduled def test_openai_call(llm: AzureOpenAI) -> None: """Test valid call to openai.""" - output = llm("Say something nice:") + output = llm.invoke("Say something nice:") assert isinstance(output, str) @@ -133,7 +133,7 @@ def test_openai_streaming_multiple_prompts_error() -> None: def test_openai_streaming_call() -> None: """Test valid call to openai.""" llm = _get_llm(max_tokens=10, streaming=True) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -148,7 +148,7 @@ def test_openai_streaming_callback() -> None: callback_manager=callback_manager, verbose=True, ) - llm("Write me a sentence with 100 words.") + llm.invoke("Write me a sentence with 100 words.") assert callback_handler.llm_streams == 11 diff --git a/libs/partners/openai/tests/integration_tests/llms/test_base.py b/libs/partners/openai/tests/integration_tests/llms/test_base.py index 608e42ed3e..651f94ef1b 100644 --- a/libs/partners/openai/tests/integration_tests/llms/test_base.py +++ 
b/libs/partners/openai/tests/integration_tests/llms/test_base.py @@ -76,7 +76,7 @@ def test_invoke() -> None: def test_openai_call() -> None: """Test valid call to openai.""" llm = OpenAI() - output = llm("Say something nice:") + output = llm.invoke("Say something nice:") assert isinstance(output, str) @@ -92,9 +92,9 @@ def test_openai_stop_valid() -> None: """Test openai stop logic on valid configuration.""" query = "write an ordered list of five items" first_llm = OpenAI(stop="3", temperature=0) - first_output = first_llm(query) + first_output = first_llm.invoke(query) second_llm = OpenAI(temperature=0) - second_output = second_llm(query, stop=["3"]) + second_output = second_llm.invoke(query, stop=["3"]) # Because it stops on new lines, shouldn't return anything assert first_output == second_output @@ -103,7 +103,7 @@ def test_openai_stop_error() -> None: """Test openai stop logic on bad configuration.""" llm = OpenAI(stop="3", temperature=0) with pytest.raises(ValueError): - llm("write an ordered list of five items", stop=["\n"]) + llm.invoke("write an ordered list of five items", stop=["\n"]) @pytest.mark.scheduled @@ -208,7 +208,7 @@ def test_openai_streaming_multiple_prompts_error() -> None: def test_openai_streaming_call() -> None: """Test valid call to openai.""" llm = OpenAI(max_tokens=10, streaming=True) - output = llm("Say foo:") + output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -223,7 +223,7 @@ def test_openai_streaming_callback() -> None: callback_manager=callback_manager, verbose=True, ) - llm("Write me a sentence with 100 words.") + llm.invoke("Write me a sentence with 100 words.") # new client sometimes passes 2 tokens at once assert callback_handler.llm_streams >= 5 diff --git a/libs/partners/upstage/tests/integration_tests/test_chat_models.py b/libs/partners/upstage/tests/integration_tests/test_chat_models.py index 0a0da3a3fb..e0a1ba5757 100644 --- a/libs/partners/upstage/tests/integration_tests/test_chat_models.py +++ b/libs/partners/upstage/tests/integration_tests/test_chat_models.py @@ -17,7 +17,7 @@ def test_chat_upstage_system_message() -> None: chat = ChatUpstage(max_tokens=10) system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") - response = chat([system_message, human_message]) + response = chat.invoke([system_message, human_message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)