diff --git a/cookbook/autogpt/marathon_times.ipynb b/cookbook/autogpt/marathon_times.ipynb
index 16f2221ef9..b2b1a1150e 100644
--- a/cookbook/autogpt/marathon_times.ipynb
+++ b/cookbook/autogpt/marathon_times.ipynb
@@ -59,7 +59,7 @@
    },
    "outputs": [],
    "source": [
-    "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=1.0)"
+    "llm = ChatOpenAI(model=\"gpt-4\", temperature=1.0)"
    ]
   },
   {
diff --git a/cookbook/elasticsearch_db_qa.ipynb b/cookbook/elasticsearch_db_qa.ipynb
index 3a38446a30..29f2037381 100644
--- a/cookbook/elasticsearch_db_qa.ipynb
+++ b/cookbook/elasticsearch_db_qa.ipynb
@@ -84,7 +84,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
+    "llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
     "chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, verbose=True)"
    ]
   },
diff --git a/cookbook/langgraph_crag.ipynb b/cookbook/langgraph_crag.ipynb
index 8dc7750c9a..8ac3113900 100644
--- a/cookbook/langgraph_crag.ipynb
+++ b/cookbook/langgraph_crag.ipynb
@@ -229,7 +229,7 @@
     "    prompt = hub.pull(\"rlm/rag-prompt\")\n",
     "\n",
     "    # LLM\n",
-    "    llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0, streaming=True)\n",
+    "    llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0, streaming=True)\n",
     "\n",
     "    # Post-processing\n",
     "    def format_docs(docs):\n",
diff --git a/cookbook/langgraph_self_rag.ipynb b/cookbook/langgraph_self_rag.ipynb
index 50f7dbef17..91adaf9d6f 100644
--- a/cookbook/langgraph_self_rag.ipynb
+++ b/cookbook/langgraph_self_rag.ipynb
@@ -236,7 +236,7 @@
     "    prompt = hub.pull(\"rlm/rag-prompt\")\n",
     "\n",
     "    # LLM\n",
-    "    llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+    "    llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
     "\n",
     "    # Post-processing\n",
     "    def format_docs(docs):\n",
diff --git a/cookbook/press_releases.ipynb b/cookbook/press_releases.ipynb
index 30aba0a68d..104fe40d62 100644
--- a/cookbook/press_releases.ipynb
+++ b/cookbook/press_releases.ipynb
@@ -84,7 +84,7 @@
     "from langchain.retrievers import KayAiRetriever\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
+    "model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
     "retriever = KayAiRetriever.create(\n",
     "    dataset_id=\"company\", data_types=[\"PressRelease\"], num_contexts=6\n",
     ")\n",
diff --git a/cookbook/retrieval_in_sql.ipynb b/cookbook/retrieval_in_sql.ipynb
index 998e9aa8dd..e73e400018 100644
--- a/cookbook/retrieval_in_sql.ipynb
+++ b/cookbook/retrieval_in_sql.ipynb
@@ -274,7 +274,7 @@
     "db = SQLDatabase.from_uri(\n",
     "    CONNECTION_STRING\n",
     ")  # We reconnect to db so the new columns are loaded as well.\n",
-    "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
+    "llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
     "\n",
     "sql_query_chain = (\n",
     "    RunnablePassthrough.assign(schema=get_schema)\n",
diff --git a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb
index 04f42f449c..2e92d35b30 100644
--- a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb
+++ b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb
@@ -3811,7 +3811,7 @@
     "from langchain.chains import ConversationalRetrievalChain\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\")  # switch to 'gpt-4'\n",
+    "model = ChatOpenAI(model=\"gpt-3.5-turbo-0613\")  # switch to 'gpt-4'\n",
     "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
    ]
   },
diff --git a/cookbook/two_agent_debate_tools.ipynb b/cookbook/two_agent_debate_tools.ipynb
index b31e769dee..3815889ff2 100644
--- a/cookbook/two_agent_debate_tools.ipynb
+++ b/cookbook/two_agent_debate_tools.ipynb
@@ -424,7 +424,7 @@
     "    DialogueAgentWithTools(\n",
     "        name=name,\n",
     "        system_message=SystemMessage(content=system_message),\n",
-    "        model=ChatOpenAI(model_name=\"gpt-4\", temperature=0.2),\n",
+    "        model=ChatOpenAI(model=\"gpt-4\", temperature=0.2),\n",
     "        tool_names=tools,\n",
     "        top_k_results=2,\n",
     "    )\n",
diff --git a/cookbook/wikibase_agent.ipynb b/cookbook/wikibase_agent.ipynb
index 13c4063cf7..d48b0eaa7b 100644
--- a/cookbook/wikibase_agent.ipynb
+++ b/cookbook/wikibase_agent.ipynb
@@ -601,7 +601,7 @@
    "source": [
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)"
+    "llm = ChatOpenAI(model=\"gpt-4\", temperature=0)"
    ]
   },
   {
diff --git a/docs/docs/get_started/quickstart.mdx b/docs/docs/get_started/quickstart.mdx
index de63efd0c2..3a4d7d9de7 100644
--- a/docs/docs/get_started/quickstart.mdx
+++ b/docs/docs/get_started/quickstart.mdx
@@ -94,12 +94,12 @@ from langchain_openai import ChatOpenAI
 
 llm = ChatOpenAI()
 ```
 
-If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:
+If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class:
 
 ```python
 from langchain_openai import ChatOpenAI
 
-llm = ChatOpenAI(openai_api_key="...")
+llm = ChatOpenAI(api_key="...")
 ```
 
@@ -509,7 +509,7 @@ from langchain.agents import AgentExecutor
 
 # Get the prompt to use - you can modify this!
 prompt = hub.pull("hwchase17/openai-functions-agent")
-# You need to set OPENAI_API_KEY environment variable or pass it as argument `openai_api_key`.
+# You need to set OPENAI_API_KEY environment variable or pass it as argument `api_key`.
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) agent = create_openai_functions_agent(llm, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) diff --git a/docs/docs/guides/development/debugging.md b/docs/docs/guides/development/debugging.md index e8ca2622ec..e606d808e5 100644 --- a/docs/docs/guides/development/debugging.md +++ b/docs/docs/guides/development/debugging.md @@ -27,7 +27,7 @@ Let's suppose we have a simple agent, and want to visualize the actions it takes from langchain.agents import AgentType, initialize_agent, load_tools from langchain_openai import ChatOpenAI -llm = ChatOpenAI(model_name="gpt-4", temperature=0) +llm = ChatOpenAI(model="gpt-4", temperature=0) tools = load_tools(["ddg-search", "llm-math"], llm=llm) agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION) ``` diff --git a/docs/docs/guides/productionization/fallbacks.ipynb b/docs/docs/guides/productionization/fallbacks.ipynb index 36762d4bbc..0c29961c6e 100644 --- a/docs/docs/guides/productionization/fallbacks.ipynb +++ b/docs/docs/guides/productionization/fallbacks.ipynb @@ -204,7 +204,7 @@ " ]\n", ")\n", "# Here we're going to use a bad model name to easily create a chain that will error\n", - "chat_model = ChatOpenAI(model_name=\"gpt-fake\")\n", + "chat_model = ChatOpenAI(model=\"gpt-fake\")\n", "bad_chain = chat_prompt | chat_model | StrOutputParser()" ] }, diff --git a/docs/docs/integrations/document_loaders/youtube_audio.ipynb b/docs/docs/integrations/document_loaders/youtube_audio.ipynb index bcdd7191b6..7a34546aab 100644 --- a/docs/docs/integrations/document_loaders/youtube_audio.ipynb +++ b/docs/docs/integrations/document_loaders/youtube_audio.ipynb @@ -218,7 +218,7 @@ "source": [ "# Build a QA chain\n", "qa_chain = RetrievalQA.from_chain_type(\n", - " llm=ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0),\n", + " llm=ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0),\n", " chain_type=\"stuff\",\n", " retriever=vectordb.as_retriever(),\n", ")" diff --git a/docs/docs/integrations/providers/log10.mdx b/docs/docs/integrations/providers/log10.mdx index b9e3c58030..b4378506e7 100644 --- a/docs/docs/integrations/providers/log10.mdx +++ b/docs/docs/integrations/providers/log10.mdx @@ -30,7 +30,7 @@ messages = [ HumanMessage(content="Ping?"), ] -llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback]) +llm = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[log10_callback]) ``` [Log10 + Langchain + Logs docs](https://github.com/log10-io/log10/blob/main/logging.md#langchain-logger) @@ -55,7 +55,7 @@ messages = [ HumanMessage(content="Ping?"), ] -llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback], temperature=0.5, tags=["test"]) +llm = ChatOpenAI(model="gpt-3.5-turbo", callbacks=[log10_callback], temperature=0.5, tags=["test"]) completion = llm.predict_messages(messages, tags=["foobar"]) print(completion) diff --git a/docs/docs/integrations/retrievers/arxiv.ipynb b/docs/docs/integrations/retrievers/arxiv.ipynb index 5d4b74a894..d347962dde 100644 --- a/docs/docs/integrations/retrievers/arxiv.ipynb +++ b/docs/docs/integrations/retrievers/arxiv.ipynb @@ -203,7 +203,7 @@ "from langchain.chains import ConversationalRetrievalChain\n", "from langchain_openai import ChatOpenAI\n", "\n", - "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n", + "model = ChatOpenAI(model=\"gpt-3.5-turbo\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" ] }, diff 
diff --git a/docs/docs/integrations/retrievers/kay.ipynb b/docs/docs/integrations/retrievers/kay.ipynb
index 49727f1178..6af7787720 100644
--- a/docs/docs/integrations/retrievers/kay.ipynb
+++ b/docs/docs/integrations/retrievers/kay.ipynb
@@ -153,7 +153,7 @@
     "from langchain.chains import ConversationalRetrievalChain\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
+    "model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
     "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
    ]
   },
diff --git a/docs/docs/integrations/retrievers/outline.ipynb b/docs/docs/integrations/retrievers/outline.ipynb
index 470498316e..c8007304c0 100644
--- a/docs/docs/integrations/retrievers/outline.ipynb
+++ b/docs/docs/integrations/retrievers/outline.ipynb
@@ -140,7 +140,7 @@
     "from langchain.chains import ConversationalRetrievalChain\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
+    "model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
     "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
    ]
   },
diff --git a/docs/docs/integrations/retrievers/sec_filings.ipynb b/docs/docs/integrations/retrievers/sec_filings.ipynb
index 67336a27d4..3cfbcddd20 100644
--- a/docs/docs/integrations/retrievers/sec_filings.ipynb
+++ b/docs/docs/integrations/retrievers/sec_filings.ipynb
@@ -81,7 +81,7 @@
     "from langchain_community.retrievers import KayAiRetriever\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
+    "model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
     "retriever = KayAiRetriever.create(\n",
     "    dataset_id=\"company\", data_types=[\"10-K\", \"10-Q\"], num_contexts=6\n",
     ")\n",
diff --git a/docs/docs/integrations/retrievers/wikipedia.ipynb b/docs/docs/integrations/retrievers/wikipedia.ipynb
index c070bb740d..c17d3e6a70 100644
--- a/docs/docs/integrations/retrievers/wikipedia.ipynb
+++ b/docs/docs/integrations/retrievers/wikipedia.ipynb
@@ -202,7 +202,7 @@
     "from langchain.chains import ConversationalRetrievalChain\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")  # switch to 'gpt-4'\n",
+    "model = ChatOpenAI(model=\"gpt-3.5-turbo\")  # switch to 'gpt-4'\n",
     "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
    ]
   },
diff --git a/docs/docs/integrations/vectorstores/jaguar.ipynb b/docs/docs/integrations/vectorstores/jaguar.ipynb
index 4a3b67782f..f12e5d2ca8 100644
--- a/docs/docs/integrations/vectorstores/jaguar.ipynb
+++ b/docs/docs/integrations/vectorstores/jaguar.ipynb
@@ -144,7 +144,7 @@
     "prompt = ChatPromptTemplate.from_template(template)\n",
     "\n",
     "\"\"\" Obtain a Large Language Model \"\"\"\n",
-    "LLM = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+    "LLM = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
     "\n",
     "\"\"\" Create a chain for the RAG flow \"\"\"\n",
     "rag_chain = (\n",
diff --git a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb
index 9c7588980b..b987baf9c3 100644
--- a/docs/docs/integrations/vectorstores/momento_vector_index.ipynb
+++ b/docs/docs/integrations/vectorstores/momento_vector_index.ipynb
@@ -394,7 +394,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+    "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
     "qa_chain = RetrievalQA.from_chain_type(llm, retriever=vector_db.as_retriever())"
    ]
   },
diff --git a/docs/docs/integrations/vectorstores/sap_hanavector.ipynb b/docs/docs/integrations/vectorstores/sap_hanavector.ipynb
index 33f8a50bed..f416f6b622 100644
--- a/docs/docs/integrations/vectorstores/sap_hanavector.ipynb
+++ b/docs/docs/integrations/vectorstores/sap_hanavector.ipynb
@@ -437,7 +437,7 @@
    "source": [
     "from langchain.chains import ConversationalRetrievalChain\n",
     "\n",
-    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
+    "llm = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
     "memory = ConversationBufferMemory(\n",
     "    memory_key=\"chat_history\", output_key=\"answer\", return_messages=True\n",
     ")\n",
diff --git a/docs/docs/integrations/vectorstores/weaviate.ipynb b/docs/docs/integrations/vectorstores/weaviate.ipynb
index 94fe604306..2020cbbbea 100644
--- a/docs/docs/integrations/vectorstores/weaviate.ipynb
+++ b/docs/docs/integrations/vectorstores/weaviate.ipynb
@@ -589,7 +589,7 @@
    "source": [
     "from langchain_community.chat_models import ChatOpenAI\n",
     "\n",
-    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+    "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
     "llm.predict(\"What did the president say about Justice Breyer\")"
    ]
   },
@@ -824,7 +824,7 @@
    "source": [
     "from langchain_community.chat_models import ChatOpenAI\n",
     "\n",
-    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)"
+    "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)"
    ]
   },
   {
diff --git a/docs/docs/modules/data_connection/text_embedding/index.mdx b/docs/docs/modules/data_connection/text_embedding/index.mdx
index dd0ff96177..d3d4599326 100644
--- a/docs/docs/modules/data_connection/text_embedding/index.mdx
+++ b/docs/docs/modules/data_connection/text_embedding/index.mdx
@@ -35,12 +35,12 @@ Accessing the API requires an API key, which you can get by creating an account
 
 export OPENAI_API_KEY="..."
 ```
 
-If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:
+If you'd prefer not to set an environment variable you can pass the key in directly via the `api_key` named parameter when initiating the OpenAI LLM class:
 
 ```python
 from langchain_openai import OpenAIEmbeddings
 
-embeddings_model = OpenAIEmbeddings(openai_api_key="...")
+embeddings_model = OpenAIEmbeddings(api_key="...")
 ```
 
 Otherwise you can initialize without any params:
diff --git a/docs/docs/modules/model_io/chat/quick_start.ipynb b/docs/docs/modules/model_io/chat/quick_start.ipynb
index d83e6677a5..7df73f959e 100644
--- a/docs/docs/modules/model_io/chat/quick_start.ipynb
+++ b/docs/docs/modules/model_io/chat/quick_start.ipynb
@@ -52,7 +52,7 @@
    "source": [
     "```{=mdx}\n",
diff --git a/docs/docs/use_cases/code_understanding.ipynb b/docs/docs/use_cases/code_understanding.ipynb
index 3ab6957a46..15ffdb29fb 100644
--- a/docs/docs/use_cases/code_understanding.ipynb
+++ b/docs/docs/use_cases/code_understanding.ipynb
@@ -237,7 +237,7 @@
     "from langchain_core.prompts import ChatPromptTemplate\n",
     "from langchain_openai import ChatOpenAI\n",
     "\n",
-    "llm = ChatOpenAI(model_name=\"gpt-4\")\n",
+    "llm = ChatOpenAI(model=\"gpt-4\")\n",
     "\n",
     "# First we need a prompt that we can pass into an LLM to generate this search query\n",
     "\n",
diff --git a/docs/docs/use_cases/data_generation.ipynb b/docs/docs/use_cases/data_generation.ipynb
index 8329f7251f..82a55cf052 100644
--- a/docs/docs/use_cases/data_generation.ipynb
+++ b/docs/docs/use_cases/data_generation.ipynb
@@ -269,7 +269,7 @@
    "outputs": [],
    "source": [
     "# LLM\n",
-    "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0.7)\n",
+    "model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0.7)\n",
     "chain = create_data_generation_chain(model)"
    ]
   },
diff --git a/docs/docs/use_cases/question_answering/chat_history.ipynb b/docs/docs/use_cases/question_answering/chat_history.ipynb
index 45b2e3869d..ecd093e523 100644
--- a/docs/docs/use_cases/question_answering/chat_history.ipynb
+++ b/docs/docs/use_cases/question_answering/chat_history.ipynb
@@ -151,7 +151,7 @@
     "# Retrieve and generate using the relevant snippets of the blog.\n",
     "retriever = vectorstore.as_retriever()\n",
     "prompt = hub.pull(\"rlm/rag-prompt\")\n",
-    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+    "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
     "\n",
     "\n",
     "def format_docs(docs):\n",
@@ -417,7 +417,7 @@
     "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
     "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
     "\n",
-    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+    "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
     "\n",
     "\n",
     "### Construct retriever ###\n",
diff --git a/docs/docs/use_cases/question_answering/sources.ipynb b/docs/docs/use_cases/question_answering/sources.ipynb
index 635d0a06f1..858181bd94 100644
--- a/docs/docs/use_cases/question_answering/sources.ipynb
+++ b/docs/docs/use_cases/question_answering/sources.ipynb
@@ -143,7 +143,7 @@
     "# Retrieve and generate using the relevant snippets of the blog.\n",
     "retriever = vectorstore.as_retriever()\n",
     "prompt = hub.pull(\"rlm/rag-prompt\")\n",
-    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+    "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
     "\n",
     "\n",
     "def format_docs(docs):\n",
diff --git a/docs/docs/use_cases/question_answering/streaming.ipynb b/docs/docs/use_cases/question_answering/streaming.ipynb
index cf895a52af..5867e75d53 100644
--- a/docs/docs/use_cases/question_answering/streaming.ipynb
+++ b/docs/docs/use_cases/question_answering/streaming.ipynb
@@ -143,7 +143,7 @@
     "# Retrieve and generate using the relevant snippets of the blog.\n",
     "retriever = vectorstore.as_retriever()\n",
     "prompt = hub.pull(\"rlm/rag-prompt\")\n",
-    "llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
+    "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
     "\n",
     "\n",
     "def format_docs(docs):\n",
diff --git a/libs/community/langchain_community/chat_models/openai.py b/libs/community/langchain_community/chat_models/openai.py
index b3d26a6c2d..de1df40703 100644
--- a/libs/community/langchain_community/chat_models/openai.py
+++ b/libs/community/langchain_community/chat_models/openai.py
@@ -160,7 +160,7 @@ class ChatOpenAI(BaseChatModel):
         .. code-block:: python
 
             from langchain_community.chat_models import ChatOpenAI
-            openai = ChatOpenAI(model_name="gpt-3.5-turbo")
+            openai = ChatOpenAI(model="gpt-3.5-turbo")
     """
 
     @property
diff --git a/libs/community/langchain_community/chat_models/promptlayer_openai.py b/libs/community/langchain_community/chat_models/promptlayer_openai.py
index 551655e4c7..aa930a474a 100644
--- a/libs/community/langchain_community/chat_models/promptlayer_openai.py
+++ b/libs/community/langchain_community/chat_models/promptlayer_openai.py
@@ -33,7 +33,7 @@ class PromptLayerChatOpenAI(ChatOpenAI):
         .. code-block:: python
 
             from langchain_community.chat_models import PromptLayerChatOpenAI
-            openai = PromptLayerChatOpenAI(model_name="gpt-3.5-turbo")
+            openai = PromptLayerChatOpenAI(model="gpt-3.5-turbo")
     """
 
     pl_tags: Optional[List[str]]
diff --git a/libs/langchain/langchain/chains/combine_documents/stuff.py b/libs/langchain/langchain/chains/combine_documents/stuff.py
index 2dfdf80cf8..c91f999683 100644
--- a/libs/langchain/langchain/chains/combine_documents/stuff.py
+++ b/libs/langchain/langchain/chains/combine_documents/stuff.py
@@ -60,7 +60,7 @@ def create_stuff_documents_chain(
             prompt = ChatPromptTemplate.from_messages(
                 [("system", "What are everyone's favorite colors:\\n\\n{context}")]
             )
-            llm = ChatOpenAI(model_name="gpt-3.5-turbo")
+            llm = ChatOpenAI(model="gpt-3.5-turbo")
             chain = create_stuff_documents_chain(llm, prompt)
 
             docs = [
diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index 62ad918e03..b7e103d9f1 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -240,7 +240,7 @@ class ChatOpenAI(BaseChatModel):
 
             from langchain_openai import ChatOpenAI
 
-            model = ChatOpenAI(model_name="gpt-3.5-turbo")
+            model = ChatOpenAI(model="gpt-3.5-turbo")
     """
 
     @property
diff --git a/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py b/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py
index d668f802d9..fe4953a2c7 100644
--- a/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py
+++ b/templates/neo4j-cypher-ft/neo4j_cypher_ft/chain.py
@@ -20,8 +20,8 @@ corrector_schema = [
 cypher_validation = CypherQueryCorrector(corrector_schema)
 
 # LLMs
-cypher_llm = ChatOpenAI(model_name="gpt-4", temperature=0.0)
-qa_llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0)
+cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0)
+qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0)
 
 
 # Extract entities from text
diff --git a/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py b/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py
index 68883a0e8c..f3fac21450 100644
--- a/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py
+++ b/templates/neo4j-cypher-memory/neo4j_cypher_memory/chain.py
@@ -20,8 +20,8 @@ corrector_schema = [
 cypher_validation = CypherQueryCorrector(corrector_schema)
 
 # LLMs
-cypher_llm = ChatOpenAI(model_name="gpt-4", temperature=0.0)
-qa_llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0)
+cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0)
+qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0)
 
 
 def convert_messages(input: List[Dict[str, Any]]) -> ChatMessageHistory:
diff --git a/templates/neo4j-cypher/neo4j_cypher/chain.py b/templates/neo4j-cypher/neo4j_cypher/chain.py
index 18243045d0..c6f9e4f0d9 100644
--- a/templates/neo4j-cypher/neo4j_cypher/chain.py
+++ b/templates/neo4j-cypher/neo4j_cypher/chain.py
@@ -17,8 +17,8 @@ corrector_schema = [
 cypher_validation = CypherQueryCorrector(corrector_schema)
 
 # LLMs
-cypher_llm = ChatOpenAI(model_name="gpt-4", temperature=0.0)
-qa_llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0)
+cypher_llm = ChatOpenAI(model="gpt-4", temperature=0.0)
+qa_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0)
 
 # Generate Cypher statement based on natural language input
 cypher_template = """Based on the Neo4j graph schema below, write a Cypher query that would answer the user's question:
diff --git a/templates/python-lint/python_lint/agent_executor.py b/templates/python-lint/python_lint/agent_executor.py
index 0a5f274bb8..4cddb33f33 100644
--- a/templates/python-lint/python_lint/agent_executor.py
+++ b/templates/python-lint/python_lint/agent_executor.py
@@ -203,13 +203,13 @@ class Instruction(BaseModel):
 
 
 agent_executor = (
-    get_agent_executor(ChatOpenAI(model_name="gpt-4-1106-preview", temperature=0.0))
+    get_agent_executor(ChatOpenAI(model="gpt-4-1106-preview", temperature=0.0))
     .configurable_alternatives(
         ConfigurableField("model_name"),
         default_key="gpt4turbo",
-        gpt4=get_agent_executor(ChatOpenAI(model_name="gpt-4", temperature=0.0)),
+        gpt4=get_agent_executor(ChatOpenAI(model="gpt-4", temperature=0.0)),
         gpt35t=get_agent_executor(
-            ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.0),
+            ChatOpenAI(model="gpt-3.5-turbo", temperature=0.0),
         ),
     )
     .with_types(input_type=Instruction, output_type=str)
diff --git a/templates/rag-jaguardb/rag_jaguardb/chain.py b/templates/rag-jaguardb/rag_jaguardb/chain.py
index b4450cf401..5a90def0b3 100644
--- a/templates/rag-jaguardb/rag_jaguardb/chain.py
+++ b/templates/rag-jaguardb/rag_jaguardb/chain.py
@@ -47,7 +47,7 @@ prompt = ChatPromptTemplate.from_template(template)
 
 
 # RAG
-model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
+model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
 chain = (
     RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
     | prompt
diff --git a/templates/rag-redis/rag_redis/chain.py b/templates/rag-redis/rag_redis/chain.py
index 7a21a2beb3..2327a08b57 100644
--- a/templates/rag-redis/rag_redis/chain.py
+++ b/templates/rag-redis/rag_redis/chain.py
@@ -54,7 +54,7 @@ prompt = ChatPromptTemplate.from_template(template)
 
 
 # RAG Chain
-model = ChatOpenAI(model_name="gpt-3.5-turbo-16k")
+model = ChatOpenAI(model="gpt-3.5-turbo-16k")
 chain = (
     RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
     | prompt
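
Every hunk above applies the same mechanical rename: `model_name=` becomes `model=` in `ChatOpenAI(...)` calls, and `openai_api_key=` becomes `api_key=` (likewise for `OpenAIEmbeddings`). Both spellings are accepted keyword aliases in `langchain_openai` at the time of this diff; the patch simply standardizes on the shorter form. A minimal sketch of the before/after pattern (illustrative only, not part of the patch):

```python
from langchain_openai import ChatOpenAI

# Before this patch, examples used the long-form keyword aliases:
# llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key="...")

# After this patch, examples use the short canonical names:
llm = ChatOpenAI(model="gpt-3.5-turbo", api_key="...", temperature=0)
```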