Harrison/stop importing from init (#10690)

pull/10693/head
Harrison Chase 9 months ago committed by GitHub
parent 9749f8ebae
commit 5442d2b1fa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -0,0 +1,23 @@
---
name: Imports
on:
push:
branches: [master]
pull_request:
branches: [master]
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Run import check
run: |
# We should not encourage imports directly from main init file
# Except for __version__ and hub
# And of course except for this file
git grep 'from langchain import' | grep -vE 'from langchain import (__version__|hub)' | grep -v '.github/workflows/check-imports.yml' && exit 1 || exit 0

@ -105,7 +105,7 @@
},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"from langchain.llms.fake import FakeListLLM\n",
"from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ModerationPiiError\n",
"\n",
@ -412,7 +412,7 @@
},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"from langchain.llms.fake import FakeListLLM\n",
"\n",
"template = \"\"\"Question: {question}\n",
@ -572,8 +572,8 @@
},
"outputs": [],
"source": [
"from langchain import HuggingFaceHub\n",
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.llms import HuggingFaceHub\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"\n",
"template = \"\"\"Question: {question}\"\"\"\n",
"\n",
@ -697,7 +697,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import SagemakerEndpoint\n",
"from langchain.llms import SagemakerEndpoint\n",
"from langchain.llms.sagemaker_endpoint import LLMContentHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import load_prompt, PromptTemplate\n",

@ -97,7 +97,7 @@
},
"outputs": [],
"source": [
"from langchain import SerpAPIWrapper\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"from langchain.chat_models import ChatOpenAI\n",

@ -468,7 +468,8 @@
}
],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.chains import LLMChain\n",
"from langchain.chains.prompt_selector import ConditionalPromptSelector\n",
"\n",
"DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(\n",
@ -593,7 +594,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.1"
}
},
"nbformat": 4,

@ -19,7 +19,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import LLMChain, OpenAI, Cohere, HuggingFaceHub, PromptTemplate\n",
"from langchain.chains import LLMChain\nfrom langchain.llms import OpenAI, Cohere, HuggingFaceHub\nfrom langchain.prompts import PromptTemplate\n",
"from langchain.model_laboratory import ModelLaboratory"
]
},
@ -139,7 +139,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import SelfAskWithSearchChain, SerpAPIWrapper\n",
"from langchain.chains import SelfAskWithSearchChain\nfrom langchain.utilities import SerpAPIWrapper\n",
"\n",
"open_ai_llm = OpenAI(temperature=0)\n",
"search = SerpAPIWrapper()\n",

@ -95,7 +95,7 @@
},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"from langchain.llms.fake import FakeListLLM\n",
"from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ModerationPiiError\n",
"\n",
@ -399,7 +399,7 @@
},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"from langchain.llms.fake import FakeListLLM\n",
"\n",
"template = \"\"\"Question: {question}\n",
@ -564,8 +564,8 @@
},
"outputs": [],
"source": [
"from langchain import HuggingFaceHub\n",
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.llms import HuggingFaceHub\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",
@ -679,7 +679,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import SagemakerEndpoint\n",
"from langchain.llms import SagemakerEndpoint\n",
"from langchain.llms.sagemaker_endpoint import LLMContentHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import load_prompt, PromptTemplate\n",

@ -123,7 +123,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import initialize_agent, AgentType"
]
},

@ -167,7 +167,7 @@
"import os\n",
"\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain import LLMChain\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.prompts.chat import (\n",
" ChatPromptTemplate,\n",

@ -199,7 +199,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import PromptTemplate\n",
"from langchain.prompts import PromptTemplate\n",
"\n",
"# Prompt\n",
"template = \"\"\"[INST] <<SYS>> Use the following pieces of context to answer the question at the end. \n",

@ -81,7 +81,7 @@
"import re\n",
"from typing import Iterator, List\n",
"\n",
"from langchain import schema\n",
"from langchain.schema import BaseMessage, HumanMessage\n",
"from langchain.chat_loaders import base as chat_loaders\n",
"\n",
"logger = logging.getLogger()\n",
@ -117,7 +117,7 @@
" with open(file_path, \"r\", encoding=\"utf-8\") as file:\n",
" lines = file.readlines()\n",
"\n",
" results: List[schema.BaseMessage] = []\n",
" results: List[BaseMessage] = []\n",
" current_sender = None\n",
" current_timestamp = None\n",
" current_content = []\n",
@ -128,7 +128,7 @@
" ):\n",
" if current_sender and current_content:\n",
" results.append(\n",
" schema.HumanMessage(\n",
" HumanMessage(\n",
" content=\"\".join(current_content).strip(),\n",
" additional_kwargs={\n",
" \"sender\": current_sender,\n",
@ -142,7 +142,7 @@
" ]\n",
" elif re.match(r\"\\[\\d{1,2}:\\d{2} (?:AM|PM)\\]\", line.strip()):\n",
" results.append(\n",
" schema.HumanMessage(\n",
" HumanMessage(\n",
" content=\"\".join(current_content).strip(),\n",
" additional_kwargs={\n",
" \"sender\": current_sender,\n",
@ -157,7 +157,7 @@
"\n",
" if current_sender and current_content:\n",
" results.append(\n",
" schema.HumanMessage(\n",
" HumanMessage(\n",
" content=\"\".join(current_content).strip(),\n",
" additional_kwargs={\n",
" \"sender\": current_sender,\n",

@ -78,7 +78,7 @@
"import re\n",
"from typing import Iterator, List\n",
"\n",
"from langchain import schema\n",
"from langchain.schema import HumanMessage, BaseMessage\n",
"from langchain.chat_loaders import base as chat_loaders\n",
"\n",
"logger = logging.getLogger()\n",
@ -110,7 +110,7 @@
" # skip non-text messages like stickers, images, etc.\n",
" if not re.match(r\"\\[.*\\]\", content):\n",
" results.append(\n",
" schema.HumanMessage(\n",
" HumanMessage(\n",
" content=content,\n",
" additional_kwargs={\n",
" \"sender\": current_sender,\n",
@ -135,7 +135,7 @@
" with open(file_path, \"r\", encoding=\"utf-8\") as file:\n",
" lines = file.readlines()\n",
"\n",
" results: List[schema.BaseMessage] = []\n",
" results: List[BaseMessage] = []\n",
" current_sender = None\n",
" current_timestamp = None\n",
" current_content = []\n",
@ -292,7 +292,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
"version": "3.10.1"
}
},
"nbformat": 4,

@ -59,7 +59,7 @@
"outputs": [],
"source": [
"from langchain.llms import AI21\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -59,7 +59,7 @@
"outputs": [],
"source": [
"from langchain.llms import AlephAlpha\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -41,7 +41,7 @@
"outputs": [],
"source": [
"from langchain.llms import Anyscale\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -154,7 +154,7 @@
}
],
"source": [
"from langchain import PromptTemplate\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.llms.azureml_endpoint import DollyContentFormatter\n",
"from langchain.chains import LLMChain\n",
"\n",

@ -53,7 +53,7 @@
"outputs": [],
"source": [
"from langchain.llms import Banana\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -107,7 +107,7 @@
"outputs": [],
"source": [
"from langchain.chains import SimpleSequentialChain\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -80,7 +80,7 @@
"outputs": [],
"source": [
"import langchain\n",
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"from langchain.llms import NIBittensorLLM\n",
"\n",
"langchain.debug = True\n",
@ -123,7 +123,7 @@
" AgentExecutor,\n",
")\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain import LLMChain, PromptTemplate\n",
"from langchain.chains import LLMChain\nfrom langchain.prompts import PromptTemplate\n",
"from langchain.utilities import GoogleSearchAPIWrapper, SerpAPIWrapper\n",
"from langchain.llms import NIBittensorLLM\n",
"\n",

@ -44,7 +44,7 @@
"source": [
"import os\n",
"from langchain.llms import CerebriumAI\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -22,7 +22,7 @@
"outputs": [],
"source": [
"from langchain.llms import ChatGLM\n",
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"\n",
"# import os"
]

@ -82,7 +82,7 @@
"source": [
"# Import the required modules\n",
"from langchain.llms import Clarifai\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -59,7 +59,7 @@
"outputs": [],
"source": [
"from langchain.llms import Cohere\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -102,7 +102,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",

@ -195,7 +195,7 @@
}
],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"\n",
"template = \"\"\"{question}\n",
"\n",

@ -28,7 +28,7 @@
"source": [
"import os\n",
"from langchain.llms import DeepInfra\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -103,7 +103,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"llm=EdenAI(feature=\"text\",provider=\"openai\",model=\"text-davinci-003\",temperature=0.2, max_tokens=250)\n",
"\n",
"prompt = \"\"\"\n",

@ -20,7 +20,7 @@
"outputs": [],
"source": [
"from langchain.llms.fireworks import Fireworks, FireworksChat\n",
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"from langchain.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",

@ -27,7 +27,7 @@
"source": [
"import os\n",
"from langchain.llms import ForefrontAI\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -66,7 +66,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -43,7 +43,7 @@
"source": [
"import os\n",
"from langchain.llms import GooseAI\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -47,7 +47,7 @@
},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"from langchain.llms import GPT4All\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
]

@ -91,7 +91,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import HuggingFaceHub"
"from langchain.llms import HuggingFaceHub"
]
},
{
@ -101,7 +101,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -189,7 +189,7 @@
"outputs": [],
"source": [
"from langchain.llms import LlamaCpp\n",
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
]

@ -80,7 +80,7 @@
"outputs": [],
"source": [
"# Map reduce example\n",
"from langchain import PromptTemplate\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain.chains.mapreduce import MapReduceChain\n",
"\n",

@ -94,7 +94,7 @@
"outputs": [],
"source": [
"from langchain.llms import Minimax\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
],
"metadata": {
"collapsed": false

@ -108,7 +108,7 @@
"outputs": [],
"source": [
"from langchain.llms import Modal\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -43,7 +43,7 @@
"outputs": [],
"source": [
"from langchain.llms import MosaicML\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -73,7 +73,7 @@
"outputs": [],
"source": [
"from langchain.llms import NLPCloud\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -43,7 +43,7 @@
"outputs": [],
"source": [
"from langchain.llms.octoai_endpoint import OctoAIEndpoint\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -206,7 +206,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import PromptTemplate\n",
"from langchain.prompts import PromptTemplate\n",
"\n",
"# Prompt\n",
"template = \"\"\"Use the following pieces of context to answer the question at the end. \n",

@ -59,7 +59,7 @@
"outputs": [],
"source": [
"import langchain\n",
"from langchain import LLMChain, PromptTemplate\n",
"from langchain.chains import LLMChain\nfrom langchain.prompts import PromptTemplate\n",
"from langchain.callbacks.stdout import StdOutCallbackHandler\n",
"from langchain.llms import OpenAI\n",
"from langchain.memory import ConversationBufferWindowMemory\n",

@ -67,7 +67,7 @@
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -114,7 +114,7 @@
}
],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"\n",
"template = \"What is a good name for a company that makes {product}?\"\n",
"\n",

@ -71,7 +71,7 @@
"outputs": [],
"source": [
"from langchain.llms import OpenLM\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -45,7 +45,7 @@
"source": [
"import os\n",
"from langchain.llms import Petals\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -50,7 +50,7 @@
"source": [
"import os\n",
"from langchain.llms import PipelineAI\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -32,7 +32,7 @@
"\n",
"import predictionguard as pg\n",
"from langchain.llms import PredictionGuard\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
],
"id": "7191a5ce"
},

@ -104,7 +104,7 @@
"outputs": [],
"source": [
"from langchain.llms import Replicate\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -44,7 +44,7 @@
],
"source": [
"from langchain.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM\n",
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"import runhouse as rh"
]
},

@ -92,7 +92,7 @@
"source": [
"from typing import Dict\n",
"\n",
"from langchain import PromptTemplate, SagemakerEndpoint\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.llms import SagemakerEndpoint\n",
"from langchain.llms.sagemaker_endpoint import LLMContentHandler\n",
"from langchain.chains.question_answering import load_qa_chain\n",
"import json\n",

@ -80,7 +80,7 @@
"outputs": [],
"source": [
"from langchain.llms import StochasticAI\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -54,7 +54,7 @@
"execution_count": null,
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"\n",
"conversation = \"\"\"Sam: Good morning, team! Let's keep this standup concise. We'll go in the usual order: what you did yesterday, what you plan to do today, and any blockers. Alex, kick us off.\n",
"Alex: Morning! Yesterday, I wrapped up the UI for the user dashboard. The new charts and widgets are now responsive. I also had a sync with the design team to ensure the final touchups are in line with the brand guidelines. Today, I'll start integrating the frontend with the new API endpoints Rhea was working on. The only blocker is waiting for some final API documentation, but I guess Rhea can update on that.\n",

@ -44,7 +44,7 @@
"outputs": [],
"source": [
"import langchain\n",
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"from langchain.llms import TextGen\n",
"\n",
"langchain.debug = True\n",
@ -93,7 +93,7 @@
"outputs": [],
"source": [
"import langchain\n",
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"from langchain.llms import TextGen\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"\n",

@ -157,7 +157,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"\n",
"llm = TitanTakeoff()\n",
"\n",

@ -76,7 +76,7 @@
"outputs": [],
"source": [
"from langchain.llms import Tongyi\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -128,7 +128,7 @@
}
],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",

@ -56,7 +56,7 @@
"outputs": [],
"source": [
"from langchain.llms import Writer\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -122,7 +122,7 @@
}
],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
"\n",
"template = \"Where can we visit in the capital of {country}?\"\n",
"\n",

@ -20,7 +20,7 @@
"outputs": [],
"source": [
"from langchain.memory.motorhead_memory import MotorheadMemory\n",
"from langchain import OpenAI, LLMChain, PromptTemplate\n",
"from langchain.llms import OpenAI\nfrom langchain.chains import LLMChain\nfrom langchain.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"You are a chatbot having a conversation with a human.\n",
"\n",

@ -21,7 +21,7 @@
"outputs": [],
"source": [
"from langchain.memory.motorhead_memory import MotorheadMemory\n",
"from langchain import OpenAI, LLMChain, PromptTemplate\n",
"from langchain.llms import OpenAI\nfrom langchain.chains import LLMChain\nfrom langchain.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"You are a chatbot having a conversation with a human.\n",
"\n",

@ -49,7 +49,7 @@
"source": [
"from langchain.memory import ZepMemory\n",
"from langchain.retrievers import ZepRetriever\n",
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.schema import HumanMessage, AIMessage\n",
"from langchain.utilities import WikipediaAPIWrapper\n",
"from langchain.agents import initialize_agent, AgentType, Tool\n",

@ -31,7 +31,7 @@ Args:
## Examples
```python
# Connecting to CnosDB with SQLDatabase Wrapper
from langchain import SQLDatabase
from langchain.utilities import SQLDatabase
db = SQLDatabase.from_cnosdb()
```
@ -45,7 +45,7 @@ llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
### SQL Database Chain
This example demonstrates the use of the SQL Chain for answering a question over a CnosDB.
```python
from langchain import SQLDatabaseChain
from langchain_experimental.sql import SQLDatabaseChain
db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)

@ -80,7 +80,7 @@
"outputs": [],
"source": [
"# Connecting to Databricks with SQLDatabase wrapper\n",
"from langchain import SQLDatabase\n",
"from langchain.utilities import SQLDatabase\n",
"\n",
"db = SQLDatabase.from_databricks(catalog=\"samples\", schema=\"nyctaxi\")"
]
@ -115,7 +115,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import SQLDatabaseChain\n",
"from langchain_experimental.sql import SQLDatabaseChain\n",
"\n",
"db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)"
]

@ -37,7 +37,8 @@ from lcserve import serving
@serving
def ask(input: str) -> str:
from langchain import LLMChain, OpenAI
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.agents import AgentExecutor, ZeroShotAgent
tools = [...] # list of tools

@ -40,7 +40,7 @@ llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback])
## How to use tags with Log10
```python
from langchain import OpenAI
from langchain.llms import OpenAI
from langchain.chat_models import ChatAnthropic
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
@ -74,7 +74,7 @@ You can also intermix direct OpenAI calls and Langchain LLM calls:
import os
from log10.load import log10, log10_session
import openai
from langchain import OpenAI
from langchain.llms import OpenAI
log10(openai)

@ -60,7 +60,7 @@ See the [API documentation and examples](https://www.mlflow.org/docs/latest/pyth
```python
import mlflow
from langchain import LLMChain, PromptTemplate
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import MlflowAIGateway
gateway = MlflowAIGateway(
@ -134,7 +134,8 @@ Databricks MLflow AI Gateway is in private preview.
Please contact a Databricks representative to enroll in the preview.
```python
from langchain import LLMChain, PromptTemplate
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import MlflowAIGateway
gateway = MlflowAIGateway(

@ -26,7 +26,7 @@ conn_str = f"duckdb:///md:{token}@my_db"
You can use the SQLChain to query data in your Motherduck instance in natural language.
```
from langchain import OpenAI, SQLDatabase, SQLDatabaseChain
from langchain.llms import OpenAI
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
db = SQLDatabase.from_uri(conn_str)
db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db, verbose=True)
```

@ -37,7 +37,7 @@ import os
import predictionguard as pg
from langchain.llms import PredictionGuard
from langchain import PromptTemplate, LLMChain
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
# Your Prediction Guard API key. Get one at predictionguard.com
os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>"
@ -76,7 +76,7 @@ Basic LLM Chaining with the Prediction Guard wrapper:
```python
import os
from langchain import PromptTemplate, LLMChain
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import PredictionGuard
# Optional, add your OpenAI API Key. This is optional, as Prediction Guard allows

@ -108,7 +108,7 @@
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -20,7 +20,7 @@ As of June 2023, the API supports Vicuna-13B by default. We are going to support
For example
```python
from langchain.llms import OpenAI
from langchain import PromptTemplate, LLMChain
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
import os
os.environ['OPENAI_API_BASE'] = "https://shale.live/v1"

@ -204,7 +204,7 @@
},
"outputs": [],
"source": [
"from langchain import PromptTemplate\n",
"from langchain.prompts import PromptTemplate\n",
"retriever = GoogleDriveRetriever(\n",
" template=PromptTemplate(input_variables=['query'],\n",
" # See https://developers.google.com/drive/api/guides/search-files\n",

@ -150,7 +150,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import LLMChain\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"\n",
"QUERY_PROMPT = PromptTemplate(\n",

@ -82,7 +82,7 @@
"source": [
"# Import the required modules\n",
"from langchain.embeddings import ClarifaiEmbeddings\n",
"from langchain import PromptTemplate, LLMChain"
"from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
]
},
{

@ -81,7 +81,7 @@
},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import initialize_agent, AgentType"
]
},

@ -105,7 +105,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import initialize_agent, AgentType"
]
},

@ -118,7 +118,7 @@
},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import initialize_agent, AgentType"
]
},

@ -167,7 +167,7 @@
},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import initialize_agent, AgentType\n",
"llm = OpenAI(temperature=0)\n",
"agent = initialize_agent(\n",

@ -81,7 +81,7 @@
},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import initialize_agent, AgentType\n",
"llm = OpenAI(temperature=0)\n",
"from langchain.agents.agent_toolkits import MultionToolkit\n",

@ -93,7 +93,7 @@
},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import initialize_agent, AgentType"
]
},

@ -30,7 +30,7 @@
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.vectorstores import Chroma\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain import OpenAI, VectorDBQA\n",
"from langchain.llms import OpenAI\nfrom langchain.chains import VectorDBQA\n",
"\n",
"llm = OpenAI(temperature=0)"
]

@ -61,7 +61,7 @@
},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import load_tools, initialize_agent, AgentType\n",
"\n",
"llm = OpenAI(temperature=0)\n",

@ -126,7 +126,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import initialize_agent, AgentType, load_tools"
]
},

@ -167,7 +167,7 @@
},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import initialize_agent, AgentType\n",
"llm = OpenAI(temperature=0)\n",
"agent = initialize_agent(\n",

@ -43,7 +43,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import load_tools, initialize_agent, AgentType\n",
"from langchain.utilities import GraphQLAPIWrapper\n",
"\n",

@ -126,7 +126,7 @@
"source": [
"import os\n",
"from lemonai import execute_workflow\n",
"from langchain import OpenAI"
"from langchain.llms import OpenAI"
]
},
{

@ -474,7 +474,7 @@
],
"source": [
"from langchain.chains import RetrievalQAWithSourcesChain\n",
"from langchain import OpenAI\n",
"from langchain.llms import OpenAI\n",
"\n",
"import os\n",
"import getpass\n",

@ -62,7 +62,7 @@
"from langchain.vectorstores.starrocks import StarRocksSettings\n",
"from langchain.vectorstores import Chroma\n",
"from langchain.text_splitter import CharacterTextSplitter, TokenTextSplitter\n",
"from langchain import OpenAI, VectorDBQA\n",
"from langchain.llms import OpenAI\nfrom langchain.chains import VectorDBQA\n",
"from langchain.document_loaders import DirectoryLoader\n",
"from langchain.chains import RetrievalQA\n",
"from langchain.document_loaders import TextLoader, UnstructuredMarkdownLoader\n",

@ -39,7 +39,7 @@
"\n",
"import os, sys, torch\n",
"from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel\n",
"from langchain import HuggingFacePipeline, ConversationChain\n",
"from langchain.llms import HuggingFacePipeline\nfrom langchain.chains import ConversationChain\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.vectorstores.vearch import VearchDb\n",
"from langchain.document_loaders import TextLoader\n",

@ -332,7 +332,7 @@
"outputs": [],
"source": [
"from langchain.chains import RetrievalQAWithSourcesChain\n",
"from langchain import OpenAI"
"from langchain.llms import OpenAI"
]
},
{

@ -23,7 +23,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import SerpAPIWrapper\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"from langchain.chat_models import ChatOpenAI"

@ -17,7 +17,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import OpenAI, Wikipedia\n",
"from langchain.llms import OpenAI\nfrom langchain.docstore import Wikipedia\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"from langchain.agents.react.base import DocstoreExplorer\n",

@ -45,7 +45,7 @@
}
],
"source": [
"from langchain import OpenAI, SerpAPIWrapper\n",
"from langchain.llms import OpenAI\nfrom langchain.utilities import SerpAPIWrapper\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"\n",

@ -17,13 +17,11 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import (\n",
" LLMMathChain,\n",
" OpenAI,\n",
" SerpAPIWrapper,\n",
" SQLDatabase,\n",
" SQLDatabaseChain,\n",
")\n",
"from langchain.chains import LLMMathChain\n",
"from langchain.llms import OpenAI\n",
"from langchain.utilities import SerpAPIWrapper\n",
"from langchain.utilities import SQLDatabase\n",
"from langchain_experimental.sql import SQLDatabaseChain\n",
"from langchain.agents import initialize_agent, Tool\n",
"from langchain.agents import AgentType\n",
"from langchain.chat_models import ChatOpenAI"

@ -29,7 +29,7 @@
"from langchain.agents import AgentExecutor, initialize_agent, AgentType\n",
"from langchain.schema import AgentFinish\n",
"from langchain.agents.tools import Tool\n",
"from langchain import LLMMathChain\n",
"from langchain.chains import LLMMathChain\n",
"from langchain.chat_models import ChatOpenAI"
]
},

@ -166,7 +166,7 @@
"from langchain.agents import AgentType\n",
"from langchain.tools import BaseTool\n",
"from langchain.llms import OpenAI\n",
"from langchain import LLMMathChain, SerpAPIWrapper"
"from langchain.chains import LLMMathChain\nfrom langchain.utilities import SerpAPIWrapper"
]
},
{

@ -47,7 +47,7 @@
}
],
"source": [
"from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate\n",
"from langchain.llms import OpenAI\nfrom langchain.chains import ConversationChain, LLMChain\nfrom langchain.prompts import PromptTemplate\n",
"from langchain.memory import ConversationBufferWindowMemory\n",
"\n",
"\n",

@ -26,7 +26,7 @@
"outputs": [],
"source": [
"from langchain.agents import Tool, AgentExecutor, BaseSingleActionAgent\n",
"from langchain import OpenAI, SerpAPIWrapper"
"from langchain.llms import OpenAI\nfrom langchain.utilities import SerpAPIWrapper"
]
},
{

@ -38,7 +38,7 @@
" AgentOutputParser,\n",
")\n",
"from langchain.prompts import StringPromptTemplate\n",
"from langchain import OpenAI, SerpAPIWrapper, LLMChain\n",
"from langchain.llms import OpenAI\nfrom langchain.utilities import SerpAPIWrapper\nfrom langchain.chains import LLMChain\n",
"from typing import List, Union\n",
"from langchain.schema import AgentAction, AgentFinish\n",
"import re"

@ -48,7 +48,7 @@
"outputs": [],
"source": [
"from langchain.agents import ZeroShotAgent, Tool, AgentExecutor\n",
"from langchain import OpenAI, SerpAPIWrapper, LLMChain"
"from langchain.llms import OpenAI\nfrom langchain.utilities import SerpAPIWrapper\nfrom langchain.chains import LLMChain"
]
},
{

@ -26,7 +26,7 @@
"outputs": [],
"source": [
"from langchain.agents import Tool, AgentExecutor, BaseMultiActionAgent\n",
"from langchain import OpenAI, SerpAPIWrapper"
"from langchain.llms import OpenAI\nfrom langchain.utilities import SerpAPIWrapper"
]
},
{

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save