diff --git a/docs/docs/integrations/llms/databricks.ipynb b/docs/docs/integrations/llms/databricks.ipynb index 227259ccbd..ac3c99ab03 100644 --- a/docs/docs/integrations/llms/databricks.ipynb +++ b/docs/docs/integrations/llms/databricks.ipynb @@ -61,7 +61,10 @@ "import os\n", "\n", "os.environ[\"DATABRICKS_HOST\"] = \"https://your-workspace.cloud.databricks.com\"\n", - "os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\"Enter your Databricks access token: \")" + "if \"DATABRICKS_TOKEN\" not in os.environ:\n", + " os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\n", + " \"Enter your Databricks access token: \"\n", + " )" ] }, { diff --git a/docs/docs/integrations/llms/friendli.ipynb b/docs/docs/integrations/llms/friendli.ipynb index 8e0e2d1511..3a457e0cc4 100644 --- a/docs/docs/integrations/llms/friendli.ipynb +++ b/docs/docs/integrations/llms/friendli.ipynb @@ -40,12 +40,7 @@ "execution_count": 1, "metadata": {}, "outputs": [], - "source": [ - "import getpass\n", - "import os\n", - "\n", - "os.environ[\"FRIENDLI_TOKEN\"] = getpass.getpass(\"Friendi Personal Access Token: \")" - ] + "source": ["import getpass\nimport os\n\nif \"FRIENDLI_TOKEN\" not in os.environ:\n os.environ[\"FRIENDLI_TOKEN\"] = getpass.getpass(\"Friendli Personal Access Token: \")"] }, { "cell_type": "markdown", @@ -59,11 +54,7 @@ "execution_count": 2, "metadata": {}, "outputs": [], - "source": [ - "from langchain_community.llms.friendli import Friendli\n", - "\n", - "llm = Friendli(model=\"mixtral-8x7b-instruct-v0-1\", max_tokens=100, temperature=0)" - ] + "source": ["from langchain_community.llms.friendli import Friendli\n\nllm = Friendli(model=\"mixtral-8x7b-instruct-v0-1\", max_tokens=100, temperature=0)"] }, { "cell_type": "markdown", @@ -97,9 +88,7 @@ "output_type": "execute_result" } ], - "source": [ - "llm.invoke(\"Tell me a joke.\")" - ] + "source": ["llm.invoke(\"Tell me a joke.\")"] }, { "cell_type": "code", @@ -118,9 +107,7 @@ "output_type": "execute_result" } ], - "source": [ - 
"llm.batch([\"Tell me a joke.\", \"Tell me a joke.\"])" - ] + "source": ["llm.batch([\"Tell me a joke.\", \"Tell me a joke.\"])"] }, { "cell_type": "code", @@ -138,9 +125,7 @@ "output_type": "execute_result" } ], - "source": [ - "llm.generate([\"Tell me a joke.\", \"Tell me a joke.\"])" - ] + "source": ["llm.generate([\"Tell me a joke.\", \"Tell me a joke.\"])"] }, { "cell_type": "code", @@ -158,10 +143,7 @@ ] } ], - "source": [ - "for chunk in llm.stream(\"Tell me a joke.\"):\n", - " print(chunk, end=\"\", flush=True)" - ] + "source": ["for chunk in llm.stream(\"Tell me a joke.\"):\n print(chunk, end=\"\", flush=True)"] }, { "cell_type": "markdown", @@ -186,9 +168,7 @@ "output_type": "execute_result" } ], - "source": [ - "await llm.ainvoke(\"Tell me a joke.\")" - ] + "source": ["await llm.ainvoke(\"Tell me a joke.\")"] }, { "cell_type": "code", @@ -207,9 +187,7 @@ "output_type": "execute_result" } ], - "source": [ - "await llm.abatch([\"Tell me a joke.\", \"Tell me a joke.\"])" - ] + "source": ["await llm.abatch([\"Tell me a joke.\", \"Tell me a joke.\"])"] }, { "cell_type": "code", @@ -227,9 +205,7 @@ "output_type": "execute_result" } ], - "source": [ - "await llm.agenerate([\"Tell me a joke.\", \"Tell me a joke.\"])" - ] + "source": ["await llm.agenerate([\"Tell me a joke.\", \"Tell me a joke.\"])"] }, { "cell_type": "code", @@ -247,10 +223,7 @@ ] } ], - "source": [ - "async for chunk in llm.astream(\"Tell me a joke.\"):\n", - " print(chunk, end=\"\", flush=True)" - ] + "source": ["async for chunk in llm.astream(\"Tell me a joke.\"):\n print(chunk, end=\"\", flush=True)"] } ], "metadata": {