docs[patch]: Update code that checks API keys (#25444)

Check whether the API key is already set in the environment before prompting the user for it.

Update:

```python
import getpass
import os

os.environ["DATABRICKS_HOST"] = "https://your-workspace.cloud.databricks.com"
os.environ["DATABRICKS_TOKEN"] = getpass.getpass("Enter your Databricks access token: ")
```

To:

```python
import getpass
import os

os.environ["DATABRICKS_HOST"] = "https://your-workspace.cloud.databricks.com"
if "DATABRICKS_TOKEN" not in os.environ:
    os.environ["DATABRICKS_TOKEN"] = getpass.getpass(
        "Enter your Databricks access token: "
    )
```

grit migration:

```
engine marzano(0.1)
language python

`os.environ[$Q] = getpass.getpass("$X")` as $CHECK where {
    $CHECK <: ! within if_statement(),
    $CHECK => `if $Q not in os.environ:\n    $CHECK`
}
```
pull/25894/head
Eugene Yurtsev 1 month ago committed by GitHub
parent 60b65528c5
commit b7c070d437
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -61,7 +61,10 @@
"import os\n", "import os\n",
"\n", "\n",
"os.environ[\"DATABRICKS_HOST\"] = \"https://your-workspace.cloud.databricks.com\"\n", "os.environ[\"DATABRICKS_HOST\"] = \"https://your-workspace.cloud.databricks.com\"\n",
"os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\"Enter your Databricks access token: \")" "if \"DATABRICKS_TOKEN\" not in os.environ:\n",
" os.environ[\"DATABRICKS_TOKEN\"] = getpass.getpass(\n",
" \"Enter your Databricks access token: \"\n",
" )"
] ]
}, },
{ {

@ -40,12 +40,7 @@
"execution_count": 1, "execution_count": 1,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": ["import getpass\nimport os\n\nif \"FRIENDLI_TOKEN\" not in os.environ:\n os.environ[\"FRIENDLI_TOKEN\"] = getpass.getpass(\"Friendi Personal Access Token: \")"]
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"FRIENDLI_TOKEN\"] = getpass.getpass(\"Friendi Personal Access Token: \")"
]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@ -59,11 +54,7 @@
"execution_count": 2, "execution_count": 2,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": ["from langchain_community.llms.friendli import Friendli\n\nllm = Friendli(model=\"mixtral-8x7b-instruct-v0-1\", max_tokens=100, temperature=0)"]
"from langchain_community.llms.friendli import Friendli\n",
"\n",
"llm = Friendli(model=\"mixtral-8x7b-instruct-v0-1\", max_tokens=100, temperature=0)"
]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@ -97,9 +88,7 @@
"output_type": "execute_result" "output_type": "execute_result"
} }
], ],
"source": [ "source": ["llm.invoke(\"Tell me a joke.\")"]
"llm.invoke(\"Tell me a joke.\")"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
@ -118,9 +107,7 @@
"output_type": "execute_result" "output_type": "execute_result"
} }
], ],
"source": [ "source": ["llm.batch([\"Tell me a joke.\", \"Tell me a joke.\"])"]
"llm.batch([\"Tell me a joke.\", \"Tell me a joke.\"])"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
@ -138,9 +125,7 @@
"output_type": "execute_result" "output_type": "execute_result"
} }
], ],
"source": [ "source": ["llm.generate([\"Tell me a joke.\", \"Tell me a joke.\"])"]
"llm.generate([\"Tell me a joke.\", \"Tell me a joke.\"])"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
@ -158,10 +143,7 @@
] ]
} }
], ],
"source": [ "source": ["for chunk in llm.stream(\"Tell me a joke.\"):\n print(chunk, end=\"\", flush=True)"]
"for chunk in llm.stream(\"Tell me a joke.\"):\n",
" print(chunk, end=\"\", flush=True)"
]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
@ -186,9 +168,7 @@
"output_type": "execute_result" "output_type": "execute_result"
} }
], ],
"source": [ "source": ["await llm.ainvoke(\"Tell me a joke.\")"]
"await llm.ainvoke(\"Tell me a joke.\")"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
@ -207,9 +187,7 @@
"output_type": "execute_result" "output_type": "execute_result"
} }
], ],
"source": [ "source": ["await llm.abatch([\"Tell me a joke.\", \"Tell me a joke.\"])"]
"await llm.abatch([\"Tell me a joke.\", \"Tell me a joke.\"])"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
@ -227,9 +205,7 @@
"output_type": "execute_result" "output_type": "execute_result"
} }
], ],
"source": [ "source": ["await llm.agenerate([\"Tell me a joke.\", \"Tell me a joke.\"])"]
"await llm.agenerate([\"Tell me a joke.\", \"Tell me a joke.\"])"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
@ -247,10 +223,7 @@
] ]
} }
], ],
"source": [ "source": ["async for chunk in llm.astream(\"Tell me a joke.\"):\n print(chunk, end=\"\", flush=True)"]
"async for chunk in llm.astream(\"Tell me a joke.\"):\n",
" print(chunk, end=\"\", flush=True)"
]
} }
], ],
"metadata": { "metadata": {

Loading…
Cancel
Save