{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# MLflow\n",
    "\n",
    "This notebook goes over how to track your LangChain experiments in your MLflow server."
   ]
  },
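  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "At a high level, the flow is: install the dependencies, set the tracking and API endpoints via environment variables, create an `MlflowCallbackHandler`, pass it through `callbacks=[...]` to the LLM, chains, and agents you want to track, and call `flush_tracker(...)` to write the collected records to MLflow."
   ]
  },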
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install azureml-mlflow\n",
    "!pip install pandas\n",
    "!pip install textstat\n",
    "!pip install spacy\n",
    "!pip install openai\n",
    "!pip install google-search-results\n",
    "!python -m spacy download en_core_web_sm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ[\"MLFLOW_TRACKING_URI\"] = \"\"\n",
    "os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
    "os.environ[\"SERPAPI_API_KEY\"] = \"\"\n"
   ]
  },
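  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "If you do not have a remote tracking server, MLflow typically falls back to logging runs under a local `./mlruns` directory, which you can browse with `mlflow ui`. The cell below is a minimal sanity check (not part of the original walkthrough): it just prints the tracking URI that MLflow resolves from the environment."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import mlflow\n",
    "\n",
    "# Sanity check (not in the original walkthrough): print the tracking URI MLflow\n",
    "# resolves from the environment. If nothing is configured, runs usually go to a\n",
    "# local ./mlruns directory, browseable with `mlflow ui`.\n",
    "print(mlflow.get_tracking_uri())"
   ]
  },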
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.callbacks import MlflowCallbackHandler\n",
    "from langchain.llms import OpenAI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"Try the MLflow callback handler.\n",
    "\n",
    "Scenarios:\n",
    "1. OpenAI LLM\n",
    "2. Chain with multiple SubChains on multiple generations\n",
    "3. Agent with Tools\n",
    "\"\"\"\n",
    "mlflow_callback = MlflowCallbackHandler()\n",
    "llm = OpenAI(model_name=\"gpt-3.5-turbo\", temperature=0, callbacks=[mlflow_callback], verbose=True)"
   ]
  },
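  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The same `mlflow_callback` is reused across the three scenarios below. As the LLM, chain, and agent run, the handler collects the prompts and generations, along with text statistics (the reason `textstat`, `spacy`, and `pandas` are installed above); each call to `flush_tracker(...)` logs what has been collected so far to MLflow, and `finish=True` on the final call ends the run."
   ]
  },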
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SCENARIO 1 - LLM\n",
    "llm_result = llm.generate([\"Tell me a joke\"])\n",
    "\n",
    "mlflow_callback.flush_tracker(llm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chains import LLMChain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SCENARIO 2 - Chain\n",
    "template = \"\"\"You are a playwright. Given the title of a play, it is your job to write a synopsis for that title.\n",
    "Title: {title}\n",
    "Playwright: This is a synopsis for the above play:\"\"\"\n",
    "prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
    "synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=[mlflow_callback])\n",
    "\n",
    "test_prompts = [\n",
    "    {\n",
    "        \"title\": \"documentary about good video games that push the boundary of game design\"\n",
    "    },\n",
    "]\n",
    "synopsis_chain.apply(test_prompts)\n",
    "mlflow_callback.flush_tracker(synopsis_chain)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "_jN73xcPVEpI"
   },
   "outputs": [],
   "source": [
    "from langchain.agents import initialize_agent, load_tools\n",
    "from langchain.agents import AgentType"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "Gpq4rk6VT9cu"
   },
   "outputs": [],
   "source": [
    "# SCENARIO 3 - Agent with Tools\n",
    "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=[mlflow_callback])\n",
    "agent = initialize_agent(\n",
    "    tools,\n",
    "    llm,\n",
    "    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
    "    callbacks=[mlflow_callback],\n",
    "    verbose=True,\n",
    ")\n",
    "agent.run(\n",
    "    \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
    ")\n",
    "mlflow_callback.flush_tracker(agent, finish=True)"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}