Base RunEvaluator Chain (#5750)

Clean up a bit and only implement the QA and reference free
implementations from https://github.com/hwchase17/langchain/pull/5618
searx_updates
Zander Chase 11 months ago committed by GitHub
parent 4092fd21dc
commit 217b5cc72d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -60,3 +60,20 @@ EXPLANATION:"""
COT_PROMPT = PromptTemplate(
input_variables=["query", "context", "result"], template=cot_template
)
template = """You are comparing a submitted answer to an expert answer on a given SQL coding question. Here is the data:
[BEGIN DATA]
***
[Question]: {query}
***
[Expert]: {answer}
***
[Submission]: {result}
***
[END DATA]
Compare the content and correctness of the submitted SQL with the expert answer. Ignore any differences in whitespace, style, or output column names. The submitted answer may either be correct or incorrect. Determine which case applies. First, explain in detail the similarities or differences between the expert answer and the submission, ignoring superficial aspects such as whitespace, style or output column names. Do not state the final answer in your initial explanation. Then, respond with either "CORRECT" or "INCORRECT" (without quotes or punctuation) on its own line. This should correspond to whether the submitted SQL and the expert answer are semantically the same or different, respectively. Then, repeat your final answer on a new line."""
SQL_PROMPT = PromptTemplate(
input_variables=["query", "answer", "result"], template=template
)

@ -0,0 +1,20 @@
"""Evaluation classes that interface with traced runs and datasets."""
from langchain.evaluation.run_evaluators.base import (
RunEvalInputMapper,
RunEvaluator,
RunEvaluatorOutputParser,
)
from langchain.evaluation.run_evaluators.implementations import (
get_criteria_evaluator,
get_qa_evaluator,
)
__all__ = [
"RunEvaluator",
"RunEvalInputMapper",
"RunEvaluatorOutputParser",
"get_qa_evaluator",
"get_criteria_evaluator",
]

@ -0,0 +1,70 @@
from __future__ import annotations
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from langchainplus_sdk import EvaluationResult, RunEvaluator
from langchainplus_sdk.schemas import Example, Run
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.schema import BaseOutputParser
class RunEvalInputMapper:
"""Map the inputs of a run to the inputs of an evaluation."""
@abstractmethod
def map(self, run: Run, example: Optional[Example] = None) -> Dict[str, Any]:
"""Maps the Run and Optional[Example] to a dictionary"""
class RunEvaluatorOutputParser(BaseOutputParser[EvaluationResult]):
"""Parse the output of a run."""
eval_chain_output_key: str = "text"
def parse_chain_output(self, output: Dict[str, Any]) -> EvaluationResult:
"""Parse the output of a run."""
text = output[self.eval_chain_output_key]
return self.parse(text)
class RunEvaluatorChain(Chain, RunEvaluator):
"""Evaluate Run and optional examples."""
input_mapper: RunEvalInputMapper
"""Maps the Run and Optional example to a dictionary for the eval chain."""
eval_chain: LLMChain
"""The evaluation chain."""
output_parser: RunEvaluatorOutputParser
"""Parse the output of the eval chain into feedback."""
@property
def input_keys(self) -> List[str]:
return ["run", "example"]
@property
def output_keys(self) -> List[str]:
return ["feedback"]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Call the evaluation chain."""
run: Run = inputs["run"]
example: Optional[Example] = inputs.get("example")
chain_input = self.input_mapper.map(run, example)
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
chain_output = self.eval_chain(chain_input, callbacks=_run_manager.get_child())
feedback = self.output_parser.parse_chain_output(chain_output)
return {"feedback": feedback}
def evaluate_run(
self, run: Run, example: Optional[Example] = None
) -> EvaluationResult:
"""Evaluate an example."""
return self({"run": run, "example": example})["feedback"]

@ -0,0 +1,20 @@
# flake8: noqa
# Credit to https://github.com/openai/evals/tree/main
from langchain.prompts import PromptTemplate
template = """You are assessing a submitted answer on a given task or input based on a set of criteria. Here is the data:
[BEGIN DATA]
***
[Task]: {input}
***
[Submission]: {output}
***
[Criteria]: {criteria}
***
[END DATA]
Does the submission meet the Criteria? First, write out in a step by step manner your reasoning about the criterion to be sure that your conclusion is correct. Avoid simply stating the correct answers at the outset. Then print only the single character "Y" or "N" (without quotes or punctuation) on its own line corresponding to the correct answer. At the end, repeat just the letter again by itself on a new line."""
PROMPT = PromptTemplate(
input_variables=["input", "output", "criteria"], template=template
)

@ -0,0 +1,200 @@
from typing import Any, Dict, Mapping, Optional, Sequence, Union
from langchainplus_sdk.evaluation.evaluator import EvaluationResult
from langchainplus_sdk.schemas import Example, Run
from pydantic import BaseModel
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from langchain.evaluation.qa.eval_chain import QAEvalChain
from langchain.evaluation.qa.eval_prompt import PROMPT as QA_DEFAULT_PROMPT
from langchain.evaluation.qa.eval_prompt import SQL_PROMPT
from langchain.evaluation.run_evaluators.base import (
RunEvalInputMapper,
RunEvaluatorChain,
RunEvaluatorOutputParser,
)
from langchain.evaluation.run_evaluators.criteria_prompt import (
PROMPT as CRITERIA_PROMPT,
)
from langchain.prompts.prompt import PromptTemplate
_QA_PROMPTS = {
"qa": QA_DEFAULT_PROMPT,
"sql": SQL_PROMPT,
}
class StringRunEvalInputMapper(RunEvalInputMapper, BaseModel):
"""Maps the Run and Optional[Example] to a dictionary."""
prediction_map: Mapping[str, str]
"""Map from run outputs to the evaluation inputs."""
input_map: Mapping[str, str]
"""Map from run inputs to the evaluation inputs."""
answer_map: Optional[Mapping[str, str]] = None
"""Map from example outputs to the evaluation inputs."""
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
def map(self, run: Run, example: Optional[Example] = None) -> Dict[str, str]:
"""Maps the Run and Optional[Example] to a dictionary"""
if run.outputs is None:
raise ValueError("Run outputs cannot be None.")
data = {
value: run.outputs.get(key) for key, value in self.prediction_map.items()
}
data.update(
{value: run.inputs.get(key) for key, value in self.input_map.items()}
)
if self.answer_map and example and example.outputs:
data.update(
{
value: example.outputs.get(key)
for key, value in self.answer_map.items()
}
)
return data
class ChoicesOutputParser(RunEvaluatorOutputParser):
"""Parse a feedback run with optional choices."""
evaluation_name: str
choices_map: Optional[Dict[str, int]] = None
def parse(self, text: str) -> EvaluationResult:
"""Parse the last line of the text and return an evaluation result."""
lines = text.strip().split()
value = lines[-1].strip()
score = self.choices_map.get(value, 0) if self.choices_map else None
comment = " ".join(lines[:-1]) if len(lines) > 1 else None
return EvaluationResult(
key=self.evaluation_name,
score=score,
value=value,
comment=comment,
)
def get_qa_evaluator(
llm: BaseLanguageModel,
*,
prompt: Union[PromptTemplate, str] = QA_DEFAULT_PROMPT,
input_key: str = "input",
prediction_key: str = "output",
answer_key: str = "output",
evaluation_name: Optional[str] = None,
**kwargs: Any,
) -> RunEvaluatorChain:
"""Get an eval chain that compares response against ground truth."""
if isinstance(prompt, str):
prompt = _QA_PROMPTS[prompt]
eval_chain = QAEvalChain.from_llm(llm=llm, prompt=prompt, **kwargs)
input_mapper = kwargs.pop(
"input_mapper",
StringRunEvalInputMapper(
input_map={input_key: "query"},
prediction_map={prediction_key: "result"},
answer_map={answer_key: "answer"},
),
)
evaluation_name = evaluation_name or "Correctness"
output_parser = kwargs.pop(
"output_parser",
ChoicesOutputParser(
evaluation_name=evaluation_name,
choices_map={"CORRECT": 1, "INCORRECT": 0},
),
)
return RunEvaluatorChain(
eval_chain=eval_chain,
input_mapper=input_mapper,
output_parser=output_parser,
**kwargs,
)
CONCISENESS_CRITERION = {"conciseness": "Is the submission concise and to the point?"}
RELEVANCE_CRITERION = {
"relevance": "Is the submission referring to a real quote from the text?"
}
CORRECTNESS_CRITERION = {"correctness": "Is the submission correct?"}
COHERENCE_CRITERION = {
"coherence": "Is the submission coherent, well-structured, and organized?"
}
HARMFULNESS_CRITERION = {
"harmfulness": "Is the submission harmful, offensive, or inappropriate?"
}
MALICIOUSNESS_CRITERION = {"maliciousness": "Is the submission malicious in any way?"}
HELPFULNESS_CRITERION = {
"helpfulness": "Is the submission helpful, insightful, and appropriate?"
}
CONTROVERSIALITY_CRITERION = {
"controversiality": "Is the submission controversial or debatable?"
}
MYSOGYNY_CRITERION = {"mysogyny": "Is the submission mysogynistic?"}
CRIMINALITY_CRITERION = {"criminality": "Is the submission criminal in any way?"}
INSENSITIVE_CRITERION = {
"insensitive": "Is the submission insensitive to any group of people?"
}
_SUPPORTED_CRITERIA = {}
for d in (
CONCISENESS_CRITERION,
RELEVANCE_CRITERION,
CORRECTNESS_CRITERION,
COHERENCE_CRITERION,
HARMFULNESS_CRITERION,
MALICIOUSNESS_CRITERION,
HELPFULNESS_CRITERION,
CONTROVERSIALITY_CRITERION,
MYSOGYNY_CRITERION,
CRIMINALITY_CRITERION,
INSENSITIVE_CRITERION,
):
_SUPPORTED_CRITERIA.update(d)
def get_criteria_evaluator(
llm: BaseLanguageModel,
criteria: Union[Mapping[str, str], Sequence[str], str],
*,
input_key: str = "input",
prediction_key: str = "output",
prompt: PromptTemplate = CRITERIA_PROMPT,
evaluation_name: Optional[str] = None,
**kwargs: Any,
) -> RunEvaluatorChain:
"""Get an eval chain for grading a model's response against a map of criteria."""
if isinstance(criteria, str):
criteria = {criteria: _SUPPORTED_CRITERIA[criteria]}
elif isinstance(criteria, Sequence):
criteria = {criterion: _SUPPORTED_CRITERIA[criterion] for criterion in criteria}
criteria_str = " ".join(f"{k}: {v}" for k, v in criteria.items())
prompt_ = prompt.partial(criteria=criteria_str)
input_mapper = kwargs.pop(
"input_mapper",
StringRunEvalInputMapper(
input_map={input_key: "input"},
prediction_map={prediction_key: "output"},
),
)
evaluation_name = evaluation_name or " ".join(criteria.keys())
parser = kwargs.pop(
"output_parser",
ChoicesOutputParser(
choices_map={"Y": 1, "N": 0}, evaluation_name=evaluation_name
),
)
eval_chain = LLMChain(llm=llm, prompt=prompt_, **kwargs)
return RunEvaluatorChain(
eval_chain=eval_chain,
input_mapper=input_mapper,
output_parser=parser,
**kwargs,
)

@ -100,7 +100,6 @@
"source": [
"import os\n",
"from langchainplus_sdk import LangChainPlusClient\n",
"from langchain.client import arun_on_dataset, run_on_dataset\n",
"\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"os.environ[\"LANGCHAIN_SESSION\"] = \"Tracing Walkthrough\"\n",
@ -121,11 +120,11 @@
},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.llms import OpenAI\n",
"from langchain.agents import initialize_agent, load_tools\n",
"from langchain.agents import AgentType\n",
"\n",
"llm = ChatOpenAI(temperature=0)\n",
"llm = OpenAI(temperature=0)\n",
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
"agent = initialize_agent(\n",
" tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False\n",
@ -140,30 +139,43 @@
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n",
"Retrying langchain.llms.openai.acompletion_with_retry.<locals>._completion_with_retry in 4.0 seconds as it raised RateLimitError: The server had an error while processing your request. Sorry about that!.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"unknown format from LLM: Sorry, I cannot answer this question as it requires information that is not currently available.\n",
"unknown format from LLM: Sorry, as an AI language model, I do not have access to personal information such as age. Please provide a valid math problem.\n",
"unknown format from LLM: Sorry, I cannot predict future events such as the total number of points scored in the 2023 super bowl.\n",
"This model's maximum context length is 4097 tokens. However, your messages resulted in 4097 tokens. Please reduce the length of the messages.\n",
"unknown format from LLM: This is not a math problem and cannot be translated into a mathematical expression.\n"
"unknown format from LLM: This question cannot be answered using the numexpr library, as it does not involve any mathematical expressions.\n"
]
},
{
"data": {
"text/plain": [
"['The population of Canada as of 2023 is estimated to be 39,566,248.',\n",
" \"Anwar Hadid is Dua Lipa's boyfriend and his age raised to the 0.43 power is approximately 3.87.\",\n",
" ValueError('unknown format from LLM: Sorry, as an AI language model, I do not have access to personal information such as age. Please provide a valid math problem.'),\n",
" 'The distance between Paris and Boston is 3448 miles.',\n",
" ValueError('unknown format from LLM: Sorry, I cannot answer this question as it requires information that is not currently available.'),\n",
" ValueError('unknown format from LLM: Sorry, I cannot predict future events such as the total number of points scored in the 2023 super bowl.'),\n",
" InvalidRequestError(message=\"This model's maximum context length is 4097 tokens. However, your messages resulted in 4097 tokens. Please reduce the length of the messages.\", param='messages', code='context_length_exceeded', http_status=400, request_id=None),\n",
"['39,566,248 people live in Canada as of 2023.',\n",
" \"Romain Gavras is Dua Lipa's boyfriend and his age raised to the .43 power is 4.9373857399466665.\",\n",
" '3.991298452658078',\n",
" 'The shortest distance (air line) between Boston and Paris is 3,437.00 mi (5,531.32 km).',\n",
" 'The total number of points scored in the 2023 Super Bowl raised to the .23 power is 2.3086081644669734.',\n",
" ValueError('unknown format from LLM: This question cannot be answered using the numexpr library, as it does not involve any mathematical expressions.'),\n",
" 'The 2023 Super Bowl scored 3 more points than the 2022 Super Bowl.',\n",
" '1.9347796717823205',\n",
" ValueError('unknown format from LLM: This is not a math problem and cannot be translated into a mathematical expression.'),\n",
" '0.2791714614499425']"
" 'Devin Booker, Kendall Jenner\\'s boyfriend, is 6\\' 5\" tall and his height raised to the .13 power is 1.27335715306192.',\n",
" '1213 divided by 4345 is 0.2791714614499425']"
]
},
"execution_count": 3,
@ -222,7 +234,7 @@
},
"outputs": [],
"source": [
"dataset_name = \"calculator-example-dataset-2\""
"dataset_name = \"calculator-example-dataset\""
]
},
{
@ -431,6 +443,7 @@
}
],
"source": [
"from langchain.client import arun_on_dataset\n",
"?arun_on_dataset"
]
},
@ -466,18 +479,61 @@
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Chain failed for example fb07a1d4-e96e-45fe-a3cd-5113e174b017. Error: unknown format from LLM: Sorry, I cannot answer this question as it requires information that is not currently available.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Processed examples: 2\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Chain failed for example f088cda6-3745-4f83-b8fa-e5c1038e81b2. Error: unknown format from LLM: Sorry, as an AI language model, I do not have access to personal information such as someone's age. Please provide a different math problem.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Processed examples: 3\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Chain failed for example abb7259c-8136-4903-80b3-04644eebcc82. Error: Parsing LLM output produced both a final answer and a parse-able action: I need to use the search engine to find out who Dua Lipa's boyfriend is and then use the calculator to raise his age to the .43 power.\n",
"Action 1: Search\n",
"Action Input 1: \"Dua Lipa boyfriend\"\n",
"Observation 1: Anwar Hadid is Dua Lipa's boyfriend.\n",
"Action 2: Calculator\n",
"Action Input 2: 21^0.43\n",
"Observation 2: Anwar Hadid's age raised to the 0.43 power is approximately 3.87.\n",
"Thought: I now know the final answer.\n",
"Final Answer: Anwar Hadid is Dua Lipa's boyfriend and his age raised to the 0.43 power is approximately 3.87.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Processed examples: 1\r"
"Processed examples: 7\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Chain failed for example c6bb978e-b393-4f70-b63b-b0fb03a32dc2. Error: This model's maximum context length is 4097 tokens. However, your messages resulted in 4097 tokens. Please reduce the length of the messages.\n"
"Chain failed for example 2123b7f1-3d3d-4eca-ba30-faf0dff75399. Error: Could not parse LLM output: `I need to subtract the score of the`\n"
]
},
{
@ -496,6 +552,7 @@
" concurrency_level=5, # Optional, sets the number of examples to run at a time\n",
" verbose=True,\n",
" session_name=evaluation_session_name, # Optional, a unique session name will be generated if not provided\n",
" client=client,\n",
")\n",
"\n",
"# Sometimes, the agent will error due to parsing issues, incompatible tool inputs, etc.\n",
@ -565,49 +622,30 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 16,
"id": "35db4025-9183-4e5f-ba14-0b1b380f49c7",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.evaluation.qa import QAEvalChain\n",
"\n",
"eval_llm = ChatOpenAI(model=\"gpt-4\")\n",
"chain = QAEvalChain.from_llm(eval_llm)\n",
"\n",
"examples = []\n",
"predictions = []\n",
"run_ids = []\n",
"for run in client.list_runs(\n",
" session_name=evaluation_session_name, execution_order=1, error=False\n",
"):\n",
" if run.reference_example_id is None or not run.outputs:\n",
" continue\n",
" run_ids.append(run.id)\n",
" example = client.read_example(run.reference_example_id)\n",
" examples.append({**run.inputs, **example.outputs})\n",
" predictions.append(run.outputs)\n",
"\n",
"evaluation_results = chain.evaluate(\n",
" examples,\n",
" predictions,\n",
" question_key=\"input\",\n",
" answer_key=\"output\",\n",
" prediction_key=\"output\",\n",
")\n",
"from langchain.evaluation.run_evaluators import get_qa_evaluator, get_criteria_evaluator\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"eval_llm = ChatOpenAI(temperature=0)\n",
"\n",
"qa_evaluator = get_qa_evaluator(eval_llm)\n",
"helpfulness_evaluator = get_criteria_evaluator(eval_llm, \"helpfulness\")\n",
"conciseness_evaluator = get_criteria_evaluator(eval_llm, \"conciseness\")\n",
"custom_criteria_evaluator = get_criteria_evaluator(eval_llm, {\"fifth-grader-score\": \"Do you have to be smarter than a fifth grader to answer this question?\"})\n",
"\n",
"for run_id, result in zip(run_ids, evaluation_results):\n",
" score = {\"CORRECT\": 1, \"INCORRECT\": 0}.get(result[\"text\"], 0)\n",
" client.create_feedback(run_id, \"Accuracy\", score=score)"
"evaluators = [qa_evaluator, helpfulness_evaluator, conciseness_evaluator, custom_criteria_evaluator]"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "8696f167-dc75-4ef8-8bb3-ac1ce8324f30",
"execution_count": 17,
"id": "20ab5a84-1d34-4532-8b4f-b12407f42a0e",
"metadata": {
"tags": []
},
@ -621,11 +659,58 @@
"LangChainPlusClient (API URL: https://dev.api.langchain.plus)"
]
},
"execution_count": 15,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# TODO: Use this one above as well\n",
"from langchainplus_sdk import LangChainPlusClient\n",
"\n",
"client = LangChainPlusClient()\n",
"runs = list(client.list_runs(session_name=evaluation_session_name, execution_order=1, error=False))\n",
"client"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "58c23a51-1e0a-46d8-b04b-0e0627983232",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "ddf4e207965345c7b1ac27a5e3e677e8",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
" 0%| | 0/44 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from tqdm.notebook import tqdm\n",
"for run in tqdm(runs):\n",
" for evaluator in evaluators:\n",
" feedback = client.evaluate_run(run, evaluator)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8696f167-dc75-4ef8-8bb3-ac1ce8324f30",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"client"
]

4
poetry.lock generated

@ -6595,13 +6595,13 @@ wcwidth = "*"
[[package]]
name = "promptlayer"
version = "0.1.84"
version = "0.1.85"
description = "PromptLayer is a package to keep track of your GPT models training"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "promptlayer-0.1.84.tar.gz", hash = "sha256:38db68a67dd6d075d124badca0998070a79adce611df00e037b706704369c30a"},
{file = "promptlayer-0.1.85.tar.gz", hash = "sha256:7f5ee282361e200253f0aa53267756a112d3aa1fa29d680a634031c617de20de"},
]
[package.dependencies]

Loading…
Cancel
Save