Update run eval imports in init (#5858)

Zander Chase 2023-06-08 10:44:36 -07:00, committed by GitHub
parent 511c12dd39
commit 5f74db4500
3 changed files with 14 additions and 12 deletions

langchain/evaluation/run_evaluators/__init__.py

@@ -1,20 +1,22 @@
 """Evaluation classes that interface with traced runs and datasets."""
 from langchain.evaluation.run_evaluators.base import (
-    RunEvalInputMapper,
     RunEvaluator,
     RunEvaluatorChain,
+    RunEvaluatorInputMapper,
     RunEvaluatorOutputParser,
 )
 from langchain.evaluation.run_evaluators.implementations import (
     ChoicesOutputParser,
     StringRunEvaluatorInputMapper,
     get_criteria_evaluator,
     get_qa_evaluator,
 )
 
 __all__ = [
     "RunEvaluator",
-    "RunEvalInputMapper",
     "RunEvaluatorChain",
+    "RunEvaluatorInputMapper",
     "RunEvaluatorOutputParser",
     "get_qa_evaluator",
     "get_criteria_evaluator",
     "StringRunEvaluatorInputMapper",
     "ChoicesOutputParser",
 ]
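With the rename in place, downstream code imports the new names from the package init. A minimal sketch; the import path and names are taken directly from the diff above, nothing else is assumed:

from langchain.evaluation.run_evaluators import (
    RunEvaluatorChain,
    RunEvaluatorInputMapper,
    StringRunEvaluatorInputMapper,
    get_qa_evaluator,
)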

langchain/evaluation/run_evaluators/base.py

@@ -15,7 +15,7 @@ from langchain.chains.llm import LLMChain
 from langchain.schema import RUN_KEY, BaseOutputParser
 
 
-class RunEvalInputMapper:
+class RunEvaluatorInputMapper:
     """Map the inputs of a run to the inputs of an evaluation."""
 
     @abstractmethod
@@ -37,7 +37,7 @@ class RunEvaluatorOutputParser(BaseOutputParser[EvaluationResult]):
 class RunEvaluatorChain(Chain, RunEvaluator):
     """Evaluate Run and optional examples."""
 
-    input_mapper: RunEvalInputMapper
+    input_mapper: RunEvaluatorInputMapper
     """Maps the Run and Optional example to a dictionary for the eval chain."""
     eval_chain: LLMChain
     """The evaluation chain."""

langchain/evaluation/run_evaluators/implementations.py

@@ -10,8 +10,8 @@ from langchain.evaluation.qa.eval_chain import QAEvalChain
 from langchain.evaluation.qa.eval_prompt import PROMPT as QA_DEFAULT_PROMPT
 from langchain.evaluation.qa.eval_prompt import SQL_PROMPT
 from langchain.evaluation.run_evaluators.base import (
-    RunEvalInputMapper,
     RunEvaluatorChain,
+    RunEvaluatorInputMapper,
     RunEvaluatorOutputParser,
 )
 from langchain.evaluation.run_evaluators.criteria_prompt import (
@@ -25,7 +25,7 @@ _QA_PROMPTS = {
 }
 
 
-class StringRunEvalInputMapper(RunEvalInputMapper, BaseModel):
+class StringRunEvaluatorInputMapper(RunEvaluatorInputMapper, BaseModel):
     """Maps the Run and Optional[Example] to a dictionary."""
 
     prediction_map: Mapping[str, str]
@@ -97,7 +97,7 @@ def get_qa_evaluator(
     eval_chain = QAEvalChain.from_llm(llm=llm, prompt=prompt, **kwargs)
     input_mapper = kwargs.pop(
         "input_mapper",
-        StringRunEvalInputMapper(
+        StringRunEvaluatorInputMapper(
             input_map={input_key: "query"},
             prediction_map={prediction_key: "result"},
             answer_map={answer_key: "answer"},
@@ -179,7 +179,7 @@ def get_criteria_evaluator(
     prompt_ = prompt.partial(criteria=criteria_str)
     input_mapper = kwargs.pop(
         "input_mapper",
-        StringRunEvalInputMapper(
+        StringRunEvaluatorInputMapper(
            input_map={input_key: "input"},
            prediction_map={prediction_key: "output"},
        ),
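A usage sketch for the factory touched above, assuming get_qa_evaluator takes the LLM as its first argument (consistent with QAEvalChain.from_llm(llm=llm, ...) in the hunk) and that ChatOpenAI is available at this version; the default input_key/prediction_key/answer_key values are not shown in the diff and are left implicit:

from langchain.chat_models import ChatOpenAI
from langchain.evaluation.run_evaluators import get_qa_evaluator

# Build a QA run evaluator; per the hunk above, it wires a
# StringRunEvaluatorInputMapper that maps run/example fields onto the
# QA chain's query/result/answer keys.
evaluator = get_qa_evaluator(ChatOpenAI(temperature=0))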