Mirror of https://github.com/hwchase17/langchain, synced 2024-11-18 09:25:54 +00:00. Commit `e7e5cb9d08`.
# [WIP] Tree of Thought: introducing a new ToTChain

This PR adds a new chain called ToTChain that implements the ["Large Language Model Guided Tree-of-Thought"](https://arxiv.org/pdf/2305.08291.pdf) paper. There's a notebook example `docs/modules/chains/examples/tot.ipynb` that shows how to use it.

Implements #4975

## Who can review?

Community members can review the PR once tests pass. Tag maintainers/contributors who might be interested:

- @hwchase17
- @vowelparrot

---------

Co-authored-by: Vadim Gubergrits <vgubergrits@outbox.com>
Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
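For orientation, here is a minimal sketch of how the new chain might be wired up with a custom checker. The import path and the `k`/`c` arguments follow the notebook example mentioned above and should be treated as assumptions to verify against `docs/modules/chains/examples/tot.ipynb`; `SubstringChecker` is a hypothetical `ToTChecker` subclass, sketched after the file below.

```python
# Sketch only: wiring ToTChain with a custom checker. The import path and
# the constructor arguments (k, c) are assumptions based on the notebook example.
from langchain.chat_models import ChatOpenAI
from langchain_experimental.tot.base import ToTChain

llm = ChatOpenAI(temperature=1.0, max_tokens=512)
tot_chain = ToTChain(
    llm=llm,
    checker=SubstringChecker(),  # hypothetical ToTChecker subclass, sketched below
    k=30,  # total number of thoughts to generate
    c=5,   # number of child thoughts to explore at each step
    verbose=True,
)
tot_chain.run(problem_description="...")
```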
53 lines · 1.4 KiB · Python
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple

from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain

from langchain_experimental.tot.thought import ThoughtValidity


class ToTChecker(Chain, ABC):
    """
    Tree of Thought (ToT) checker.

    This is an abstract ToT checker that must be implemented by the user. You
    can implement a simple rule-based checker or a more sophisticated
    neural network based classifier.
    """

    output_key: str = "validity"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """The checker input keys.

        :meta private:
        """
        return ["problem_description", "thoughts"]

    @property
    def output_keys(self) -> List[str]:
        """The checker output keys.

        :meta private:
        """
        return [self.output_key]

    @abstractmethod
    def evaluate(
        self,
        problem_description: str,
        thoughts: Tuple[str, ...] = (),
    ) -> ThoughtValidity:
        """
        Evaluate the response to the problem description and return the solution type.
        """

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, ThoughtValidity]:
        return {self.output_key: self.evaluate(**inputs)}
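To make the abstract interface concrete, here is a minimal sketch of the kind of rule-based checker the docstring describes. It assumes `ThoughtValidity` exposes `VALID_INTERMEDIATE`, `VALID_FINAL`, and `INVALID` members; the `target` field and the prefix heuristic are purely illustrative placeholders, not part of the library.

```python
# A minimal rule-based ToTChecker sketch. The target string and the prefix
# heuristic are illustrative placeholders; replace with real validation logic.
from typing import Tuple

from langchain_experimental.tot.checker import ToTChecker
from langchain_experimental.tot.thought import ThoughtValidity


class SubstringChecker(ToTChecker):
    """Treat a thought as a final solution if it contains the target string,
    as a useful intermediate step if it is a prefix of the target,
    and as invalid otherwise."""

    target: str = "expected solution"  # hypothetical target for illustration

    def evaluate(
        self,
        problem_description: str,
        thoughts: Tuple[str, ...] = (),
    ) -> ThoughtValidity:
        last_thought = thoughts[-1] if thoughts else ""
        if self.target in last_thought:
            return ThoughtValidity.VALID_FINAL
        if last_thought and self.target.startswith(last_thought):
            return ThoughtValidity.VALID_INTERMEDIATE
        return ThoughtValidity.INVALID
```

Because `ToTChecker` already implements `_call`, `input_keys`, and `output_keys`, a subclass only needs to implement `evaluate` to be a usable `Chain`.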