update experimental (#8402)

Some changes were made to experimental; this commit ports them over.
Harrison Chase authored 1 year ago · committed by GitHub
parent af7e70d4af
commit 3a78450883

@@ -6,6 +6,8 @@ from pydantic import Field
 class AutoGPTMemory(BaseChatMemory):
+    """Memory for AutoGPT."""
     retriever: VectorStoreRetriever = Field(exclude=True)
+    """VectorStoreRetriever object to connect to."""

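For context, and not part of this commit: a minimal sketch of wiring the `retriever` field. `FAISS` and `OpenAIEmbeddings` are illustrative stand-ins, not anything the diff prescribes.

```python
# Illustrative only: any VectorStoreRetriever works; FAISS/OpenAIEmbeddings are assumptions.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain_experimental.autogpt.memory import AutoGPTMemory

vectorstore = FAISS.from_texts(["seed memory"], OpenAIEmbeddings())
memory = AutoGPTMemory(retriever=vectorstore.as_retriever())
```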
@@ -7,20 +7,18 @@ from langchain.schema import BaseOutputParser
 class AutoGPTAction(NamedTuple):
-    """Action for AutoGPT."""
+    """Action returned by AutoGPTOutputParser."""

     name: str
-    """Name of the action."""
     args: Dict
-    """Arguments for the action."""


 class BaseAutoGPTOutputParser(BaseOutputParser):
-    """Base class for AutoGPT output parsers."""
+    """Base Output parser for AutoGPT."""

     @abstractmethod
     def parse(self, text: str) -> AutoGPTAction:
-        """Parse text and return AutoGPTAction"""
+        """Return AutoGPTAction"""


 def preprocess_json_input(input_str: str) -> str:

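Reviewer note, not in the diff: the parsing contract above, sketched with only the stdlib. The `{"command": {"name": ..., "args": ...}}` shape follows AutoGPT's response format and is an assumption here, not a quote from this commit.

```python
# Hedged, self-contained sketch of what an AutoGPT output parser does.
import json
from typing import Dict, NamedTuple

class AutoGPTAction(NamedTuple):
    """Action returned by AutoGPTOutputParser."""
    name: str
    args: Dict

def parse(text: str) -> AutoGPTAction:
    try:
        parsed = json.loads(text, strict=False)
        return AutoGPTAction(
            name=parsed["command"]["name"],
            args=parsed["command"]["args"],
        )
    except (json.JSONDecodeError, KeyError) as err:
        # Surface the failure as an ERROR action instead of raising.
        return AutoGPTAction(name="ERROR", args={"error": str(err)})

print(parse('{"command": {"name": "search", "args": {"query": "langchain"}}}'))
```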
@@ -123,7 +123,7 @@ class PromptGenerator:
 def get_prompt(tools: List[BaseTool]) -> str:
-    """Generate a prompt string.
+    """Generates a prompt string.

     It includes various constraints, commands, resources, and performance evaluations.

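Hypothetical usage of `get_prompt` (the `Tool` here is a placeholder, not from the diff):

```python
# Illustrative call; get_prompt assembles the AutoGPT prompt from the tool list.
from langchain.agents import Tool
from langchain_experimental.autogpt.prompt_generator import get_prompt

tools = [Tool(name="noop", func=lambda q: q, description="Echoes its input.")]
print(get_prompt(tools))  # constraints, commands, resources, performance evaluations
```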
@@ -4,7 +4,7 @@ from langchain.schema.language_model import BaseLanguageModel
 class TaskCreationChain(LLMChain):
-    """Chain to generates tasks."""
+    """Chain generating tasks."""

     @classmethod
     def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:

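Illustrative use of the `from_llm` constructor; `ChatOpenAI` and the exact import path are assumptions, not part of the diff:

```python
# Sketch of the classmethod constructor pattern shown above.
from langchain.chat_models import ChatOpenAI
from langchain_experimental.autonomous_agents.baby_agi.task_creation import TaskCreationChain

task_creation_chain = TaskCreationChain.from_llm(ChatOpenAI(temperature=0), verbose=False)
```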
@@ -11,11 +11,10 @@ from langchain_experimental.generative_agents.memory import GenerativeAgentMemory
 class GenerativeAgent(BaseModel):
-    """A character with memory and innate characteristics."""
+    """An Agent as a character with memory and innate characteristics."""

     name: str
     """The character's name."""
     age: Optional[int] = None
     """The optional age of the character."""
     traits: str = "N/A"
@@ -29,13 +28,10 @@ class GenerativeAgent(BaseModel):
     verbose: bool = False
     summary: str = ""  #: :meta private:
     """Stateful self-summary generated via reflection on the character's memory."""
     summary_refresh_seconds: int = 3600  #: :meta private:
     """How frequently to re-generate the summary."""
     last_refreshed: datetime = Field(default_factory=datetime.now)  # : :meta private:
     """The last time the character's summary was regenerated."""
     daily_summaries: List[str] = Field(default_factory=list)  # : :meta private:
     """Summary of the events in the plan that the agent took."""

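A self-contained construction sketch for `GenerativeAgent` (not in this commit); every component is a fake or minimal stand-in so it runs without API keys:

```python
# All choices here (FakeListLLM, FakeEmbeddings, FAISS) are illustrative assumptions.
from langchain.embeddings import FakeEmbeddings
from langchain.llms.fake import FakeListLLM
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS
from langchain_experimental.generative_agents import GenerativeAgent, GenerativeAgentMemory

llm = FakeListLLM(responses=["5"])  # stand-in for a real scorer/summarizer LLM
vectorstore = FAISS.from_texts(["init"], FakeEmbeddings(size=128))
retriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, k=5)
memory = GenerativeAgentMemory(llm=llm, memory_retriever=retriever)

tommie = GenerativeAgent(
    name="Tommie",
    age=25,
    traits="anxious, likes design, talkative",
    status="looking for a job",
    llm=llm,
    memory=memory,
)
```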
@@ -14,24 +14,21 @@ logger = logging.getLogger(__name__)
 class GenerativeAgentMemory(BaseMemory):
     """Memory for the generative agent."""

     llm: BaseLanguageModel
     """The core language model."""
     memory_retriever: TimeWeightedVectorStoreRetriever
     """The retriever to fetch related memories."""
     verbose: bool = False
     reflection_threshold: Optional[float] = None
     """When aggregate_importance exceeds reflection_threshold, stop to reflect."""
     current_plan: List[str] = []
     """The current plan of the agent."""

     # A weight of 0.15 makes this less important than it
     # would be otherwise, relative to salience and time
     importance_weight: float = 0.15
     """How much weight to assign the memory importance."""

     aggregate_importance: float = 0.0  # : :meta private:
     """Track the sum of the 'importance' of recent memories.

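A plain-Python sketch of the bookkeeping these fields describe (numbers are illustrative; in the real class an LLM rates each memory's importance on a 1-10 scale and `pause_to_reflect` does the actual reflection):

```python
# Illustrative accumulator loop; not the class's actual implementation.
importance_weight = 0.15
reflection_threshold = 0.5
aggregate_importance = 0.0

def score_memory_importance(memory_content: str) -> float:
    llm_rating = 8.0  # stub for the LLM's 1-10 rating of this memory
    return (llm_rating / 10) * importance_weight

for content in ["saw a dog", "got a job offer", "moved to a new city"]:
    aggregate_importance += score_memory_importance(content)
    if reflection_threshold is not None and aggregate_importance > reflection_threshold:
        # the agent would pause to reflect here, then reset the accumulator
        aggregate_importance = 0.0
```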
@@ -17,7 +17,7 @@ def import_jsonformer() -> jsonformer:
     try:
         import jsonformer
     except ImportError:
-        raise ValueError(
+        raise ImportError(
             "Could not import jsonformer python package. "
             "Please install it with `pip install jsonformer`."
         )

@@ -25,6 +25,11 @@ def import_jsonformer() -> jsonformer:
 class JsonFormer(HuggingFacePipeline):
     """Jsonformer wrapped LLM using HuggingFace Pipeline API.
+
+    This pipeline is experimental and not yet stable.
     """

     json_schema: dict = Field(..., description="The JSON Schema to complete.")
     max_new_tokens: int = Field(
         default=200, description="Maximum number of new tokens to generate."

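Hedged usage sketch for `JsonFormer` (model choice is illustrative; `jsonformer` and `transformers` must be installed):

```python
# Constrain generation to a JSON schema; gpt2 is only a small illustrative model.
from langchain_experimental.llms import JsonFormer
from transformers import pipeline

hf_pipeline = pipeline("text-generation", model="gpt2")
json_schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}, "age": {"type": "number"}},
}
llm = JsonFormer(pipeline=hf_pipeline, json_schema=json_schema, max_new_tokens=200)
print(llm("Describe a person as JSON:"))
```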
@@ -23,7 +23,7 @@ def import_rellm() -> rellm:
     try:
         import rellm
     except ImportError:
-        raise ValueError(
+        raise ImportError(
             "Could not import rellm python package. "
             "Please install it with `pip install rellm`."
         )

@@ -31,6 +31,8 @@ def import_rellm() -> rellm:
 class RELLM(HuggingFacePipeline):
     """RELLM wrapped LLM using HuggingFace Pipeline API."""

     regex: RegexPattern = Field(..., description="The structured format to complete.")
     max_new_tokens: int = Field(
         default=200, description="Maximum number of new tokens to generate."

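And the matching `RELLM` sketch; `rellm` builds on the third-party `regex` library's pattern type, and the pattern shown is only an example:

```python
# Constrain generation to a regex, e.g. force an ISO date.
import regex  # rellm uses the `regex` library's Pattern type
from langchain_experimental.llms import RELLM
from transformers import pipeline

hf_pipeline = pipeline("text-generation", model="gpt2")
pattern = regex.compile(r"\d{4}-\d{2}-\d{2}")
llm = RELLM(pipeline=hf_pipeline, regex=pattern, max_new_tokens=20)
print(llm("Today's date is "))
```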
@@ -16,9 +16,14 @@ from langchain_experimental.plan_and_execute.schema import (
 class PlanAndExecute(Chain):
+    """Plan and execute a chain of steps."""
+
     planner: BasePlanner
+    """The planner to use."""
     executor: BaseExecutor
+    """The executor to use."""
     step_container: BaseStepContainer = Field(default_factory=ListStepContainer)
+    """The step container to use."""
     input_key: str = "input"
     output_key: str = "output"

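A minimal wiring sketch for these fields, assuming the package's `load_chat_planner` / `load_agent_executor` helpers and an illustrative `ChatOpenAI`:

```python
# Not from the diff; step_container is left at its ListStepContainer default.
from langchain.chat_models import ChatOpenAI
from langchain_experimental.plan_and_execute import (
    PlanAndExecute,
    load_agent_executor,
    load_chat_planner,
)

llm = ChatOpenAI(temperature=0)
planner = load_chat_planner(llm)
executor = load_agent_executor(llm, tools=[], verbose=False)
agent = PlanAndExecute(planner=planner, executor=executor)
# agent.run("...") would plan steps, execute each, and collect them in step_container.
```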
@@ -9,6 +9,8 @@ from langchain_experimental.plan_and_execute.schema import StepResponse
 class BaseExecutor(BaseModel):
+    """Base executor."""
+
     @abstractmethod
     def step(
         self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any

@@ -19,11 +21,14 @@ class BaseExecutor(BaseModel):
     async def astep(
         self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
     ) -> StepResponse:
-        """Take step."""
+        """Take async step."""


 class ChainExecutor(BaseExecutor):
+    """Chain executor."""
+
     chain: Chain
+    """The chain to use."""

     def step(
         self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any

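The executor contract above in miniature; `EchoExecutor` is a made-up name for illustration:

```python
# A subclass only has to turn inputs into a StepResponse.
from typing import Any
from langchain.callbacks.manager import Callbacks
from langchain_experimental.plan_and_execute.executors.base import BaseExecutor
from langchain_experimental.plan_and_execute.schema import StepResponse

class EchoExecutor(BaseExecutor):
    """Trivial executor that echoes its inputs."""

    def step(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> StepResponse:
        return StepResponse(response=f"echo: {inputs}")

    async def astep(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> StepResponse:
        return self.step(inputs, callbacks=callbacks, **kwargs)
```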
@@ -9,6 +9,8 @@ from langchain_experimental.plan_and_execute.schema import Plan, PlanOutputParser
 class BasePlanner(BaseModel):
+    """Base planner."""
+
     @abstractmethod
     def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
         """Given input, decide what to do."""

@@ -17,13 +19,18 @@ class BasePlanner(BaseModel):
     async def aplan(
         self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
     ) -> Plan:
-        """Given input, decide what to do."""
+        """Given input, asynchronously decide what to do."""


 class LLMPlanner(BasePlanner):
+    """LLM planner."""
+
     llm_chain: LLMChain
+    """The LLM chain to use."""
     output_parser: PlanOutputParser
+    """The output parser to use."""
     stop: Optional[List] = None
+    """The stop list to use."""

     def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
         """Given input, decide what to do."""

@@ -33,7 +40,7 @@ class LLMPlanner(BasePlanner):
     async def aplan(
         self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
     ) -> Plan:
-        """Given input, decide what to do."""
+        """Given input, asynchronously decide what to do."""
         llm_response = await self.llm_chain.arun(
             **inputs, stop=self.stop, callbacks=callbacks
         )

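Same idea for planners; `FixedPlanner` is hypothetical and skips the LLM entirely to show only the contract:

```python
# LLMPlanner runs llm_chain and parses the text into a Plan;
# this stub just returns a canned Plan.
from typing import Any
from langchain.callbacks.manager import Callbacks
from langchain_experimental.plan_and_execute.planners.base import BasePlanner
from langchain_experimental.plan_and_execute.schema import Plan, Step

class FixedPlanner(BasePlanner):
    """Always returns the same two-step plan."""

    def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
        return Plan(steps=[Step(value="research"), Step(value="summarize")])

    async def aplan(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> Plan:
        return self.plan(inputs, callbacks=callbacks, **kwargs)
```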
@@ -25,6 +25,8 @@ SYSTEM_PROMPT = (
 class PlanningOutputParser(PlanOutputParser):
+    """Planning output parser."""
+
     def parse(self, text: str) -> Plan:
         steps = [Step(value=v) for v in re.split("\n\s*\d+\. ", text)[1:]]
         return Plan(steps=steps)

@@ -35,6 +37,7 @@ def load_chat_planner(
 ) -> LLMPlanner:
     """
     Load a chat planner.
+
     Args:
         llm: Language model.
         system_prompt: System prompt.

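The `re.split` call in `parse` above turns a numbered list into steps; a stdlib-only demonstration:

```python
import re

text = "Steps:\n1. Search the web.\n2. Summarize findings.\n3. Write the answer."
# Split on "\n<digits>. " and drop the preamble before the first number.
steps = re.split(r"\n\s*\d+\. ", text)[1:]
print(steps)  # ['Search the web.', 'Summarize findings.', 'Write the answer.']
```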
@@ -6,18 +6,29 @@ from pydantic import BaseModel, Field
 class Step(BaseModel):
+    """Step."""
+
     value: str
+    """The value."""


 class Plan(BaseModel):
+    """Plan."""
+
     steps: List[Step]
+    """The steps."""


 class StepResponse(BaseModel):
+    """Step response."""
+
     response: str
+    """The response."""


 class BaseStepContainer(BaseModel):
+    """Base step container."""
+
     @abstractmethod
     def add_step(self, step: Step, step_response: StepResponse) -> None:
         """Add step and step response to the container."""

@@ -28,7 +39,10 @@ class BaseStepContainer(BaseModel):
 class ListStepContainer(BaseStepContainer):
+    """List step container."""
+
     steps: List[Tuple[Step, StepResponse]] = Field(default_factory=list)
+    """The steps."""

     def add_step(self, step: Step, step_response: StepResponse) -> None:
         self.steps.append((step, step_response))

@@ -41,6 +55,8 @@ class ListStepContainer(BaseStepContainer):
 class PlanOutputParser(BaseOutputParser):
+    """Plan output parser."""
+
     @abstractmethod
     def parse(self, text: str) -> Plan:
         """Parse into a plan."""

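Usage sketch for the schema types in this hunk:

```python
from langchain_experimental.plan_and_execute.schema import (
    ListStepContainer,
    Step,
    StepResponse,
)

container = ListStepContainer()
container.add_step(Step(value="research"), StepResponse(response="found 3 sources"))
print(container.steps)  # [(Step(value='research'), StepResponse(response='found 3 sources'))]
```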