2023-04-19 04:41:03 +00:00
|
|
|
import time
|
2023-12-02 03:15:23 +00:00
|
|
|
from typing import Any, Callable, List, cast
|
2023-04-19 04:41:03 +00:00
|
|
|
|
|
|
|
from langchain.tools.base import BaseTool
|
2024-01-02 20:09:45 +00:00
|
|
|
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
|
2024-04-18 20:09:11 +00:00
|
|
|
from langchain_core.prompts.chat import (
|
|
|
|
BaseChatPromptTemplate,
|
|
|
|
)
|
2024-01-02 20:09:45 +00:00
|
|
|
from langchain_core.vectorstores import VectorStoreRetriever
|
2023-07-22 01:44:32 +00:00
|
|
|
|
|
|
|
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
|
Use a submodule for pydantic v1 compat (#9371)
<!-- Thank you for contributing to LangChain!
Replace this entire comment with:
- Description: a description of the change,
- Issue: the issue # it fixes (if applicable),
- Dependencies: any dependencies required for this change,
- Tag maintainer: for a quicker response, tag the relevant maintainer
(see below),
- Twitter handle: we announce bigger features on Twitter. If your PR
gets announced and you'd like a mention, we'll gladly shout you out!
Please make sure your PR is passing linting and testing before
submitting. Run `make format`, `make lint` and `make test` to check this
locally.
See contribution guidelines for more information on how to write/run
tests, lint, etc:
https://github.com/hwchase17/langchain/blob/master/.github/CONTRIBUTING.md
If you're adding a new integration, please include:
1. a test for the integration, preferably unit tests that do not rely on
network access,
2. an example notebook showing its use. These live is docs/extras
directory.
If no one reviews your PR within a few days, please @-mention one of
@baskaryan, @eyurtsev, @hwchase17, @rlancemartin.
-->
2023-08-17 15:35:49 +00:00
|
|
|
from langchain_experimental.pydantic_v1 import BaseModel
|
2023-04-19 04:41:03 +00:00
|
|
|
|
|
|
|
|
2023-10-17 01:13:31 +00:00
|
|
|
# This class has a metaclass conflict: both `BaseChatPromptTemplate` and `BaseModel`
|
|
|
|
# define a metaclass to use, and the two metaclasses attempt to define
|
|
|
|
# the same functions but in mutually-incompatible ways.
|
|
|
|
# It isn't clear how to resolve this, and this code predates mypy
|
|
|
|
# beginning to perform that check.
|
|
|
|
#
|
|
|
|
# Mypy errors:
|
|
|
|
# ```
|
|
|
|
# Definition of "__private_attributes__" in base class "BaseModel" is
|
|
|
|
# incompatible with definition in base class "BaseModel" [misc]
|
|
|
|
# Definition of "__repr_name__" in base class "Representation" is
|
|
|
|
# incompatible with definition in base class "BaseModel" [misc]
|
|
|
|
# Definition of "__pretty__" in base class "Representation" is
|
|
|
|
# incompatible with definition in base class "BaseModel" [misc]
|
|
|
|
# Definition of "__repr_str__" in base class "Representation" is
|
|
|
|
# incompatible with definition in base class "BaseModel" [misc]
|
|
|
|
# Definition of "__rich_repr__" in base class "Representation" is
|
|
|
|
# incompatible with definition in base class "BaseModel" [misc]
|
|
|
|
# Metaclass conflict: the metaclass of a derived class must be
|
|
|
|
# a (non-strict) subclass of the metaclasses of all its bases [misc]
|
|
|
|
# ```
|
|
|
|
#
|
|
|
|
# TODO: look into refactoring this class in a way that avoids the mypy type errors
|
|
|
|
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):  # type: ignore[misc]
    """Prompt for AutoGPT.

    Assembles the chat-message list sent to the model from:
    the agent's persona and goals, the tool specification, relevant
    long-term memory retrieved from a vector store, as much recent
    message history as fits, and the latest user input — trimming
    pieces to stay under the token budget.
    """

    # Name the agent uses to refer to itself in the system prompt.
    ai_name: str
    # Short description of the agent's role/persona.
    ai_role: str
    # Tools whose descriptions are embedded into the base prompt.
    tools: List[BaseTool]
    # Model-specific callable returning the token count of a string.
    token_counter: Callable[[str], int]
    # Overall token budget for the constructed message list.
    send_token_limit: int = 4196

    def construct_full_prompt(self, goals: List[str]) -> str:
        """Build the static system prompt: persona, goals, and tool spec.

        Args:
            goals: Goal strings, rendered as a 1-based numbered list.

        Returns:
            The complete system-prompt text, ending with the tool/command
            specification produced by ``get_prompt``.
        """
        prompt_start = (
            "Your decisions must always be made independently "
            "without seeking user assistance.\n"
            "Play to your strengths as an LLM and pursue simple "
            "strategies with no legal complications.\n"
            "If you have completed all your tasks, make sure to "
            'use the "finish" command.'
        )
        # Construct full prompt
        full_prompt = (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        )
        for i, goal in enumerate(goals):
            full_prompt += f"{i+1}. {goal}\n"

        full_prompt += f"\n\n{get_prompt(self.tools)}"
        return full_prompt

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """Format the full message list for one model call.

        Expected ``kwargs``:
            goals: List[str] — agent goals for the system prompt.
            memory: VectorStoreRetriever — long-term memory store.
            messages: List[BaseMessage] — full conversation history.
            user_input: str — the latest human instruction.

        Returns:
            ``[system prompt, time stamp, relevant memory]`` followed by as
            many recent history messages as fit under
            ``send_token_limit - 1000``, then the user input.
        """
        base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
        time_prompt = SystemMessage(
            content=f"The current time and date is {time.strftime('%c')}"
        )
        used_tokens = self.token_counter(
            cast(str, base_prompt.content)
        ) + self.token_counter(cast(str, time_prompt.content))
        memory: VectorStoreRetriever = kwargs["memory"]
        previous_messages = kwargs["messages"]
        # Query long-term memory with the last 10 messages as context.
        relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
        relevant_memory = [d.page_content for d in relevant_docs]
        relevant_memory_tokens = sum(
            self.token_counter(doc) for doc in relevant_memory
        )
        # Drop memory snippets (least relevant last) until the prompt so far
        # fits in a 2500-token budget. The `relevant_memory` guard prevents
        # an infinite loop when `used_tokens` alone already exceeds 2500:
        # without it, truncating an empty list leaves the condition true
        # forever. Subtracting the dropped snippet's tokens avoids re-summing
        # the whole list on every iteration.
        while relevant_memory and used_tokens + relevant_memory_tokens > 2500:
            dropped = relevant_memory.pop()
            relevant_memory_tokens -= self.token_counter(dropped)
        content_format = (
            f"This reminds you of these events "
            f"from your past:\n{relevant_memory}\n\n"
        )
        memory_message = SystemMessage(content=content_format)
        used_tokens += self.token_counter(cast(str, memory_message.content))
        # Walk history newest-first, prepending messages while they fit;
        # 1000 tokens are reserved for the model's response.
        historical_messages: List[BaseMessage] = []
        for message in previous_messages[-10:][::-1]:
            message_tokens = self.token_counter(message.content)
            if used_tokens + message_tokens > self.send_token_limit - 1000:
                break
            historical_messages = [message] + historical_messages
            used_tokens += message_tokens
        input_message = HumanMessage(content=kwargs["user_input"])
        messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message]
        messages += historical_messages
        messages.append(input_message)
        return messages

    def pretty_repr(self, html: bool = False) -> str:
        """Human-readable representation; not implemented for this prompt."""
        raise NotImplementedError
|