From daee0b2b9754b13d87b25c7dfbf5298ec22f3eb6 Mon Sep 17 00:00:00 2001 From: Zander Chase <130414180+vowelparrot@users.noreply.github.com> Date: Thu, 20 Apr 2023 13:31:30 -0700 Subject: [PATCH 1/5] Patch Chat History Formatting (#3236) While we work on solidifying the memory interfaces, handle common chat history formats. This may break linting on anyone who has been passing in `get_chat_history` . Somewhat handles #3077 Alternative to #3078 that updates the typing --- .../chains/conversational_retrieval/base.py | 30 ++++++++++++++----- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/langchain/chains/conversational_retrieval/base.py b/langchain/chains/conversational_retrieval/base.py index 97424ecb..b7fb299e 100644 --- a/langchain/chains/conversational_retrieval/base.py +++ b/langchain/chains/conversational_retrieval/base.py @@ -15,16 +15,32 @@ from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_ from langchain.chains.llm import LLMChain from langchain.chains.question_answering import load_qa_chain from langchain.prompts.base import BasePromptTemplate -from langchain.schema import BaseLanguageModel, BaseRetriever, Document +from langchain.schema import BaseLanguageModel, BaseMessage, BaseRetriever, Document from langchain.vectorstores.base import VectorStore +# Depending on the memory type and configuration, the chat history format may differ. +# This needs to be consolidated. 
+CHAT_TURN_TYPE = Union[Tuple[str, str], BaseMessage] -def _get_chat_history(chat_history: List[Tuple[str, str]]) -> str: + +_ROLE_MAP = {"human": "Human: ", "ai": "Assistant: "} + + +def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str: buffer = "" - for human_s, ai_s in chat_history: - human = "Human: " + human_s - ai = "Assistant: " + ai_s - buffer += "\n" + "\n".join([human, ai]) + for dialogue_turn in chat_history: + if isinstance(dialogue_turn, BaseMessage): + role_prefix = _ROLE_MAP.get(dialogue_turn.type, f"{dialogue_turn.type}: ") + buffer += f"\n{role_prefix}{dialogue_turn.content}" + elif isinstance(dialogue_turn, tuple): + human = "Human: " + dialogue_turn[0] + ai = "Assistant: " + dialogue_turn[1] + buffer += "\n" + "\n".join([human, ai]) + else: + raise ValueError( + f"Unsupported chat history format: {type(dialogue_turn)}." + f" Full chat history: {chat_history} " + ) return buffer @@ -35,7 +51,7 @@ class BaseConversationalRetrievalChain(Chain): question_generator: LLMChain output_key: str = "answer" return_source_documents: bool = False - get_chat_history: Optional[Callable[[Tuple[str, str]], str]] = None + get_chat_history: Optional[Callable[[CHAT_TURN_TYPE], str]] = None """Return the source documents.""" class Config: From 7d3e6389f26da27ef4fcb9466795fe4622ab1e5a Mon Sep 17 00:00:00 2001 From: Tom Dyson Date: Thu, 20 Apr 2023 22:02:20 +0100 Subject: [PATCH 2/5] Add DuckDB prompt (#3233) Adds a prompt template for the DuckDB SQL dialect. --- langchain/chains/sql_database/prompt.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/langchain/chains/sql_database/prompt.py b/langchain/chains/sql_database/prompt.py index 0ced8a22..8fd3b46a 100644 --- a/langchain/chains/sql_database/prompt.py +++ b/langchain/chains/sql_database/prompt.py @@ -40,6 +40,27 @@ DECIDER_PROMPT = PromptTemplate( output_parser=CommaSeparatedListOutputParser(), ) +_duckdb_prompt = """You are a DuckDB expert. 
Given an input question, first create a syntactically correct DuckDB query to run, then look at the results of the query and return the answer to the input question. +Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per DuckDB. You can order the results to return the most informative data in the database. +Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers. +Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. + +Use the following format: + +Question: "Question here" +SQLQuery: "SQL Query to run" +SQLResult: "Result of the SQLQuery" +Answer: "Final answer here" + +Only use the following tables: +{table_info} + +Question: {input}""" + +DUCKDB_PROMPT = PromptTemplate( + input_variables=["input", "table_info", "top_k"], + template=_duckdb_prompt, +) _googlesql_prompt = """You are a GoogleSQL expert. Given an input question, first create a syntactically correct GoogleSQL query to run, then look at the results of the query and return the answer to the input question. Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per GoogleSQL. You can order the results to return the most informative data in the database. 
@@ -201,6 +222,7 @@ SQLITE_PROMPT = PromptTemplate( SQL_PROMPTS = { + "duckdb": DUCKDB_PROMPT, "googlesql": GOOGLESQL_PROMPT, "mssql": MSSQL_PROMPT, "mysql": MYSQL_PROMPT, From ae528fd06e005992e6b0d6a623acb83de27c578d Mon Sep 17 00:00:00 2001 From: Daniel Chalef <131175+danielchalef@users.noreply.github.com> Date: Thu, 20 Apr 2023 15:03:32 -0600 Subject: [PATCH 3/5] fix error msg ref to beautifulsoup4 (#3242) Co-authored-by: Daniel Chalef --- langchain/document_loaders/html_bs.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/langchain/document_loaders/html_bs.py b/langchain/document_loaders/html_bs.py index c90e32d7..fc636367 100644 --- a/langchain/document_loaders/html_bs.py +++ b/langchain/document_loaders/html_bs.py @@ -24,7 +24,8 @@ class BSHTMLLoader(BaseLoader): import bs4 # noqa:F401 except ImportError: raise ValueError( - "bs4 package not found, please install it with " "`pip install bs4`" + "beautifulsoup4 package not found, please install it with " + "`pip install beautifulsoup4`" ) self.file_path = file_path From 0e797a3ff993070440927aa7549493d3bf885eb4 Mon Sep 17 00:00:00 2001 From: Boris Feld Date: Thu, 20 Apr 2023 23:57:41 +0200 Subject: [PATCH 4/5] Fixing issue link for Comet callback (#3212) Sorry I fixed that link once but there was still a typo inside, this time it should be good. --- langchain/callbacks/comet_ml_callback.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/langchain/callbacks/comet_ml_callback.py b/langchain/callbacks/comet_ml_callback.py index 7917d26a..877a8f90 100644 --- a/langchain/callbacks/comet_ml_callback.py +++ b/langchain/callbacks/comet_ml_callback.py @@ -130,7 +130,7 @@ class CometCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): warning = ( "The comet_ml callback is currently in beta and is subject to change " "based on updates to `langchain`. 
Please report any issues to " - "https://github.com/comet-ml/issue_tracking/issues with the tag " + "https://github.com/comet-ml/issue-tracking/issues with the tag " "`langchain`." ) self.comet_ml.LOGGER.warning(warning) From 0684aa081a26b0773471e02f842c6af8618c4ebc Mon Sep 17 00:00:00 2001 From: Albert Castellana Date: Fri, 21 Apr 2023 00:20:21 +0200 Subject: [PATCH 5/5] Ecosystem/Yeager.ai (#3239) Added yeagerai.md to ecosystem --- docs/ecosystem/yeagerai.md | 43 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 docs/ecosystem/yeagerai.md diff --git a/docs/ecosystem/yeagerai.md b/docs/ecosystem/yeagerai.md new file mode 100644 index 00000000..6483cce9 --- /dev/null +++ b/docs/ecosystem/yeagerai.md @@ -0,0 +1,43 @@ +# Yeager.ai + +This page covers how to use [Yeager.ai](https://yeager.ai) to generate LangChain tools and agents. + +## What is Yeager.ai? +Yeager.ai is an ecosystem designed to simplify the process of creating AI agents and tools. + +It features yAgents, a No-code LangChain Agent Builder, which enables users to build, test, and deploy AI solutions with ease. Leveraging the LangChain framework, yAgents allows seamless integration with various language models and resources, making it suitable for developers, researchers, and AI enthusiasts across diverse applications. + +## yAgents +Low code generative agent designed to help you build, prototype, and deploy LangChain tools with ease. + +### How to use? +``` +pip install yeagerai-agent +yeagerai-agent +``` +Go to http://127.0.0.1:7860 + +This will install the necessary dependencies and set up yAgents on your system. After the first run, yAgents will create a .env file where you can input your OpenAI API key. You can do the same directly from the Gradio interface under the tab "Settings". + +`OPENAI_API_KEY=` + +We recommend using GPT-4. However, the tool can also work with GPT-3 if the problem is broken down sufficiently. 
+ +### Creating and Executing Tools with yAgents +yAgents makes it easy to create and execute AI-powered tools. Here's a brief overview of the process: +1. Create a tool: To create a tool, provide a natural language prompt to yAgents. The prompt should clearly describe the tool's purpose and functionality. For example: +`create a tool that returns the n-th prime number` + +2. Load the tool into the toolkit: To load a tool into yAgents, simply provide a command to yAgents that says so. For example: +`load the tool that you just created into your toolkit` + +3. Execute the tool: To run a tool or agent, simply provide a command to yAgents that includes the name of the tool and any required parameters. For example: +`generate the 50th prime number` + +You can see a video of how it works [here](https://www.youtube.com/watch?v=KA5hCM3RaWE). + +As you become more familiar with yAgents, you can create more advanced tools and agents to automate your work and enhance your productivity. + +For more information, see [yAgents' GitHub](https://github.com/yeagerai/yeagerai-agent) or our [docs](https://yeagerai.gitbook.io/docs/general/welcome-to-yeager.ai) + 