From 55beab326c9188f5385ae0e34e50f3c4f51c9689 Mon Sep 17 00:00:00 2001
From: Bagatur <22008038+baskaryan@users.noreply.github.com>
Date: Thu, 27 Jul 2023 13:43:05 -0700
Subject: [PATCH] cleanup warnings (#8379)

---
 .../docs/modules/chains/foundational/sequential_chains.mdx    | 2 +-
 docs/extras/guides/debugging.md                               | 2 +-
 .../integrations/document_loaders/example_data/notebook.md    | 2 +-
 docs/snippets/modules/agents/how_to/custom_llm_agent.mdx      | 2 +-
 docs/snippets/modules/agents/how_to/custom_llm_chat_agent.mdx | 2 +-
 docs/snippets/modules/chains/additional/moderation.mdx        | 2 +-
 docs/snippets/modules/memory/get_started.mdx                  | 2 +-
 .../model_io/prompts/prompt_templates/few_shot_examples.mdx   | 2 +-
 .../modules/model_io/prompts/prompt_templates/get_started.mdx | 2 +-
 .../modules/model_io/prompts/prompt_templates/partial.mdx     | 2 +-
 10 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/docs/docs_skeleton/docs/modules/chains/foundational/sequential_chains.mdx b/docs/docs_skeleton/docs/modules/chains/foundational/sequential_chains.mdx
index 0fd2f83596..2e89506fda 100644
--- a/docs/docs_skeleton/docs/modules/chains/foundational/sequential_chains.mdx
+++ b/docs/docs_skeleton/docs/modules/chains/foundational/sequential_chains.mdx
@@ -1,6 +1,6 @@
 # Sequential
-
+
 The next step after calling a language model is make a series of calls to a language model. This is particularly useful when you want to take the output from one call and use it as the input to another.
diff --git a/docs/extras/guides/debugging.md b/docs/extras/guides/debugging.md
index b03a96f19e..2034289899 100644
--- a/docs/extras/guides/debugging.md
+++ b/docs/extras/guides/debugging.md
@@ -4,7 +4,7 @@ If you're building with LLMs, at some point something will break, and you'll nee
 Here's a few different tools and functionalities to aid in debugging.
-
+
 ## Tracing
diff --git a/docs/extras/integrations/document_loaders/example_data/notebook.md b/docs/extras/integrations/document_loaders/example_data/notebook.md
index 1b51407a4b..712bfd174c 100644
--- a/docs/extras/integrations/document_loaders/example_data/notebook.md
+++ b/docs/extras/integrations/document_loaders/example_data/notebook.md
@@ -2,7 +2,7 @@
 This notebook covers how to load data from an .ipynb notebook into a format suitable by LangChain.
-
+
 ```python
diff --git a/docs/snippets/modules/agents/how_to/custom_llm_agent.mdx b/docs/snippets/modules/agents/how_to/custom_llm_agent.mdx
index eebf7429f0..f6a4de83ae 100644
--- a/docs/snippets/modules/agents/how_to/custom_llm_agent.mdx
+++ b/docs/snippets/modules/agents/how_to/custom_llm_agent.mdx
@@ -10,7 +10,7 @@ The LLMAgent is used in an AgentExecutor. This AgentExecutor can largely be thou
 In this notebook we walk through how to create a custom LLM agent.
-
+
 ## Set up environment
diff --git a/docs/snippets/modules/agents/how_to/custom_llm_chat_agent.mdx b/docs/snippets/modules/agents/how_to/custom_llm_chat_agent.mdx
index 955fdf9557..a44fffae08 100644
--- a/docs/snippets/modules/agents/how_to/custom_llm_chat_agent.mdx
+++ b/docs/snippets/modules/agents/how_to/custom_llm_chat_agent.mdx
@@ -10,7 +10,7 @@ The LLMAgent is used in an AgentExecutor. This AgentExecutor can largely be thou
 In this notebook we walk through how to create a custom LLM agent.
-
+
 ## Set up environment
diff --git a/docs/snippets/modules/chains/additional/moderation.mdx b/docs/snippets/modules/chains/additional/moderation.mdx
index 04b6b15b46..470d167d03 100644
--- a/docs/snippets/modules/chains/additional/moderation.mdx
+++ b/docs/snippets/modules/chains/additional/moderation.mdx
@@ -3,7 +3,7 @@ We'll show:
 1. How to run any piece of text through a moderation chain.
 2. How to append a Moderation chain to an LLMChain.
-
+
 ```python
diff --git a/docs/snippets/modules/memory/get_started.mdx b/docs/snippets/modules/memory/get_started.mdx
index 8ac4b62785..0914adb127 100644
--- a/docs/snippets/modules/memory/get_started.mdx
+++ b/docs/snippets/modules/memory/get_started.mdx
@@ -5,7 +5,7 @@ One of the core utility classes underpinning most (if not all) memory modules is
 You may want to use this class directly if you are managing memory outside of a chain.
-
+
 ```python
diff --git a/docs/snippets/modules/model_io/prompts/prompt_templates/few_shot_examples.mdx b/docs/snippets/modules/model_io/prompts/prompt_templates/few_shot_examples.mdx
index d905280903..e14aafd2ff 100644
--- a/docs/snippets/modules/model_io/prompts/prompt_templates/few_shot_examples.mdx
+++ b/docs/snippets/modules/model_io/prompts/prompt_templates/few_shot_examples.mdx
@@ -1,7 +1,7 @@
 ### Use Case
 In this tutorial, we'll configure few shot examples for self-ask with search.
-
+
 ## Using an example set
diff --git a/docs/snippets/modules/model_io/prompts/prompt_templates/get_started.mdx b/docs/snippets/modules/model_io/prompts/prompt_templates/get_started.mdx
index aa3f803a46..47ba6c321e 100644
--- a/docs/snippets/modules/model_io/prompts/prompt_templates/get_started.mdx
+++ b/docs/snippets/modules/model_io/prompts/prompt_templates/get_started.mdx
@@ -77,7 +77,7 @@ For example, in OpenAI [Chat Completion API](https://platform.openai.com/docs/gu
 LangChain provides several prompt templates to make constructing and working with prompts easily. You are encouraged to use these chat related prompt templates instead of `PromptTemplate` when querying chat models to fully exploit the potential of underlying chat model.
-
+
 ```python
diff --git a/docs/snippets/modules/model_io/prompts/prompt_templates/partial.mdx b/docs/snippets/modules/model_io/prompts/prompt_templates/partial.mdx
index 8bbb13cc63..b791a220f3 100644
--- a/docs/snippets/modules/model_io/prompts/prompt_templates/partial.mdx
+++ b/docs/snippets/modules/model_io/prompts/prompt_templates/partial.mdx
@@ -2,7 +2,7 @@
 One common use case for wanting to partial a prompt template is if you get some of the variables before others. For example, suppose you have a prompt template that requires two variables, `foo` and `baz`. If you get the `foo` value early on in the chain, but the `baz` value later, it can be annoying to wait until you have both variables in the same place to pass them to the prompt template. Instead, you can partial the prompt template with the `foo` value, and then pass the partialed prompt template along and just use that. Below is an example of doing this:
-
+
 ```python
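
The final hunk above cuts off right where `partial.mdx` opens its code example, which is not shown in the diff. As a rough sketch of what partialing with the `foo` and `baz` variables described in that snippet looks like against the `langchain` API of this era (an illustration, not the file's actual contents):

```python
from langchain.prompts import PromptTemplate

# A template that needs two variables, foo and baz.
prompt = PromptTemplate(template="{foo}{baz}", input_variables=["foo", "baz"])

# Bind foo as soon as it is known; the partialed template now only needs baz.
partial_prompt = prompt.partial(foo="foo")

# Later, when baz becomes available, format with just the remaining variable.
print(partial_prompt.format(baz="baz"))  # -> foobaz
```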
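Similarly, the `memory/get_started.mdx` hunk is introducing the core utility class its truncated hunk header alludes to, presumably `ChatMessageHistory`. A minimal sketch of using that class directly, outside of a chain, again assuming the same era's `langchain` API:

```python
from langchain.memory import ChatMessageHistory

# ChatMessageHistory is a thin wrapper around a list of chat messages,
# with convenience methods for appending human and AI turns.
history = ChatMessageHistory()
history.add_user_message("hi!")
history.add_ai_message("what's up?")

# The raw message objects are available for inspection or custom storage.
print(history.messages)
```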