From f9f13402083ac537cd85b8098ffbe2e9c6317fee Mon Sep 17 00:00:00 2001
From: Aashish Saini <141953346+ShorthillsAI@users.noreply.github.com>
Date: Fri, 15 Sep 2023 06:13:36 +0530
Subject: [PATCH] Fixed some grammatical and spelling errors (#10595)

Fixed some grammatical and spelling errors
---
 .../docs/modules/memory/chat_messages/index.mdx            | 2 +-
 docs/extras/integrations/providers/predictionguard.mdx     | 6 +++---
 libs/experimental/langchain_experimental/smart_llm/base.py | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/docs_skeleton/docs/modules/memory/chat_messages/index.mdx b/docs/docs_skeleton/docs/modules/memory/chat_messages/index.mdx
index c44f1ac15b..bcdd77abe3 100644
--- a/docs/docs_skeleton/docs/modules/memory/chat_messages/index.mdx
+++ b/docs/docs_skeleton/docs/modules/memory/chat_messages/index.mdx
@@ -8,7 +8,7 @@ Head to [Integrations](/docs/integrations/memory/) for documentation on built-in
 :::
 
 One of the core utility classes underpinning most (if not all) memory modules is the `ChatMessageHistory` class.
-This is a super lightweight wrapper which provides convenience methods for saving HumanMessages, AIMessages, and then fetching them all.
+This is a super lightweight wrapper that provides convenience methods for saving HumanMessages, AIMessages, and then fetching them all.
 
 You may want to use this class directly if you are managing memory outside of a chain.
 
diff --git a/docs/extras/integrations/providers/predictionguard.mdx b/docs/extras/integrations/providers/predictionguard.mdx
index 28cb383e81..06766504f7 100644
--- a/docs/extras/integrations/providers/predictionguard.mdx
+++ b/docs/extras/integrations/providers/predictionguard.mdx
@@ -5,7 +5,7 @@ It is broken into two parts: installation and setup, and then references to spec
 ## Installation and Setup
 
 - Install the Python SDK with `pip install predictionguard`
-- Get an Prediction Guard access token (as described [here](https://docs.predictionguard.com/)) and set it as an environment variable (`PREDICTIONGUARD_TOKEN`)
+- Get a Prediction Guard access token (as described [here](https://docs.predictionguard.com/)) and set it as an environment variable (`PREDICTIONGUARD_TOKEN`)
 
 ## LLM Wrapper
 
@@ -49,7 +49,7 @@ Context: EVERY comment, DM + email suggestion has led us to this EXCITING announ
 Exclusive Candle Box - $80 
 Monthly Candle Box - $45 (NEW!)
 Scent of The Month Box - $28 (NEW!)
-Head to stories to get ALLL the deets on each box! 👆 BONUS: Save 50% on your first box with code 50OFF! 🎉
+Head to stories to get ALL the deets on each box! 👆 BONUS: Save 50% on your first box with code 50OFF! 🎉
 
 Query: {query}
 
@@ -97,4 +97,4 @@ llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)
 question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
 
 llm_chain.predict(question=question)
-```
\ No newline at end of file
+```
diff --git a/libs/experimental/langchain_experimental/smart_llm/base.py b/libs/experimental/langchain_experimental/smart_llm/base.py
index 8aff6a293a..69af8ab053 100644
--- a/libs/experimental/langchain_experimental/smart_llm/base.py
+++ b/libs/experimental/langchain_experimental/smart_llm/base.py
@@ -225,7 +225,7 @@ class SmartLLMChain(Chain):
             (
                 HumanMessagePromptTemplate,
                 "You are a resolved tasked with 1) finding which of "
-                f"the {self.n_ideas} anwer options the researcher thought was "
+                f"the {self.n_ideas} answer options the researcher thought was "
                 "best,2) improving that answer and 3) printing the answer in full. "
                 "Don't output anything for step 1 or 2, only the full answer in 3. "
                 "Let's work this out in a step by step way to be sure we have "