diff --git a/langchain/agents/agent.py b/langchain/agents/agent.py
index eda22924..80ee3ab1 100644
--- a/langchain/agents/agent.py
+++ b/langchain/agents/agent.py
@@ -1,4 +1,6 @@
 """Chain that takes in an input and produces an action and action input."""
+from __future__ import annotations
+
 from abc import ABC, abstractmethod
 from typing import Any, ClassVar, Dict, List, NamedTuple, Optional, Tuple
 
@@ -91,7 +93,7 @@ class Agent(Chain, BaseModel, ABC):
         pass
 
     @classmethod
-    def from_llm_and_tools(cls, llm: LLM, tools: List[Tool], **kwargs: Any) -> "Agent":
+    def from_llm_and_tools(cls, llm: LLM, tools: List[Tool], **kwargs: Any) -> Agent:
         """Construct an agent from an LLM and tools."""
         cls._validate_tools(tools)
         llm_chain = LLMChain(llm=llm, prompt=cls.create_prompt(tools))
diff --git a/langchain/agents/mrkl/base.py b/langchain/agents/mrkl/base.py
index 28eac3db..1519c38d 100644
--- a/langchain/agents/mrkl/base.py
+++ b/langchain/agents/mrkl/base.py
@@ -1,4 +1,6 @@
 """Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
+from __future__ import annotations
+
 from typing import Any, Callable, List, NamedTuple, Optional, Tuple
 
 from langchain.agents.agent import Agent
@@ -114,7 +116,7 @@ class MRKLChain(ZeroShotAgent):
     """
 
     @classmethod
-    def from_chains(cls, llm: LLM, chains: List[ChainConfig], **kwargs: Any) -> "Agent":
+    def from_chains(cls, llm: LLM, chains: List[ChainConfig], **kwargs: Any) -> Agent:
         """User friendly way to initialize the MRKL chain.
 
         This is intended to be an easy way to get up and running with the
diff --git a/langchain/agents/self_ask_with_search/base.py b/langchain/agents/self_ask_with_search/base.py
index 1273308d..d8184fb4 100644
--- a/langchain/agents/self_ask_with_search/base.py
+++ b/langchain/agents/self_ask_with_search/base.py
@@ -27,10 +27,7 @@ class SelfAskWithSearchAgent(Agent):
 
     def _extract_tool_and_input(self, text: str) -> Optional[Tuple[str, str]]:
         followup = "Follow up:"
-        if "\n" not in text:
-            last_line = text
-        else:
-            last_line = text.split("\n")[-1]
+        last_line = text.split("\n")[-1]
 
         if followup not in last_line:
             finish_string = "So the final answer is: "
@@ -38,10 +35,7 @@
             return None
         return "Final Answer", last_line[len(finish_string) :]
 
-        if ":" not in last_line:
-            after_colon = last_line
-        else:
-            after_colon = text.split(":")[-1]
+        after_colon = text.split(":")[-1]
 
         if " " == after_colon[0]:
             after_colon = after_colon[1:]
@@ -49,7 +43,7 @@
         return "Intermediate Answer", after_colon
 
     def _fix_text(self, text: str) -> str:
-        return text + "\nSo the final answer is:"
+        return f"{text}\nSo the final answer is:"
 
     @property
     def observation_prefix(self) -> str:
diff --git a/langchain/chains/conversation/memory.py b/langchain/chains/conversation/memory.py
index b6bf9e26..758bbbfb 100644
--- a/langchain/chains/conversation/memory.py
+++ b/langchain/chains/conversation/memory.py
@@ -84,8 +84,8 @@ class ConversationSummaryMemory(Memory, BaseModel):
         prompt_input_key = _get_prompt_input_key(inputs, self.memory_variables)
         if len(outputs) != 1:
             raise ValueError(f"One output key expected, got {outputs.keys()}")
-        human = "Human: " + inputs[prompt_input_key]
-        ai = "AI: " + list(outputs.values())[0]
+        human = f"Human: {inputs[prompt_input_key]}"
+        ai = f"AI: {list(outputs.values())[0]}"
         new_lines = "\n".join([human, ai])
         chain = LLMChain(llm=self.llm, prompt=self.prompt)
         self.buffer = chain.predict(summary=self.buffer, new_lines=new_lines)
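Nearly every hunk in this patch follows the same pattern: with `from __future__ import annotations` (PEP 563) at the top of a module, annotations are stored as strings and evaluated lazily, so a classmethod can name its own class in its return annotation without the old `-> "Agent"` quoting workaround. A minimal sketch of why, using a hypothetical `Widget` class rather than anything from this repo:

```python
from __future__ import annotations


class Widget:
    @classmethod
    def create(cls) -> Widget:  # fine: the annotation is kept as a string
        return cls()


# Without the future import, the bare `-> Widget` raises NameError at class
# definition time, because the name `Widget` is not bound yet; quoting it as
# `-> "Widget"` was the pre-PEP-563 workaround this patch removes.
```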
diff --git a/langchain/chains/mapreduce.py b/langchain/chains/mapreduce.py
index 8a0792d7..aea2baa4 100644
--- a/langchain/chains/mapreduce.py
+++ b/langchain/chains/mapreduce.py
@@ -3,6 +3,7 @@
 Splits up a document, sends the smaller parts to the LLM with one prompt,
 then combines the results with another one.
 """
+from __future__ import annotations
 
 from typing import Dict, List
 
@@ -32,7 +33,7 @@ class MapReduceChain(Chain, BaseModel):
     @classmethod
     def from_params(
         cls, llm: LLM, prompt: BasePromptTemplate, text_splitter: TextSplitter
-    ) -> "MapReduceChain":
+    ) -> MapReduceChain:
         """Construct a map-reduce chain that uses the chain for map and reduce."""
         llm_chain = LLMChain(llm=llm, prompt=prompt)
         return cls(map_llm=llm_chain, reduce_llm=llm_chain, text_splitter=text_splitter)
diff --git a/langchain/chains/natbot/base.py b/langchain/chains/natbot/base.py
index 67c69363..744c4bc8 100644
--- a/langchain/chains/natbot/base.py
+++ b/langchain/chains/natbot/base.py
@@ -1,4 +1,6 @@
 """Implement an LLM driven browser."""
+from __future__ import annotations
+
 from typing import Dict, List
 
 from pydantic import BaseModel, Extra
@@ -36,7 +38,7 @@ class NatBotChain(Chain, BaseModel):
         arbitrary_types_allowed = True
 
     @classmethod
-    def from_default(cls, objective: str) -> "NatBotChain":
+    def from_default(cls, objective: str) -> NatBotChain:
         """Load with default LLM."""
         llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)
         return cls(llm=llm, objective=objective)
diff --git a/langchain/chains/pal/base.py b/langchain/chains/pal/base.py
index 4ebeb0ea..04573625 100644
--- a/langchain/chains/pal/base.py
+++ b/langchain/chains/pal/base.py
@@ -2,6 +2,8 @@
 
 As in https://arxiv.org/pdf/2211.10435.pdf.
 """
+from __future__ import annotations
+
 from typing import Any, Dict, List
 
 from pydantic import BaseModel, Extra
@@ -57,7 +59,7 @@ class PALChain(Chain, BaseModel):
         return {self.output_key: res.strip()}
 
     @classmethod
-    def from_math_prompt(cls, llm: LLM, **kwargs: Any) -> "PALChain":
+    def from_math_prompt(cls, llm: LLM, **kwargs: Any) -> PALChain:
         """Load PAL from math prompt."""
         return cls(
             llm=llm,
@@ -68,7 +70,7 @@
         )
 
     @classmethod
-    def from_colored_object_prompt(cls, llm: LLM, **kwargs: Any) -> "PALChain":
+    def from_colored_object_prompt(cls, llm: LLM, **kwargs: Any) -> PALChain:
         """Load PAL from colored object prompt."""
         return cls(
             llm=llm,
diff --git a/langchain/chains/qa_with_sources/base.py b/langchain/chains/qa_with_sources/base.py
index 9b778985..5e295e6a 100644
--- a/langchain/chains/qa_with_sources/base.py
+++ b/langchain/chains/qa_with_sources/base.py
@@ -1,5 +1,7 @@
 """Question answering with sources over documents."""
 
+from __future__ import annotations
+
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List
 
@@ -40,7 +42,7 @@ class BaseQAWithSourcesChain(Chain, BaseModel, ABC):
         question_prompt: BasePromptTemplate = QUESTION_PROMPT,
         combine_prompt: BasePromptTemplate = COMBINE_PROMPT,
         **kwargs: Any,
-    ) -> "BaseQAWithSourcesChain":
+    ) -> BaseQAWithSourcesChain:
         """Construct the chain from an LLM."""
         llm_question_chain = LLMChain(llm=llm, prompt=question_prompt)
         llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt)
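The two guards dropped in langchain/agents/self_ask_with_search/base.py above were both dead code, which is worth spelling out: `str.split` on an absent separator returns a one-element list, and the colon check only ran on lines already known to contain "Follow up:". A self-contained check in plain Python:

```python
# `split` on a missing separator returns [text], so the old "\n" guard was
# redundant:
text = "a single line with no newline"
assert text.split("\n")[-1] == text

# The ":" guard was unreachable: that branch only runs once we know
# "Follow up:" appears in last_line, and the marker itself contains a colon.
last_line = "Follow up: what is the capital of France?"
assert "Follow up:" in last_line and ":" in last_line
```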
diff --git a/langchain/chains/sql_database/base.py b/langchain/chains/sql_database/base.py
index 32bb1c9c..4a13f393 100644
--- a/langchain/chains/sql_database/base.py
+++ b/langchain/chains/sql_database/base.py
@@ -54,7 +54,7 @@ class SQLDatabaseChain(Chain, BaseModel):
     def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
         llm_chain = LLMChain(llm=self.llm, prompt=PROMPT)
         chained_input = ChainedInput(
-            inputs[self.input_key] + "\nSQLQuery:", verbose=self.verbose
+            f"{inputs[self.input_key]}\nSQLQuery:", verbose=self.verbose
         )
         llm_inputs = {
             "input": chained_input.input,
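These f-string conversions (here and in conversation/memory.py earlier) are pure refactors only if the rewritten literal reproduces the old concatenation byte for byte, with no stray characters slipping in around the placeholders. A quick check, using a hypothetical input value:

```python
# Hypothetical user input, for illustration only.
question = "How many employees are there?"
assert f"{question}\nSQLQuery:" == question + "\nSQLQuery:"
assert f"Human: {question}" == "Human: " + question
```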
diff --git a/langchain/model_laboratory.py b/langchain/model_laboratory.py
index 614bba34..e31d6654 100644
--- a/langchain/model_laboratory.py
+++ b/langchain/model_laboratory.py
@@ -1,4 +1,6 @@
 """Experiment with different models."""
+from __future__ import annotations
+
 from typing import List, Optional, Sequence, Union
 
 from langchain.agents.agent import Agent
@@ -49,7 +51,7 @@ class ModelLaboratory:
     @classmethod
     def from_llms(
         cls, llms: List[LLM], prompt: Optional[PromptTemplate] = None
-    ) -> "ModelLaboratory":
+    ) -> ModelLaboratory:
         """Initialize with LLMs to experiment with and optional prompt.
 
         Args:
diff --git a/langchain/prompts/example_selector/semantic_similarity.py b/langchain/prompts/example_selector/semantic_similarity.py
index a78ca12d..620e0dfa 100644
--- a/langchain/prompts/example_selector/semantic_similarity.py
+++ b/langchain/prompts/example_selector/semantic_similarity.py
@@ -1,4 +1,6 @@
 """Example selector that selects examples based on SemanticSimilarity."""
+from __future__ import annotations
+
 from typing import Any, Dict, List, Optional
 
 from pydantic import BaseModel, Extra
@@ -55,7 +57,7 @@ class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel):
         vectorstore_cls: VectorStore,
         k: int = 4,
         **vectorstore_cls_kwargs: Any,
-    ) -> "SemanticSimilarityExampleSelector":
+    ) -> SemanticSimilarityExampleSelector:
         """Create k-shot example selector using example list and embeddings.
 
         Reshuffles examples dynamically based on query similarity.
diff --git a/langchain/prompts/prompt.py b/langchain/prompts/prompt.py
index bb1d331f..4a24c2de 100644
--- a/langchain/prompts/prompt.py
+++ b/langchain/prompts/prompt.py
@@ -1,4 +1,6 @@
 """Prompt schema definition."""
+from __future__ import annotations
+
 from typing import Any, Dict, List
 
 from pydantic import BaseModel, Extra, root_validator
@@ -67,7 +69,7 @@ class PromptTemplate(BasePromptTemplate, BaseModel):
         input_variables: List[str],
         example_separator: str = "\n\n",
         prefix: str = "",
-    ) -> "PromptTemplate":
+    ) -> PromptTemplate:
         """Take examples in list format with prefix and suffix to create a prompt.
 
         Intended be used as a way to dynamically create a prompt from examples.
@@ -92,7 +94,7 @@ class PromptTemplate(BasePromptTemplate, BaseModel):
     @classmethod
     def from_file(
         cls, template_file: str, input_variables: List[str]
-    ) -> "PromptTemplate":
+    ) -> PromptTemplate:
         """Load a prompt from a file.
 
         Args:
diff --git a/langchain/sql_database.py b/langchain/sql_database.py
index 2afcebc1..56e76d6c 100644
--- a/langchain/sql_database.py
+++ b/langchain/sql_database.py
@@ -1,4 +1,6 @@
 """SQLAlchemy wrapper around a database."""
+from __future__ import annotations
+
 from typing import Any, Iterable, List, Optional
 
 from sqlalchemy import create_engine, inspect
@@ -37,7 +39,7 @@ class SQLDatabase:
         )
 
     @classmethod
-    def from_uri(cls, database_uri: str, **kwargs: Any) -> "SQLDatabase":
+    def from_uri(cls, database_uri: str, **kwargs: Any) -> SQLDatabase:
         """Construct a SQLAlchemy engine from URI."""
         return cls(create_engine(database_uri), **kwargs)
 
diff --git a/langchain/text_splitter.py b/langchain/text_splitter.py
index dbae51cf..c2da1745 100644
--- a/langchain/text_splitter.py
+++ b/langchain/text_splitter.py
@@ -1,4 +1,6 @@
 """Functionality for splitting text."""
+from __future__ import annotations
+
 from abc import ABC, abstractmethod
 from typing import Any, Callable, Iterable, List
 
@@ -46,9 +48,7 @@ class TextSplitter(ABC):
         return docs
 
     @classmethod
-    def from_huggingface_tokenizer(
-        cls, tokenizer: Any, **kwargs: Any
-    ) -> "TextSplitter":
+    def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
         """Text splitter than uses HuggingFace tokenizer to count length."""
         try:
             from transformers import PreTrainedTokenizerBase
diff --git a/langchain/vectorstores/base.py b/langchain/vectorstores/base.py
index 066c9a01..429f8246 100644
--- a/langchain/vectorstores/base.py
+++ b/langchain/vectorstores/base.py
@@ -1,4 +1,6 @@
 """Interface for vector stores."""
+from __future__ import annotations
+
 from abc import ABC, abstractmethod
 from typing import Any, Iterable, List, Optional
 
@@ -26,6 +28,6 @@ class VectorStore(ABC):
         texts: List[str],
         embedding: Embeddings,
         metadatas: Optional[List[dict]] = None,
-        **kwargs: Any
-    ) -> "VectorStore":
+        **kwargs: Any,
+    ) -> VectorStore:
         """Return VectorStore initialized from texts and embeddings."""
diff --git a/langchain/vectorstores/elastic_vector_search.py b/langchain/vectorstores/elastic_vector_search.py
index 8620c559..20703f57 100644
--- a/langchain/vectorstores/elastic_vector_search.py
+++ b/langchain/vectorstores/elastic_vector_search.py
@@ -1,4 +1,6 @@
 """Wrapper around Elasticsearch vector database."""
+from __future__ import annotations
+
 import uuid
 from typing import Any, Callable, Dict, Iterable, List, Optional
 
@@ -117,7 +119,7 @@ class ElasticVectorSearch(VectorStore):
         embedding: Embeddings,
         metadatas: Optional[List[dict]] = None,
         **kwargs: Any,
-    ) -> "ElasticVectorSearch":
+    ) -> ElasticVectorSearch:
         """Construct ElasticVectorSearch wrapper from raw documents.
 
         This is a user-friendly interface that:
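With the quotes gone, the factory signature reads exactly the way it is called. A hedged usage sketch of the `from_texts` pattern; the embedding class, the sample text, and the `elasticsearch_url` keyword are assumptions for illustration, and a running Elasticsearch instance is required:

```python
# Sketch only; assumes OpenAIEmbeddings is importable and Elasticsearch is
# reachable at the given URL (both outside this diff).
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch

store = ElasticVectorSearch.from_texts(
    ["harrison worked at kensho"],
    OpenAIEmbeddings(),
    elasticsearch_url="http://localhost:9200",
)
docs = store.similarity_search("Where did harrison work?")
```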
diff --git a/langchain/vectorstores/faiss.py b/langchain/vectorstores/faiss.py
index 7d26ad12..61ffdf53 100644
--- a/langchain/vectorstores/faiss.py
+++ b/langchain/vectorstores/faiss.py
@@ -1,4 +1,6 @@
 """Wrapper around FAISS vector database."""
+from __future__ import annotations
+
 import uuid
 from typing import Any, Callable, Dict, Iterable, List, Optional
 
@@ -96,7 +98,7 @@ class FAISS(VectorStore):
         embedding: Embeddings,
         metadatas: Optional[List[dict]] = None,
         **kwargs: Any,
-    ) -> "FAISS":
+    ) -> FAISS:
         """Construct FAISS wrapper from raw documents.
 
         This is a user friendly interface that:
diff --git a/tests/unit_tests/chains/test_sequential.py b/tests/unit_tests/chains/test_sequential.py
index aa83f2ac..f231a740 100644
--- a/tests/unit_tests/chains/test_sequential.py
+++ b/tests/unit_tests/chains/test_sequential.py
@@ -28,7 +28,7 @@ class FakeChain(Chain, BaseModel):
         outputs = {}
         for var in self.output_variables:
             variables = [inputs[k] for k in self.input_variables]
-            outputs[var] = " ".join(variables) + "foo"
+            outputs[var] = f"{' '.join(variables)}foo"
         return outputs
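The test rewrite follows the same rule as the earlier f-string hunks; the single quotes inside the expression are legal because they differ from the f-string's outer double quotes. A quick equivalence check:

```python
variables = ["bar", "baz"]
assert f"{' '.join(variables)}foo" == " ".join(variables) + "foo" == "bar bazfoo"
```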