Refactored `example_generator` (#8099)

Refactored `example_generator.py`; same issue as #7961.
`example_generator.py` lives in the root code folder. This creates the
`langchain.example_generator: Example Generator` group on the API
Reference navigation ToC, on the same level as `Chains` and `Agents`,
which is not correct.

Refactoring:
- moved the `example_generator.py` content into
`chains/example_generator.py` (not into `utils/`, because
`example_generator` depends on other LangChain classes; moving it into
`utilities/` does not work either)
- added a backwards-compatibility re-export in the original
`example_generator.py` (see the sketch after this list)
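A minimal sketch, not part of the PR, of what the backwards-compatibility re-export means for callers: the new canonical location is `langchain.chains.example_generator`, while the old root-level module keeps pointing at the same function, so existing imports keep working.

```python
# Both import paths resolve to the same function object after the refactor:
# the old root-level module simply re-exports from the new location.
from langchain.chains.example_generator import generate_example as from_new_path
from langchain.example_generator import generate_example as from_old_path

assert from_new_path is from_old_path  # existing callers are unaffected
```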

@hwchase17
Leonid Ganeline committed via GitHub 1 year ago: commit 3eb4112a1f (parent 1cc7d4c9eb)

@@ -26,6 +26,7 @@ from langchain.chains.conversational_retrieval.base import (
     ChatVectorDBChain,
     ConversationalRetrievalChain,
 )
+from langchain.chains.example_generator import generate_example
 from langchain.chains.flare.base import FlareChain
 from langchain.chains.graph_qa.base import GraphQAChain
 from langchain.chains.graph_qa.cypher import GraphCypherQAChain
@@ -84,9 +85,9 @@ __all__ = [
     "GraphCypherQAChain",
     "GraphQAChain",
     "GraphSparqlQAChain",
+    "HugeGraphQAChain",
     "HypotheticalDocumentEmbedder",
     "KuzuQAChain",
-    "HugeGraphQAChain",
     "LLMBashChain",
     "LLMChain",
     "LLMCheckerChain",
@@ -95,6 +96,8 @@ __all__ = [
     "LLMRouterChain",
     "LLMSummarizationCheckerChain",
     "MapReduceChain",
+    "MapReduceDocumentsChain",
+    "MapRerankDocumentsChain",
     "MultiPromptChain",
     "MultiRetrievalQAChain",
     "MultiRouteChain",
@@ -105,6 +108,8 @@ __all__ = [
     "PALChain",
     "QAGenerationChain",
     "QAWithSourcesChain",
+    "ReduceDocumentsChain",
+    "RefineDocumentsChain",
     "RetrievalQA",
     "RetrievalQAWithSourcesChain",
     "RouterChain",
@@ -112,20 +117,17 @@ __all__ = [
     "SQLDatabaseSequentialChain",
     "SequentialChain",
     "SimpleSequentialChain",
+    "StuffDocumentsChain",
     "TransformChain",
     "VectorDBQA",
     "VectorDBQAWithSourcesChain",
+    "create_citation_fuzzy_match_chain",
     "create_extraction_chain",
     "create_extraction_chain_pydantic",
+    "create_qa_with_sources_chain",
+    "create_qa_with_structure_chain",
     "create_tagging_chain",
     "create_tagging_chain_pydantic",
+    "generate_example",
     "load_chain",
-    "create_citation_fuzzy_match_chain",
-    "create_qa_with_structure_chain",
-    "create_qa_with_sources_chain",
-    "StuffDocumentsChain",
-    "MapRerankDocumentsChain",
-    "MapReduceDocumentsChain",
-    "RefineDocumentsChain",
-    "ReduceDocumentsChain",
 ]

@@ -0,0 +1,22 @@
+from typing import List
+
+from langchain.chains.llm import LLMChain
+from langchain.prompts.few_shot import FewShotPromptTemplate
+from langchain.prompts.prompt import PromptTemplate
+from langchain.schema.language_model import BaseLanguageModel
+
+TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
+
+
+def generate_example(
+    examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
+) -> str:
+    """Return another example given a list of examples for a prompt."""
+    prompt = FewShotPromptTemplate(
+        examples=examples,
+        suffix=TEST_GEN_TEMPLATE_SUFFIX,
+        input_variables=[],
+        example_prompt=prompt_template,
+    )
+    chain = LLMChain(llm=llm, prompt=prompt)
+    return chain.predict()
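For context, a short usage sketch of the relocated `generate_example` (not part of the diff). The example format and the `FakeListLLM` stand-in are assumptions made purely for illustration; any `BaseLanguageModel` works here.

```python
from langchain.chains.example_generator import generate_example
from langchain.llms.fake import FakeListLLM  # stand-in LLM, assumed for illustration only
from langchain.prompts.prompt import PromptTemplate

# Format each existing example dict is rendered with inside the few-shot prompt.
example_prompt = PromptTemplate(
    input_variables=["question", "answer"],
    template="Q: {question}\nA: {answer}",
)

examples = [
    {"question": "What is 2 + 2?", "answer": "4"},
    {"question": "What is the capital of France?", "answer": "Paris"},
]

# A real LLM would go here; FakeListLLM just returns the canned response.
llm = FakeListLLM(responses=["Q: What is 3 + 3?\nA: 6"])

new_example = generate_example(examples, llm, example_prompt)
print(new_example)
```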

@@ -1,23 +1,4 @@
-"""Utility functions for working with prompts."""
-from typing import List
-
-from langchain.chains.llm import LLMChain
-from langchain.prompts.few_shot import FewShotPromptTemplate
-from langchain.prompts.prompt import PromptTemplate
-from langchain.schema.language_model import BaseLanguageModel
-
-TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
-
-
-def generate_example(
-    examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
-) -> str:
-    """Return another example given a list of examples for a prompt."""
-    prompt = FewShotPromptTemplate(
-        examples=examples,
-        suffix=TEST_GEN_TEMPLATE_SUFFIX,
-        input_variables=[],
-        example_prompt=prompt_template,
-    )
-    chain = LLMChain(llm=llm, prompt=prompt)
-    return chain.predict()
+"""Keep here for backwards compatibility."""
+from langchain.chains.example_generator import generate_example
+
+__all__ = ["generate_example"]
