diff --git a/README.md b/README.md
index b1b87d3..172c65c 100644
--- a/README.md
+++ b/README.md
@@ -17,8 +17,8 @@ For example, if there is a file at `prompts/qa/stuff/basic/prompt.yaml`, the pat
 Once you have that path, you can load it in the following manner:
 
 ```python
-from langchain.prompts import load_from_hub
-prompt = load_from_hub("qa/stuff/basic/prompt.yaml")
+from langchain.prompts import load_prompt
+prompt = load_prompt('lc://prompts/qa/stuff/basic/prompt.yaml')
 ```
 
 ## Uploading
diff --git a/prompts/api/api_response/README.md b/prompts/api/api_response/README.md
index d903eb2..5053cdf 100644
--- a/prompts/api/api_response/README.md
+++ b/prompts/api/api_response/README.md
@@ -17,12 +17,12 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import APIChain
 
 llm = ...
 api_docs = ...
-prompt = load_from_hub('api/api_response/')
+prompt = load_prompt('lc://prompts/api/api_response/')
 chain = APIChain.from_llm_and_api_docs(llm, api_docs, api_response_prompt=prompt)
 ```
 
diff --git a/prompts/api/api_url/README.md b/prompts/api/api_url/README.md
index e04ad73..9a360a7 100644
--- a/prompts/api/api_url/README.md
+++ b/prompts/api/api_url/README.md
@@ -15,12 +15,12 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import APIChain
 
 llm = ...
 api_docs = ...
-prompt = load_from_hub('api/api_url/')
+prompt = load_prompt('lc://prompts/api/api_url/')
 chain = APIChain.from_llm_and_api_docs(llm, api_docs, api_url_prompt=prompt)
 ```
 
diff --git a/prompts/conversation/README.md b/prompts/conversation/README.md
index f641b8e..72c2d18 100644
--- a/prompts/conversation/README.md
+++ b/prompts/conversation/README.md
@@ -16,11 +16,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import ConversationChain
 
 llm = ...
-prompt = load_from_hub('conversation/')
+prompt = load_prompt('lc://prompts/conversation/')
 chain = ConversationChain(llm=llm, prompt=prompt)
 ```
 
diff --git a/prompts/hello-world/README.md b/prompts/hello-world/README.md
index 1cc9d61..4812feb 100644
--- a/prompts/hello-world/README.md
+++ b/prompts/hello-world/README.md
@@ -8,11 +8,11 @@ Basic prompt designed to be use as a test case, will just instruct the LLM to sa
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import LLMChain
 
 llm = ...
-prompt = load_from_hub('hello-world/')
+prompt = load_prompt('lc://prompts/hello-world/')
 chain = LLMChain(llm=llm, prompt=prompt)
 ```
 
diff --git a/prompts/llm_bash/README.md b/prompts/llm_bash/README.md
index 212e4d7..e383dda 100644
--- a/prompts/llm_bash/README.md
+++ b/prompts/llm_bash/README.md
@@ -14,11 +14,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import LLMBashChain
 
 llm = ...
-prompt = load_from_hub('llm_bash/')
+prompt = load_prompt('lc://prompts/llm_bash/')
 chain = LLMBashChain(llm=llm, prompt=prompt)
 ```
 
diff --git a/prompts/llm_math/README.md b/prompts/llm_math/README.md
index 692f0ee..ef539a7 100644
--- a/prompts/llm_math/README.md
+++ b/prompts/llm_math/README.md
@@ -15,11 +15,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import LLMMathChain
 
 llm = ...
-prompt = load_from_hub('llm_math/')
+prompt = load_prompt('lc://prompts/llm_math/')
 chain = LLMMathChain(llm=llm, prompt=prompt)
 ```
 
diff --git a/prompts/memory/summarize/README.md b/prompts/memory/summarize/README.md
index 2010a3e..5370e91 100644
--- a/prompts/memory/summarize/README.md
+++ b/prompts/memory/summarize/README.md
@@ -16,12 +16,12 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import ConversationChain
 from langchain.chains.conversation.memory import ConversationSummaryMemory
 
 llm = ...
-prompt = load_from_hub('memory/summarize/')
+prompt = load_prompt('lc://prompts/memory/summarize/')
 memory = ConversationSummaryMemory(llm=llm, prompt=prompt)
 chain = ConversationChain(llm=llm, memory=memory)
 ```
diff --git a/prompts/pal/README.md b/prompts/pal/README.md
index 475dcfc..1248f28 100644
--- a/prompts/pal/README.md
+++ b/prompts/pal/README.md
@@ -16,13 +16,13 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import PALChain
 
 llm = ...
 stop = ...
 get_answer_expr = ...
-prompt = load_from_hub('pal/')
+prompt = load_prompt('lc://prompts/pal/')
 chain = PALChain(llm=llm, prompt=prompt, stop=stop, get_answer_expr=get_answer_expr)
 ```
 
diff --git a/prompts/qa/map_reduce/question/README.md b/prompts/qa/map_reduce/question/README.md
index c0988f9..e246216 100644
--- a/prompts/qa/map_reduce/question/README.md
+++ b/prompts/qa/map_reduce/question/README.md
@@ -16,11 +16,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains.question_answering import load_qa_chain
 
 llm = ...
-prompt = load_from_hub('qa/map_reduce/question/')
+prompt = load_prompt('lc://prompts/qa/map_reduce/question/')
 chain = load_qa_chain(llm, chain_type="map_reduce", question_prompt=prompt)
 ```
 
diff --git a/prompts/qa/map_reduce/reduce/README.md b/prompts/qa/map_reduce/reduce/README.md
index a397f92..e4abac1 100644
--- a/prompts/qa/map_reduce/reduce/README.md
+++ b/prompts/qa/map_reduce/reduce/README.md
@@ -16,11 +16,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains.question_answering import load_qa_chain
 
 llm = ...
-prompt = load_from_hub('qa/map_reduce/reduce/')
+prompt = load_prompt('lc://prompts/qa/map_reduce/reduce/')
 chain = load_qa_chain(llm, chain_type="map_reduce", combine_prompt=prompt)
 ```
 
diff --git a/prompts/qa/refine/README.md b/prompts/qa/refine/README.md
index 0a05d15..01a8362 100644
--- a/prompts/qa/refine/README.md
+++ b/prompts/qa/refine/README.md
@@ -17,11 +17,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains.question_answering import load_qa_chain
 
 llm = ...
-prompt = load_from_hub('qa/refine/')
+prompt = load_prompt('lc://prompts/qa/refine/')
 chain = load_qa_chain(llm, chain_type="refine", refine_prompt=prompt)
 ```
 
diff --git a/prompts/qa/stuff/README.md b/prompts/qa/stuff/README.md
index 85205f7..469026b 100644
--- a/prompts/qa/stuff/README.md
+++ b/prompts/qa/stuff/README.md
@@ -16,11 +16,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains.question_answering import load_qa_chain
 
 llm = ...
-prompt = load_from_hub('qa/stuff/')
+prompt = load_prompt('lc://prompts/qa/stuff/')
 chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)
 ```
 
diff --git a/prompts/qa_with_sources/map_reduce/reduce/README.md b/prompts/qa_with_sources/map_reduce/reduce/README.md
index 4e286dd..2cf53ae 100644
--- a/prompts/qa_with_sources/map_reduce/reduce/README.md
+++ b/prompts/qa_with_sources/map_reduce/reduce/README.md
@@ -22,11 +22,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains.qa_with_sources import load_qa_with_sources_chain
 
 llm = ...
-prompt = load_from_hub('qa_with_sources/map_reduce/reduce/')
+prompt = load_prompt('lc://prompts/qa_with_sources/map_reduce/reduce/')
 chain = load_qa_with_sources_chain(llm, chain_type="map_reduce", combine_prompt=prompt)
 ```
 
diff --git a/prompts/qa_with_sources/refine/README.md b/prompts/qa_with_sources/refine/README.md
index 0a8a8b5..c77f6f1 100644
--- a/prompts/qa_with_sources/refine/README.md
+++ b/prompts/qa_with_sources/refine/README.md
@@ -16,11 +16,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains.qa_with_sources import load_qa_with_sources_chain
 
 llm = ...
-prompt = load_from_hub('qa_with_sources/refine/')
+prompt = load_prompt('lc://prompts/qa_with_sources/refine/')
 chain = load_qa_with_sources_chain(llm, chain_type="refine", refine_prompt=prompt)
 ```
 
diff --git a/prompts/qa_with_sources/stuff/README.md b/prompts/qa_with_sources/stuff/README.md
index a5cca26..7118580 100644
--- a/prompts/qa_with_sources/stuff/README.md
+++ b/prompts/qa_with_sources/stuff/README.md
@@ -16,11 +16,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains.qa_with_sources import load_qa_with_sources_chain
 
 llm = ...
-prompt = load_from_hub('qa_with_sources/stuff/')
+prompt = load_prompt('lc://prompts/qa_with_sources/stuff/')
 chain = load_qa_with_sources_chain(llm, chain_type="stuff", prompt=prompt)
 ```
 
diff --git a/prompts/sql_query/language_to_sql_output/README.md b/prompts/sql_query/language_to_sql_output/README.md
index 1d96bf6..788c760 100644
--- a/prompts/sql_query/language_to_sql_output/README.md
+++ b/prompts/sql_query/language_to_sql_output/README.md
@@ -18,12 +18,12 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import SQLDatabaseChain
 
 llm = ...
 database = ...
-prompt = load_from_hub('sql_query/language_to_sql_output/')
+prompt = load_prompt('lc://prompts/sql_query/language_to_sql_output/')
 chain = SQLDatabaseChain(llm=llm, database=database, prompt=prompt)
 ```
 
diff --git a/prompts/sql_query/relevant_tables/README.md b/prompts/sql_query/relevant_tables/README.md
index 030a148..d95bdd6 100644
--- a/prompts/sql_query/relevant_tables/README.md
+++ b/prompts/sql_query/relevant_tables/README.md
@@ -16,12 +16,12 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import SQLDatabaseSequentialChain
 
 llm = ...
 database = ...
-prompt = load_from_hub('sql_query/relevant_tables/')
+prompt = load_prompt('lc://prompts/sql_query/relevant_tables/')
 chain = SQLDatabaseSequentialChain.from_llm(llm, database, decider_prompt=prompt)
 ```
 
diff --git a/prompts/summarize/map_reduce/map/README.md b/prompts/summarize/map_reduce/map/README.md
index c7ac7e6..886ee16 100644
--- a/prompts/summarize/map_reduce/map/README.md
+++ b/prompts/summarize/map_reduce/map/README.md
@@ -15,11 +15,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains.summarize import load_summarize_chain
 
 llm = ...
-prompt = load_from_hub('summarize/map_reduce/map/')
+prompt = load_prompt('lc://prompts/summarize/map_reduce/map/')
 chain = load_summarize_chain(llm, chain_type="map_reduce", map_prompt=prompt)
 ```
 
diff --git a/prompts/summarize/refine/README.md b/prompts/summarize/refine/README.md
index 3968b63..bf48c53 100644
--- a/prompts/summarize/refine/README.md
+++ b/prompts/summarize/refine/README.md
@@ -16,11 +16,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains.summarize import load_summarize_chain
 
 llm = ...
-prompt = load_from_hub('summarize/refine/')
+prompt = load_prompt('lc://prompts/summarize/refine/')
 chain = load_summarize_chain(llm, chain_type="refine", refine_prompt=prompt)
 ```
 
diff --git a/prompts/summarize/stuff/README.md b/prompts/summarize/stuff/README.md
index 312e96e..8aa9968 100644
--- a/prompts/summarize/stuff/README.md
+++ b/prompts/summarize/stuff/README.md
@@ -14,11 +14,11 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains.summarize import load_summarize_chain
 
 llm = ...
-prompt = load_from_hub('summarize/stuff/')
+prompt = load_prompt('lc://prompts/summarize/stuff/')
 chain = load_summarize_chain(llm, chain_type="stuff", prompt=prompt)
 ```
 
diff --git a/prompts/vector_db_qa/README.md b/prompts/vector_db_qa/README.md
index 5c21ccd..eff3c79 100644
--- a/prompts/vector_db_qa/README.md
+++ b/prompts/vector_db_qa/README.md
@@ -16,12 +16,12 @@ This is a description of the inputs that the prompt expects.
 Below is a code snippet for how to use the prompt.
 
 ```python
-from langchain.prompts import load_from_hub
+from langchain.prompts import load_prompt
 from langchain.chains import VectorDBQA
 
 llm = ...
 vectorstore = ...
-prompt = load_from_hub('vector_db_qa/')
+prompt = load_prompt('lc://prompts/vector_db_qa/')
 chain = VectorDBQA.from_llm(llm, prompt=prompt, vectorstore=vectorstore)
 ```
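Every hunk above applies the same migration: the old `load_from_hub` helper is replaced by `load_prompt`, and the hub path is prefixed with `lc://prompts/`. A minimal end-to-end sketch of the new usage is below. The prompt path comes straight from the README example in the first hunk; the `context`/`question` input names are an assumption based on the usual stuff-QA prompt, so check `prompt.input_variables` for whatever you actually load.

```python
from langchain.prompts import load_prompt

# Load a prompt from the hub by its `lc://prompts/...` path
# (the path mirrors this repo's directory layout).
prompt = load_prompt("lc://prompts/qa/stuff/basic/prompt.yaml")

# Inspect the variables the prompt expects before formatting it.
print(prompt.input_variables)

# Assumed input names for the stuff-QA prompt; adjust them to match
# prompt.input_variables if they differ.
print(prompt.format(
    context="LangChain Hub collects prompts for use with LangChain chains.",
    question="What does LangChain Hub collect?",
))
```

The same loaded `prompt` object can be passed to any of the chains shown in the hunks above (for example `load_qa_chain(llm, chain_type="stuff", prompt=prompt)`).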