langchain[patch], experimental[patch]: replace langchain.schema imports (#15410)

Import from langchain_core instead.

Ran:
```bash
git grep -l 'from langchain.schema\.output_parser' | xargs -L 1 sed -i '' "s/from\ langchain\.schema\.output_parser/from\ langchain_core.output_parsers/g"
git grep -l 'from langchain.schema\.messages' | xargs -L 1 sed -i '' "s/from\ langchain\.schema\.messages/from\ langchain_core.messages/g"
git grep -l 'from langchain.schema\.document' | xargs -L 1 sed -i '' "s/from\ langchain\.schema\.document/from\ langchain_core.documents/g"
git grep -l 'from langchain.schema\.runnable' | xargs -L 1 sed -i '' "s/from\ langchain\.schema\.runnable/from\ langchain_core.runnables/g"
git grep -l 'from langchain.schema\.vectorstore' | xargs -L 1 sed -i '' "s/from\ langchain\.schema\.vectorstore/from\ langchain_core.vectorstores/g"
git grep -l 'from langchain.schema\.language_model' | xargs -L 1 sed -i '' "s/from\ langchain\.schema\.language_model/from\ langchain_core.language_models/g"
git grep -l 'from langchain.schema\.embeddings' | xargs -L 1 sed -i '' "s/from\ langchain\.schema\.embeddings/from\ langchain_core.embeddings/g"
git grep -l 'from langchain.schema\.storage' | xargs -L 1 sed -i '' "s/from\ langchain\.schema\.storage/from\ langchain_core.stores/g"
git checkout master libs/langchain/tests/unit_tests/schema/
make format
cd libs/experimental
make format
cd ../langchain
make format
```
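For reference, the resulting change in downstream code is a one-to-one swap of each `langchain.schema` submodule for its `langchain_core` counterpart. A minimal before/after sketch (module names come from the sed mappings above; the small usage example at the end is illustrative only and not part of this PR):

```python
# Before: deprecated langchain.schema submodule imports
# from langchain.schema.messages import HumanMessage
# from langchain.schema.output_parser import StrOutputParser
# from langchain.schema.runnable import RunnableLambda

# After: the same objects imported from langchain_core
from langchain_core.messages import HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda

# Illustrative usage: compose a passthrough lambda with a string output parser
chain = RunnableLambda(lambda msg: msg) | StrOutputParser()
print(chain.invoke(HumanMessage(content="hello")))  # -> "hello"
```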
Bagatur, committed by GitHub
parent a3d47b4f19
commit 8e0d5813c2

@@ -161,8 +161,8 @@
"from langchain.chat_models import ChatVertexAI\n",
"from langchain.llms import VertexAI\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain_core.messages import AIMessage\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda\n",
"\n",
"\n",
@@ -243,7 +243,7 @@
"import base64\n",
"import os\n",
"\n",
"from langchain.schema.messages import HumanMessage\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"\n",
"def encode_image(image_path):\n",
@@ -344,9 +344,9 @@
"\n",
"from langchain.embeddings import VertexAIEmbeddings\n",
"from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
"from langchain.schema.document import Document\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.vectorstores import Chroma\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def create_multi_vector_retriever(\n",
@@ -440,7 +440,7 @@
"import re\n",
"\n",
"from IPython.display import HTML, display\n",
"from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
"from PIL import Image\n",
"\n",
"\n",

@@ -151,7 +151,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.messages import HumanMessage, SystemMessage\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"\n",
"model = ChatGoogleGenerativeAI(model=\"gemini-pro\", convert_system_message_to_human=True)\n",
"model(\n",

@@ -415,7 +415,7 @@
],
"source": [
"from langchain.chat_models import ChatVertexAI\n",
"from langchain.schema.messages import HumanMessage\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"llm = ChatVertexAI(model_name=\"gemini-ultra-vision\")\n",
"\n",

@@ -49,7 +49,7 @@ print(llm.invoke("Come up with a pet name"))
```python
from langchain.chat_models import ChatCohere
from langchain.retrievers import CohereRagRetriever
from langchain.schema.document import Document
from langchain_core.documents import Document
rag = CohereRagRetriever(llm=ChatCohere())
print(rag.get_relevant_documents("What is cohere ai?"))
@@ -60,7 +60,7 @@ print(rag.get_relevant_documents("What is cohere ai?"))
```python
from langchain.chat_models import ChatCohere
from langchain.retrievers import CohereRagRetriever
from langchain.schema.document import Document
from langchain_core.documents import Document
rag = CohereRagRetriever(llm=ChatCohere())
print(rag.get_relevant_documents("What is cohere ai?"))

@@ -76,9 +76,9 @@
"source": [
"from langchain.embeddings import FakeEmbeddings\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n",
"from langchain.vectorstores import Vectara"
"from langchain.vectorstores import Vectara\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnableLambda, RunnablePassthrough"
]
},
{

@@ -36,9 +36,9 @@
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.retrievers import MultiVectorRetriever\n",
"from langchain.schema import Document\n",
"from langchain.schema.storage import BaseStore\n",
"from langchain.schema.vectorstore import VectorStore\n",
"from langchain.vectorstores import FAISS\n",
"from langchain_core.stores import BaseStore\n",
"from langchain_core.vectorstores import VectorStore\n",
"\n",
"\n",
"def load_fleet_retriever(\n",

@@ -58,10 +58,10 @@
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.llms import OpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain.text_splitter import CharacterTextSplitter\n",
"from langchain_community.vectorstores.jaguar import Jaguar\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"\"\"\" \n",
"Load a text file into a set of documents \n",

@@ -3556,7 +3556,7 @@ class RunnableBinding(RunnableBindingBase[Input, Output]):
.. code-block:: python
from langchain.schema.runnable import RunnableBinding
from langchain_core.runnables import RunnableBinding
runnable_binding = RunnableBinding(
bound=model,
kwargs={'stop': ['-']} # <-- Note the additional kwargs

@@ -2,7 +2,7 @@ from io import IOBase
from typing import Any, List, Optional, Union
from langchain.agents.agent import AgentExecutor
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.language_models import BaseLanguageModel
from langchain_experimental.agents.agent_toolkits.pandas.base import (
create_pandas_dataframe_agent,

@@ -8,9 +8,9 @@ from langchain.agents.types import AgentType
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import SystemMessage
from langchain.tools import BaseTool
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import SystemMessage
from langchain_experimental.agents.agent_toolkits.pandas.prompt import (
FUNCTIONS_WITH_DF,

@@ -8,8 +8,8 @@ from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.agents.types import AgentType
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import SystemMessage
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import SystemMessage
from langchain_experimental.agents.agent_toolkits.python.prompt import PREFIX
from langchain_experimental.tools.python.tool import PythonREPLTool

@@ -9,10 +9,10 @@ from langchain.schema import (
BaseChatMessageHistory,
Document,
)
from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
from langchain.schema.vectorstore import VectorStoreRetriever
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.vectorstores import VectorStoreRetriever
from langchain_experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,

@@ -1,7 +1,7 @@
from typing import Any, Dict, List
from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
from langchain.schema.vectorstore import VectorStoreRetriever
from langchain_core.vectorstores import VectorStoreRetriever
from langchain_experimental.pydantic_v1 import Field

@@ -4,9 +4,9 @@ from typing import Any, Callable, List, cast
from langchain.prompts.chat import (
BaseChatPromptTemplate,
)
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
from langchain.schema.vectorstore import VectorStoreRetriever
from langchain.tools.base import BaseTool
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_core.vectorstores import VectorStoreRetriever
from langchain_experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
from langchain_experimental.pydantic_v1 import BaseModel

@@ -4,8 +4,8 @@ from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.vectorstore import VectorStore
from langchain_core.language_models import BaseLanguageModel
from langchain_core.vectorstores import VectorStore
from langchain_experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,

@@ -1,6 +1,6 @@
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.language_models import BaseLanguageModel
class TaskCreationChain(LLMChain):

@@ -1,6 +1,6 @@
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.language_models import BaseLanguageModel
class TaskExecutionChain(LLMChain):

@@ -1,6 +1,6 @@
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.language_models import BaseLanguageModel
class TaskPrioritizationChain(LLMChain):

@@ -7,7 +7,7 @@ from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.language_models import BaseLanguageModel
from langchain_experimental.fallacy_removal.fallacies import FALLACIES
from langchain_experimental.fallacy_removal.models import LogicalFallacy

@@ -4,7 +4,7 @@ from typing import Any, Dict, List, Optional, Tuple
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.language_models import BaseLanguageModel
from langchain_experimental.generative_agents.memory import GenerativeAgentMemory
from langchain_experimental.pydantic_v1 import BaseModel, Field

@@ -7,8 +7,8 @@ from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import BaseMemory, Document
from langchain.schema.language_model import BaseLanguageModel
from langchain.utils import mock_now
from langchain_core.language_models import BaseLanguageModel
logger = logging.getLogger(__name__)

@@ -9,7 +9,7 @@ from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.schema import BasePromptTemplate, OutputParserException
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.language_models import BaseLanguageModel
from langchain_experimental.llm_bash.bash import BashProcess
from langchain_experimental.llm_bash.prompt import PROMPT

@@ -12,7 +12,7 @@ from langchain.schema import (
ChatGeneration,
ChatResult,
)
from langchain.schema.messages import (
from langchain_core.messages import (
AIMessage,
BaseMessage,
SystemMessage,

@@ -15,7 +15,7 @@ from langchain.schema import (
ChatGeneration,
ChatResult,
)
from langchain.schema.messages import (
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,

@@ -1,7 +1,7 @@
from typing import Any, Dict, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema.embeddings import Embeddings
from langchain_core.embeddings import Embeddings
class OpenCLIPEmbeddings(BaseModel, Embeddings):

@@ -13,8 +13,8 @@ from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.schema.language_model import BaseLanguageModel
from langchain.utilities import PythonREPL
from langchain_core.language_models import BaseLanguageModel
from langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain_experimental.pal_chain.math_prompt import MATH_PROMPT

@@ -2,8 +2,8 @@ from typing import List
from langchain.agents.agent import AgentExecutor
from langchain.agents.structured_chat.base import StructuredChatAgent
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools import BaseTool
from langchain_core.language_models import BaseLanguageModel
from langchain_experimental.plan_and_execute.executors.base import ChainExecutor

@@ -2,8 +2,8 @@ import re
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import SystemMessage
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import SystemMessage
from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
from langchain_experimental.plan_and_execute.schema import (

@@ -10,9 +10,9 @@ from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain.utilities.sql_database import SQLDatabase
from langchain_core.language_models import BaseLanguageModel
from langchain_experimental.pydantic_v1 import Extra, Field, root_validator

@@ -8,10 +8,10 @@ from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser, BasePromptTemplate
from langchain.schema.embeddings import Embeddings
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain.utilities.sql_database import SQLDatabase
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_experimental.sql.base import INTERMEDIATE_STEPS_KEY, SQLDatabaseChain

@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.language_models import BaseLanguageModel
from langchain_experimental.synthetic_data.prompts import SENTENCE_PROMPT

@@ -5,7 +5,7 @@ from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.language_models import BaseLanguageModel
class SyntheticDataGenerator(BaseModel):

@@ -13,6 +13,9 @@ from typing import (
cast,
)
from langchain_core.output_parsers import BaseGenerationOutputParser, BaseOutputParser
from langchain_core.runnables import Runnable
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.output_parsers.ernie_functions import (
@@ -23,8 +26,6 @@ from langchain.output_parsers.ernie_functions import (
from langchain.prompts import BasePromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema import BaseLLMOutputParser
from langchain.schema.output_parser import BaseGenerationOutputParser, BaseOutputParser
from langchain.schema.runnable import Runnable
from langchain.utils.ernie_functions import convert_pydantic_to_ernie_function
PYTHON_TO_JSON_TYPES = {

@@ -3,6 +3,10 @@ import json
from typing import Any, Dict, List, Optional, Type, Union
import jsonpatch
from langchain_core.output_parsers import (
BaseCumulativeTransformOutputParser,
BaseGenerationOutputParser,
)
from langchain.output_parsers.json import parse_partial_json
from langchain.pydantic_v1 import BaseModel, root_validator
@@ -11,10 +15,6 @@ from langchain.schema import (
Generation,
OutputParserException,
)
from langchain.schema.output_parser import (
BaseCumulativeTransformOutputParser,
BaseGenerationOutputParser,
)
class OutputFunctionsParser(BaseGenerationOutputParser[Any]):

@@ -5,11 +5,11 @@ from typing import Any, Dict, List, Optional
from langchain_core.agents import AgentAction, AgentStep
from langchain_core.language_models.llms import LLM
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.runnables.utils import add
from langchain_core.tools import Tool
from langchain.agents import AgentExecutor, AgentType, initialize_agent
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.schema.runnable.utils import add
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler

@@ -5,11 +5,11 @@ from typing import Any, Dict, List, Optional
from langchain_core.agents import AgentAction, AgentStep
from langchain_core.language_models.llms import LLM
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.runnables.utils import add
from langchain_core.tools import Tool
from langchain.agents import AgentExecutor, AgentType, initialize_agent
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.schema.runnable.utils import add
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler

@@ -2,7 +2,7 @@ import os
from langchain.chat_models import BedrockChat
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import ConfigurableField
from langchain_core.runnables import ConfigurableField
# For a description of each inference parameter, see
# https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-claude.html

@@ -1,6 +1,6 @@
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableBranch
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableBranch
from .blurb_matcher import book_rec_chain
from .chat import chat

@@ -9,7 +9,7 @@ from langchain.chat_models import ChatOpenAI
from langchain.llms.base import BaseLLM
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel, Field, ValidationError, validator
from langchain.schema.runnable import ConfigurableField, Runnable
from langchain_core.runnables import ConfigurableField, Runnable
def strip_python_markdown_tags(text: str) -> str:

@@ -9,10 +9,10 @@ import pypdfium2 as pdfium
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.schema.document import Document
from langchain.schema.messages import HumanMessage
from langchain.storage import LocalFileStore, UpstashRedisByteStore
from langchain.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage
from PIL import Image

@@ -7,12 +7,12 @@ from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.pydantic_v1 import BaseModel
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.schema.document import Document
from langchain.schema.messages import HumanMessage
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain.storage import LocalFileStore, UpstashRedisByteStore
from langchain.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from PIL import Image

@@ -8,10 +8,10 @@ from pathlib import Path
from langchain.chat_models import ChatOllama
from langchain.embeddings import OllamaEmbeddings
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.schema.document import Document
from langchain.schema.messages import HumanMessage
from langchain.storage import LocalFileStore
from langchain.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage
from PIL import Image

@@ -6,12 +6,12 @@ from langchain.chat_models import ChatOllama
from langchain.embeddings import OllamaEmbeddings
from langchain.pydantic_v1 import BaseModel
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.schema.document import Document
from langchain.schema.messages import HumanMessage
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain.storage import LocalFileStore
from langchain.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from PIL import Image

@@ -1,5 +1,5 @@
from langchain.pydantic_v1 import BaseModel
from langchain.schema.runnable import RunnablePassthrough
from langchain_core.runnables import RunnablePassthrough
from sql_research_assistant.search.web import chain as search_chain
from sql_research_assistant.writer import chain as writer_chain

@@ -4,9 +4,9 @@ from langchain.chat_models import ChatOllama, ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.utilities import SQLDatabase
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
# Add the LLM downloaded from Ollama
ollama_llm = "llama2"

@@ -5,15 +5,15 @@ import requests
from bs4 import BeautifulSoup
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.messages import SystemMessage
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import (
from langchain.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.messages import SystemMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
Runnable,
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
from langchain.utilities import DuckDuckGoSearchAPIWrapper
from sql_research_assistant.search.sql import sql_answer_chain

@@ -1,7 +1,7 @@
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import ConfigurableField
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import ConfigurableField
WRITER_SYSTEM_PROMPT = "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text." # noqa: E501
