Mirror of https://github.com/hwchase17/langchain (synced 2024-11-08 07:10:35 +00:00)
480626dc99 …tch]: import models from community

Ran:

```bash
git grep -l 'from langchain\.chat_models' | xargs -L 1 sed -i '' "s/from\ langchain\.chat_models/from\ langchain_community.chat_models/g"
git grep -l 'from langchain\.llms' | xargs -L 1 sed -i '' "s/from\ langchain\.llms/from\ langchain_community.llms/g"
git grep -l 'from langchain\.embeddings' | xargs -L 1 sed -i '' "s/from\ langchain\.embeddings/from\ langchain_community.embeddings/g"
git checkout master libs/langchain/tests/unit_tests/llms
git checkout master libs/langchain/tests/unit_tests/chat_models
git checkout master libs/langchain/tests/unit_tests/embeddings/test_imports.py
make format
cd libs/langchain; make format
cd ../experimental; make format
cd ../core; make format
```
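For illustration only (a hypothetical before/after, not part of the commit), the sed rewrites above change model imports like these:

```python
# Before: model classes imported from the monolithic langchain package
# from langchain.chat_models import ChatOpenAI
# from langchain.llms import OpenAI
# from langchain.embeddings import OpenAIEmbeddings

# After the rewrite: the same classes now come from langchain_community
from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms import OpenAI
from langchain_community.embeddings import OpenAIEmbeddings
```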
39 lines · 1.1 KiB · Python
```python
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_xml
from langchain.tools import DuckDuckGoSearchRun
from langchain.tools.render import render_text_description
from langchain_community.llms import OpenAI
from langchain_core.pydantic_v1 import BaseModel

from solo_performance_prompting_agent.parser import parse_output
from solo_performance_prompting_agent.prompts import conversational_prompt

# LLM and tools backing the agent.
_model = OpenAI()
_tools = [DuckDuckGoSearchRun()]

# Render the tool descriptions and names into the conversational prompt.
_prompt = conversational_prompt.partial(
    tools=render_text_description(_tools),
    tool_names=", ".join([t.name for t in _tools]),
)

# Stop generation at the closing XML tags the agent emits.
_llm_with_stop = _model.bind(stop=["</tool_input>", "</final_answer>"])

# LCEL pipeline: map the input, format intermediate steps as an XML scratchpad,
# prompt the model, and parse its XML output.
agent = (
    {
        "question": lambda x: x["question"],
        "agent_scratchpad": lambda x: format_xml(x["intermediate_steps"]),
    }
    | _prompt
    | _llm_with_stop
    | parse_output
)


class AgentInput(BaseModel):
    question: str


agent_executor = AgentExecutor(
    agent=agent, tools=_tools, verbose=True, handle_parsing_errors=True
).with_types(input_type=AgentInput)

# Expose only the final answer string rather than the full output dict.
agent_executor = agent_executor | (lambda x: x["output"])
```
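A minimal usage sketch (not part of the file), assuming the solo_performance_prompting_agent package is importable and OPENAI_API_KEY is set:

```python
# Hypothetical invocation of the exported chain defined above.
# AgentExecutor.invoke returns a dict; the trailing lambda extracts x["output"],
# so the chain yields the final answer string directly.
answer = agent_executor.invoke({"question": "What is the tallest mountain in Europe?"})
print(answer)
```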