```python
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.chains import LLMChain
from langchain.schema import BaseOutputParser


class CommaSeparatedListOutputParser(BaseOutputParser):
    """Parse the output of an LLM call to a comma-separated list."""

    def parse(self, text: str):
        """Parse the output of an LLM call."""
        return text.strip().split(", ")


template = """You are a helpful assistant who generates comma separated lists.
A user will pass in a category, and you should generate 5 objects in that category in a comma separated list.
ONLY return a comma separated list, and nothing more."""
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chain = LLMChain(
    llm=ChatOpenAI(),
    prompt=chat_prompt,
    output_parser=CommaSeparatedListOutputParser()
)
chain.run("colors")
# >> ['red', 'blue', 'green', 'yellow', 'orange']
```
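As a quick sanity check (a minimal sketch, not part of the original snippet), the custom parser can be exercised on its own without any API call, since `parse` only splits the returned text on `", "`:

```python
# Hypothetical standalone check: no LLM needed to verify the parsing logic.
parser = CommaSeparatedListOutputParser()
print(parser.parse("red, blue, green, yellow, orange"))
# >> ['red', 'blue', 'green', 'yellow', 'orange']
```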