mirror of
https://github.com/hwchase17/langchain
synced 2024-11-11 19:11:02 +00:00
b2fd41331e
Added missed docstrings. Fixed inconsistency in docstrings. **Note** CC @efriis There were PR errors on `langchain_experimental/prompt_injection_identifier/hugging_face_identifier.py` But, I didn't touch this file in this PR! Can it be some cache problems? I fixed this error.
32 lines
967 B
Python
32 lines
967 B
Python
from typing import List
|
|
|
|
from langchain_core.messages import (
|
|
AIMessage,
|
|
BaseMessage,
|
|
ChatMessage,
|
|
HumanMessage,
|
|
SystemMessage,
|
|
)
|
|
|
|
|
|
def _convert_one_message_to_text_llama(message: BaseMessage) -> str:
    """Render a single message as Llama-2 chat prompt text.

    Maps each message type onto the Llama-2 chat template markers:
    human turns are wrapped in ``[INST] ... [/INST]``, system messages in
    ``<<SYS>> ... <</SYS>>``, AI turns are emitted verbatim, and generic
    chat messages are prefixed with their capitalized role.

    Raises:
        ValueError: If the message is not one of the recognized types.
    """
    # Guard-clause dispatch on message type; each branch returns directly.
    if isinstance(message, ChatMessage):
        return f"\n\n{message.role.capitalize()}: {message.content}"
    if isinstance(message, HumanMessage):
        return f"[INST] {message.content} [/INST]"
    if isinstance(message, AIMessage):
        return f"{message.content}"
    if isinstance(message, SystemMessage):
        return f"<<SYS>> {message.content} <</SYS>>"
    raise ValueError(f"Got unknown type {message}")
|
|
|
|
|
|
def convert_messages_to_prompt_llama(messages: List[BaseMessage]) -> str:
    """Convert a list of messages to a prompt for llama.

    Each message is rendered individually via
    ``_convert_one_message_to_text_llama`` and the pieces are joined with
    newlines into a single prompt string.
    """
    # Lazily render each message; join consumes the generator.
    rendered = (_convert_one_message_to_text_llama(msg) for msg in messages)
    return "\n".join(rendered)