"""Hugging Face Chat Wrapper."""

from typing import Any, AsyncIterator, Iterator, List, Optional

from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import (
    BaseChatModel,
    agenerate_from_stream,
    generate_from_stream,
)
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    HumanMessage,
    SystemMessage,
)
from langchain_core.outputs import (
    ChatGeneration,
    ChatGenerationChunk,
    ChatResult,
    LLMResult,
)
from langchain_core.pydantic_v1 import root_validator

from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
from langchain_community.llms.huggingface_hub import HuggingFaceHub
from langchain_community.llms.huggingface_text_gen_inference import (
    HuggingFaceTextGenInference,
)

DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful, and honest assistant."""


@deprecated(
    since="0.0.37",
    removal="0.3",
    alternative_import="langchain_huggingface.ChatHuggingFace",
)
class ChatHuggingFace(BaseChatModel):
    """
    Wrapper for using Hugging Face LLMs as ChatModels.

    Works with `HuggingFaceTextGenInference`, `HuggingFaceEndpoint`,
    and `HuggingFaceHub` LLMs.

    Upon instantiating this class, the model_id is resolved from the url
    provided to the LLM, and the appropriate tokenizer is loaded from
    the HuggingFace Hub.

    Adapted from: https://python.langchain.com/docs/integrations/chat/llama2_chat
    """
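
    # A minimal usage sketch (illustrative only; the repo_id below is a placeholder
    # example model, not something this module ships or requires):
    #
    #     from langchain_community.llms import HuggingFaceEndpoint
    #     from langchain_core.messages import HumanMessage
    #
    #     llm = HuggingFaceEndpoint(
    #         repo_id="HuggingFaceH4/zephyr-7b-beta", task="text-generation"
    #     )
    #     chat = ChatHuggingFace(llm=llm)
    #     chat.invoke([HumanMessage(content="What is the capital of France?")])
    #
    # Note that instantiation needs network access: the tokenizer for the resolved
    # model_id is downloaded from the Hugging Face Hub.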

    llm: Any
    """LLM, must be of type HuggingFaceTextGenInference, HuggingFaceEndpoint, or
        HuggingFaceHub."""
    system_message: SystemMessage = SystemMessage(content=DEFAULT_SYSTEM_PROMPT)
    tokenizer: Any = None
    model_id: Optional[str] = None
    streaming: bool = False

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)

        from transformers import AutoTokenizer

        self._resolve_model_id()

        self.tokenizer = (
            AutoTokenizer.from_pretrained(self.model_id)
            if self.tokenizer is None
            else self.tokenizer
        )

    @root_validator()
    def validate_llm(cls, values: dict) -> dict:
        if not isinstance(
            values["llm"],
            (HuggingFaceTextGenInference, HuggingFaceEndpoint, HuggingFaceHub),
        ):
            raise TypeError(
                "Expected llm to be one of HuggingFaceTextGenInference, "
                f"HuggingFaceEndpoint, HuggingFaceHub, received {type(values['llm'])}"
            )
        return values

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        request = self._to_chat_prompt(messages)

        for data in self.llm.stream(request, **kwargs):
            delta = data
            chunk = ChatGenerationChunk(message=AIMessageChunk(content=delta))
            if run_manager:
                run_manager.on_llm_new_token(delta, chunk=chunk)
            yield chunk

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        request = self._to_chat_prompt(messages)
        async for data in self.llm.astream(request, **kwargs):
            delta = data
            chunk = ChatGenerationChunk(message=AIMessageChunk(content=delta))
            if run_manager:
                await run_manager.on_llm_new_token(delta, chunk=chunk)
            yield chunk

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        if self.streaming:
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)

        llm_input = self._to_chat_prompt(messages)
        llm_result = self.llm._generate(
            prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
        )
        return self._to_chat_result(llm_result)
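
    # With `streaming=True`, `_generate` delegates to `_stream` and the final message
    # is assembled via `generate_from_stream`. A hedged usage sketch (placeholder llm):
    #
    #     chat = ChatHuggingFace(llm=llm, streaming=True)
    #     for chunk in chat.stream([HumanMessage(content="Hi")]):
    #         print(chunk.content, end="", flush=True)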

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        if self.streaming:
            stream_iter = self._astream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return await agenerate_from_stream(stream_iter)

        llm_input = self._to_chat_prompt(messages)
        llm_result = await self.llm._agenerate(
            prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs
        )
        return self._to_chat_result(llm_result)

    def _to_chat_prompt(
        self,
        messages: List[BaseMessage],
    ) -> str:
        """Convert a list of messages into a prompt format expected by wrapped LLM."""
        if not messages:
            raise ValueError("At least one HumanMessage must be provided!")

        if not isinstance(messages[-1], HumanMessage):
            raise ValueError("Last message must be a HumanMessage!")

        messages_dicts = [self._to_chatml_format(m) for m in messages]

        return self.tokenizer.apply_chat_template(
            messages_dicts, tokenize=False, add_generation_prompt=True
        )
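
    # For illustration only: with a Zephyr-style chat template, a SystemMessage
    # followed by a HumanMessage would be rendered roughly as below; the exact text
    # depends entirely on the tokenizer's chat template for the resolved model_id.
    #
    #     <|system|>
    #     You are a helpful, respectful, and honest assistant.</s>
    #     <|user|>
    #     What is the capital of France?</s>
    #     <|assistant|>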

    def _to_chatml_format(self, message: BaseMessage) -> dict:
        """Convert LangChain message to ChatML format."""

        if isinstance(message, SystemMessage):
            role = "system"
        elif isinstance(message, AIMessage):
            role = "assistant"
        elif isinstance(message, HumanMessage):
            role = "user"
        else:
            raise ValueError(f"Unknown message type: {type(message)}")

        return {"role": role, "content": message.content}
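
    # Example: HumanMessage(content="Hello") becomes {"role": "user", "content": "Hello"};
    # SystemMessage and AIMessage map to the "system" and "assistant" roles respectively.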

    @staticmethod
    def _to_chat_result(llm_result: LLMResult) -> ChatResult:
        chat_generations = []

        for g in llm_result.generations[0]:
            chat_generation = ChatGeneration(
                message=AIMessage(content=g.text), generation_info=g.generation_info
            )
            chat_generations.append(chat_generation)

        return ChatResult(
            generations=chat_generations, llm_output=llm_result.llm_output
        )

    def _resolve_model_id(self) -> None:
        """Resolve the model_id from the LLM's inference_server_url."""

        from huggingface_hub import list_inference_endpoints

        available_endpoints = list_inference_endpoints("*")
        if isinstance(self.llm, HuggingFaceHub) or (
            hasattr(self.llm, "repo_id") and self.llm.repo_id
        ):
            self.model_id = self.llm.repo_id
            return
        elif isinstance(self.llm, HuggingFaceTextGenInference):
            endpoint_url: Optional[str] = self.llm.inference_server_url
        else:
            endpoint_url = self.llm.endpoint_url

        for endpoint in available_endpoints:
            if endpoint.url == endpoint_url:
                self.model_id = endpoint.repository

        if not self.model_id:
            raise ValueError(
                "Failed to resolve model_id: "
                f"could not find model id for inference server {endpoint_url}. "
                "Make sure that your Hugging Face token has access to the endpoint."
            )
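
    # Resolution summary: LLMs that expose a truthy `repo_id` (e.g. HuggingFaceHub,
    # or a HuggingFaceEndpoint built from a repo) use it directly as model_id;
    # otherwise the LLM's endpoint URL is matched against the dedicated Inference
    # Endpoints returned by `list_inference_endpoints("*")`, which requires a
    # Hugging Face token with access to those endpoints.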

    @property
    def _llm_type(self) -> str:
        return "huggingface-chat-wrapper"