community[patch]: fix structured_output in llamacpp integration (#27202)

Resolves https://github.com/langchain-ai/langchain/issues/25318.
This commit is contained in:
ccurme 2024-10-08 15:16:59 -04:00 committed by GitHub
parent c3cb56a9e8
commit e3920f2320
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 22 additions and 2 deletions

View File

@ -342,7 +342,7 @@ class ChatLlamaCpp(BaseChatModel):
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
*,
tool_choice: Optional[Union[Dict[str, Dict], bool, str]] = None,
tool_choice: Optional[Union[dict, bool, str]] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model
@ -538,7 +538,8 @@ class ChatLlamaCpp(BaseChatModel):
"Received None."
)
tool_name = convert_to_openai_tool(schema)["function"]["name"]
llm = self.bind_tools([schema], tool_choice=tool_name)
tool_choice = {"type": "function", "function": {"name": tool_name}}
llm = self.bind_tools([schema], tool_choice=tool_choice)
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(
tools=[cast(Type, schema)], first_tool_only=True

View File

@ -0,0 +1,19 @@
from pydantic import BaseModel, Field
from langchain_community.chat_models import ChatLlamaCpp
class Joke(BaseModel):
    """Joke to tell user."""

    # NOTE(review): the class docstring above is deliberately left unchanged —
    # pydantic surfaces it as the JSON-schema "description", which is sent to
    # the model via convert_to_openai_tool, so editing it changes behavior.
    # The per-field descriptions below likewise become part of the tool schema
    # and guide what the model puts into each attribute.
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")
# TODO: replace with standard integration tests
# See example in tests/integration_tests/chat_models/test_litellm.py
def test_structured_output() -> None:
    """Smoke-test with_structured_output: the raw model response must be
    parsed back into a ``Joke`` instance (regression for tool_choice handling).

    Requires a local GGUF model file at the hard-coded path; see the TODO
    above about migrating to the standard integration-test suite.
    """
    model = ChatLlamaCpp(model_path="/path/to/Meta-Llama-3.1-8B-Instruct.Q4_K_M.gguf")
    joke_chain = model.with_structured_output(Joke)
    joke = joke_chain.invoke("Tell me a short joke about cats.")
    assert isinstance(joke, Joke)