2023-12-11 21:53:30 +00:00
|
|
|
from typing import Any, Dict, List, Optional
|
|
|
|
|
|
|
|
from langchain_core.embeddings import Embeddings
|
|
|
|
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
|
|
|
|
|
|
|
|
|
|
|
|
class LlamaCppEmbeddings(BaseModel, Embeddings):
    """llama.cpp embedding models.

    To use, you should have the llama-cpp-python library installed, and provide the
    path to the Llama model as a named parameter to the constructor.
    Check out: https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            from langchain_community.embeddings import LlamaCppEmbeddings

            llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
    """

    client: Any  #: :meta private:
    model_path: str

    n_ctx: int = Field(512, alias="n_ctx")
    """Token context window."""

    n_parts: int = Field(-1, alias="n_parts")
    """Number of parts to split the model into.
    If -1, the number of parts is automatically determined."""

    seed: int = Field(-1, alias="seed")
    """Seed. If -1, a random seed is used."""

    f16_kv: bool = Field(False, alias="f16_kv")
    """Use half-precision for key/value cache."""

    logits_all: bool = Field(False, alias="logits_all")
    """Return logits for all tokens, not just the last token."""

    vocab_only: bool = Field(False, alias="vocab_only")
    """Only load the vocabulary, no weights."""

    use_mlock: bool = Field(False, alias="use_mlock")
    """Force system to keep model in RAM."""

    n_threads: Optional[int] = Field(None, alias="n_threads")
    """Number of threads to use. If None, the number
    of threads is automatically determined."""

    n_batch: Optional[int] = Field(512, alias="n_batch")
    """Number of tokens to process in parallel.
    Should be a number between 1 and n_ctx.

    NOTE: 512 matches llama-cpp-python's own default; for embedding models
    n_batch must stay at least as large as the longest tokenized input, or
    llama-cpp-python crashes with "ValueError: NULL pointer access"
    (see llama-cpp-python issue #1189). Lower it with care."""

    n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
    """Number of layers to be loaded into gpu memory. Default None."""

    verbose: bool = Field(True, alias="verbose")
    """Print verbose output to stderr."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that llama-cpp-python library is installed and load the model.

        Builds the llama.cpp ``Llama`` client (with ``embedding=True``) from the
        validated field values and stores it in ``values["client"]``.

        Raises:
            ModuleNotFoundError: If llama-cpp-python is not installed.
            ValueError: If the model cannot be loaded from ``model_path``.
        """
        model_path = values["model_path"]
        model_param_names = [
            "n_ctx",
            "n_parts",
            "seed",
            "f16_kv",
            "logits_all",
            "vocab_only",
            "use_mlock",
            "n_threads",
            "n_batch",
            "verbose",
        ]
        model_params = {k: values[k] for k in model_param_names}
        # For backwards compatibility, only include if non-null.
        if values["n_gpu_layers"] is not None:
            model_params["n_gpu_layers"] = values["n_gpu_layers"]

        try:
            from llama_cpp import Llama

            values["client"] = Llama(model_path, embedding=True, **model_params)
        except ImportError as e:
            # Chain the original ImportError so the real failure (e.g. a broken
            # native wheel) is preserved in the traceback.
            raise ModuleNotFoundError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            ) from e
        except Exception as e:
            raise ValueError(
                f"Could not load Llama model from path: {model_path}. "
                f"Received error {e}"
            ) from e

        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using the Llama model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = [self.client.embed(text) for text in texts]
        return [list(map(float, e)) for e in embeddings]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using the Llama model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(text)
        return list(map(float, embedding))