Mirror of https://github.com/nomic-ai/gpt4all
llamamodel: fix macOS build (#2125)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
commit 53f109f519
parent 667f29c2a1
@@ -791,14 +791,16 @@ void LLamaModel::embedInternal(
     }
 
     // split into max_len-sized chunks
-    struct split_batch { int idx; TokenString batch; };
+    struct split_batch { unsigned idx; TokenString batch; };
     std::vector<split_batch> batches;
     for (unsigned i = 0; i < inputs.size(); i++) {
         auto &input = inputs[i];
         for (auto it = input.begin(); it < input.end(); it += max_len) {
             if (it > input.begin()) { it -= chunkOverlap; }
             auto end = std::min(it + max_len, input.end());
-            auto &batch = batches.emplace_back(i, prefixTokens).batch;
+            batches.push_back({ i, {} });
+            auto &batch = batches.back().batch;
+            batch = prefixTokens;
             batch.insert(batch.end(), it, end);
             batch.push_back(eos_token);
             if (!doMean) { break; /* limit text to one chunk */ }
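Why this fixes the macOS build (my reading; the commit message does not say): split_batch is an aggregate with no user-declared constructor, so batches.emplace_back(i, prefixTokens) requires C++20 parenthesized aggregate initialization (P0960), which older AppleClang releases do not implement. push_back with a braced initializer is ordinary aggregate initialization and compiles everywhere. The int -> unsigned change on idx also matches the type of the loop counter i, presumably to quiet a sign-conversion diagnostic. A minimal standalone sketch of the difference, with TokenString stubbed as std::vector<int> for illustration:

    #include <vector>

    using TokenString = std::vector<int>; // stand-in for the real token type

    struct split_batch { unsigned idx; TokenString batch; }; // aggregate, no ctor

    int main() {
        std::vector<split_batch> batches;
        TokenString prefixTokens { 1, 2, 3 };

        // Fails on compilers without C++20 P0960 (e.g. older AppleClang):
        // "no matching constructor for initialization of 'split_batch'"
        //batches.emplace_back(0u, prefixTokens);

        // The portable form used by the fix: brace-init an element, then fill it.
        batches.push_back({ 0u, {} });
        auto &batch = batches.back().batch;
        batch = prefixTokens;
    }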
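For context, the loop around the changed lines windows each input into chunks of at most max_len tokens, with chunkOverlap tokens shared between consecutive chunks, so the effective stride after the first chunk is max_len - chunkOverlap. A standalone sketch of the same iterator arithmetic (names reused from the diff, sizes hypothetical):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> input(10);                         // 10 tokens; values don't matter
        const std::ptrdiff_t max_len = 4, chunkOverlap = 1; // hypothetical sizes

        for (auto it = input.begin(); it < input.end(); it += max_len) {
            if (it > input.begin()) { it -= chunkOverlap; } // re-include the overlap
            auto end = std::min(it + max_len, input.end());
            std::printf("chunk [%td, %td)\n", it - input.begin(), end - input.begin());
        }
        // Prints: chunk [0, 4), chunk [3, 7), chunk [6, 10)
    }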