Mirror of https://github.com/hwchase17/langchain (synced 2024-11-08 07:10:35 +00:00)

commit ed2ef5cbe4 (parent 6be5d7c612)
Harrison/rwkv utf8 (#2867)
Co-authored-by: Akihiro <ueyama0105@gmail.com>
@@ -3,7 +3,7 @@
 Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py
          https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py
 """
-from typing import Any, Dict, List, Mapping, Optional, Set, SupportsIndex
+from typing import Any, Dict, List, Mapping, Optional, Set
 
 from pydantic import BaseModel, Extra, root_validator
 
@@ -58,7 +58,7 @@ class RWKV(LLM, BaseModel):
     CHUNK_LEN: int = 256
     """Batch size for prompt processing."""
 
-    max_tokens_per_generation: SupportsIndex = 256
+    max_tokens_per_generation: int = 256
     """Maximum number of tokens to generate."""
 
     client: Any = None  #: :meta private:
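
A note on the hunk above: typing.SupportsIndex is a structural Protocol rather than a concrete numeric type. The field only ever holds a plain int, and int is something pydantic can validate and coerce cleanly, so the concrete type is the better annotation (which is also why the import goes away in the first hunk). A minimal sketch of that behavior, assuming a hypothetical Params model with the same pydantic v1-style import this file uses:

    from pydantic import BaseModel

    class Params(BaseModel):
        # Concrete int: pydantic validates and coerces the value as expected.
        max_tokens_per_generation: int = 256

    p = Params(max_tokens_per_generation="512")
    print(p.max_tokens_per_generation, type(p.max_tokens_per_generation))  # 512 <class 'int'>
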
@@ -69,6 +69,8 @@ class RWKV(LLM, BaseModel):
 
+    model_tokens: Any = None  #: :meta private:
 
+    model_state: Any = None  #: :meta private:
 
     class Config:
         """Configuration for this pydantic object."""
 
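
The two added fields cache the running token list and the recurrent model state between calls, which is what lets the rewritten generation loop below feed the prompt once and then advance one token at a time. A minimal sketch of threading state through chunked forward calls, assuming a forward(tokens, state) -> (logits, new_state) interface like the self.client.forward calls in the next hunk; the toy forward below is hypothetical:

    from typing import Any, List, Optional, Tuple

    CHUNK_LEN = 256  # prompt batch size, as in the class above

    def forward(tokens: List[int], state: Optional[Any]) -> Tuple[List[float], Any]:
        # Hypothetical stand-in for self.client.forward: consume one batch of
        # tokens, return dummy logits plus the updated recurrent state.
        new_state = (state or []) + tokens
        return [0.0] * 8, new_state

    def feed_prompt(tokens: List[int]) -> Tuple[Any, Any]:
        out, state = None, None
        # Thread the state through chunk after chunk; only the final logits matter.
        while len(tokens) > 0:
            out, state = forward(tokens[:CHUNK_LEN], state)
            tokens = tokens[CHUNK_LEN:]
        return out, state

    logits, state = feed_prompt(list(range(1000)))  # processed as 4 chunks
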
@@ -139,42 +141,66 @@ class RWKV(LLM, BaseModel):
         """Return the type of llm."""
         return "rwkv-4"
 
-    def rwkv_generate(self, prompt: str) -> str:
-        tokens = self.tokenizer.encode(prompt).ids
+    def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any:
+        AVOID_REPEAT_TOKENS = []
+        AVOID_REPEAT = "，：？！"
+        for i in AVOID_REPEAT:
+            dd = self.pipeline.encode(i)
+            assert len(dd) == 1
+            AVOID_REPEAT_TOKENS += dd
 
-        logits = None
-        state = None
+        tokens = [int(x) for x in _tokens]
+        self.model_tokens += tokens
 
-        occurrence = {}
+        out: Any = None
 
         # Feed in the input string
         while len(tokens) > 0:
-            logits, state = self.client.forward(tokens[: self.CHUNK_LEN], state)
+            out, self.model_state = self.client.forward(
+                tokens[: self.CHUNK_LEN], self.model_state
+            )
             tokens = tokens[self.CHUNK_LEN :]
+        END_OF_LINE = 187
+        out[END_OF_LINE] += newline_adj  # adjust \n probability
+
+        if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
+            out[self.model_tokens[-1]] = -999999999
+        return out
+
+    def rwkv_generate(self, prompt: str) -> str:
+        self.model_state = None
+        self.model_tokens = []
+        logits = self.run_rnn(self.tokenizer.encode(prompt).ids)
+        begin = len(self.model_tokens)
+        out_last = begin
+
+        occurrence: Dict = {}
 
         decoded = ""
         for i in range(self.max_tokens_per_generation):
-            token = self.pipeline.sample_logits(
-                logits, temperature=self.temperature, top_p=self.top_p
-            )
-
-            if token not in occurrence:
-                occurrence[token] = 1
-            else:
-                occurrence[token] += 1
-
-            decoded += self.tokenizer.decode([token])
-
-            if "\n" in decoded:
-                break
-
-            # feed back in
-            logits, state = self.client.forward([token], state)
             for n in occurrence:
                 logits[n] -= (
                     self.penalty_alpha_presence
                     + occurrence[n] * self.penalty_alpha_frequency
                 )
+            token = self.pipeline.sample_logits(
+                logits, temperature=self.temperature, top_p=self.top_p
+            )
+
+            END_OF_TEXT = 0
+            if token == END_OF_TEXT:
+                break
+            if token not in occurrence:
+                occurrence[token] = 1
+            else:
+                occurrence[token] += 1
+
+            logits = self.run_rnn([token])
+            xxx = self.tokenizer.decode(self.model_tokens[out_last:])
+            if "\ufffd" not in xxx:  # avoid utf-8 display issues
+                decoded += xxx
+                out_last = begin + i + 1
+                if i >= self.max_tokens_per_generation - 100:
+                    break
 
         return decoded
 
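
This hunk is the heart of the UTF-8 fix. The old loop decoded every sampled token immediately, so a multi-byte UTF-8 character split across several tokens surfaced as U+FFFD replacement characters. The new loop decodes the whole tail since the last flush and emits it only once it contains no "\ufffd". A self-contained sketch of that buffering trick; the byte-level decode below is a hypothetical stand-in for the real RWKV tokenizer:

    from typing import List

    def decode(token_ids: List[int]) -> str:
        # Stand-in decoder: treat token ids as raw UTF-8 bytes.
        return bytes(token_ids).decode("utf-8", errors="replace")

    # "日本" is six UTF-8 bytes; pretend the model emits one byte-token per step.
    stream = list("日本".encode("utf-8"))

    naive = "".join(decode([t]) for t in stream)  # per-token decoding
    buffered, out_last = "", 0
    for i in range(len(stream)):
        tail = decode(stream[out_last : i + 1])
        if "\ufffd" not in tail:  # flush only once the tail decodes cleanly
            buffered += tail
            out_last = i + 1

    print(naive)     # six replacement characters, i.e. mojibake
    print(buffered)  # 日本, decoded intact

The extra i >= self.max_tokens_per_generation - 100 guard only fires after a successful flush, presumably so generation stops on a clean character boundary once the token budget is nearly spent.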