Harrison/rwkv utf8 (#2867)

Co-authored-by: Akihiro <ueyama0105@gmail.com>
Harrison Chase committed by GitHub
parent 6be5d7c612
commit ed2ef5cbe4

@@ -3,7 +3,7 @@
 Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py
 https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py
 """
-from typing import Any, Dict, List, Mapping, Optional, Set, SupportsIndex
+from typing import Any, Dict, List, Mapping, Optional, Set
 
 from pydantic import BaseModel, Extra, root_validator
@@ -58,7 +58,7 @@ class RWKV(LLM, BaseModel):
     CHUNK_LEN: int = 256
     """Batch size for prompt processing."""
 
-    max_tokens_per_generation: SupportsIndex = 256
+    max_tokens_per_generation: int = 256
     """Maximum number of tokens to generate."""
 
     client: Any = None  #: :meta private:
@@ -69,6 +69,8 @@ class RWKV(LLM, BaseModel):
     model_tokens: Any = None  #: :meta private:
 
+    model_state: Any = None  #: :meta private:
+
     class Config:
         """Configuration for this pydantic object."""
@@ -139,42 +141,66 @@ class RWKV(LLM, BaseModel):
         """Return the type of llm."""
         return "rwkv-4"
 
-    def rwkv_generate(self, prompt: str) -> str:
-        tokens = self.tokenizer.encode(prompt).ids
-
-        logits = None
-        state = None
-
-        occurrence = {}
-
-        # Feed in the input string
+    def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any:
+        AVOID_REPEAT_TOKENS = []
+        AVOID_REPEAT = "，：？！"
+        for i in AVOID_REPEAT:
+            dd = self.pipeline.encode(i)
+            assert len(dd) == 1
+            AVOID_REPEAT_TOKENS += dd
+
+        tokens = [int(x) for x in _tokens]
+        self.model_tokens += tokens
+        out: Any = None
+
         while len(tokens) > 0:
-            logits, state = self.client.forward(tokens[: self.CHUNK_LEN], state)
+            out, self.model_state = self.client.forward(
+                tokens[: self.CHUNK_LEN], self.model_state
+            )
             tokens = tokens[self.CHUNK_LEN :]
+        END_OF_LINE = 187
+        out[END_OF_LINE] += newline_adj  # adjust \n probability
+
+        if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
+            out[self.model_tokens[-1]] = -999999999
+        return out
+
+    def rwkv_generate(self, prompt: str) -> str:
+        self.model_state = None
+        self.model_tokens = []
+        logits = self.run_rnn(self.tokenizer.encode(prompt).ids)
+        begin = len(self.model_tokens)
+        out_last = begin
+
+        occurrence: Dict = {}
 
         decoded = ""
         for i in range(self.max_tokens_per_generation):
+            for n in occurrence:
+                logits[n] -= (
+                    self.penalty_alpha_presence
+                    + occurrence[n] * self.penalty_alpha_frequency
+                )
             token = self.pipeline.sample_logits(
                 logits, temperature=self.temperature, top_p=self.top_p
             )
 
+            END_OF_TEXT = 0
+            if token == END_OF_TEXT:
+                break
+
             if token not in occurrence:
                 occurrence[token] = 1
             else:
                 occurrence[token] += 1
 
-            decoded += self.tokenizer.decode([token])
-
-            if "\n" in decoded:
-                break
-
-            # feed back in
-            logits, state = self.client.forward([token], state)
-            for n in occurrence:
-                logits[n] -= (
-                    self.penalty_alpha_presence
-                    + occurrence[n] * self.penalty_alpha_frequency
-                )
+            logits = self.run_rnn([token])
+            xxx = self.tokenizer.decode(self.model_tokens[out_last:])
+            if "\ufffd" not in xxx:  # avoid utf-8 display issues
+                decoded += xxx
+                out_last = begin + i + 1
+
+            if i >= self.max_tokens_per_generation - 100:
+                break
+
         return decoded
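
The new run_rnn threads the recurrent state through client.forward in CHUNK_LEN-sized slices while accumulating every token id in model_tokens, so later calls can decode any slice of the full history. A toy sketch of that chunked state threading, with a stub standing in for rwkv.cpp's forward call (none of the names below are the library's API):

```python
from typing import Any, List, Optional, Tuple

CHUNK_LEN = 256

def forward(tokens: List[int], state: Optional[int]) -> Tuple[Any, int]:
    # Stub in place of rwkv.cpp's model.forward: returns dummy logits and a
    # "state" that simply counts how many tokens have flowed through it.
    return [0.0], (state or 0) + len(tokens)

def feed(tokens: List[int]) -> Tuple[Any, Optional[int]]:
    out, state = None, None
    while len(tokens) > 0:
        out, state = forward(tokens[:CHUNK_LEN], state)  # state carries over
        tokens = tokens[CHUNK_LEN:]
    return out, state

_, state = feed(list(range(1000)))
assert state == 1000  # chunks of 256/256/256/232 all passed through one state
```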
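The penalty loop that moved to the top of rwkv_generate applies the penalties before each sample: every token already generated loses a flat penalty_alpha_presence plus penalty_alpha_frequency per repetition. A standalone illustration with made-up numbers:

```python
# Toy presence/frequency penalty, mirroring the loop in rwkv_generate.
# All values here are invented for illustration.
penalty_alpha_presence = 0.4
penalty_alpha_frequency = 0.4

logits = {7: 2.0, 9: 1.5, 11: 0.3}  # token id -> logit
occurrence = {7: 3, 9: 1}           # token id -> times generated so far

for n in occurrence:
    # flat penalty for appearing at all, plus a per-repetition penalty
    logits[n] -= penalty_alpha_presence + occurrence[n] * penalty_alpha_frequency

assert round(logits[7], 2) == 0.4  # 2.0 - (0.4 + 3 * 0.4): repeated most, hit hardest
assert round(logits[9], 2) == 0.7  # 1.5 - (0.4 + 1 * 0.4)
```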
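The UTF-8 fix itself is the "\ufffd" check: decoding at a token boundary that splits a multi-byte character yields U+FFFD (the Unicode replacement character), so rwkv_generate now re-decodes from out_last and only emits text once the span decodes cleanly. A self-contained sketch of the same buffering idea, using raw UTF-8 bytes as stand-ins for token ids (the names here are illustrative, not LangChain's API):

```python
# Buffer bytes until they decode without U+FFFD, as rwkv_generate now does
# with tokenizer.decode(self.model_tokens[out_last:]).

def stream_decode(stream: bytes) -> str:
    emitted, buffer = "", b""
    for byte in stream:
        buffer += bytes([byte])
        chunk = buffer.decode("utf-8", errors="replace")
        if "\ufffd" not in chunk:  # complete code points only: safe to emit
            emitted += chunk
            buffer = b""
    return emitted

text = "héllo, 世界"  # multi-byte characters arrive split across "tokens"
assert stream_decode(text.encode("utf-8")) == text
```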
