mirror of https://github.com/hwchase17/langchain (synced 2024-11-08 07:10:35 +00:00)
Harrison/rwkv utf8 (#2867)
Co-authored-by: Akihiro <ueyama0105@gmail.com>
parent 6be5d7c612 · commit ed2ef5cbe4
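In short: the diff below (to the RWKV wrapper, langchain/llms/rwkv.py) reworks generation along the lines of ChatRWKV's chat.py. A new run_rnn helper threads the recurrent state through self.model_state and accumulates generated token ids in self.model_tokens; decoded text is appended only once the pending fragment contains no U+FFFD replacement character, so multi-byte UTF-8 characters split across tokens no longer render as garbage. Generation also now stops on the END_OF_TEXT token rather than on the first newline.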
@@ -3,7 +3,7 @@
 Based on https://github.com/saharNooby/rwkv.cpp/blob/master/rwkv/chat_with_bot.py
 https://github.com/BlinkDL/ChatRWKV/blob/main/v2/chat.py
 """
-from typing import Any, Dict, List, Mapping, Optional, Set, SupportsIndex
+from typing import Any, Dict, List, Mapping, Optional, Set
 
 from pydantic import BaseModel, Extra, root_validator
 
@@ -58,7 +58,7 @@ class RWKV(LLM, BaseModel):
     CHUNK_LEN: int = 256
     """Batch size for prompt processing."""
 
-    max_tokens_per_generation: SupportsIndex = 256
+    max_tokens_per_generation: int = 256
     """Maximum number of tokens to generate."""
 
     client: Any = None  #: :meta private:
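These first two hunks belong together: SupportsIndex is the typing protocol for "any object with __index__", which range() and friends accept, but as a pydantic field annotation it is both unusual and looser than intended, so the field is narrowed to a plain int and the now-unused import is dropped. A small self-contained sketch of what SupportsIndex actually admits (TokenBudget and first_n are illustrative names, not from the patch):

from typing import SupportsIndex


class TokenBudget:
    """Toy stand-in for an int: anything with __index__ satisfies SupportsIndex."""

    def __init__(self, n: int) -> None:
        self.n = n

    def __index__(self) -> int:
        return self.n


def first_n(n: SupportsIndex) -> range:
    # range() calls __index__, so both an int and a TokenBudget work here.
    return range(n)


assert list(first_n(3)) == list(first_n(TokenBudget(3)))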
@@ -69,6 +69,8 @@ class RWKV(LLM, BaseModel):
 
     model_tokens: Any = None  #: :meta private:
 
+    model_state: Any = None  #: :meta private:
+
     class Config:
         """Configuration for this pydantic object."""
 
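The new model_state field, together with the existing model_tokens, is what makes the wrapper stateful: RWKV is an RNN, so each client.forward call takes a chunk of tokens plus the previous state and returns logits plus the next state, and keeping that state on the instance lets run_rnn be called once for the prompt and then once per generated token. A minimal sketch of the chunked, stateful pattern, assuming a forward(tokens, state) shaped like rwkv.cpp's (feed is an illustrative helper, not from the patch):

from typing import Any, List, Optional, Tuple

CHUNK_LEN = 256  # same batch size the wrapper uses for prompt processing


def feed(model: Any, tokens: List[int], state: Optional[Any]) -> Tuple[Any, Any]:
    """Push tokens through a recurrent model in chunks, threading the state."""
    logits = None
    while tokens:
        # Each call consumes one chunk; the returned state carries everything
        # the model needs to continue, so long prompts never exceed CHUNK_LEN.
        logits, state = model.forward(tokens[:CHUNK_LEN], state)
        tokens = tokens[CHUNK_LEN:]
    return logits, state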
@@ -139,42 +141,66 @@ class RWKV(LLM, BaseModel):
         """Return the type of llm."""
         return "rwkv-4"
 
-    def rwkv_generate(self, prompt: str) -> str:
-        tokens = self.tokenizer.encode(prompt).ids
+    def run_rnn(self, _tokens: List[str], newline_adj: int = 0) -> Any:
+        AVOID_REPEAT_TOKENS = []
+        AVOID_REPEAT = "，：？！"
+        for i in AVOID_REPEAT:
+            dd = self.pipeline.encode(i)
+            assert len(dd) == 1
+            AVOID_REPEAT_TOKENS += dd
 
-        logits = None
-        state = None
+        tokens = [int(x) for x in _tokens]
+        self.model_tokens += tokens
 
-        occurrence = {}
+        out: Any = None
 
-        # Feed in the input string
         while len(tokens) > 0:
-            logits, state = self.client.forward(tokens[: self.CHUNK_LEN], state)
+            out, self.model_state = self.client.forward(
+                tokens[: self.CHUNK_LEN], self.model_state
+            )
             tokens = tokens[self.CHUNK_LEN :]
+        END_OF_LINE = 187
+        out[END_OF_LINE] += newline_adj  # adjust \n probability
+
+        if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
+            out[self.model_tokens[-1]] = -999999999
+        return out
+
+    def rwkv_generate(self, prompt: str) -> str:
+        self.model_state = None
+        self.model_tokens = []
+        logits = self.run_rnn(self.tokenizer.encode(prompt).ids)
+        begin = len(self.model_tokens)
+        out_last = begin
+
+        occurrence: Dict = {}
 
         decoded = ""
         for i in range(self.max_tokens_per_generation):
-            token = self.pipeline.sample_logits(
-                logits, temperature=self.temperature, top_p=self.top_p
-            )
-
-            if token not in occurrence:
-                occurrence[token] = 1
-            else:
-                occurrence[token] += 1
-
-            decoded += self.tokenizer.decode([token])
-
-            if "\n" in decoded:
-                break
-
-            # feed back in
-            logits, state = self.client.forward([token], state)
             for n in occurrence:
                 logits[n] -= (
                     self.penalty_alpha_presence
                     + occurrence[n] * self.penalty_alpha_frequency
                 )
+            token = self.pipeline.sample_logits(
+                logits, temperature=self.temperature, top_p=self.top_p
+            )
+
+            END_OF_TEXT = 0
+            if token == END_OF_TEXT:
+                break
+            if token not in occurrence:
+                occurrence[token] = 1
+            else:
+                occurrence[token] += 1
+
+            logits = self.run_rnn([token])
+            xxx = self.tokenizer.decode(self.model_tokens[out_last:])
+            if "\ufffd" not in xxx:  # avoid utf-8 display issues
+                decoded += xxx
+                out_last = begin + i + 1
+                if i >= self.max_tokens_per_generation - 100:
+                    break
 
         return decoded
 
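The `"\ufffd" not in xxx` guard is the actual UTF-8 fix. Byte-level tokenizers can split one multi-byte character across several token ids, and decoding a partial byte sequence produces U+FFFD replacement characters; the old loop appended each token's decode immediately, so split characters showed up as garbage. The new loop instead re-decodes the whole undelivered tail model_tokens[out_last:] every step and only commits it once it decodes cleanly. A stdlib-only sketch of the failure mode and the guard (the per-token byte fragments are hypothetical, standing in for real tokenizer output):

# "好" is three bytes in UTF-8; pretend a tokenizer split it into two tokens.
char = "好"
raw = char.encode("utf-8")        # b'\xe5\xa5\xbd'
token_bytes = [raw[:2], raw[2:]]  # hypothetical per-token byte payloads

buffered = b""
decoded = ""
for piece in token_bytes:
    buffered += piece
    text = buffered.decode("utf-8", errors="replace")
    if "\ufffd" in text:          # incomplete sequence: keep buffering
        continue
    decoded += text               # commit only complete characters
    buffered = b""

assert decoded == "好"

Note also that the `i >= self.max_tokens_per_generation - 100` early exit sits inside the clean-decode branch, presumably so generation stops at a character boundary where the buffered tail has just been flushed.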