Merge pull request #1667 from hlohaus/phind2

Expire cache, Fix multiple websocket conversations in OpenaiChat
H Lohaus 2024-03-09 19:51:35 +01:00 committed by GitHub
commit b3d19c5660
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 267 additions and 230 deletions

View File

@@ -26,38 +26,35 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
         stream: bool = False,
         proxy: str = None,
         api_key: str = None,
-        api_base: str = None,
-        use_auth_header: bool = True,
+        api_base: str = "https://generativelanguage.googleapis.com/v1beta",
+        use_auth_header: bool = False,
         image: ImageType = None,
         connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
-        model = "gemini-pro-vision" if not model and image else model
+        model = "gemini-pro-vision" if not model and image is not None else model
         model = cls.get_model(model)
         if not api_key:
             raise MissingAuthError('Missing "api_key"')
         headers = params = None
-        if api_base and use_auth_header:
+        if use_auth_header:
             headers = {"Authorization": f"Bearer {api_key}"}
         else:
             params = {"key": api_key}
-        if not api_base:
-            api_base = f"https://generativelanguage.googleapis.com/v1beta"
         method = "streamGenerateContent" if stream else "generateContent"
         url = f"{api_base.rstrip('/')}/models/{model}:{method}"
         async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
             contents = [
                 {
-                    "role": "model" if message["role"] == "assistant" else message["role"],
+                    "role": "model" if message["role"] == "assistant" else "user",
                     "parts": [{"text": message["content"]}]
                 }
                 for message in messages
             ]
-            if image:
+            if image is not None:
                 image = to_bytes(image)
                 contents[-1]["parts"].append({
                     "inline_data": {

@@ -87,7 +84,8 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
                         lines = [b"{\n"]
                     elif chunk == b",\r\n" or chunk == b"]":
                         try:
-                            data = json.loads(b"".join(lines))
+                            data = b"".join(lines)
+                            data = json.loads(data)
                             yield data["candidates"][0]["content"]["parts"][0]["text"]
                         except:
                             data = data.decode() if isinstance(data, bytes) else data
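
Note on the hunks above: api_base now defaults to Google's public endpoint and, with use_auth_header defaulting to False, the API key travels as a "?key=" query parameter instead of a Bearer header. A minimal standalone sketch of the request shape this produces (not part of the patch; the model name and prompt are placeholders):

    import asyncio
    import aiohttp

    API_BASE = "https://generativelanguage.googleapis.com/v1beta"  # the new default

    async def generate(api_key: str, prompt: str, model: str = "gemini-pro") -> str:
        url = f"{API_BASE}/models/{model}:generateContent"
        payload = {"contents": [{"role": "user", "parts": [{"text": prompt}]}]}
        async with aiohttp.ClientSession() as session:
            # Default path: no Authorization header, key as query parameter
            async with session.post(url, params={"key": api_key}, json=payload) as response:
                response.raise_for_status()
                data = await response.json()
                return data["candidates"][0]["content"]["parts"][0]["text"]

    print(asyncio.run(generate("YOUR_API_KEY", "Hello")))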

View File

@@ -46,7 +46,7 @@ async def create_conversation(session: ClientSession, proxy: str = None) -> Conv
     }
     for k, v in headers.items():
         session.headers[k] = v
-    url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1579.2'
+    url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1'
     async with session.get(url, headers=headers, proxy=proxy) as response:
         try:
             data = await response.json()

View File

@@ -5,15 +5,15 @@ import uuid
 import json
 import os
 import base64
+import time

 from aiohttp import ClientWebSocketResponse

 try:
     from py_arkose_generator.arkose import get_values_for_request
-    from async_property import async_cached_property
-    has_requirements = True
+    has_arkose_generator = True
 except ImportError:
-    async_cached_property = property
-    has_requirements = False
+    has_arkose_generator = False

 try:
     from selenium.webdriver.common.by import By
     from selenium.webdriver.support.ui import WebDriverWait
@@ -33,7 +33,7 @@ from ... import debug

 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     """A class for creating and managing conversations with OpenAI chat service"""
     url = "https://chat.openai.com"
     working = True
     needs_auth = True

@@ -47,7 +47,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     _api_key: str = None
     _headers: dict = None
     _cookies: Cookies = None
-    _last_message: int = 0
+    _expires: int = None

     @classmethod
     async def create(

@@ -80,7 +80,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             A Response object that contains the generator, action, messages, and options
         """
         # Add the user input to the messages list
-        if prompt:
+        if prompt is not None:
             messages.append({
                 "role": "user",
                 "content": prompt

@@ -102,7 +102,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             messages,
             kwargs
         )

     @classmethod
     async def upload_image(
         cls,

@@ -162,7 +162,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             response.raise_for_status()
             image_data["download_url"] = (await response.json())["download_url"]
         return ImageRequest(image_data)

     @classmethod
     async def get_default_model(cls, session: StreamSession, headers: dict):
         """

@@ -185,7 +185,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 return cls.default_model
             raise RuntimeError(f"Response: {data}")
         return cls.default_model

     @classmethod
     def create_messages(cls, messages: Messages, image_request: ImageRequest = None):
         """
@@ -334,9 +334,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         Raises:
             RuntimeError: If an error occurs during processing.
         """
-        if not has_requirements:
-            raise MissingRequirementsError('Install "py-arkose-generator" and "async_property" package')
-        if not parent_id:
+        if parent_id is None:
             parent_id = str(uuid.uuid4())

         # Read api_key from arguments

@@ -348,7 +346,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             timeout=timeout
         ) as session:
             # Read api_key and cookies from cache / browser config
-            if cls._headers is None:
+            if cls._headers is None or cls._expires is None or time.time() > cls._expires:
                 if api_key is None:
                     # Read api_key from cookies
                     cookies = get_cookies("chat.openai.com", False) if cookies is None else cookies

@@ -357,8 +355,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 else:
                     api_key = cls._api_key if api_key is None else api_key
                 # Read api_key with session cookies
-                if api_key is None and cookies:
-                    api_key = await cls.fetch_access_token(session, cls._headers)
+                #if api_key is None and cookies:
+                #    api_key = await cls.fetch_access_token(session, cls._headers)
                 # Load default model
                 if cls.default_model is None and api_key is not None:
                     try:
@@ -384,6 +382,19 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             else:
                 cls._set_api_key(api_key)

+            async with session.post(
+                f"{cls.url}/backend-api/sentinel/chat-requirements",
+                json={"conversation_mode_kind": "primary_assistant"},
+                headers=cls._headers
+            ) as response:
+                response.raise_for_status()
+                data = await response.json()
+                need_arkose = data["arkose"]["required"]
+                chat_token = data["token"]
+
+            if need_arkose and not has_arkose_generator:
+                raise MissingRequirementsError('Install "py-arkose-generator" package')
+
             try:
                 image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
             except Exception as e:
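
The hunk above adds the handshake the rest of the patch relies on: before a conversation request, the client POSTs to /backend-api/sentinel/chat-requirements and receives a one-shot token plus a flag saying whether an Arkose token is also required. A minimal sketch of that exchange in isolation, assuming an authenticated session (endpoint, JSON fields, and payload taken from the patch):

    import aiohttp

    async def fetch_chat_requirements(session: aiohttp.ClientSession, headers: dict) -> tuple:
        """Return (chat_token, need_arkose) from the sentinel endpoint."""
        async with session.post(
            "https://chat.openai.com/backend-api/sentinel/chat-requirements",
            json={"conversation_mode_kind": "primary_assistant"},
            headers=headers  # must carry the session's Authorization and Cookie headers
        ) as response:
            response.raise_for_status()
            data = await response.json()
            return data["token"], data["arkose"]["required"]

The returned token is then sent as the OpenAI-Sentinel-Chat-Requirements-Token header, and an Arkose token is only generated when the flag demands it (see the headers hunk below).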
@@ -394,12 +405,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             model = cls.get_model(model).replace("gpt-3.5-turbo", "text-davinci-002-render-sha")
             fields = ResponseFields()
             while fields.finish_reason is None:
-                arkose_token = await cls.get_arkose_token(session)
                 conversation_id = conversation_id if fields.conversation_id is None else fields.conversation_id
                 parent_id = parent_id if fields.message_id is None else fields.message_id
                 data = {
                     "action": action,
-                    "arkose_token": arkose_token,
                     "conversation_mode": {"kind": "primary_assistant"},
                     "force_paragen": False,
                     "force_rate_limit": False,

@@ -417,7 +426,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     json=data,
                     headers={
                         "Accept": "text/event-stream",
-                        "OpenAI-Sentinel-Arkose-Token": arkose_token,
+                        **({"OpenAI-Sentinel-Arkose-Token": await cls.get_arkose_token(session)} if need_arkose else {}),
+                        "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
                         **cls._headers
                     }
                 ) as response:
@@ -437,17 +447,20 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     await cls.delete_conversation(session, cls._headers, fields.conversation_id)

     @staticmethod
-    async def iter_messages_ws(ws: ClientWebSocketResponse) -> AsyncIterator:
+    async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str) -> AsyncIterator:
         while True:
-            yield base64.b64decode((await ws.receive_json())["body"])
+            message = await ws.receive_json()
+            if message["conversation_id"] == conversation_id:
+                yield base64.b64decode(message["body"])

     @classmethod
     async def iter_messages_chunk(cls, messages: AsyncIterator, session: StreamSession, fields: ResponseFields) -> AsyncIterator:
         last_message: int = 0
         async for message in messages:
             if message.startswith(b'{"wss_url":'):
-                async with session.ws_connect(json.loads(message)["wss_url"]) as ws:
-                    async for chunk in cls.iter_messages_chunk(cls.iter_messages_ws(ws), session, fields):
+                message = json.loads(message)
+                async with session.ws_connect(message["wss_url"]) as ws:
+                    async for chunk in cls.iter_messages_chunk(cls.iter_messages_ws(ws, message["conversation_id"]), session, fields):
                         yield chunk
                 break
             async for chunk in cls.iter_messages_line(session, message, fields):
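
This is the core of the "multiple websocket conversations" fix: OpenAI multiplexes several conversations over one websocket, so each reader must discard frames addressed to other conversation_ids. A self-contained sketch of the filtering behavior, with fake frames standing in for a real websocket:

    import asyncio
    import base64

    async def iter_messages_ws(frames, conversation_id: str):
        # Mirrors the patched iterator: skip frames for other conversations
        async for message in frames:
            if message["conversation_id"] == conversation_id:
                yield base64.b64decode(message["body"])

    async def fake_frames():
        for conv, text in [("a", "hello"), ("b", "other"), ("a", "world")]:
            yield {"conversation_id": conv, "body": base64.b64encode(text.encode()).decode()}

    async def main():
        async for chunk in iter_messages_ws(fake_frames(), "a"):
            print(chunk)  # b'hello', b'world' -- conversation "b" is dropped

    asyncio.run(main())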
@@ -467,6 +480,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         if not line.startswith(b"data: "):
             return
         elif line.startswith(b"data: [DONE]"):
+            if fields.finish_reason is None:
+                fields.finish_reason = "error"
             return
         try:
             line = json.loads(line[6:])
@@ -589,22 +604,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     @classmethod
     def _set_api_key(cls, api_key: str):
         cls._api_key = api_key
+        cls._expires = int(time.time()) + 60 * 60 * 4
         cls._headers["Authorization"] = f"Bearer {api_key}"

     @classmethod
     def _update_cookie_header(cls):
         cls._headers["Cookie"] = cls._format_cookies(cls._cookies)

-class EndTurn:
-    """
-    Class to represent the end of a conversation turn.
-    """
-    def __init__(self):
-        self.is_end = False
-
-    def end(self):
-        self.is_end = True
-
 class ResponseFields:
     """
     Class to encapsulate response fields.
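
The "Expire cache" half of the commit is this timestamp: cached headers are now reused for at most four hours before the login flow runs again. The pattern in miniature (a sketch; the 4-hour lifetime is the value hard-coded above):

    import time

    _headers: dict = None
    _expires: int = None

    def set_api_key(api_key: str) -> None:
        global _headers, _expires
        _headers = {"Authorization": f"Bearer {api_key}"}
        _expires = int(time.time()) + 60 * 60 * 4  # valid for 4 hours

    def get_headers(refresh) -> dict:
        # Re-authenticate on first use or once the cached entry has expired
        if _headers is None or _expires is None or time.time() > _expires:
            set_api_key(refresh())
        return _headers

    print(get_headers(lambda: "fresh-token"))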
@@ -633,8 +639,8 @@ class Response():
         self._options = options
         self._fields = None

-    async def generator(self):
-        if self._generator:
+    async def generator(self) -> AsyncIterator:
+        if self._generator is not None:
+            generator = self._generator
             self._generator = None
             chunks = []
-            async for chunk in self._generator:
+            async for chunk in generator:
@ -644,27 +650,29 @@ class Response():
yield chunk yield chunk
chunks.append(str(chunk)) chunks.append(str(chunk))
self._message = "".join(chunks) self._message = "".join(chunks)
if not self._fields: if self._fields is None:
raise RuntimeError("Missing response fields") raise RuntimeError("Missing response fields")
self.is_end = self._fields.end_turn self.is_end = self._fields.finish_reason == "stop"
def __aiter__(self): def __aiter__(self):
return self.generator() return self.generator()
@async_cached_property async def get_message(self) -> str:
async def message(self) -> str:
await self.generator() await self.generator()
return self._message return self._message
async def get_fields(self): async def get_fields(self) -> dict:
await self.generator() await self.generator()
return {"conversation_id": self._fields.conversation_id, "parent_id": self._fields.message_id} return {
"conversation_id": self._fields.conversation_id,
"parent_id": self._fields.message_id
}
async def next(self, prompt: str, **kwargs) -> Response: async def create_next(self, prompt: str, **kwargs) -> Response:
return await OpenaiChat.create( return await OpenaiChat.create(
**self._options, **self._options,
prompt=prompt, prompt=prompt,
messages=await self.messages, messages=await self.get_messages(),
action="next", action="next",
**await self.get_fields(), **await self.get_fields(),
**kwargs **kwargs
@@ -676,13 +684,13 @@ class Response():
             raise RuntimeError("Can't continue message. Message already finished.")
         return await OpenaiChat.create(
             **self._options,
-            messages=await self.messages,
+            messages=await self.get_messages(),
             action="continue",
             **fields,
             **kwargs
         )

-    async def variant(self, **kwargs) -> Response:
+    async def create_variant(self, **kwargs) -> Response:
         if self.action != "next":
             raise RuntimeError("Can't create variant from continue or variant request.")
         return await OpenaiChat.create(
@@ -693,8 +701,7 @@ class Response():
             **kwargs
         )

-    @async_cached_property
-    async def messages(self):
+    async def get_messages(self) -> list:
         messages = self._messages
-        messages.append({"role": "assistant", "content": await self.message})
+        messages.append({"role": "assistant", "content": await self.get_message()})
         return messages
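
With async_cached_property gone, the cached message/messages properties become plain coroutine methods, so call sites switch from awaiting attributes to awaiting method calls. A sketch of the renamed surface from calling code (import path and arguments are assumptions; a working OpenAI session is required):

    import asyncio
    from g4f.Provider import OpenaiChat  # assumed import path

    async def demo():
        response = await OpenaiChat.create(
            messages=[{"role": "user", "content": "Hi"}]
        )
        print(await response.get_message())      # was: await response.message
        follow_up = await response.create_next(  # was: response.next(...)
            "Summarize that in one sentence."
        )
        print(await follow_up.get_messages())    # was: await follow_up.messages

    asyncio.run(demo())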

View File

@@ -65,6 +65,7 @@
 :root {
     --font-1: "Inter", sans-serif;
     --section-gap: 25px;
+    --inner-gap: 15px;
     --border-radius-1: 8px;
 }

@@ -204,6 +205,12 @@ body {
     gap: 10px;
 }

+.conversations .convo .choise {
+    position: absolute;
+    right: 8px;
+    background-color: var(--blur-bg);
+}
+
 .conversations i {
     color: var(--conversations);
     cursor: pointer;

@@ -222,10 +229,14 @@ body {
     overflow-wrap: break-word;
     display: flex;
     gap: var(--section-gap);
-    padding: var(--section-gap);
+    padding: var(--inner-gap) var(--section-gap);
     padding-bottom: 0;
 }

+.message.regenerate {
+    opacity: 0.75;
+}
+
 .message:last-child {
     animation: 0.6s show_message;
 }

@@ -393,10 +404,10 @@ body {
 #input-count {
     width: fit-content;
     font-size: 12px;
-    padding: 6px 15px;
+    padding: 6px var(--inner-gap);
 }

-.stop_generating, .regenerate {
+.stop_generating, .toolbar .regenerate {
     position: absolute;
     z-index: 1000000;
     top: 0;

@@ -404,20 +415,20 @@ body {
 }

 @media only screen and (min-width: 40em) {
-    .stop_generating, .regenerate {
+    .stop_generating, .toolbar .regenerate {
         left: 50%;
         transform: translateX(-50%);
         right: auto;
     }
 }

-.stop_generating button, .regenerate button {
+.stop_generating button, .toolbar .regenerate button {
     backdrop-filter: blur(20px);
     -webkit-backdrop-filter: blur(20px);
     background-color: var(--blur-bg);
     border-radius: var(--border-radius-1);
     border: 1px solid var(--blur-border);
-    padding: 5px 15px;
+    padding: 5px var(--inner-gap);
     color: var(--colour-3);
     display: flex;
     justify-content: center;

@@ -601,7 +612,6 @@ select {
 .input-box {
     display: flex;
     align-items: center;
-    padding-right: 15px;
     cursor: pointer;
 }

@@ -785,7 +795,7 @@ a:-webkit-any-link {
     font-size: 15px;
     width: 100%;
     height: 100%;
-    padding: 12px 15px;
+    padding: 12px var(--inner-gap);
     background: none;
     border: none;
     outline: none;

@@ -990,10 +1000,21 @@ a:-webkit-any-link {
     padding-right: 5px;
     padding-top: 2px;
     padding-bottom: 2px;
-    top: 20px;
-    left: 8px;
+    position: absolute;
+    bottom: 8px;
+    right: 8px;
 }

 #send-button:hover {
     border: 1px solid #e4d4ffc9;
 }
+
+#systemPrompt {
+    font-size: 15px;
+    width: 100%;
+    color: var(--colour-3);
+    height: 50px;
+    outline: none;
+    padding: var(--inner-gap) var(--section-gap);
+    resize: vertical;
+}

View File

@@ -37,10 +37,6 @@
         import llamaTokenizer from "llama-tokenizer-js"
     </script>
     <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
-    <script type="module" async>
-        import { countWords } from 'https://esm.run/alfaaz';
-        window.countWords = countWords;
-    </script>
     <script>
         const user_image = '<img src="/assets/img/user.png" alt="your avatar">';
         const gpt_image = '<img src="/assets/img/gpt.png" alt="your avatar">';

@@ -55,7 +51,6 @@
         }
         #message-input {
-            margin-right: 30px;
             height: 82px;
             margin-left: 20px;
         }

@@ -116,6 +111,7 @@
                 </div>
             </div>
             <div class="conversation">
+                <textarea id="systemPrompt" class="box" placeholder="System prompt"></textarea>
                 <div id="messages" class="box"></div>
                 <div class="toolbar">
                     <div id="input-count" class="">

View File

@@ -1,28 +1,29 @@
 const colorThemes = document.querySelectorAll('[name="theme"]');
 const markdown = window.markdownit();
 const message_box = document.getElementById(`messages`);
-const message_input = document.getElementById(`message-input`);
+const messageInput = document.getElementById(`message-input`);
 const box_conversations = document.querySelector(`.top`);
 const stop_generating = document.querySelector(`.stop_generating`);
 const regenerate = document.querySelector(`.regenerate`);
 const sidebar = document.querySelector(".conversations");
 const sidebar_button = document.querySelector(".mobile-sidebar");
-const send_button = document.getElementById("send-button");
+const sendButton = document.getElementById("send-button");
 const imageInput = document.getElementById("image");
 const cameraInput = document.getElementById("camera");
 const fileInput = document.getElementById("file");
 const inputCount = document.getElementById("input-count")
 const modelSelect = document.getElementById("model");
+const systemPrompt = document.getElementById("systemPrompt")

 let prompt_lock = false;

 hljs.addPlugin(new CopyButtonPlugin());

-message_input.addEventListener("blur", () => {
+messageInput.addEventListener("blur", () => {
     window.scrollTo(0, 0);
 });

-message_input.addEventListener("focus", () => {
+messageInput.addEventListener("focus", () => {
     document.documentElement.scrollTop = document.documentElement.scrollHeight;
 });

@@ -59,7 +60,7 @@ const register_remove_message = async () => {
             }
             const message_el = el.parentElement.parentElement;
             await remove_message(window.conversation_id, message_el.dataset.index);
-            await load_conversation(window.conversation_id);
+            await load_conversation(window.conversation_id, false);
         })
     }
 });

@@ -77,13 +78,13 @@ const delete_conversations = async () => {
 };

 const handle_ask = async () => {
-    message_input.style.height = `82px`;
-    message_input.focus();
+    messageInput.style.height = "82px";
+    messageInput.focus();
     window.scrollTo(0, 0);

-    message = message_input.value
+    message = messageInput.value
     if (message.length > 0) {
-        message_input.value = '';
+        messageInput.value = "";
         prompt_lock = true;
         count_input()
         await add_conversation(window.conversation_id, message);

@@ -135,7 +136,7 @@ const remove_cancel_button = async () => {
     }, 300);
 };

-const filter_messages = (messages, filter_last_message = true) => {
+const prepare_messages = (messages, filter_last_message = true) => {
     // Removes none user messages at end
     if (filter_last_message) {
         let last_message;

@@ -147,7 +148,7 @@ const filter_messages = (messages, filter_last_message = true) => {
         }
     }

-    // Remove history, if it is selected
+    // Remove history, if it's selected
     if (document.getElementById('history')?.checked) {
         if (filter_last_message) {
             messages = [messages.pop()];

@@ -157,20 +158,31 @@ const filter_messages = (messages, filter_last_message = true) => {
     }

     let new_messages = [];
-    for (i in messages) {
-        new_message = messages[i];
-        // Remove generated images from history
-        new_message["content"] = new_message["content"].replaceAll(
-            /<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm,
-            ""
-        )
-        delete new_message["provider"];
-        // Remove regenerated messages
-        if (!new_message.regenerate) {
-            new_messages.push(new_message)
+    if (messages) {
+        for (i in messages) {
+            new_message = messages[i];
+            // Remove generated images from history
+            new_message.content = new_message.content.replaceAll(
+                /<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm,
+                ""
+            )
+            delete new_message["provider"];
+            // Remove regenerated messages
+            if (!new_message.regenerate) {
+                new_messages.push(new_message)
+            }
         }
     }

+    // Add system message
+    system_content = systemPrompt?.value;
+    if (system_content) {
+        new_messages.unshift({
+            "role": "system",
+            "content": system_content
+        });
+    }
+
     return new_messages;
 }

@@ -179,7 +191,7 @@ const ask_gpt = async () => {
     messages = await get_messages(window.conversation_id);
     total_messages = messages.length;
-    messages = filter_messages(messages);
+    messages = prepare_messages(messages);

     window.scrollTo(0, 0);
     window.controller = new AbortController();

@@ -192,8 +204,6 @@ const ask_gpt = async () => {
     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);

-    await new Promise((r) => setTimeout(r, 500));
-    window.scrollTo(0, 0);

     el = message_box.querySelector('.count_total');
     el ? el.parentElement.removeChild(el) : null;

@@ -218,6 +228,8 @@ const ask_gpt = async () => {
     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);

+    error = provider_result = null;
+
     try {
         let body = JSON.stringify({
             id: window.token,
@@ -241,49 +253,47 @@ const ask_gpt = async () => {
         } else {
             headers['content-type'] = 'application/json';
         }

         const response = await fetch(`/backend-api/v2/conversation`, {
             method: 'POST',
             signal: window.controller.signal,
             headers: headers,
             body: body
         });

-        await new Promise((r) => setTimeout(r, 1000));
-        window.scrollTo(0, 0);
-
         const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
-        error = provider = null;
         while (true) {
             const { value, done } = await reader.read();
             if (done) break;
             for (const line of value.split("\n")) {
-                if (!line) continue;
+                if (!line) {
+                    continue;
+                }
                 const message = JSON.parse(line);
                 if (message.type == "content") {
                     text += message.content;
-                } else if (message["type"] == "provider") {
-                    provider = message.provider
+                } else if (message.type == "provider") {
+                    provider_result = message.provider
                     content.querySelector('.provider').innerHTML = `
-                        <a href="${provider.url}" target="_blank">
-                            ${provider.name}
+                        <a href="${provider_result.url}" target="_blank">
+                            ${provider_result.name}
                         </a>
-                        ${provider.model ? ' with ' + provider.model : ''}
+                        ${provider_result.model ? ' with ' + provider_result.model : ''}
                     `
-                } else if (message["type"] == "error") {
-                    error = message["error"];
-                } else if (message["type"] == "message") {
-                    console.error(message["message"])
+                } else if (message.type == "error") {
+                    error = message.error;
+                } else if (message.type == "message") {
+                    console.error(message.message)
                 }
             }
         }

         if (error) {
             console.error(error);
-            content_inner.innerHTML += "<p>An error occured, please try again, if the problem persists, please use a other model or provider.</p>";
+            content_inner.innerHTML += `<p><strong>An error occurred:</strong> ${error}</p>`;
         } else {
             html = markdown_render(text);
             let lastElement, lastIndex = null;
-            for (element of ['</p>', '</code></pre>', '</li>\n</ol>', '</li>\n</ul>']) {
+            for (element of ['</p>', '</code></pre>', '</p>\n</li>\n</ol>', '</li>\n</ol>', '</li>\n</ul>']) {
                 const index = html.lastIndexOf(element)
-                if (index > lastIndex) {
+                if (index - element.length > lastIndex) {
                     lastElement = element;
                     lastIndex = index;
                 }
@@ -292,7 +302,7 @@ const ask_gpt = async () => {
                 html = html.substring(0, lastIndex) + '<span id="cursor"></span>' + lastElement;
             }
             content_inner.innerHTML = html;
-            content_count.innerText = count_words_and_tokens(text, provider?.model);
+            content_count.innerText = count_words_and_tokens(text, provider_result?.model);
             highlight(content_inner);
         }

@@ -302,7 +312,6 @@ const ask_gpt = async () => {
             }
         }
         if (!error) {
-            // Remove cursor
             html = markdown_render(text);
             content_inner.innerHTML = html;
             highlight(content_inner);

@@ -313,30 +322,29 @@ const ask_gpt = async () => {
         }
     } catch (e) {
         console.error(e);

         if (e.name != "AbortError") {
             error = true;
             text = "oops ! something went wrong, please try again / reload. [stacktrace in console]";
             content_inner.innerHTML = text;
         } else {
-            content_inner.innerHTML += ` [aborted]`;
-            text += ` [aborted]`
+            content_inner.innerHTML += " [aborted]";
+            if (text) text += " [aborted]";
         }
     }
-    if (!error) {
-        await add_message(window.conversation_id, "assistant", text, provider);
+    if (!error && text) {
+        await add_message(window.conversation_id, "assistant", text, provider_result);
         await load_conversation(window.conversation_id);
     } else {
         let cursorDiv = document.getElementById(`cursor`);
         if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
     }
-    window.scrollTo(0, 0);
     message_box.scrollTop = message_box.scrollHeight;
     await remove_cancel_button();
     await register_remove_message();
     prompt_lock = false;
+    window.scrollTo(0, 0);
     await load_conversations();
-    regenerate.classList.remove(`regenerate-hidden`);
+    regenerate.classList.remove("regenerate-hidden");
 };

 const clear_conversations = async () => {

@@ -366,22 +374,18 @@ const clear_conversation = async () => {

 const show_option = async (conversation_id) => {
     const conv = document.getElementById(`conv-${conversation_id}`);
-    const yes = document.getElementById(`yes-${conversation_id}`);
-    const not = document.getElementById(`not-${conversation_id}`);
+    const choi = document.getElementById(`cho-${conversation_id}`);

-    conv.style.display = `none`;
-    yes.style.display = `block`;
-    not.style.display = `block`;
+    conv.style.display = "none";
+    choi.style.display = "block";
 };

 const hide_option = async (conversation_id) => {
     const conv = document.getElementById(`conv-${conversation_id}`);
-    const yes = document.getElementById(`yes-${conversation_id}`);
-    const not = document.getElementById(`not-${conversation_id}`);
+    const choi = document.getElementById(`cho-${conversation_id}`);

-    conv.style.display = `block`;
-    yes.style.display = `none`;
-    not.style.display = `none`;
+    conv.style.display = "block";
+    choi.style.display = "none";
 };

 const delete_conversation = async (conversation_id) => {
@@ -412,23 +416,31 @@ const new_conversation = async () => {
     window.conversation_id = uuid();

     await clear_conversation();
+    if (systemPrompt) {
+        systemPrompt.value = "";
+    }
     load_conversations();
     hide_sidebar();
     say_hello();
 };

-const load_conversation = async (conversation_id) => {
-    let messages = await get_messages(conversation_id);
+const load_conversation = async (conversation_id, scroll = true) => {
+    let conversation = await get_conversation(conversation_id);
+    let messages = conversation?.items || [];
+
+    if (systemPrompt) {
+        systemPrompt.value = conversation.system || "";
+    }

     let elements = "";
     let last_model = null;
     for (i in messages) {
         let item = messages[i];
-        last_model = item?.provider?.model;
+        last_model = item.provider?.model;
         let next_i = parseInt(i) + 1;
         let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null);
-        let provider_link = item.provider?.name ? `<a href="${item.provider?.url}" target="_blank">${item.provider.name}</a>` : "";
+        let provider_link = item.provider?.name ? `<a href="${item.provider.url}" target="_blank">${item.provider.name}</a>` : "";
         let provider = provider_link ? `
             <div class="provider">
                 ${provider_link}

@@ -436,7 +448,7 @@ const load_conversation = async (conversation_id) => {
             </div>
         ` : "";

         elements += `
-            <div class="message" data-index="${i}">
+            <div class="message${item.regenerate ? " regenerate": ""}" data-index="${i}">
                 <div class="${item.role}">
                     ${item.role == "assistant" ? gpt_image : user_image}
                     <i class="fa-solid fa-xmark"></i>

@@ -454,7 +466,7 @@ const load_conversation = async (conversation_id) => {
         `;
     }

-    const filtered = filter_messages(messages, false);
+    const filtered = prepare_messages(messages, false);
     if (filtered.length > 0) {
         last_model = last_model?.startsWith("gpt-4") ? "gpt-4" : "gpt-3.5-turbo"
         let count_total = GPTTokenizer_cl100k_base?.encodeChat(filtered, last_model).length

@@ -468,44 +480,35 @@ const load_conversation = async (conversation_id) => {
     register_remove_message();
     highlight(message_box);

-    message_box.scrollTo({ top: message_box.scrollHeight, behavior: "smooth" });
-
-    setTimeout(() => {
-        message_box.scrollTop = message_box.scrollHeight;
-    }, 500);
+    if (scroll) {
+        message_box.scrollTo({ top: message_box.scrollHeight, behavior: "smooth" });
+
+        setTimeout(() => {
+            message_box.scrollTop = message_box.scrollHeight;
+        }, 500);
+    }
 };

-function count_tokens(model, text) {
-    if (model.startsWith("gpt-3") || model.startsWith("gpt-4")) {
-        return GPTTokenizer_cl100k_base?.encode(text).length;
-    }
-    if (model.startsWith("llama2") || model.startsWith("codellama")) {
-        return llamaTokenizer?.encode(text).length;
-    }
-    if (model.startsWith("mistral") || model.startsWith("mixtral")) {
-        return mistralTokenizer?.encode(text).length;
-    }
-}
-
-function count_words_and_tokens(text, model) {
-    const tokens_count = model ? count_tokens(model, text) : null;
-    const tokens_append = tokens_count ? `, ${tokens_count} tokens` : "";
-    return countWords ? `(${countWords(text)} words${tokens_append})` : "";
-}
-
-const get_conversation = async (conversation_id) => {
+async function get_conversation(conversation_id) {
     let conversation = await JSON.parse(
         localStorage.getItem(`conversation:${conversation_id}`)
     );
     return conversation;
-};
+}

-const get_messages = async (conversation_id) => {
+async function save_conversation(conversation_id, conversation) {
+    localStorage.setItem(
+        `conversation:${conversation_id}`,
+        JSON.stringify(conversation)
+    );
+}
+
+async function get_messages(conversation_id) {
     let conversation = await get_conversation(conversation_id);
     return conversation?.items || [];
-};
+}

-const add_conversation = async (conversation_id, content) => {
+async function add_conversation(conversation_id, content) {
     if (content.length > 17) {
         title = content.substring(0, 17) + '...'
     } else {
@@ -513,31 +516,34 @@ const add_conversation = async (conversation_id, content) => {
     }

     if (localStorage.getItem(`conversation:${conversation_id}`) == null) {
-        localStorage.setItem(
-            `conversation:${conversation_id}`,
-            JSON.stringify({
-                id: conversation_id,
-                title: title,
-                items: [],
-            })
-        );
+        await save_conversation(conversation_id, {
+            id: conversation_id,
+            title: title,
+            system: systemPrompt?.value,
+            items: [],
+        });
     }

     history.pushState({}, null, `/chat/${conversation_id}`);
-};
+}
+
+async function save_system_message() {
+    if (!window.conversation_id) return;
+    const conversation = await get_conversation(window.conversation_id);
+    conversation.system = systemPrompt?.value;
+    await save_conversation(window.conversation_id, conversation);
+}

 const hide_last_message = async (conversation_id) => {
     const conversation = await get_conversation(conversation_id)
     const last_message = conversation.items.pop();
-    if (last_message["role"] == "assistant") {
-        last_message["regenerate"] = true;
+    if (last_message) {
+        if (last_message["role"] == "assistant") {
+            last_message["regenerate"] = true;
+        }
+        conversation.items.push(last_message);
     }
-    conversation.items.push(last_message);
-    localStorage.setItem(
-        `conversation:${conversation_id}`,
-        JSON.stringify(conversation)
-    );
+    await save_conversation(conversation_id, conversation);
 };
@@ -545,17 +551,16 @@ const remove_message = async (conversation_id, index) => {
     let new_items = [];
     for (i in conversation.items) {
         if (i == index - 1) {
-            delete conversation.items[i]["regenerate"];
+            if (!conversation.items[index]?.regenerate) {
+                delete conversation.items[i]["regenerate"];
+            }
         }
         if (i != index) {
             new_items.push(conversation.items[i])
         }
     }
     conversation.items = new_items;
-    localStorage.setItem(
-        `conversation:${conversation_id}`,
-        JSON.stringify(conversation)
-    );
+    await save_conversation(conversation_id, conversation);
 };

 const add_message = async (conversation_id, role, content, provider) => {

@@ -566,12 +571,7 @@ const add_message = async (conversation_id, role, content, provider) => {
         content: content,
         provider: provider
     });
-
-    localStorage.setItem(
-        `conversation:${conversation_id}`,
-        JSON.stringify(conversation)
-    );
-
+    await save_conversation(conversation_id, conversation);
     return conversation.items.length - 1;
 };

@@ -594,8 +594,10 @@ const load_conversations = async () => {
                 <span class="convo-title">${conversation.title}</span>
             </div>
             <i onclick="show_option('${conversation.id}')" class="fa-regular fa-trash" id="conv-${conversation.id}"></i>
-            <i onclick="delete_conversation('${conversation.id}')" class="fa-regular fa-check" id="yes-${conversation.id}" style="display:none;"></i>
-            <i onclick="hide_option('${conversation.id}')" class="fa-regular fa-x" id="not-${conversation.id}" style="display:none;"></i>
+            <div id="cho-${conversation.id}" class="choise" style="display:none;">
+                <i onclick="delete_conversation('${conversation.id}')" class="fa-regular fa-check"></i>
+                <i onclick="hide_option('${conversation.id}')" class="fa-regular fa-x"></i>
+            </div>
         </div>
     `;
     }
@@ -733,15 +735,45 @@ colorThemes.forEach((themeOption) => {
     });
 });

+function count_tokens(model, text) {
+    if (model) {
+        if (model.startsWith("llama2") || model.startsWith("codellama")) {
+            return llamaTokenizer?.encode(text).length;
+        }
+        if (model.startsWith("mistral") || model.startsWith("mixtral")) {
+            return mistralTokenizer?.encode(text).length;
+        }
+    }
+    return GPTTokenizer_cl100k_base?.encode(text).length;
+}
+
+function count_words(text) {
+    return text.trim().match(/[\w\u4E00-\u9FA5]+/gu)?.length || 0;
+}
+
+function count_words_and_tokens(text, model) {
+    return `(${count_words(text)} words, ${count_tokens(model, text)} tokens)`;
+}
+
+let countFocus = messageInput;
+
 const count_input = async () => {
-    if (message_input.value) {
+    if (countFocus.value) {
         model = modelSelect.options[modelSelect.selectedIndex].value;
-        inputCount.innerText = count_words_and_tokens(message_input.value, model);
+        inputCount.innerText = count_words_and_tokens(countFocus.value, model);
     } else {
         inputCount.innerHTML = "&nbsp;"
     }
 };
-message_input.addEventListener("keyup", count_input);
+messageInput.addEventListener("keyup", count_input);
+systemPrompt.addEventListener("keyup", count_input);
+systemPrompt.addEventListener("focus", function() {
+    countFocus = systemPrompt;
+    count_input();
+});
+systemPrompt.addEventListener("blur", function() {
+    countFocus = messageInput;
+    count_input();
+});

 window.onload = async () => {
     setTheme();
@@ -754,11 +786,9 @@ window.onload = async () => {
         say_hello()
     }

-    setTimeout(() => {
-        load_conversations();
-    }, 1);
+    load_conversations();

-    message_input.addEventListener("keydown", async (evt) => {
+    messageInput.addEventListener("keydown", async (evt) => {
         if (prompt_lock) return;

         if (evt.keyCode === 13 && !evt.shiftKey) {

@@ -766,41 +796,22 @@ window.onload = async () => {
             console.log("pressed enter");
             await handle_ask();
         } else {
-            message_input.style.removeProperty("height");
-            message_input.style.height = message_input.scrollHeight + "px";
+            messageInput.style.removeProperty("height");
+            messageInput.style.height = messageInput.scrollHeight + "px";
         }
     });

-    send_button.addEventListener(`click`, async () => {
+    sendButton.addEventListener(`click`, async () => {
         console.log("clicked send");
         if (prompt_lock) return;
         await handle_ask();
     });

+    messageInput.focus();
+
     register_settings_localstorage();
 };

-const observer = new MutationObserver((mutationsList) => {
-    for (const mutation of mutationsList) {
-        if (mutation.type === 'attributes' && mutation.attributeName === 'style') {
-            const height = message_input.offsetHeight;
-
-            let heightValues = {
-                81: "20px",
-                82: "20px",
-                100: "30px",
-                119: "39px",
-                138: "49px",
-                150: "55px"
-            }
-
-            send_button.style.top = heightValues[height] || '';
-        }
-    }
-});
-
-observer.observe(message_input, { attributes: true });
-
 (async () => {
     response = await fetch('/backend-api/v2/models')
     models = await response.json()

@@ -875,4 +886,8 @@ fileInput.addEventListener('change', async (event) => {
     } else {
         delete fileInput.dataset.text;
     }
 });
+
+systemPrompt?.addEventListener("blur", async () => {
+    await save_system_message();
+});