Merge pull request #1175 from Biflez48/main

Adding context preservation to the GUI: handle_ask now rebuilds the conversation history from the rendered .message elements and sends the full list of {role, content} turns as parts, and the backend forwards that list directly to g4f.ChatCompletion.create, so replies keep the earlier context.
Tekky committed 8 months ago
commit 1dc8e6d528
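For orientation, the change boils down to the shape of `parts` in the request body: instead of a single user message, the client now sends every turn it can scrape from the chat DOM plus the new input. A minimal sketch of that payload fragment, with made-up message text and only the fields visible in the hunks below:

// Shape of the history array handle_ask now builds ("txtMsgs" in the diff).
// The trailing space on scraped turns mirrors the `textContent + " "` below.
const txtMsgs = [
    { role: "user", content: "Who wrote The Hobbit? " },
    { role: "assistant", content: "J. R. R. Tolkien wrote The Hobbit. " },
    { role: "user", content: "When was it published?" }   // the message just typed
];

// Only the part of the request body this PR touches:
const meta = {
    content: {
        content_type: "text",
        parts: txtMsgs   // previously: [{ content: message, role: "user" }]
    }
};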

@@ -260,6 +260,32 @@ body {
z-index: 10000;
}
.message .assistant{
max-width: 48px;
max-height: 48px;
flex-shrink: 0;
}
.message .assistant img {
width: 100%;
height: 100%;
object-fit: cover;
border-radius: 8px;
outline: 1px solid var(--blur-border);
}
.message .assistant:after {
content: "63";
position: absolute;
bottom: 0;
right: 0;
height: 60%;
width: 60%;
background: var(--colour-3);
filter: blur(10px) opacity(0.5);
z-index: 10000;
}
.message .content {
display: flex;
flex-direction: column;
@@ -280,6 +306,13 @@ body {
z-index: 1000;
}
.message .assistant i {
position: absolute;
bottom: -6px;
right: -6px;
z-index: 1000;
}
.new_convo {
padding: 8px 12px;
display: flex;

@@ -31,12 +31,37 @@ const handle_ask = async () => {
message_input.style.height = `80px`;
message_input.focus();
let txtMsgs = [];
const divTags = document.getElementsByClassName("message");
for(let i=0;i<divTags.length;i++){
if(!divTags[i].children[1].classList.contains("welcome-message")){
if(divTags[i].children[0].className == "assistant"){
const msg = {
role: "assistant",
content: divTags[i].children[1].textContent+" "
};
txtMsgs.push(msg);
}else{
const msg = {
role: "user",
content: divTags[i].children[1].textContent+" "
};
txtMsgs.push(msg);
}
}
}
window.scrollTo(0, 0);
let message = message_input.value;
const msg = {
role: "user",
content: message
};
txtMsgs.push(msg);
if (message.length > 0) {
message_input.value = ``;
await ask_gpt(message);
await ask_gpt(txtMsgs);
}
};
@@ -49,13 +74,13 @@ const remove_cancel_button = async () => {
}, 300);
};
const ask_gpt = async (message) => {
const ask_gpt = async (txtMsgs) => {
try {
message_input.value = ``;
message_input.innerHTML = ``;
message_input.innerText = ``;
add_conversation(window.conversation_id, message);
add_conversation(window.conversation_id, txtMsgs[0].content);
window.scrollTo(0, 0);
window.controller = new AbortController();
@@ -75,7 +100,7 @@ const ask_gpt = async (message) => {
<i class="fa-regular fa-phone-arrow-up-right"></i>
</div>
<div class="content" id="user_${token}">
${format(message)}
${format(txtMsgs[txtMsgs.length-1].content)}
</div>
</div>
`;
@@ -87,7 +112,7 @@ const ask_gpt = async (message) => {
message_box.innerHTML += `
<div class="message">
<div class="user">
<div class="assistant">
${gpt_image} <i class="fa-regular fa-phone-arrow-down-left"></i>
</div>
<div class="content" id="gpt_${window.token}">
@@ -120,12 +145,7 @@ const ask_gpt = async (message) => {
conversation: await get_conversation(window.conversation_id),
internet_access: document.getElementById(`switch`).checked,
content_type: `text`,
parts: [
{
content: message,
role: `user`,
},
],
parts: txtMsgs,
},
},
}),
@@ -154,7 +174,7 @@ const ask_gpt = async (message) => {
document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please try again, if the problem persists, please reload / refresh cache or use a differnet browser";
}
add_message(window.conversation_id, "user", message);
add_message(window.conversation_id, "user", txtMsgs[txtMsgs.length-1].content);
add_message(window.conversation_id, "assistant", text);
message_box.scrollTop = message_box.scrollHeight;
@@ -165,7 +185,7 @@ const ask_gpt = async (message) => {
window.scrollTo(0, 0);
} catch (e) {
add_message(window.conversation_id, "user", message);
add_message(window.conversation_id, "user", txtMsgs[txtMsgs.length-1].content);
message_box.scrollTop = message_box.scrollHeight;
await remove_cancel_button();
@@ -279,7 +299,7 @@ const load_conversation = async (conversation_id) => {
for (item of conversation.items) {
message_box.innerHTML += `
<div class="message">
<div class="user">
<div class=${item.role == "assistant" ? "assistant" : "user"}>
${item.role == "assistant" ? gpt_image : user_image}
${item.role == "assistant"
? `<i class="fa-regular fa-phone-arrow-down-left"></i>`
@@ -316,7 +336,7 @@ const get_conversation = async (conversation_id) => {
const add_conversation = async (conversation_id, content) => {
if (content.length > 17) {
title = content.substring(0, 17) + '..'
title = content.substring(0, 17) + '...'
} else {
title = content + '&nbsp;'.repeat(19 - content.length)
}
@@ -461,7 +481,7 @@ const say_hello = async () => {
message_box.innerHTML += `
<div class="message">
<div class="user">
<div class="assistant">
${gpt_image}
<i class="fa-regular fa-phone-arrow-down-left"></i>
</div>
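Read together, the new loop in handle_ask is equivalent to the standalone helper below (the name collectContext is hypothetical, not part of client.js): each rendered .message row holds the role wrapper as its first child and the .content element as its second, and the welcome message is skipped.

// Equivalent sketch of the context-gathering loop added to handle_ask above;
// collectContext is a hypothetical name used only for this illustration.
const collectContext = () => {
    const txtMsgs = [];
    const rows = document.getElementsByClassName("message");
    for (let i = 0; i < rows.length; i++) {
        const roleDiv = rows[i].children[0];     // "user" or "assistant" wrapper
        const contentDiv = rows[i].children[1];  // rendered message text
        if (contentDiv.classList.contains("welcome-message")) continue;
        txtMsgs.push({
            role: roleDiv.className == "assistant" ? "assistant" : "user",
            content: contentDiv.textContent + " "
        });
    }
    return txtMsgs;
};

The freshly typed message is then pushed onto this array and the whole list is handed to ask_gpt(txtMsgs), which forwards it as parts and uses the last element wherever the single message string was used before (rendering the user bubble, add_message, error handling).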

@@ -1,5 +1,4 @@
import g4f
import json
from flask import request
from .internet import search
@@ -44,45 +43,26 @@ class Backend_Api:
}
def _conversation(self):
config = None
proxy = None
try:
config = json.load(open("config.json","r",encoding="utf-8"))
proxy = config["proxy"]
except Exception:
pass
try:
jailbreak = request.json['jailbreak']
internet_access = request.json['meta']['content']['internet_access']
conversation = request.json['meta']['content']['conversation']
prompt = request.json['meta']['content']['parts'][0]
#jailbreak = request.json['jailbreak']
#internet_access = request.json['meta']['content']['internet_access']
#conversation = request.json['meta']['content']['conversation']
prompt = request.json['meta']['content']['parts']
model = request.json['model']
provider = request.json.get('provider').split('g4f.Provider.')[1]
messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
messages = prompt
print(messages)
def stream():
if proxy != None:
yield from g4f.ChatCompletion.create(
model=model,
provider=get_provider(provider),
messages=messages,
stream=True,
proxy=proxy
) if provider else g4f.ChatCompletion.create(
model=model, messages=messages, stream=True, proxy=proxy
)
else:
yield from g4f.ChatCompletion.create(
model=model,
provider=get_provider(provider),
messages=messages,
stream=True,
) if provider else g4f.ChatCompletion.create(
model=model, messages=messages, stream=True
)
yield from g4f.ChatCompletion.create(
model=g4f.models.gpt_35_long,
provider=get_provider(provider),
messages=messages,
stream=True,
) if provider else g4f.ChatCompletion.create(
model=model, messages=messages, stream=True
)
return self.app.response_class(stream(), mimetype='text/event-stream')
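With the proxy/config and jailbreak plumbing removed, the new backend path condenses to the sketch below; it assumes the g4f import, Flask request object, and get_provider helper from the surrounding file, keeps the hard-coded g4f.models.gpt_35_long fallback exactly as committed, and uses a function name (conversation_sketch) chosen only for this illustration.

# Condensed sketch of the simplified _conversation flow (not a drop-in
# replacement for backend.py; app and get_provider come from the class).
import g4f
from flask import request

def conversation_sketch(app, get_provider):
    # `parts` is now a ready-to-use list of {"role": ..., "content": ...}
    # messages assembled by the GUI, so no prompt/search/jailbreak merging.
    messages = request.json['meta']['content']['parts']
    model = request.json['model']
    provider = request.json.get('provider').split('g4f.Provider.')[1]

    def stream():
        if provider:
            # As committed: a named provider currently pins gpt_35_long.
            yield from g4f.ChatCompletion.create(
                model=g4f.models.gpt_35_long,
                provider=get_provider(provider),
                messages=messages,
                stream=True,
            )
        else:
            yield from g4f.ChatCompletion.create(
                model=model, messages=messages, stream=True
            )

    return app.response_class(stream(), mimetype='text/event-stream')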
