Mirror of https://github.com/xtekky/gpt4free.git, synced 2024-11-05 00:01:00 +00:00
Add copilot conversation mode
This commit is contained in: parent 905ced06bd, commit f861f322aa
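The commit adds a fourth "copilot" conversation mode to the Bing provider: Tones gains a copilot attribute that reuses the "Balanced" wire value, the provider's model names become the lowercase attribute names of Tones, and create_message switches the request scenario to "CopilotMicrosoftCom" with a "gpt-creator-persona" extension parameter. A standalone sketch of the resulting tone lookup (it only mirrors the changed lines below, not the provider's surrounding code):

class Tones:
    creative = "Creative"
    balanced = "Balanced"
    precise = "Precise"
    copilot = "Balanced"  # new mode, reuses the "Balanced" wire value

# model names exposed by the provider are the lowercase attribute names
models = [key for key in Tones.__dict__ if not key.startswith("__")]
print(models)                # ['creative', 'balanced', 'precise', 'copilot']

# create_message() resolves the value sent to Bing via getattr(Tones, tone)
tone = "Copilot".lower()
print(getattr(Tones, tone))  # Balanced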
@@ -26,6 +26,7 @@ class Tones:
     creative = "Creative"
     balanced = "Balanced"
     precise = "Precise"
+    copilot = "Balanced"

 class Bing(AsyncGeneratorProvider, ProviderModelMixin):
     """
@@ -35,10 +36,8 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     supports_message_history = True
     supports_gpt_4 = True
-    default_model = Tones.balanced
-    models = [
-        getattr(Tones, key) for key in dir(Tones) if not key.startswith("__")
-    ]
+    default_model = "balanced"
+    models = [key for key in Tones.__dict__ if not key.startswith("__")]

     @classmethod
     def create_async_generator(
@@ -71,7 +70,7 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
         context = create_context(messages[:-1]) if len(messages) > 1 else None
         if tone is None:
             tone = tone if model.startswith("gpt-4") else model
-        tone = cls.get_model(tone)
+        tone = cls.get_model("" if tone is None else tone.lower())
         gpt4_turbo = True if model.startswith("gpt-4-turbo") else False

         return stream_generate(
@@ -136,31 +135,32 @@ class Defaults:
     ]

     sliceIds = {
-        "Balanced": [
+        "balanced": [
             "supllmnfe","archnewtf",
             "stpstream", "stpsig", "vnextvoicecf", "scmcbase", "cmcpupsalltf", "sydtransctrl",
             "thdnsrch", "220dcl1s0", "0215wcrwips0", "0305hrthrots0", "0130gpt4t",
             "bingfc", "0225unsticky1", "0228scss0",
             "defquerycf", "defcontrol", "3022tphpv"
         ],
-        "Creative": [
+        "creative": [
             "bgstream", "fltltst2c",
             "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
             "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
             "bingfccf", "0225unsticky1", "0228scss0",
             "3022tpvs0"
         ],
-        "Precise": [
+        "precise": [
             "bgstream", "fltltst2c",
             "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
             "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
             "bingfccf", "0225unsticky1", "0228scss0",
             "defquerycf", "3022tpvs0"
         ],
+        "copilot": []
     }

     optionsSets = {
-        "Balanced": [
+        "balanced": [
             "nlu_direct_response_filter", "deepleo",
             "disable_emoji_spoken_text", "responsible_ai_policy_235",
             "enablemm", "dv3sugg", "autosave",
@@ -168,7 +168,7 @@ class Defaults:
             "galileo", "saharagenconv5", "gldcl1p",
             "gpt4tmncnp"
         ],
-        "Creative": [
+        "creative": [
             "nlu_direct_response_filter", "deepleo",
             "disable_emoji_spoken_text", "responsible_ai_policy_235",
             "enablemm", "dv3sugg",
@@ -176,7 +176,7 @@ class Defaults:
             "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
             "gpt4tmncnp"
         ],
-        "Precise": [
+        "precise": [
             "nlu_direct_response_filter", "deepleo",
             "disable_emoji_spoken_text", "responsible_ai_policy_235",
             "enablemm", "dv3sugg",
@@ -184,6 +184,13 @@ class Defaults:
             "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
             "clgalileo", "gencontentv3"
         ],
+        "copilot": [
+            "nlu_direct_response_filter", "deepleo",
+            "disable_emoji_spoken_text", "responsible_ai_policy_235",
+            "enablemm", "dv3sugg",
+            "iyxapbing", "iycapbing",
+            "h3precise", "clgalileo", "gencontentv3", "prjupy"
+        ],
     }

     # Default location settings
@@ -264,7 +271,7 @@ def create_message(
             "allowedMessageTypes": Defaults.allowedMessageTypes,
             "sliceIds": Defaults.sliceIds[tone],
             "verbosity": "verbose",
-            "scenario": "SERP",
+            "scenario": "CopilotMicrosoftCom", # "SERP",
             "plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [],
             "traceId": get_random_hex(40),
             "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"],
@@ -282,8 +289,7 @@ def create_message(
                 "requestId": request_id,
                 "messageId": request_id
             },
-            "tone": tone,
+            "extraExtensionParameters": {"gpt-creator-persona": {"personaId": "copilot"}},
+            "tone": getattr(Tones, tone),
             "spokenTextMode": "None",
             "conversationId": conversation.conversationId,
             "participant": {"id": conversation.clientId}
@@ -322,6 +328,7 @@ async def stream_generate(
         gpt4_turbo: bool = False,
         timeout: int = 900,
         conversation: Conversation = None,
+        return_conversation: bool = False,
         raise_apology: bool = False,
         max_retries: int = 5,
         sleep_retry: int = 15,
@@ -344,10 +351,15 @@ async def stream_generate(
     async with ClientSession(
         timeout=ClientTimeout(total=timeout), connector=connector
     ) as session:
-        while conversation is None:
+        first = True
+        while first or conversation is None:
+            first = False
             do_read = True
             try:
-                conversation = await create_conversation(session, headers)
+                if conversation is None:
+                    conversation = await create_conversation(session, headers)
+                if return_conversation:
+                    yield conversation
             except ResponseStatusError as e:
                 max_retries -= 1
                 if max_retries < 1:
@@ -358,7 +370,7 @@ async def stream_generate(
                     await asyncio.sleep(sleep_retry)
                     continue

-            image_request = await upload_image(session, image, tone, headers) if image else None
+            image_request = await upload_image(session, image, getattr(Tones, tone), headers) if image else None
             async with session.ws_connect(
                 'wss://sydney.bing.com/sydney/ChatHub',
                 autoping=False,
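stream_generate also gains a return_conversation flag: the reworked loop above creates a Conversation only when none was passed in and yields it to the caller before any text chunks, so the caller can store it and send a follow-up turn in the same Bing conversation. A rough consumer-side sketch of that flow; the g4f.Provider.Bing import path, the positional prompt argument, and the ask/main helpers are illustrative assumptions, and all other stream_generate arguments are left at their defaults:

import asyncio

# Hypothetical import path; the exact module layout may differ.
from g4f.Provider.Bing import Conversation, stream_generate

async def ask(prompt: str, conversation: Conversation = None) -> Conversation:
    # Stream the answer to stdout and return the Conversation object,
    # which stream_generate yields first when return_conversation=True.
    async for item in stream_generate(
        prompt,
        tone="copilot",             # the tone added by this commit
        conversation=conversation,  # pass a previous Conversation to continue it
        return_conversation=True,
    ):
        if isinstance(item, Conversation):
            conversation = item
        else:
            print(item, end="", flush=True)
    return conversation

async def main():
    conv = await ask("Hello!")
    await ask("And a follow-up in the same conversation.", conv)

asyncio.run(main())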
@@ -811,7 +811,7 @@ async function on_api() {

     register_settings_storage();

-    versions = await api('version')
+    versions = await api("version");
     document.title = 'g4f - ' + versions["version"];
     let text = "version ~ "
     if (versions["version"] != versions["latest_version"]) {
@@ -825,19 +825,19 @@ async function on_api() {

     models = await api("models");
     models.forEach((model) => {
-        let option = document.createElement('option');
+        let option = document.createElement("option");
         option.value = option.text = model;
         modelSelect.appendChild(option);
     });

-    providers = await api('providers')
+    providers = await api("providers")
     providers.forEach((provider) => {
-        let option = document.createElement('option');
+        let option = document.createElement("option");
         option.value = option.text = provider;
         providerSelect.appendChild(option);
     })

-    load_provider_models();
+    await load_provider_models(appStorage.getItem("provider"));
     load_settings_storage()
 }
@@ -965,8 +965,11 @@ async function read_response(response) {
         }
     }

-async function load_provider_models() {
-    const provider = providerSelect.options[providerSelect.selectedIndex].value;
+async function load_provider_models(providerIndex=null) {
+    if (!providerIndex) {
+        providerIndex = providerSelect.selectedIndex;
+    }
+    const provider = providerSelect.options[providerIndex].value;
     if (!provider) {
         return;
     }
@@ -986,7 +989,7 @@ async function load_provider_models() {
         modelSelect.classList.remove("hidden");
     }
 };
-providerSelect.addEventListener("change", load_provider_models)
+providerSelect.addEventListener("change", () => load_provider_models());

 function save_storage() {
     let filename = new Date().toLocaleString()