Mirror of https://github.com/xtekky/gpt4free.git (synced 2024-11-10 19:11:01 +00:00)

Add conversation support for Bing

parent f861f322aa
commit 4778356064
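In outline, this change lets a caller keep a Bing Conversation alive across requests: the provider can yield its Conversation object mid-stream, and a later call that passes it back skips conversation creation, sends "isStartOfSession": false, and omits the context block. A rough usage sketch, assuming Bing.create_async_generator forwards the conversation and return_conversation kwargs down to stream_generate (the **kwargs passthrough in the diff suggests it does; this is not a documented API):

    # Sketch only: entry point and kwargs are assumptions based on this diff.
    # Requires working Bing cookies and network access.
    import asyncio
    from g4f.Provider import Bing
    from g4f.Provider.Bing import Conversation

    async def main():
        conversation = None
        # First turn: return_conversation=True makes stream_generate yield the
        # Conversation object before the response text.
        async for chunk in Bing.create_async_generator(
            "gpt-4", [{"role": "user", "content": "Remember the number 42."}],
            return_conversation=True,
        ):
            if isinstance(chunk, Conversation):
                conversation = chunk          # keep it for the follow-up turn
            else:
                print(chunk, end="")
        # Second turn: passing the conversation back reuses the server-side
        # chat; the request goes out with "isStartOfSession": False and no
        # context block.
        async for chunk in Bing.create_async_generator(
            "gpt-4", [{"role": "user", "content": "Which number was it?"}],
            conversation=conversation,
        ):
            if not isinstance(chunk, Conversation):
                print(chunk, end="")

    asyncio.run(main())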
@@ -51,6 +51,7 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
         tone: str = None,
         image: ImageType = None,
         web_search: bool = False,
+        context: str = None,
         **kwargs
     ) -> AsyncResult:
         """
@@ -67,7 +68,8 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
         :return: An asynchronous result object.
         """
         prompt = messages[-1]["content"]
-        context = create_context(messages[:-1]) if len(messages) > 1 else None
+        if context is None:
+            context = create_context(messages[:-1]) if len(messages) > 1 else None
         if tone is None:
             tone = tone if model.startswith("gpt-4") else model
         tone = cls.get_model("" if tone is None else tone.lower())
@@ -126,7 +128,7 @@ class Defaults:
         "ActionRequest","Chat",
         "ConfirmationCard", "Context",
         "InternalSearchQuery", #"InternalSearchResult",
-        "Disengaged", #"InternalLoaderMessage",
+        #"Disengaged", "InternalLoaderMessage",
         "Progress", "RenderCardRequest",
         "RenderContentRequest", "AdsQuery",
         "SemanticSerp", "GenerateContentQuery",
@@ -160,30 +162,62 @@ class Defaults:
     }

     optionsSets = {
-        "balanced": [
-            "nlu_direct_response_filter", "deepleo",
-            "disable_emoji_spoken_text", "responsible_ai_policy_235",
-            "enablemm", "dv3sugg", "autosave",
-            "iyxapbing", "iycapbing",
-            "galileo", "saharagenconv5", "gldcl1p",
-            "gpt4tmncnp"
-        ],
-        "creative": [
-            "nlu_direct_response_filter", "deepleo",
-            "disable_emoji_spoken_text", "responsible_ai_policy_235",
-            "enablemm", "dv3sugg",
-            "iyxapbing", "iycapbing",
-            "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
-            "gpt4tmncnp"
-        ],
-        "precise": [
-            "nlu_direct_response_filter", "deepleo",
-            "disable_emoji_spoken_text", "responsible_ai_policy_235",
-            "enablemm", "dv3sugg",
-            "iyxapbing", "iycapbing",
-            "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
-            "clgalileo", "gencontentv3"
-        ],
+        "balanced": {
+            "default": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "galileo", "saharagenconv5", "gldcl1p",
+                "gpt4tmncnp"
+            ],
+            "nosearch": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "galileo", "sunoupsell", "base64filter", "uprv4p1upd",
+                "hourthrot", "noctprf", "gndlogcf", "nosearchall"
+            ]
+        },
+        "creative": {
+            "default": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg",
+                "iyxapbing", "iycapbing",
+                "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
+                "gpt4tmncnp"
+            ],
+            "nosearch": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "h3imaginative", "sunoupsell", "base64filter", "uprv4p1upd",
+                "hourthrot", "noctprf", "gndlogcf", "nosearchall",
+                "clgalileo", "nocache", "up4rp14bstcst"
+            ]
+        },
+        "precise": {
+            "default": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg",
+                "iyxapbing", "iycapbing",
+                "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
+                "clgalileo", "gencontentv3"
+            ],
+            "nosearch": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "h3precise", "sunoupsell", "base64filter", "uprv4p1upd",
+                "hourthrot", "noctprf", "gndlogcf", "nosearchall",
+                "clgalileo", "nocache", "up4rp14bstcst"
+            ]
+        },
         "copilot": [
             "nlu_direct_response_filter", "deepleo",
             "disable_emoji_spoken_text", "responsible_ai_policy_235",
@@ -244,7 +278,8 @@ def create_message(
     context: str = None,
     image_request: ImageRequest = None,
     web_search: bool = False,
-    gpt4_turbo: bool = False
+    gpt4_turbo: bool = False,
+    new_conversation: bool = True
 ) -> str:
     """
     Creates a message for the Bing API with specified parameters.
@@ -259,7 +294,12 @@ def create_message(
     :return: A formatted string message for the Bing API.
     """

-    options_sets = []
+    options_sets = Defaults.optionsSets[tone]
+    if not web_search and "nosearch" in options_sets:
+        options_sets = options_sets["nosearch"]
+    elif "default" in options_sets:
+        options_sets = options_sets["default"]
+    options_sets = options_sets.copy()
     if gpt4_turbo:
         options_sets.append("dlgpt4t")

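One subtlety in the hunk above: after this commit optionsSets mixes two shapes, nested dicts for the Bing tones and a flat list for "copilot" (left unchanged in the next hunk's context). The membership tests handle both because `in` checks keys on a dict but elements on a list, so the copilot list falls through both branches and is used as-is. A minimal standalone illustration with toy data, not from the repo:

    # `x in dict` tests keys; `x in list` tests elements.
    nested = {"default": ["flag_a"], "nosearch": ["flag_b"]}
    flat = ["nlu_direct_response_filter", "deepleo"]

    assert "nosearch" in nested      # dict: picks the no-search variant
    assert "nosearch" not in flat    # list: no match, list used unchanged
    assert "default" in nested       # dict: web-search variant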
@@ -267,7 +307,7 @@ def create_message(
     struct = {
         "arguments":[{
             "source": "cib",
-            "optionsSets": [*Defaults.optionsSets[tone], *options_sets],
+            "optionsSets": options_sets,
             "allowedMessageTypes": Defaults.allowedMessageTypes,
             "sliceIds": Defaults.sliceIds[tone],
             "verbosity": "verbose",
@@ -276,7 +316,7 @@ def create_message(
             "traceId": get_random_hex(40),
             "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"],
             "gptId": "copilot",
-            "isStartOfSession": True,
+            "isStartOfSession": new_conversation,
             "requestId": request_id,
             "message":{
                 **Defaults.location,
@@ -330,7 +370,7 @@ async def stream_generate(
         conversation: Conversation = None,
         return_conversation: bool = False,
         raise_apology: bool = False,
-        max_retries: int = 5,
+        max_retries: int = None,
         sleep_retry: int = 15,
         **kwargs
 ):
@@ -348,6 +388,8 @@ async def stream_generate(
     :return: An asynchronous generator yielding responses.
     """
     headers = create_headers(cookies)
+    new_conversation = conversation is None
+    max_retries = (5 if new_conversation else 0) if max_retries is None else max_retries
     async with ClientSession(
         timeout=ClientTimeout(total=timeout), connector=connector
     ) as session:
@@ -357,9 +399,9 @@ async def stream_generate(
         do_read = True
         try:
             if conversation is None:
-                conversation = await create_conversation(session, headers)
+                conversation = await create_conversation(session, headers, tone)
                 if return_conversation:
                     yield conversation
         except ResponseStatusError as e:
             max_retries -= 1
             if max_retries < 1:
@@ -372,6 +414,8 @@ async def stream_generate(

         image_request = await upload_image(session, image, getattr(Tones, tone), headers) if image else None
         async with session.ws_connect(
+            'wss://s.copilot.microsoft.com/sydney/ChatHub'
+            if tone == "copilot" else
             'wss://sydney.bing.com/sydney/ChatHub',
             autoping=False,
             params={'sec_access_token': conversation.conversationSignature},
@@ -380,7 +424,12 @@ async def stream_generate(
             await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
             await wss.send_str(format_message({"type": 6}))
             await wss.receive(timeout=timeout)
-            await wss.send_str(create_message(conversation, prompt, tone, context, image_request, web_search, gpt4_turbo))
+            await wss.send_str(create_message(
+                conversation, prompt, tone,
+                context if new_conversation else None,
+                image_request, web_search, gpt4_turbo,
+                new_conversation
+            ))
             response_txt = ''
             returned_text = ''
             message_id = None
@@ -20,7 +20,7 @@ class Conversation:
         self.clientId = clientId
         self.conversationSignature = conversationSignature

-async def create_conversation(session: ClientSession, headers: dict) -> Conversation:
+async def create_conversation(session: ClientSession, headers: dict, tone: str) -> Conversation:
     """
     Create a new conversation asynchronously.

@@ -31,7 +31,10 @@ async def create_conversation(session: ClientSession, headers: dict) -> Conversation:
     Returns:
         Conversation: An instance representing the created conversation.
     """
-    url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1"
+    if tone == "copilot":
+        url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1634.3-nodesign2"
+    else:
+        url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1"
     async with session.get(url, headers=headers) as response:
         await raise_for_status(response, "Failed to create conversation")
         data = await response.json()
@@ -198,7 +198,9 @@ const prepare_messages = (messages, filter_last_message=true) => {
 }

 async function add_message_chunk(message) {
-    if (message.type == "provider") {
+    if (message.type == "conversation") {
+        console.info("Conversation used:", message.conversation)
+    } else if (message.type == "provider") {
         window.provider_result = message.provider;
         window.content.querySelector('.provider').innerHTML = `
             <a href="${message.provider.url}" target="_blank">
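For context, `message` here is one JSON event from the Python API's stream. Judging from the `_format_json` calls later in this diff and the fields this handler reads, the events presumably look like the following (shapes inferred from usage, not taken from the repo):

    # Inferred event shapes; the exact layout produced by _format_json is an
    # assumption based on the fields read in the JS handler above.
    events = [
        {"type": "conversation", "conversation": "<conversation_id>"},  # new in this commit
        {"type": "provider", "provider": {"name": "Bing", "url": "https://bing.com/chat"}},
        {"type": "content", "content": "partial response text"},
        {"type": "error", "error": "error message"},
    ]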
@@ -13,8 +13,12 @@ from g4f.errors import VersionNotFoundError
 from g4f.Provider import ProviderType, __providers__, __map__
 from g4f.providers.base_provider import ProviderModelMixin
 from g4f.Provider.bing.create_images import patch_provider
+from g4f.Provider.Bing import Conversation
+
+conversations: dict[str, Conversation] = {}
+
+class Api():

-class Api():
     def get_models(self) -> list[str]:
         """
         Return a list of all models.
@@ -73,7 +77,8 @@ class Api():
     def get_conversation(self, options: dict, **kwargs) -> Iterator:
         window = webview.active_window()
         for message in self._create_response_stream(
-            self._prepare_conversation_kwargs(options, kwargs)
+            self._prepare_conversation_kwargs(options, kwargs),
+            options.get("conversation_id")
         ):
             window.evaluate_js(f"this.add_message_chunk({json.dumps(message)})")

@@ -101,6 +106,10 @@ class Api():
             from .internet import get_search_message
             messages[-1]["content"] = get_search_message(messages[-1]["content"])

+        conversation_id = json_data.get("conversation_id")
+        if conversation_id and conversation_id in conversations:
+            kwargs["conversation"] = conversations[conversation_id]
+
         model = json_data.get('model')
         model = model if model else models.default
         patch = patch_provider if json_data.get('patch_provider') else None
@@ -112,10 +121,11 @@ class Api():
             "stream": True,
             "ignore_stream": True,
             "patch_provider": patch,
+            "return_conversation": True,
             **kwargs
         }

-    def _create_response_stream(self, kwargs) -> Iterator:
+    def _create_response_stream(self, kwargs, conversation_id: str) -> Iterator:
         """
         Creates and returns a streaming response for the conversation.

@@ -133,12 +143,15 @@ class Api():
             for chunk in ChatCompletion.create(**kwargs):
                 if first:
                     first = False
-                    yield self._format_json('provider', get_last_provider(True))
-                if isinstance(chunk, Exception):
+                    yield self._format_json("provider", get_last_provider(True))
+                if isinstance(chunk, Conversation):
+                    conversations[conversation_id] = chunk
+                    yield self._format_json("conversation", conversation_id)
+                elif isinstance(chunk, Exception):
                     logging.exception(chunk)
-                    yield self._format_json('message', get_error_message(chunk))
+                    yield self._format_json("message", get_error_message(chunk))
                 else:
-                    yield self._format_json('content', chunk)
+                    yield self._format_json("content", chunk)
         except Exception as e:
             logging.exception(e)
             yield self._format_json('error', get_error_message(e))
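Taken together, the GUI side now keeps a module-level cache keyed by the client's conversation_id: _prepare_conversation_kwargs injects a cached Conversation back into the provider kwargs, and _create_response_stream stores whatever Conversation the provider yields. A toy, self-contained model of that round trip (names follow the diff; everything else is simplified):

    # Toy model of the GUI conversation cache; Conversation is stubbed out.
    class Conversation:                   # stand-in for g4f.Provider.Bing.Conversation
        def __init__(self, signature: str):
            self.conversationSignature = signature

    conversations: dict[str, Conversation] = {}

    def prepare_kwargs(json_data: dict) -> dict:
        kwargs = {"return_conversation": True}      # always ask for it back
        conversation_id = json_data.get("conversation_id")
        if conversation_id and conversation_id in conversations:
            kwargs["conversation"] = conversations[conversation_id]
        return kwargs

    def handle_chunk(conversation_id: str, chunk) -> None:
        if isinstance(chunk, Conversation):         # provider yielded its state
            conversations[conversation_id] = chunk

    # First request from a chat tab: cache empty, provider yields a Conversation.
    assert "conversation" not in prepare_kwargs({"conversation_id": "tab-1"})
    handle_chunk("tab-1", Conversation("sig-abc"))
    # Second request from the same tab picks the cached object up again.
    assert prepare_kwargs({"conversation_id": "tab-1"})["conversation"].conversationSignature == "sig-abc"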
@@ -85,7 +85,7 @@ class Backend_Api(Api):
         kwargs = self._prepare_conversation_kwargs(json_data, kwargs)

         return self.app.response_class(
-            self._create_response_stream(kwargs),
+            self._create_response_stream(kwargs, json_data.get("conversation_id")),
             mimetype='text/event-stream'
         )
