Mirror of https://github.com/xtekky/gpt4free.git (synced 2024-11-10 19:11:01 +00:00)
Add conversation support for Bing
parent f861f322aa
commit 4778356064
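A rough usage sketch of what this commit enables (not code from the diff itself): the Bing provider can now hand its Conversation object back to the caller and accept it again on the next request, so a chat keeps its server-side state instead of starting over. The return_conversation and conversation kwargs are the ones stream_generate gains below; that ChatCompletion.create forwards them unchanged through Bing.create_async_generator is an assumption, mirroring what the GUI code in this commit does.

    # Hedged sketch: assumes return_conversation / conversation are forwarded
    # through Bing.create_async_generator down to stream_generate.
    from g4f import ChatCompletion
    from g4f.Provider import Bing
    from g4f.Provider.Bing import Conversation

    conversation = None
    for chunk in ChatCompletion.create(
        model="gpt-4",
        provider=Bing,
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
        return_conversation=True,  # provider yields its Conversation before any text
    ):
        if isinstance(chunk, Conversation):
            conversation = chunk   # keep it for the follow-up request
        else:
            print(chunk, end="")

    # Reusing the stored conversation: isStartOfSession becomes False and the
    # chat-history context is not re-sent (see create_message in the diff below).
    for chunk in ChatCompletion.create(
        model="gpt-4",
        provider=Bing,
        messages=[{"role": "user", "content": "What did I just say?"}],
        stream=True,
        conversation=conversation,
    ):
        print(chunk, end="")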
@@ -51,6 +51,7 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
         tone: str = None,
         image: ImageType = None,
         web_search: bool = False,
+        context: str = None,
         **kwargs
     ) -> AsyncResult:
         """
@@ -67,7 +68,8 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
         :return: An asynchronous result object.
         """
         prompt = messages[-1]["content"]
-        context = create_context(messages[:-1]) if len(messages) > 1 else None
+        if context is None:
+            context = create_context(messages[:-1]) if len(messages) > 1 else None
         if tone is None:
             tone = tone if model.startswith("gpt-4") else model
         tone = cls.get_model("" if tone is None else tone.lower())
@@ -126,7 +128,7 @@ class Defaults:
         "ActionRequest","Chat",
         "ConfirmationCard", "Context",
         "InternalSearchQuery", #"InternalSearchResult",
-        "Disengaged", #"InternalLoaderMessage",
+        #"Disengaged", "InternalLoaderMessage",
         "Progress", "RenderCardRequest",
         "RenderContentRequest", "AdsQuery",
         "SemanticSerp", "GenerateContentQuery",
@@ -160,30 +162,62 @@ class Defaults:
     }
 
     optionsSets = {
-        "balanced": [
-            "nlu_direct_response_filter", "deepleo",
-            "disable_emoji_spoken_text", "responsible_ai_policy_235",
-            "enablemm", "dv3sugg", "autosave",
-            "iyxapbing", "iycapbing",
-            "galileo", "saharagenconv5", "gldcl1p",
-            "gpt4tmncnp"
-        ],
-        "creative": [
-            "nlu_direct_response_filter", "deepleo",
-            "disable_emoji_spoken_text", "responsible_ai_policy_235",
-            "enablemm", "dv3sugg",
-            "iyxapbing", "iycapbing",
-            "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
-            "gpt4tmncnp"
-        ],
-        "precise": [
-            "nlu_direct_response_filter", "deepleo",
-            "disable_emoji_spoken_text", "responsible_ai_policy_235",
-            "enablemm", "dv3sugg",
-            "iyxapbing", "iycapbing",
-            "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
-            "clgalileo", "gencontentv3"
-        ],
+        "balanced": {
+            "default": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "galileo", "saharagenconv5", "gldcl1p",
+                "gpt4tmncnp"
+            ],
+            "nosearch": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "galileo", "sunoupsell", "base64filter", "uprv4p1upd",
+                "hourthrot", "noctprf", "gndlogcf", "nosearchall"
+            ]
+        },
+        "creative": {
+            "default": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg",
+                "iyxapbing", "iycapbing",
+                "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
+                "gpt4tmncnp"
+            ],
+            "nosearch": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "h3imaginative", "sunoupsell", "base64filter", "uprv4p1upd",
+                "hourthrot", "noctprf", "gndlogcf", "nosearchall",
+                "clgalileo", "nocache", "up4rp14bstcst"
+            ]
+        },
+        "precise": {
+            "default": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg",
+                "iyxapbing", "iycapbing",
+                "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
+                "clgalileo", "gencontentv3"
+            ],
+            "nosearch": [
+                "nlu_direct_response_filter", "deepleo",
+                "disable_emoji_spoken_text", "responsible_ai_policy_235",
+                "enablemm", "dv3sugg", "autosave",
+                "iyxapbing", "iycapbing",
+                "h3precise", "sunoupsell", "base64filter", "uprv4p1upd",
+                "hourthrot", "noctprf", "gndlogcf", "nosearchall",
+                "clgalileo", "nocache", "up4rp14bstcst"
+            ]
+        },
         "copilot": [
             "nlu_direct_response_filter", "deepleo",
             "disable_emoji_spoken_text", "responsible_ai_policy_235",
@@ -244,7 +278,8 @@ def create_message(
     context: str = None,
     image_request: ImageRequest = None,
     web_search: bool = False,
-    gpt4_turbo: bool = False
+    gpt4_turbo: bool = False,
+    new_conversation: bool = True
 ) -> str:
     """
     Creates a message for the Bing API with specified parameters.
@@ -259,7 +294,12 @@ def create_message(
     :return: A formatted string message for the Bing API.
     """
 
-    options_sets = []
+    options_sets = Defaults.optionsSets[tone]
+    if not web_search and "nosearch" in options_sets:
+        options_sets = options_sets["nosearch"]
+    elif "default" in options_sets:
+        options_sets = options_sets["default"]
+    options_sets = options_sets.copy()
     if gpt4_turbo:
         options_sets.append("dlgpt4t")
 
@@ -267,7 +307,7 @@ def create_message(
     struct = {
         "arguments":[{
             "source": "cib",
-            "optionsSets": [*Defaults.optionsSets[tone], *options_sets],
+            "optionsSets": options_sets,
             "allowedMessageTypes": Defaults.allowedMessageTypes,
             "sliceIds": Defaults.sliceIds[tone],
             "verbosity": "verbose",
@@ -276,7 +316,7 @@ def create_message(
             "traceId": get_random_hex(40),
             "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"],
             "gptId": "copilot",
-            "isStartOfSession": True,
+            "isStartOfSession": new_conversation,
             "requestId": request_id,
             "message":{
                 **Defaults.location,
@@ -330,7 +370,7 @@ async def stream_generate(
     conversation: Conversation = None,
     return_conversation: bool = False,
     raise_apology: bool = False,
-    max_retries: int = 5,
+    max_retries: int = None,
     sleep_retry: int = 15,
     **kwargs
 ):
@@ -348,6 +388,8 @@ async def stream_generate(
     :return: An asynchronous generator yielding responses.
     """
     headers = create_headers(cookies)
+    new_conversation = conversation is None
+    max_retries = (5 if new_conversation else 0) if max_retries is None else max_retries
     async with ClientSession(
         timeout=ClientTimeout(total=timeout), connector=connector
     ) as session:
@@ -357,9 +399,9 @@ async def stream_generate(
             do_read = True
             try:
                 if conversation is None:
-                    conversation = await create_conversation(session, headers)
-                    if return_conversation:
-                        yield conversation
+                    conversation = await create_conversation(session, headers, tone)
+                    if return_conversation:
+                        yield conversation
             except ResponseStatusError as e:
                 max_retries -= 1
                 if max_retries < 1:
@@ -372,6 +414,8 @@ async def stream_generate(
 
         image_request = await upload_image(session, image, getattr(Tones, tone), headers) if image else None
         async with session.ws_connect(
+            'wss://s.copilot.microsoft.com/sydney/ChatHub'
+            if tone == "copilot" else
             'wss://sydney.bing.com/sydney/ChatHub',
             autoping=False,
             params={'sec_access_token': conversation.conversationSignature},
@@ -380,7 +424,12 @@ async def stream_generate(
             await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
             await wss.send_str(format_message({"type": 6}))
             await wss.receive(timeout=timeout)
-            await wss.send_str(create_message(conversation, prompt, tone, context, image_request, web_search, gpt4_turbo))
+            await wss.send_str(create_message(
+                conversation, prompt, tone,
+                context if new_conversation else None,
+                image_request, web_search, gpt4_turbo,
+                new_conversation
+            ))
             response_txt = ''
             returned_text = ''
             message_id = None
@@ -20,7 +20,7 @@ class Conversation:
         self.clientId = clientId
         self.conversationSignature = conversationSignature
 
-async def create_conversation(session: ClientSession, headers: dict) -> Conversation:
+async def create_conversation(session: ClientSession, headers: dict, tone: str) -> Conversation:
     """
     Create a new conversation asynchronously.
 
@@ -31,7 +31,10 @@ async def create_conversation(session: ClientSession, headers: dict) -> Conversation:
     Returns:
         Conversation: An instance representing the created conversation.
     """
-    url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1"
+    if tone == "copilot":
+        url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1634.3-nodesign2"
+    else:
+        url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1"
     async with session.get(url, headers=headers) as response:
         await raise_for_status(response, "Failed to create conversation")
         data = await response.json()
@ -198,7 +198,9 @@ const prepare_messages = (messages, filter_last_message=true) => {
|
||||
}
|
||||
|
||||
async function add_message_chunk(message) {
|
||||
if (message.type == "provider") {
|
||||
if (message.type == "conversation") {
|
||||
console.info("Conversation used:", message.conversation)
|
||||
} else if (message.type == "provider") {
|
||||
window.provider_result = message.provider;
|
||||
window.content.querySelector('.provider').innerHTML = `
|
||||
<a href="${message.provider.url}" target="_blank">
|
||||
|
@@ -13,8 +13,12 @@ from g4f.errors import VersionNotFoundError
 from g4f.Provider import ProviderType, __providers__, __map__
 from g4f.providers.base_provider import ProviderModelMixin
 from g4f.Provider.bing.create_images import patch_provider
+from g4f.Provider.Bing import Conversation
+
+conversations: dict[str, Conversation] = {}
+
 class Api():
 
     def get_models(self) -> list[str]:
         """
         Return a list of all models.
@@ -73,7 +77,8 @@ class Api():
     def get_conversation(self, options: dict, **kwargs) -> Iterator:
         window = webview.active_window()
         for message in self._create_response_stream(
-            self._prepare_conversation_kwargs(options, kwargs)
+            self._prepare_conversation_kwargs(options, kwargs),
+            options.get("conversation_id")
         ):
             window.evaluate_js(f"this.add_message_chunk({json.dumps(message)})")
 
@@ -101,6 +106,10 @@ class Api():
             from .internet import get_search_message
             messages[-1]["content"] = get_search_message(messages[-1]["content"])
 
+        conversation_id = json_data.get("conversation_id")
+        if conversation_id and conversation_id in conversations:
+            kwargs["conversation"] = conversations[conversation_id]
+
         model = json_data.get('model')
         model = model if model else models.default
         patch = patch_provider if json_data.get('patch_provider') else None
@@ -112,10 +121,11 @@ class Api():
             "stream": True,
             "ignore_stream": True,
             "patch_provider": patch,
+            "return_conversation": True,
            **kwargs
        }
 
-    def _create_response_stream(self, kwargs) -> Iterator:
+    def _create_response_stream(self, kwargs, conversation_id: str) -> Iterator:
        """
        Creates and returns a streaming response for the conversation.
 
@@ -133,12 +143,15 @@ class Api():
             for chunk in ChatCompletion.create(**kwargs):
                 if first:
                     first = False
-                    yield self._format_json('provider', get_last_provider(True))
-                if isinstance(chunk, Exception):
+                    yield self._format_json("provider", get_last_provider(True))
+                if isinstance(chunk, Conversation):
+                    conversations[conversation_id] = chunk
+                    yield self._format_json("conversation", conversation_id)
+                elif isinstance(chunk, Exception):
                     logging.exception(chunk)
-                    yield self._format_json('message', get_error_message(chunk))
+                    yield self._format_json("message", get_error_message(chunk))
                 else:
-                    yield self._format_json('content', chunk)
+                    yield self._format_json("content", chunk)
         except Exception as e:
             logging.exception(e)
             yield self._format_json('error', get_error_message(e))
@@ -85,7 +85,7 @@ class Backend_Api(Api):
         kwargs = self._prepare_conversation_kwargs(json_data, kwargs)
 
         return self.app.response_class(
-            self._create_response_stream(kwargs),
+            self._create_response_stream(kwargs, json_data.get("conversation_id")),
             mimetype='text/event-stream'
         )
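For orientation, the GUI pieces fit together roughly like this: the backend hands the request's conversation_id to _create_response_stream, which caches any Conversation chunk yielded by the provider and emits a "conversation" event that the new add_message_chunk branch logs; a later request with the same conversation_id then has kwargs["conversation"] filled from that cache. A condensed, illustrative restatement (event payload shapes are guessed from the _format_json calls in this diff, not taken verbatim):

    # Illustrative only: condenses the api.py changes in this commit.
    from g4f.Provider.Bing import Conversation

    conversations: dict[str, Conversation] = {}  # per-chat cache added by this commit

    def stream_events(conversation_id: str, chunks):
        for chunk in chunks:
            if isinstance(chunk, Conversation):
                # Store it so the next request with the same id reuses the Bing session.
                conversations[conversation_id] = chunk
                yield {"type": "conversation", "conversation": conversation_id}
            elif isinstance(chunk, Exception):
                yield {"type": "message", "message": str(chunk)}
            else:
                yield {"type": "content", "content": chunk}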