mirror of https://github.com/xtekky/gpt4free.git
synced 2024-11-17 09:25:50 +00:00

Add Myshell Provider
Remove auto proxy prefix

This commit is contained in:
parent 21d27d9e62
commit f1b6880f7e

g4f/Provider/Liaobots.py

@@ -46,8 +46,6 @@ class Liaobots(AsyncGeneratorProvider):
         **kwargs
     ) -> AsyncGenerator:
         model = model if model in models else "gpt-3.5-turbo"
-        if proxy and "://" not in proxy:
-            proxy = f"http://{proxy}"
         headers = {
             "authority": "liaobots.com",
             "content-type": "application/json",
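
With the automatic prefix gone, a proxy must now arrive with its scheme already attached. A minimal caller-side sketch of the normalization this commit removes (the helper name is hypothetical, not part of the repository):

    def normalize_proxy(proxy: str | None) -> str | None:
        # Hypothetical helper: prepend a scheme only when none is present,
        # mirroring the behavior removed from the providers in this commit.
        if proxy and "://" not in proxy:
            return f"http://{proxy}"
        return proxy

    # normalize_proxy("127.0.0.1:8080") -> "http://127.0.0.1:8080"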

g4f/Provider/Myshell.py  172 lines  (new file)

@@ -0,0 +1,172 @@
from __future__ import annotations

import json, uuid, hashlib, time, random

from aiohttp import ClientSession
from aiohttp.http import WSMsgType
import asyncio

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt


models = {
    "samantha": "1e3be7fe89e94a809408b1154a2ee3e1",
    "gpt-3.5-turbo": "8077335db7cd47e29f7de486612cc7fd",
    "gpt-4": "01c8de4fbfc548df903712b0922a4e01",
}
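# The hex ids above are MyShell bot uids, sent as "botUid" with each request;
# "samantha" is the fallback bot when no model name is supplied.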


class Myshell(AsyncGeneratorProvider):
    url = "https://app.myshell.ai/chat"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        if not model:
            bot_id = models["samantha"]
        elif model in models:
            bot_id = models[model]
        else:
            raise ValueError(f"Model is not supported: {model}")

        user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'
        visitor_id = generate_visitor_id(user_agent)

        async with ClientSession(
            headers={'User-Agent': user_agent}
        ) as session:
            async with session.ws_connect(
                "wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
                autoping=False,
                timeout=90
            ) as wss:
                # Send and receive hello message
                await wss.receive_str()
                message = json.dumps({"token": None, "visitorId": visitor_id})
                await wss.send_str(f"40/chat,{message}")
                await wss.receive_str()
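                # Protocol note: the endpoint speaks Socket.IO over Engine.IO v4
                # (the EIO=4 query parameter). "40/chat,{...}" is the CONNECT
                # packet for the "/chat" namespace, "42/chat,[...]" is an EVENT
                # packet, and the bare "2"/"3" frames handled below are the
                # Engine.IO ping/pong heartbeat.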

                # Wait before sending to avoid a "need_verify_captcha" response
                await asyncio.sleep(5)

                # Create chat message
                text = format_prompt(messages)
                chat_data = json.dumps(["text_chat", {
                    "reqId": str(uuid.uuid4()),
                    "botUid": bot_id,
                    "sourceFrom": "myshellWebsite",
                    "text": text,
                    **generate_signature(text)
                }])

                # Send chat message
                chat_start = "42/chat,"
                chat_message = f"{chat_start}{chat_data}"
                await wss.send_str(chat_message)

                # Receive messages
                async for message in wss:
                    if message.type != WSMsgType.TEXT:
                        continue
                    # Answer Engine.IO pings so the server keeps the socket open
                    if message.data == "2":
                        await wss.send_str("3")
                        continue
                    # Skip frames that are not "/chat" event packets
                    if not message.data.startswith(chat_start):
                        continue
                    data_type, data = json.loads(message.data[len(chat_start):])
                    if data_type == "text_stream":
                        if data["data"]["text"]:
                            yield data["data"]["text"]
                        elif data["data"]["isFinal"]:
                            break
                    elif data_type in ("message_replied", "need_verify_captcha"):
                        raise RuntimeError(f"Received unexpected message: {data_type}")


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
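    # Note: stacking @classmethod on top of @property works on Python 3.9/3.10,
    # but the combination is deprecated in 3.11 and removed in 3.13.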


def generate_timestamp() -> str:
    # The site expects the millisecond timestamp without its last digit,
    # followed by a weighted digit-sum check digit. The time is captured
    # once so the prefix and the check digit come from the same reading.
    base = str(int(time.time() * 1000))[:-1]
    checksum = sum(
        2 * int(digit) if idx % 2 == 0 else 3 * int(digit)
        for idx, digit in enumerate(base)
    ) % 10
    return base + str(checksum)

def generate_signature(text: str):
    timestamp = generate_timestamp()
    version = 'v1.0.0'
    secret = '8@VXGK3kKHr!u2gA'
    # Signature is the reversed hex MD5 of "version#text#timestamp#secret"
    data = f"{version}#{text}#{timestamp}#{secret}"
    signature = hashlib.md5(data.encode()).hexdigest()
    signature = signature[::-1]
    return {
        "signature": signature,
        "timestamp": timestamp,
        "version": version
    }
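# Example shape of the mapping generate_signature returns (values vary per call):
# {"signature": "<reversed md5 hex>", "timestamp": "169...", "version": "v1.0.0"}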

def xor_hash(B: str):
    # Folds a string into one integer: character codes are packed
    # little-endian in groups of up to four and XOR-ed into an accumulator.
    r = []
    i = 0

    def o(e, t):
        # XOR the little-endian packed bytes of t into e
        o_val = 0
        for idx in range(len(t)):
            o_val |= t[idx] << (8 * idx)
        return e ^ o_val

    for e in range(len(B)):
        t = ord(B[e])
        r.insert(0, 255 & t)

        if len(r) >= 4:
            i = o(i, r)
            r = []

    if len(r) > 0:
        i = o(i, r)

    return hex(i)[2:]

def performance() -> str:
    # Busy-waits until the millisecond clock ticks over, then returns the
    # timestamp and the loop count as concatenated hex strings.
    t = int(time.time() * 1000)
    e = 0
    while t == int(time.time() * 1000):
        e += 1
    return hex(t)[2:] + hex(e)[2:]

def generate_visitor_id(user_agent: str) -> str:
    # Builds a fingerprint-style id: timing component, random component,
    # user-agent hash, screen area (1920x1080), timing component again.
    f = performance()
    r = hex(int(random.random() * (16**16)))[2:-2]
    d = xor_hash(user_agent)
    e = hex(1080 * 1920)[2:]
    return f"{f}-{r}-{d}-{e}-{f}"
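
For reference, a minimal sketch of driving the new provider once the package is importable (the call site is illustrative, not part of the commit):

    import asyncio
    from g4f.Provider import Myshell

    async def main():
        messages = [{"role": "user", "content": "Hello!"}]
        # create_async_generator is an async generator classmethod,
        # so it can be iterated directly
        async for token in Myshell.create_async_generator("gpt-3.5-turbo", messages):
            print(token, end="", flush=True)

    asyncio.run(main())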

g4f/Provider/OpenAssistant.py

@@ -23,8 +23,6 @@ class OpenAssistant(AsyncGeneratorProvider):
         cookies: dict = None,
         **kwargs: Any
     ) -> AsyncGenerator:
-        if proxy and "://" not in proxy:
-            proxy = f"http://{proxy}"
         if not cookies:
             cookies = get_cookies("open-assistant.io")
 

g4f/Provider/OpenaiChat.py

@@ -25,14 +25,7 @@ class OpenaiChat(AsyncProvider):
         cookies: dict = None,
         **kwargs: dict
     ) -> AsyncGenerator:
-        proxies = None
-        if proxy:
-            if "://" not in proxy:
-                proxy = f"http://{proxy}"
-            proxies = {
-                "http": proxy,
-                "https": proxy
-            }
+        proxies = {"https": proxy}
         if not access_token:
             access_token = await cls.get_access_token(cookies, proxies)
         headers = {
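
After this change proxies is always a mapping; when no proxy is supplied its "https" value is simply None, which the underlying curl_cffi AsyncSession is assumed to ignore. Callers must also pass a fully qualified proxy URL now. A hypothetical call site under those assumptions:

    # Illustrative only; create_async is the AsyncProvider entry point.
    response = await OpenaiChat.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi"}],
        proxy="http://127.0.0.1:8080",  # scheme is no longer auto-prefixed
    )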

@@ -61,15 +54,16 @@ class OpenaiChat(AsyncProvider):
         for line in response.content.decode().splitlines():
             if line.startswith("data: "):
                 line = line[6:]
-                if line != "[DONE]":
-                    line = json.loads(line)
-                    if "message" in line:
-                        last_message = line["message"]["content"]["parts"][0]
+                if line == "[DONE]":
+                    break
+                line = json.loads(line)
+                if "message" in line:
+                    last_message = line["message"]["content"]["parts"][0]
         return last_message
 
 
     @classmethod
-    async def get_access_token(cls, cookies: dict = None, proxies: dict = None):
+    async def get_access_token(cls, cookies: dict = None, proxies: dict = None) -> str:
         if not cls._access_token:
             cookies = cookies if cookies else get_cookies("chat.openai.com")
             async with AsyncSession(proxies=proxies, cookies=cookies, impersonate="chrome107") as session:
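
For context, the rewritten loop consumes a server-sent-events body whose lines look roughly like this (illustrative, abbreviated):

    data: {"message": {"content": {"parts": ["Hello"]}}}
    data: {"message": {"content": {"parts": ["Hello there"]}}}
    data: [DONE]

Breaking on the [DONE] terminator ends the scan early and lets the happy path drop one level of nesting.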

g4f/Provider/__init__.py

@@ -21,6 +21,7 @@ from .H2o import H2o
 from .HuggingChat import HuggingChat
 from .Liaobots import Liaobots
 from .Lockchat import Lockchat
+from .Myshell import Myshell
 from .Opchatgpts import Opchatgpts
 from .OpenaiChat import OpenaiChat
 from .OpenAssistant import OpenAssistant

@@ -68,6 +69,7 @@ __all__ = [
     'HuggingChat',
     'Liaobots',
     'Lockchat',
+    'Myshell',
     'Opchatgpts',
     'Raycast',
     'OpenaiChat',

g4f/models.py

@@ -18,6 +18,7 @@ from .Provider import (
     Yqcloud,
     AItianhu,
     Aichat,
+    Myshell,
 )
 
 @dataclass(unsafe_hash=True)

@@ -37,7 +38,7 @@ default = Model(
         Wewordle, # Responds with markdown
         Yqcloud,  # Answers short questions in Chinese
         ChatBase, # Doesn't want to answer creatively
-        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat,
+        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, Myshell,
     ])
 )
 
@@ -46,7 +47,7 @@ gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat,
+        DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, Myshell,
     ])
 )
 
@@ -54,7 +55,7 @@ gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        Aivvm
+        Aivvm, Myshell
     ])
 )
 
@@ -153,8 +154,10 @@ gpt_35_turbo_16k_0613 = Model(
 gpt_35_turbo_0613 = Model(
     name = 'gpt-3.5-turbo-0613',
     base_provider = 'openai',
-    best_provider = [
-        Aivvm, ChatgptLogin])
+    best_provider = RetryProvider([
+        Aivvm, ChatgptLogin
+    ])
+)
 
 gpt_4_0613 = Model(
     name = 'gpt-4-0613',
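
The last hunk also normalizes gpt_35_turbo_0613: its best_provider was a bare list, inconsistent with the RetryProvider wrapper used by the other models, so the list is now wrapped. A rough sketch of the idea behind RetryProvider (illustrative; the repository's actual implementation differs in detail):

    # Hypothetical shape: try each provider in turn, surfacing the
    # collected errors only if every one of them fails.
    class RetryProviderSketch:
        def __init__(self, providers):
            self.providers = providers

        def create_completion(self, model, messages, **kwargs):
            errors = []
            for provider in self.providers:
                try:
                    return provider.create_completion(model, messages, **kwargs)
                except Exception as error:
                    errors.append(error)
            raise RuntimeError(f"All providers failed: {errors}")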