mirror of https://github.com/xtekky/gpt4free
commit
d1eaa46360
@ -0,0 +1,94 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import uuid, time, random, string, json
|
||||||
|
from aiohttp import ClientSession
|
||||||
|
|
||||||
|
from ..typing import AsyncResult, Messages
|
||||||
|
from .base_provider import AsyncGeneratorProvider
|
||||||
|
from .helper import format_prompt
|
||||||
|
|
||||||
|
|
||||||
|
class FakeGpt(AsyncGeneratorProvider):
    """Provider that proxies chats through the shared-token ChatGPT frontend
    at chat-shared2.zhile.io.

    On first use it picks an unused session token from the site's pool, logs
    in with a random session password, and caches the resulting access token
    and cookie jar on the class so later calls reuse the same session.
    """
    url                   = "https://chat-shared2.zhile.io"
    supports_gpt_35_turbo = True
    working               = True
    # Cached login state, shared across calls so we only authenticate once.
    _access_token = None
    _cookie_jar   = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the assistant's reply as incremental text chunks.

        Raises:
            RuntimeError: if the SSE stream finishes without any valid text.
        """
        headers = {
            "Accept-Language": "en-US",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
            "Referer": "https://chat-shared2.zhile.io/?v=2",
            "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            "sec-ch-ua-platform": '"Linux"',
            "sec-ch-ua-mobile": "?0",
        }
        async with ClientSession(headers=headers, cookie_jar=cls._cookie_jar) as session:
            if not cls._access_token:
                # Pick a pool token that currently has no active users.
                async with session.get(f"{cls.url}/api/loads", params={"t": int(time.time())}, proxy=proxy) as response:
                    response.raise_for_status()
                    # Renamed from `list`: never shadow the builtin.
                    loads = (await response.json())["loads"]
                    token_ids = [t["token_id"] for t in loads if t["count"] == 0]
                data = {
                    "token_key": random.choice(token_ids),
                    "session_password": random_string()
                }
                async with session.post(f"{cls.url}/auth/login", data=data, proxy=proxy) as response:
                    response.raise_for_status()
                async with session.get(f"{cls.url}/api/auth/session", proxy=proxy) as response:
                    response.raise_for_status()
                    cls._access_token = (await response.json())["accessToken"]
                    cls._cookie_jar = session.cookie_jar
            headers = {
                "Content-Type": "application/json",
                "Accept": "text/event-stream",
                "X-Authorization": f"Bearer {cls._access_token}",
            }
            prompt = format_prompt(messages)
            data = {
                "action": "next",
                "messages": [
                    {
                        "id": str(uuid.uuid4()),
                        "author": {"role": "user"},
                        "content": {"content_type": "text", "parts": [prompt]},
                        "metadata": {},
                    }
                ],
                "parent_message_id": str(uuid.uuid4()),
                "model": "text-davinci-002-render-sha",
                "plugin_ids": [],
                "timezone_offset_min": -120,
                "suggestions": [],
                "history_and_training_disabled": True,
                "arkose_token": "",
                "force_paragen": False,
            }
            last_message = ""
            async with session.post(f"{cls.url}/api/conversation", json=data, headers=headers, proxy=proxy) as response:
                async for line in response.content:
                    if line.startswith(b"data: "):
                        line = line[6:]
                        if line == b"[DONE]":
                            break
                        try:
                            message = json.loads(line)
                            if message["message"]["metadata"]["message_type"] == "next":
                                new_message = message["message"]["content"]["parts"][0]
                                # Server sends the full text each time; emit
                                # only the newly appended suffix.
                                yield new_message[len(last_message):]
                                last_message = new_message
                        except (json.JSONDecodeError, KeyError, IndexError, TypeError):
                            # Narrowed from a bare `except:` so real failures
                            # (e.g. KeyboardInterrupt) are not swallowed.
                            continue
            if not last_message:
                raise RuntimeError("No valid response")
|
||||||
|
|
||||||
|
def random_string(length: int = 10):
    """Return a random string of *length* lowercase letters and digits."""
    alphabet = string.ascii_lowercase + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))
|
@ -0,0 +1,79 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import secrets
|
||||||
|
from aiohttp import ClientSession
|
||||||
|
|
||||||
|
from ..typing import AsyncResult, Messages
|
||||||
|
from .base_provider import AsyncGeneratorProvider
|
||||||
|
|
||||||
|
class SearchTypes():
    """String constants for the search modes the Hashnode Rix API accepts."""
    quick, code, websearch = "quick", "code", "websearch"
|
||||||
|
|
||||||
|
class Hashnode(AsyncGeneratorProvider):
    """Provider backed by Hashnode's Rix AI endpoints.

    When *search_type* is ``websearch``, a search request is issued first and
    its results are both fed to the completion endpoint and cached in
    ``_sources`` for later retrieval via :meth:`get_sources`.
    """
    url = "https://hashnode.com"
    supports_gpt_35_turbo = True
    working = True
    # Search results from the most recent websearch call (class-level cache).
    _sources = []

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        search_type: str = SearchTypes.websearch,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the completion as decoded text chunks."""
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/rix",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            prompt = messages[-1]["content"]
            cls._sources = []
            # Compare against the declared constant instead of a bare string
            # literal, keeping this consistent with the SearchTypes class.
            if search_type == SearchTypes.websearch:
                async with session.post(
                    f"{cls.url}/api/ai/rix/search",
                    json={"prompt": prompt},
                    proxy=proxy,
                ) as response:
                    response.raise_for_status()
                    cls._sources = (await response.json())["result"]
            data = {
                # token_hex(16) always yields exactly 32 hex chars, so the
                # previous .zfill(32) was a no-op and has been dropped.
                "chatId": secrets.token_hex(16),
                "history": messages,
                "prompt": prompt,
                "searchType": search_type,
                "urlToScan": None,
                "searchResults": cls._sources,
            }
            async with session.post(
                f"{cls.url}/api/ai/rix/completion",
                json=data,
                proxy=proxy,
            ) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    if chunk:
                        yield chunk.decode()

    @classmethod
    def get_sources(cls) -> list:
        """Return the cached search results as {title, url} dicts."""
        return [{
            "title": source["name"],
            "url": source["url"]
        } for source in cls._sources]
|
@ -0,0 +1,89 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import time, random, json
|
||||||
|
|
||||||
|
from ..requests import StreamSession
|
||||||
|
from ..typing import AsyncResult, Messages
|
||||||
|
from .base_provider import AsyncGeneratorProvider
|
||||||
|
from .helper import format_prompt
|
||||||
|
|
||||||
|
class MyShell(AsyncGeneratorProvider):
    """Provider for app.myshell.ai, streaming replies over SSE."""
    url = "https://app.myshell.ai/chat"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
    ) -> AsyncResult:
        """Send the formatted prompt and yield reply text fragments."""
        user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
        headers = {
            "User-Agent": user_agent,
            "Myshell-Service-Name": "organics-api",
            "Visitor-Id": generate_visitor_id(user_agent)
        }
        async with StreamSession(
            impersonate="chrome107",
            proxies={"https": proxy},
            timeout=timeout,
            headers=headers
        ) as session:
            payload = {
                "botId": "1",
                "conversation_scenario": 3,
                "message": format_prompt(messages),
                "messageType": 1
            }
            async with session.post("https://api.myshell.ai/v1/bot/chat/send_message", json=payload) as response:
                response.raise_for_status()
                current_event = None
                async for raw_line in response.iter_lines():
                    if raw_line.startswith(b"event: "):
                        # Remember which SSE event the following data belongs to.
                        current_event = raw_line[7:]
                    elif current_event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT" and raw_line.startswith(b"data: "):
                        yield json.loads(raw_line[6:])["content"]
                    if current_event == b"MESSAGE_REPLY_SSE_ELEMENT_EVENT_NAME_TEXT_STREAM_PUSH_FINISHED":
                        break
|
||||||
|
|
||||||
|
|
||||||
|
def xor_hash(B: str):
    """Hash *B* by XOR-folding its character codes, 4 bytes at a time.

    Bytes are buffered newest-first; each full (or trailing partial) group is
    combined little-endian-wise and XORed into the accumulator. Returns the
    accumulator as a hex string without the "0x" prefix ("0" for empty input).
    This mirrors the JS fingerprinting routine used by myshell.ai, so the
    output must not change.
    """
    acc = 0
    pending = []

    def fold(value: int, buf: list) -> int:
        # Combine the buffered bytes (buf[0] is least significant) and XOR
        # them into value. The original helper ignored its parameter and read
        # the closed-over buffer instead; using the parameter is equivalent
        # here and removes that trap.
        word = 0
        for idx in range(len(buf)):
            word |= buf[idx] << (8 * idx)
        return value ^ word

    for ch in B:
        pending.insert(0, 255 & ord(ch))
        if len(pending) >= 4:
            acc = fold(acc, pending)
            pending = []

    if len(pending) > 0:
        acc = fold(acc, pending)

    return hex(acc)[2:]
|
||||||
|
|
||||||
|
def performance() -> str:
    """Emulate the browser performance fingerprint: the current millisecond
    timestamp concatenated with a busy-wait spin count, both as bare hex."""
    millis = int(time.time() * 1000)
    spins = 0
    # Spin until the millisecond ticks over, counting iterations.
    while int(time.time() * 1000) == millis:
        spins += 1
    return f"{millis:x}{spins:x}"
|
||||||
|
|
||||||
|
def generate_visitor_id(user_agent: str) -> str:
    """Build a MyShell visitor id: perf-random-uahash-screen-perf."""
    perf = performance()
    rand_part = hex(int(random.random() * (16 ** 16)))[2:-2]
    ua_hash = xor_hash(user_agent)
    screen_area = hex(1080 * 1920)[2:]
    return f"{perf}-{rand_part}-{ua_hash}-{screen_area}-{perf}"
|
Loading…
Reference in New Issue