mirror of https://github.com/xtekky/gpt4free
Merge pull request #2207 from kqlio67/main
Enhance and expand provider support, update models, and improve overall functionality (pull/2219/head, v0.3.2.6)
commit
07fa87b4d1
@ -0,0 +1,106 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import base64
|
||||
from aiohttp import ClientSession
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from ..image import ImageResponse
|
||||
from .helper import format_prompt
|
||||
|
||||
class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for ai-chats.org.

    Supports two modes selected by model name:
      * ``'gpt-4'``  — text chat via an SSE-style response.
      * ``'dalle'``  — image generation; the image is fetched and re-yielded
        as a base64 data-URL wrapped in :class:`ImageResponse`.
    """
    url = "https://ai-chats.org"
    api_endpoint = "https://ai-chats.org/chat/send2/"
    working = True
    supports_gpt_4 = True
    supports_message_history = True
    default_model = 'gpt-4'
    models = ['gpt-4', 'dalle']

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the model answer (str) or an ImageResponse for 'dalle'.

        On any transport/parse failure an "Error ..." string is yielded
        instead of raising, matching this repo's best-effort provider style.
        """
        headers = {
            "accept": "application/json, text/event-stream",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "origin": cls.url,
            "pragma": "no-cache",
            # Referer must match the UI page for the chosen mode.
            "referer": f"{cls.url}/{'image' if model == 'dalle' else 'chat'}/",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
            # NOTE(review): hardcoded, captured session/CSRF cookie — will
            # expire and then break the provider; should be obtained
            # dynamically. TODO confirm whether the endpoint works without it.
            'cookie': 'muVyak=LSFNvUWqdgKkGprbDBsfieIoEMzjOQ; LSFNvUWqdgKkGprbDBsfieIoEMzjOQ=ac28831b98143847e83dbe004404e619-1725548624-1725548621; muVyak_hits=9; ai-chat-front=9d714d5dc46a6b47607c9a55e7d12a95; _csrf-front=76c23dc0a013e5d1e21baad2e6ba2b5fdab8d3d8a1d1281aa292353f8147b057a%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22K9lz0ezsNPMNnfpd_8gT5yEeh-55-cch%22%3B%7D',
        }

        async with ClientSession(headers=headers) as session:
            # Image mode only uses the final user message as the prompt;
            # chat mode serializes the whole conversation.
            if model == 'dalle':
                prompt = messages[-1]['content'] if messages else ""
            else:
                prompt = format_prompt(messages)

            data = {
                "type": "image" if model == 'dalle' else "chat",
                "messagesHistory": [
                    {
                        "from": "you",
                        "content": prompt
                    }
                ]
            }

            try:
                async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                    response.raise_for_status()

                    if model == 'dalle':
                        response_json = await response.json()

                        if 'data' in response_json and response_json['data']:
                            image_url = response_json['data'][0].get('url')
                            if image_url:
                                # Download the image and inline it as a
                                # base64 data URL so callers need no second fetch.
                                async with session.get(image_url) as img_response:
                                    img_response.raise_for_status()
                                    image_data = await img_response.read()

                                base64_image = base64.b64encode(image_data).decode('utf-8')
                                # NOTE(review): assumes the returned image is PNG — confirm.
                                base64_url = f"data:image/png;base64,{base64_image}"
                                yield ImageResponse(base64_url, prompt)
                            else:
                                yield f"Error: No image URL found in the response. Full response: {response_json}"
                        else:
                            yield f"Error: Unexpected response format. Full response: {response_json}"
                    else:
                        # Chat mode: concatenate the payloads of all
                        # "data: ..." SSE lines into a single message.
                        full_response = await response.text()
                        message = ""
                        for line in full_response.split('\n'):
                            if line.startswith('data: ') and line != 'data: ':
                                message += line[6:]

                        message = message.strip()
                        yield message
            except Exception as e:
                yield f"Error occurred: {str(e)}"

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> str:
        """Return the first result from the generator as a plain string.

        For image models this is the first image's data URL.
        """
        async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
            if isinstance(response, ImageResponse):
                return response.images[0]
            return response
|
@ -0,0 +1,65 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import random
|
||||
from ..requests import StreamSession
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, format_prompt
|
||||
|
||||
|
||||
class Binjie(AsyncGeneratorProvider):
    """Streaming provider for the Binjie / aichatos8 chat API."""
    url = "https://chat18.aichatos8.com"
    working = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        **kwargs,
    ) -> AsyncResult:
        """Stream decoded text chunks from the generateStream endpoint.

        Raises RuntimeError when the backend reports the client IP as banned.
        """
        session_kwargs = dict(
            headers=_create_header(),
            proxies={"https": proxy},
            timeout=timeout,
        )
        async with StreamSession(**session_kwargs) as session:
            request_body = _create_payload(messages, **kwargs)
            async with session.post(
                "https://api.binjie.fun/api/generateStream", json=request_body
            ) as response:
                response.raise_for_status()
                async for raw_chunk in response.iter_content():
                    if not raw_chunk:
                        continue
                    text = raw_chunk.decode()
                    # The backend embeds this marker in the stream when the
                    # client IP tripped its abuse detection.
                    if "sorry, 您的ip已由于触发防滥用检测而被封禁" in text:
                        raise RuntimeError("IP address is blocked by abuse detection.")
                    yield text
|
||||
|
||||
|
||||
def _create_header():
|
||||
return {
|
||||
"accept" : "application/json, text/plain, */*",
|
||||
"content-type" : "application/json",
|
||||
"origin" : "https://chat18.aichatos8.com",
|
||||
"referer" : "https://chat18.aichatos8.com/"
|
||||
}
|
||||
|
||||
|
||||
def _create_payload(
    messages: Messages,
    system_message: str = "",
    user_id: int = None,
    **kwargs
):
    """Build the JSON body for the Binjie generateStream endpoint.

    Args:
        messages: Conversation history; flattened into a single prompt.
        system_message: Optional system instruction passed through as-is.
        user_id: Stable client identifier; a random one is generated when
            omitted.

    Returns:
        dict ready to be sent as the request's JSON body.
    """
    # BUG FIX: the original used `if not user_id`, which silently discarded
    # any explicitly supplied falsy id (e.g. 0) and regenerated a random one.
    # Only generate an id when the caller did not provide one at all.
    if user_id is None:
        user_id = random.randint(1690000544336, 2093025544336)
    return {
        "prompt": format_prompt(messages),
        "network": True,
        "system": system_message,
        "withoutContext": False,
        "stream": True,
        # The backend expects the id embedded in this route-like string.
        "userId": f"#/chat/{user_id}"
    }
|
||||
|
@ -0,0 +1,89 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
import json
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .helper import format_prompt
|
||||
|
||||
class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for chat.bixin123.com.

    The endpoint streams newline-delimited JSON objects; the last parseable
    line carries the full accumulated answer in its "text" field.
    """
    url = "https://chat.bixin123.com"
    api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    default_model = 'gpt-3.5-turbo-0125'
    models = ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo']

    # BUG FIX: the original dict listed the key "gpt-3.5-turbo" twice
    # ("...-0125" then "...-16k-0613"); Python silently keeps only the last
    # duplicate, so only the 16k-0613 mapping ever took effect.  Keep that
    # effective mapping as a single explicit entry.
    model_aliases = {
        "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Return *model* if supported, resolve an alias, else the default."""
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the final answer text parsed from the streamed response."""
        model = cls.get_model(model)

        headers = {
            "accept": "application/json, text/plain, */*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            # NOTE(review): static browser fingerprint captured from the web
            # UI — the backend may eventually reject it; confirm periodically.
            "fingerprint": "988148794",
            "origin": cls.url,
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": f"{cls.url}/chat",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
            "x-website-domain": "chat.bixin123.com",
        }

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "options": {
                    "usingNetwork": False,
                    "file": ""
                }
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                response_text = await response.text()

                # The stream is one JSON object per line, each containing the
                # text-so-far; scan backwards for the last valid line, which
                # holds the complete answer.
                lines = response_text.strip().split("\n")
                last_json = None
                for line in reversed(lines):
                    try:
                        last_json = json.loads(line)
                        break
                    except json.JSONDecodeError:
                        pass

                if last_json:
                    text = last_json.get("text", "")
                    yield text
                else:
                    # No parseable line at all: yield an empty answer rather
                    # than raising, matching this repo's best-effort style.
                    yield ""
|
@ -0,0 +1,94 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
from asyncio import sleep
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from .helper import format_prompt
|
||||
|
||||
|
||||
class CodeNews(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for codenews.cc.

    Uses a two-step protocol: POST the prompt to get a chat id, then poll
    ``/chat_stream`` until the answer is available.
    """
    url = "https://codenews.cc"
    api_endpoint = "https://codenews.cc/chatxyz13"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False
    supports_stream = True
    supports_system_message = False
    supports_message_history = False

    default_model = 'free_gpt'
    models = ['free_gpt', 'gpt-4o-mini', 'deepseek-coder', 'chatpdf']

    # Friendly alias -> backend model name.
    model_aliases = {
        "glm-4": "free_gpt",
        "gpt-3.5-turbo": "chatpdf",
        "deepseek": "deepseek-coder",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Return *model* if supported, resolve an alias, else the default."""
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Submit the prompt, then poll once per second until data arrives.

        NOTE(review): the polling loop below has no timeout or retry cap —
        a backend that never returns data would hang this coroutine forever.
        """
        model = cls.get_model(model)

        headers = {
            "accept": "application/json, text/javascript, */*; q=0.01",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
            "origin": cls.url,
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": f"{cls.url}/chatgpt",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
            "x-requested-with": "XMLHttpRequest",
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            # Form-encoded submission; flags mirror the web UI's defaults.
            data = {
                "chatgpt_input": prompt,
                "qa_type2": model,
                "chatgpt_version_value": "20240804",
                "enable_web_search": "0",
                "enable_agent": "0",
                "dy_video_text_extract": "0",
                "enable_summary": "0",
            }
            async with session.post(cls.api_endpoint, data=data, proxy=proxy) as response:
                response.raise_for_status()
                json_data = await response.json()
                # The chat id identifies the pending answer for the poll below.
                chat_id = json_data["data"]["id"]

            headers["content-type"] = "application/x-www-form-urlencoded; charset=UTF-8"
            data = {"current_req_count": "2"}

            while True:
                async with session.post(f"{cls.url}/chat_stream", headers=headers, data=data, proxy=proxy) as response:
                    response.raise_for_status()
                    json_data = await response.json()
                    if json_data["data"]:
                        yield json_data["data"]
                        break
                    else:
                        await sleep(1)  # Delay before the next poll request
|
@ -1,106 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json, random, requests, threading
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import CreateResult, Messages
|
||||
from .base_provider import AbstractProvider
|
||||
from .helper import format_prompt
|
||||
|
||||
class Cohere(AbstractProvider):
    """Provider backed by the CohereForAI "C4AI Command R+" Hugging Face Space.

    Implements the Gradio queue protocol: join the queue over an SSE stream,
    then push the prompt from a background thread (module-level ``send_data``)
    while streaming generated tokens back.  Currently disabled
    (``working = False``).
    """
    url = "https://cohereforai-c4ai-command-r-plus.hf.space"
    working = False
    supports_gpt_35_turbo = False
    supports_gpt_4 = False
    supports_stream = True

    @staticmethod
    def create_completion(
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        max_retries: int = 6,
        **kwargs
    ) -> CreateResult:
        """Yield incremental completion text for *messages*.

        NOTE(review): ``model``, ``stream``, ``proxy`` and ``max_retries``
        are accepted for interface compatibility but are never used below.
        """
        prompt = format_prompt(messages)

        headers = {
            'accept': 'text/event-stream',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control': 'no-cache',
            'pragma': 'no-cache',
            'referer': 'https://cohereforai-c4ai-command-r-plus.hf.space/?__theme=light',
            'sec-ch-ua': '"Google Chrome";v="123", "Not:A-Brand";v="8", "Chromium";v="123"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
        }

        # Random 11-char session id correlating the queue join with the data
        # POST made later by send_data().
        session_hash = ''.join(random.choices("abcdefghijklmnopqrstuvwxyz0123456789", k=11))

        params = {
            'fn_index': '1',
            'session_hash': session_hash,
        }

        # Join the Gradio queue; the response is a server-sent event stream.
        response = requests.get(
            'https://cohereforai-c4ai-command-r-plus.hf.space/queue/join',
            params=params,
            headers=headers,
            stream=True
        )

        completion = ''

        for line in response.iter_lines():
            if line:
                # NOTE(review): assumes every non-empty line has the SSE
                # "data: " framing — json.loads(line[6:]) raises otherwise.
                json_data = json.loads(line[6:])

                if b"send_data" in (line):
                    event_id = json_data["event_id"]
                    # Push the prompt from a separate thread so this loop can
                    # keep reading the event stream concurrently.
                    threading.Thread(target=send_data, args=[session_hash, event_id, prompt]).start()

                if b"process_generating" in line or b"process_completed" in line:
                    token = (json_data['output']['data'][0][0][1])
                    # The space returns the full text-so-far; yield only the
                    # newly generated suffix.
                    yield (token.replace(completion, ""))
                    completion = token
|
||||
|
||||
def send_data(session_hash, event_id, prompt):
    """Push *prompt* into the Gradio queue slot identified by the ids.

    Companion to Cohere.create_completion: after the queue join announces an
    event id, this POST supplies the actual input data for that event.
    """
    request_headers = {
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control': 'no-cache',
        'content-type': 'application/json',
        'origin': 'https://cohereforai-c4ai-command-r-plus.hf.space',
        'pragma': 'no-cache',
        'referer': 'https://cohereforai-c4ai-command-r-plus.hf.space/?__theme=light',
        'sec-ch-ua': '"Google Chrome";v="123", "Not:A-Brand";v="8", "Chromium";v="123"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
    }

    payload = {
        'data': [prompt, '', []],
        'event_data': None,
        'fn_index': 1,
        'session_hash': session_hash,
        'event_id': event_id,
    }

    requests.post(
        'https://cohereforai-c4ai-command-r-plus.hf.space/queue/data',
        json=payload,
        headers=request_headers,
    )
|
@ -0,0 +1,82 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession, ClientResponseError
|
||||
from urllib.parse import urlencode
|
||||
import io
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from ..image import ImageResponse, is_accepted_format
|
||||
|
||||
class FluxAirforce(AsyncGeneratorProvider, ProviderModelMixin):
    """Image-generation provider for the Flux models on api.airforce."""
    url = "https://flux.api.airforce/"
    api_endpoint = "https://api.airforce/v1/imagine2"
    working = True
    default_model = 'flux-realism'
    models = [
        'flux',
        'flux-realism',
        'flux-anime',
        'flux-3d',
        'flux-disney'
    ]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield an ImageResponse for the prompt, or an "Error ..." string.

        Optional kwargs: ``size`` (aspect string, default "1:1") and ``seed``.
        """
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "origin": "https://flux.api.airforce",
            "priority": "u=1, i",
            "referer": "https://flux.api.airforce/",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-site",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        }

        # Image generation only uses the final user message as the prompt.
        prompt = messages[-1]['content'] if messages else ""

        params = {
            "prompt": prompt,
            "size": kwargs.get("size", "1:1"),
            "seed": kwargs.get("seed"),
            "model": model
        }

        # Drop unset optional parameters (e.g. seed) from the query string.
        params = {k: v for k, v in params.items() if v is not None}

        # BUG FIX: the original had a `finally:` block that called
        # `await session.close()` — but `session` is only bound inside the
        # `try`, so a failure before/inside `ClientSession(...)` raised a
        # NameError, and the `async with` already guarantees the session is
        # closed.  The redundant, broken block is removed.
        try:
            async with ClientSession(headers=headers) as session:
                async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
                    response.raise_for_status()

                    content = await response.read()

                    if response.content_type.startswith('image/'):
                        image_url = str(response.url)
                        yield ImageResponse(image_url, prompt)
                    else:
                        # Non-image reply: surface the body as an error string.
                        try:
                            text = content.decode('utf-8', errors='ignore')
                            yield f"Error: {text}"
                        except Exception as decode_error:
                            yield f"Error: Unable to decode response - {str(decode_error)}"

        except ClientResponseError as e:
            yield f"Error: HTTP {e.status}: {e.message}"
        except Exception as e:
            yield f"Unexpected error: {str(e)}"
|
@ -0,0 +1,130 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
import random
|
||||
import hashlib
|
||||
import re
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from .helper import format_prompt
|
||||
|
||||
class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for magickpen.com with two backend modes.

    * ``'free'`` — the free chat endpoint (whole answer in one response).
    * ``'ask'``  — the metered endpoint, streamed chunk by chunk.

    Request signing material (API secret, signature, timestamp, nonce) is
    scraped from the site's bundled JS by :meth:`get_secrets`.
    """
    url = "https://magickpen.com"
    api_endpoint_free = "https://api.magickpen.com/chat/free"
    api_endpoint_ask = "https://api.magickpen.com/ask"
    working = True
    supports_gpt_4 = True
    supports_stream = False

    default_model = 'free'
    models = ['free', 'ask']

    # BUG FIX: the original dict listed the key "gpt-4o-mini" twice
    # ("free" then "ask"); Python keeps only the last duplicate, so the
    # alias effectively resolved to "ask".  Keep that effective mapping as
    # a single explicit entry.
    model_aliases = {
        "gpt-4o-mini": "ask",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Return *model* if supported, resolve an alias, else the default."""
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def get_secrets(cls):
        """Scrape signing material from the site's JS bundle.

        Returns a dict with X-API-Secret, signature, timestamp, nonce and
        secret, or None if the bundle could not be fetched.

        NOTE(review): the bundle URL is version-pinned ('02c76dc.js') and the
        regexes may stop matching after a site redeploy, in which case
        'X-API-Secret'/'secret' come back as None — confirm downstream
        handling.
        """
        url = 'https://magickpen.com/_nuxt/02c76dc.js'
        async with ClientSession() as session:
            async with session.get(url) as response:
                if response.status == 200:
                    text = await response.text()
                    x_api_secret_match = re.search(r'"X-API-Secret":"([^"]+)"', text)
                    secret_match = re.search(r'secret:\s*"([^"]+)"', text)

                    x_api_secret = x_api_secret_match.group(1) if x_api_secret_match else None
                    secret = secret_match.group(1) if secret_match else None

                    # Generate timestamp and nonce dynamically
                    timestamp = str(int(time.time() * 1000))
                    nonce = str(random.random())

                    # Signature = md5 of the lexicographically sorted parts,
                    # mirroring the site's own request-signing code.
                    signature_parts = ["TGDBU9zCgM", timestamp, nonce]
                    signature_string = "".join(sorted(signature_parts))
                    signature = hashlib.md5(signature_string.encode()).hexdigest()

                    return {
                        'X-API-Secret': x_api_secret,
                        'signature': signature,
                        'timestamp': timestamp,
                        'nonce': nonce,
                        'secret': secret
                    }
                else:
                    print(f"Error while fetching the file: {response.status}")
                    return None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the answer for *messages* via the selected endpoint.

        Raises:
            Exception: if the signing secrets could not be obtained.
            ValueError: if *model* resolves to an unknown mode.
        """
        model = cls.get_model(model)

        secrets = await cls.get_secrets()
        if not secrets:
            raise Exception("Failed to obtain necessary secrets")

        headers = {
            "accept": "application/json, text/plain, */*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "nonce": secrets['nonce'],
            "origin": "https://magickpen.com",
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": "https://magickpen.com/",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-site",
            "secret": secrets['secret'],
            "signature": secrets['signature'],
            "timestamp": secrets['timestamp'],
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
            "x-api-secret": secrets['X-API-Secret']
        }

        async with ClientSession(headers=headers) as session:
            if model == 'free':
                data = {
                    "history": [{"role": "user", "content": format_prompt(messages)}]
                }
                async with session.post(cls.api_endpoint_free, json=data, proxy=proxy) as response:
                    response.raise_for_status()
                    result = await response.text()
                    yield result

            elif model == 'ask':
                data = {
                    "query": format_prompt(messages),
                    "plan": "Pay as you go"
                }
                async with session.post(cls.api_endpoint_ask, json=data, proxy=proxy) as response:
                    response.raise_for_status()
                    async for chunk in response.content:
                        if chunk:
                            yield chunk.decode()

            else:
                raise ValueError(f"Unknown model: {model}")
|
@ -1,51 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from .helper import format_prompt
|
||||
|
||||
|
||||
class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
    """Streams answers from the MagickPen "ask" endpoint (gpt-4o-mini)."""
    url = "https://magickpen.com/ask"
    api_endpoint = "https://api.magickpen.com/ask"
    working = True
    supports_gpt_4 = True
    default_model = "gpt-4o-mini"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """POST the flattened conversation and yield decoded stream chunks."""
        request_headers = {
            "accept": "application/json, text/plain, */*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://magickpen.com",
            "priority": "u=1, i",
            "referer": "https://magickpen.com/",
            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-site",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
            'X-API-Secret': 'W252GY255JVYBS9NAM'
        }
        payload = {
            "query": format_prompt(messages),
            "plan": "Pay as you go"
        }
        async with ClientSession(headers=request_headers) as session:
            async with session.post(f"{cls.api_endpoint}", json=payload, proxy=proxy) as response:
                response.raise_for_status()
                async for piece in response.content:
                    if piece:
                        yield piece.decode()
|
@ -1,50 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from .helper import format_prompt
|
||||
|
||||
|
||||
class MagickPenChat(AsyncGeneratorProvider, ProviderModelMixin):
    """Streams answers from the free MagickPen chat endpoint (gpt-4o-mini)."""
    url = "https://magickpen.com/chat"
    api_endpoint = "https://api.magickpen.com/chat/free"
    working = True
    supports_gpt_4 = True
    default_model = "gpt-4o-mini"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """POST the conversation history and yield decoded stream chunks."""
        request_headers = {
            "accept": "application/json, text/plain, */*",
            "accept-language": "en-US,en;q=0.9",
            "access-control-allow-origin": "*",
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://magickpen.com",
            "priority": "u=1, i",
            "referer": "https://magickpen.com/",
            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-site",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
            'X-Api-Secret': 'W252GY255JVYBS9NAM'
        }
        payload = {
            "history": [{"role": "user", "content": format_prompt(messages)}]
        }
        async with ClientSession(headers=request_headers) as session:
            async with session.post(f"{cls.api_endpoint}", json=payload, proxy=proxy) as response:
                response.raise_for_status()
                async for piece in response.content:
                    if piece:
                        yield piece.decode()
|
@ -1,64 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from aiohttp import ClientSession, ClientResponseError
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from .helper import format_prompt
|
||||
|
||||
|
||||
class Marsyoo(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for aiagent.marsyoo.com (Dify-style SSE chat API)."""
    url = "https://aiagent.marsyoo.com"
    api_endpoint = "/api/chat-messages"
    working = True
    supports_gpt_4 = True
    default_model = 'gpt-4o'

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream answer fragments from the SSE response.

        Yields each 'message' event's answer text; stops at 'message_end'.
        On an HTTP error an "Error ..." string is yielded instead of raising.
        """
        headers = {
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.9",
            "Connection": "keep-alive",
            "DNT": "1",
            "Origin": cls.url,
            "Referer": f"{cls.url}/chat",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
            # NOTE(review): hardcoded app passport JWT captured from the web
            # UI — it can be revoked at any time; confirm validity regularly.
            "authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI0MWNkOTE3MS1mNTg1LTRjMTktOTY0Ni01NzgxMTBjYWViNTciLCJzdWIiOiJXZWIgQVBJIFBhc3Nwb3J0IiwiYXBwX2lkIjoiNDFjZDkxNzEtZjU4NS00YzE5LTk2NDYtNTc4MTEwY2FlYjU3IiwiYXBwX2NvZGUiOiJMakhzdWJqNjhMTXZCT0JyIiwiZW5kX3VzZXJfaWQiOiI4YjE5YjY2Mi05M2E1LTRhYTktOGNjNS03MDhmNWE0YmQxNjEifQ.pOzdQ4wTrQjjRlEv1XY9TZitkW5KW1K-wbcUJAoBJ5I",
            "content-type": "application/json",
            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "Linux",
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "response_mode": "streaming",
                "query": prompt,
                "inputs": {},
            }
            try:
                async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                    response.raise_for_status()
                    async for line in response.content:
                        if not line:
                            continue
                        decoded = line.decode('utf-8').strip()
                        # BUG FIX: the original used .lstrip('data: '), which
                        # strips any leading run of the characters
                        # {'d','a','t',' ',':'} rather than the literal
                        # "data: " SSE prefix (it only worked by accident
                        # because JSON payloads start with '{').  Remove the
                        # exact prefix instead.
                        if decoded.startswith('data: '):
                            decoded = decoded[len('data: '):]
                        try:
                            json_data = json.loads(decoded)
                        except json.JSONDecodeError:
                            continue
                        # .get avoids a KeyError on events without an
                        # 'event' field (e.g. keep-alives).
                        event = json_data.get('event')
                        if event == 'message':
                            yield json_data['answer']
                        elif event == 'message_end':
                            return
            except ClientResponseError as e:
                yield f"Error: HTTP {e.status}: {e.message}"
|
@ -0,0 +1,181 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import base64
|
||||
from aiohttp import ClientSession
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from ..image import ImageResponse
|
||||
from .helper import format_prompt
|
||||
|
||||
class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for nexra.aryahcr.cc: GPT text models plus image models."""
    url = "https://nexra.aryahcr.cc"
    api_endpoint_text = "https://nexra.aryahcr.cc/api/chat/gpt"
    api_endpoint_image = "https://nexra.aryahcr.cc/api/image/complements"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'gpt-3.5-turbo'
    models = [
        # Text models
        'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
        'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
        'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
        'text-curie-001', 'text-babbage-001', 'text-ada-001',
        'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
        # Image models
        'dalle', 'dalle-mini', 'emi'
    ]

    # Models routed to the image endpoint; everything else is text.
    image_models = {"dalle", "dalle-mini", "emi"}
    text_models = set(models) - image_models

    # BUG FIX: the original dict repeated each key ("gpt-4" x4,
    # "gpt-3.5-turbo" x4, "gpt-3" x12); Python silently keeps only the last
    # duplicate, so only the final mapping per key ever took effect.  The
    # dict below is the de-duplicated equivalent (identical at runtime).
    # NOTE(review): these aliases are currently dead code anyway — every key
    # also appears in `models`, and get_model checks `models` first.
    model_aliases = {
        "gpt-4": "gpt-4-32k-0314",
        "gpt-3.5-turbo": "gpt-3.5-turbo-0301",
        "gpt-3": "davinci-002",
    }
|
||||
|
||||
@classmethod
def get_model(cls, model: str) -> str:
    """Resolve *model* to a supported model name, falling back to the default."""
    if model in cls.models:
        return model
    return cls.model_aliases.get(model, cls.default_model)
|
||||
|
||||
@classmethod
async def create_async_generator(
    cls,
    model: str,
    messages: Messages,
    proxy: str = None,
    **kwargs
) -> AsyncGenerator[str | ImageResponse, None]:
    """Dispatch to the image or text generator depending on the model."""
    model = cls.get_model(model)

    if model in cls.image_models:
        delegate = cls.create_image_async_generator
    else:
        delegate = cls.create_text_async_generator
    async for item in delegate(model, messages, proxy, **kwargs):
        yield item
|
||||
|
||||
@classmethod
async def create_text_async_generator(
    cls,
    model: str,
    messages: Messages,
    proxy: str = None,
    **kwargs
) -> AsyncGenerator[str, None]:
    """Yield the complete (non-streamed) text answer from the chat endpoint.

    The endpoint is called with ``stream: False`` and returns a single JSON
    object whose ``"gpt"`` field holds the answer.
    """
    payload = {
        "messages": messages,
        "prompt": format_prompt(messages),
        "model": model,
        "markdown": False,
        "stream": False,
    }
    async with ClientSession(headers={"Content-Type": "application/json"}) as session:
        async with session.post(cls.api_endpoint_text, json=payload, proxy=proxy) as response:
            response.raise_for_status()
            body = await response.text()
            yield json.loads(body)["gpt"]
|
||||
|
||||
@classmethod
async def create_image_async_generator(
    cls,
    model: str,
    messages: Messages,
    proxy: str = None,
    **kwargs
) -> AsyncGenerator[ImageResponse | str, None]:
    """Yield an :class:`ImageResponse` generated from the last user message.

    Falls back to yielding an error string when the endpoint's payload
    cannot be turned into an image.
    """
    headers = {
        "Content-Type": "application/json"
    }

    # The image endpoint only takes the final prompt, not the full history.
    prompt = messages[-1]['content'] if messages else ""

    data = {
        "prompt": prompt,
        "model": model
    }

    async def process_response(response_text: str) -> ImageResponse | None:
        # The endpoint may prefix the JSON payload with junk; locate the
        # first '{' and parse from there.
        json_start = response_text.find('{')
        if json_start == -1:
            print("No JSON data found in the response.")
            return None
        try:
            response_data = json.loads(response_text[json_start:])
        except json.JSONDecodeError:
            print("Failed to parse JSON.")
            return None

        # Fix: the original indexed [0] directly and raised IndexError when
        # the "images" list was missing or empty.
        images = response_data.get('images') or []
        if not images:
            print("No image data in the response.")
            return None
        image_data = images[0]

        # Already a data URI: pass it through unchanged.
        if image_data.startswith('data:image/'):
            return ImageResponse([image_data], "Generated image")

        try:
            # validate=True so malformed payloads are actually rejected.
            base64.b64decode(image_data, validate=True)
        except ValueError:  # fix: original used a bare `except:` (binascii.Error is a ValueError)
            print("Invalid base64 data")
            return None
        return ImageResponse([f"data:image/jpeg;base64,{image_data}"], "Generated image")

    async with ClientSession(headers=headers) as session:
        async with session.post(cls.api_endpoint_image, json=data, proxy=proxy) as response:
            response.raise_for_status()
            response_text = await response.text()

            image_response = await process_response(response_text)
            if image_response:
                yield image_response
            else:
                yield "Failed to process image data."
|
||||
|
||||
@classmethod
async def create_async(
    cls,
    model: str,
    messages: Messages,
    proxy: str = None,
    **kwargs
) -> str:
    """Return the first result produced by :meth:`create_async_generator`.

    For image models the first image URL/data URI is returned; for text
    models the text chunk itself.
    """
    async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
        if isinstance(response, ImageResponse):
            return response.images[0]
        return response
    # Fix: the original fell off the loop and hit a NameError on `response`
    # when the generator yielded nothing; fail with a meaningful error.
    raise RuntimeError(f"{cls.__name__}: provider returned no response")
|
@ -0,0 +1,133 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from .helper import format_prompt
|
||||
|
||||
|
||||
class Snova(AsyncGeneratorProvider, ProviderModelMixin):
    """Chat provider backed by fast.snova.ai's SSE completion API.

    The server streams OpenAI-style SSE chunks; the whole answer is
    accumulated and yielded as a single string.
    """

    url = "https://fast.snova.ai"
    api_endpoint = "https://fast.snova.ai/api/completion"
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'Meta-Llama-3.1-8B-Instruct'
    models = [
        'Meta-Llama-3.1-8B-Instruct',
        'Meta-Llama-3.1-70B-Instruct',
        'Meta-Llama-3.1-405B-Instruct',
        'Samba-CoE',
        'ignos/Mistral-T5-7B-v1',
        'v1olet/v1olet_merged_dpo_7B',
        'macadeliccc/WestLake-7B-v2-laser-truthy-dpo',
        'cookinai/DonutLM-v1',
    ]

    # Short public alias -> full upstream model id.
    model_aliases = {
        "llama-3.1-8b": "Meta-Llama-3.1-8B-Instruct",
        "llama-3.1-70b": "Meta-Llama-3.1-70B-Instruct",
        "llama-3.1-405b": "Meta-Llama-3.1-405B-Instruct",

        "mistral-7b": "ignos/Mistral-T5-7B-v1",

        "samba-coe-v0.1": "Samba-CoE",
        "v1olet-merged-7b": "v1olet/v1olet_merged_dpo_7B",
        "westlake-7b-v2": "macadeliccc/WestLake-7B-v2-laser-truthy-dpo",
        "donutlm-v1": "cookinai/DonutLM-v1",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* (full name or alias) to a supported model id."""
        if model in cls.models:
            return model
        return cls.model_aliases.get(model, cls.default_model)

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator[str, None]:
        """Collect the SSE stream into one string and yield it once."""
        model = cls.get_model(model)

        headers = {
            "accept": "text/event-stream",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "origin": cls.url,
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": f"{cls.url}/",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        }
        payload = {
            "body": {
                "messages": [
                    {
                        "role": "system",
                        "content": "You are a helpful assistant."
                    },
                    {
                        "role": "user",
                        "content": format_prompt(messages),
                        "id": "1-id",
                        "ref": "1-ref",
                        "revision": 1,
                        "draft": False,
                        "status": "done",
                        "enableRealTimeChat": False,
                        "meta": None
                    }
                ],
                "max_tokens": 1000,
                "stop": ["<|eot_id|>"],
                "stream": True,
                "stream_options": {"include_usage": True},
                "model": model
            },
            "env_type": "tp16"
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
                response.raise_for_status()
                answer = ""
                async for raw_line in response.content:
                    text_line = raw_line.decode().strip()
                    if not text_line.startswith("data: "):
                        continue
                    data = text_line[6:]
                    if data == "[DONE]":
                        break
                    try:
                        event = json.loads(data)
                        choices = event.get("choices", [])
                        if choices:
                            answer += choices[0].get("delta", {}).get("content", "")
                    except json.JSONDecodeError:
                        # Non-JSON keep-alive lines are expected; skip them.
                        continue
                    except Exception as e:
                        # Best-effort: log the bad chunk and keep streaming.
                        print(f"Error processing chunk: {e}")
                        print(f"Problematic data: {data}")
                        continue

                yield answer.strip()
|
@ -0,0 +1,103 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import re
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from .helper import format_prompt
|
||||
|
||||
class TwitterBio(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for www.twitterbio.io, which exposes Mixtral and GPT-3.5
    behind two separate SSE endpoints."""

    url = "https://www.twitterbio.io"
    api_endpoint_mistral = "https://www.twitterbio.io/api/mistral"
    api_endpoint_openai = "https://www.twitterbio.io/api/openai"
    working = True
    supports_gpt_35_turbo = True

    default_model = 'gpt-3.5-turbo'
    models = [
        'mistralai/Mixtral-8x7B-Instruct-v0.1',
        'gpt-3.5-turbo',
    ]

    model_aliases = {
        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* (name or alias) to a supported model id.

        Fix: the original never consulted ``model_aliases``, so the
        "mixtral-8x7b" alias silently fell back to the default model.
        """
        if model in cls.models:
            return model
        if model in cls.model_aliases:
            return cls.model_aliases[model]
        return cls.default_model

    @staticmethod
    def format_text(text: str) -> str:
        """Collapse whitespace runs and strip spaces before punctuation."""
        text = re.sub(r'\s+', ' ', text.strip())
        text = re.sub(r'\s+([,.!?])', r'\1', text)
        return text

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Accumulate the SSE answer and yield it once, cleaned up."""
        model = cls.get_model(model)

        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "origin": cls.url,
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": f"{cls.url}/",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "prompt": f'{prompt}.'
            }

            if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1':
                api_endpoint = cls.api_endpoint_mistral
            elif model == 'gpt-3.5-turbo':
                api_endpoint = cls.api_endpoint_openai
            else:
                raise ValueError(f"Unsupported model: {model}")

            async with session.post(api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                buffer = ""
                async for line in response.content:
                    line = line.decode('utf-8').strip()
                    # Fix: check the terminator BEFORE the generic prefix.
                    # "data: [DONE]" also startswith "data: ", so the original
                    # `elif line == 'data: [DONE]'` branch was unreachable.
                    if line == 'data: [DONE]':
                        break
                    if line.startswith('data: '):
                        try:
                            json_data = json.loads(line[6:])
                            if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1':
                                if 'choices' in json_data and len(json_data['choices']) > 0:
                                    text = json_data['choices'][0].get('text', '')
                                    if text:
                                        buffer += text
                            elif model == 'gpt-3.5-turbo':
                                text = json_data.get('text', '')
                                if text:
                                    buffer += text
                        except json.JSONDecodeError:
                            continue

                if buffer:
                    yield cls.format_text(buffer)
|
@ -0,0 +1,74 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
import json
|
||||
|
||||
from ..typing import AsyncResult, Messages
|
||||
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from .helper import format_prompt
|
||||
|
||||
|
||||
class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for Upstage's Solar chat demo completions API."""

    url = "https://console.upstage.ai/playground/chat"
    api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
    working = True
    default_model = 'upstage/solar-1-mini-chat'
    models = [
        'upstage/solar-1-mini-chat',
        'upstage/solar-1-mini-chat-ja',
    ]
    # Fix: the original dict used the key "solar-1-mini" twice; the second
    # entry silently overwrote the first, so the alias resolved to the
    # Japanese model. Give each model its own distinct alias.
    model_aliases = {
        "solar-1-mini": "upstage/solar-1-mini-chat",
        "solar-1-mini-ja": "upstage/solar-1-mini-chat-ja",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* (name or alias) to a supported model id."""
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream delta content chunks from the demo completions endpoint."""
        model = cls.get_model(model)

        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "origin": "https://console.upstage.ai",
            "priority": "u=1, i",
            "referer": "https://console.upstage.ai/",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "cross-site",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "stream": True,
                "messages": [{"role": "user", "content": format_prompt(messages)}],
                "model": model
            }
            async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line:
                        line = line.decode('utf-8').strip()
                        # Skip the SSE terminator; forward only content deltas.
                        if line.startswith("data: ") and line != "data: [DONE]":
                            data = json.loads(line[6:])
                            content = data['choices'][0]['delta'].get('content', '')
                            if content:
                                yield content
|
@ -1,79 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ...requests import StreamSession
|
||||
from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
|
||||
|
||||
|
||||
class AItianhu(AsyncGeneratorProvider):
    """Legacy provider for www.aitianhu.com (disabled: ``working = False``).

    The site is protected by a browser challenge, so valid cookies from a
    real browser session are required; responses stream one JSON object
    per line with OpenAI-style delta chunks under ``"detail"``.
    """
    url = "https://www.aitianhu.com"
    working = False
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        cookies: dict = None,
        timeout: int = 120, **kwargs) -> AsyncResult:

        # Cookies are mandatory: the site sets them only after the browser
        # challenge has been solved; try to pull them from a local browser.
        if not cookies:
            cookies = get_cookies(domain_name='www.aitianhu.com')
        if not cookies:
            raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://www.aitianhu.com on chrome]")

        # Extra **kwargs are forwarded verbatim into the request payload.
        data = {
            "prompt": format_prompt(messages),
            "options": {},
            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
            "temperature": 0.8,
            "top_p": 1,
            **kwargs
        }

        headers = {
            'authority': 'www.aitianhu.com',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'content-type': 'application/json',
            'origin': 'https://www.aitianhu.com',
            'referer': 'https://www.aitianhu.com/',
            'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
        }

        # impersonate + verify=False are needed to get past the site's TLS
        # fingerprinting; proxy applies to https traffic only.
        async with StreamSession(headers=headers,
                                 cookies=cookies,
                                 timeout=timeout,
                                 proxies={"https": proxy},
                                 impersonate="chrome107", verify=False) as session:

            async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
                response.raise_for_status()

                async for line in response.iter_lines():
                    # An HTML challenge page instead of JSON means our
                    # cookies are stale or missing.
                    if line == b"<script>":
                        raise RuntimeError("Solve challenge and pass cookies")

                    if b"platform's risk control" in line:
                        raise RuntimeError("Platform's Risk Control")

                    # Each streamed line is a complete JSON document.
                    line = json.loads(line)

                    if "detail" not in line:
                        raise RuntimeError(f"Response: {line}")

                    content = line["detail"]["choices"][0]["delta"].get(
                        "content"
                    )
                    if content:
                        yield content
|
@ -1,56 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import format_prompt
|
||||
|
||||
import random
|
||||
|
||||
class Aichatos(AsyncGeneratorProvider):
    """Provider for chat10.aichatos.xyz via the binjie.fun API
    (disabled: ``working = False``)."""

    url = "https://chat10.aichatos.xyz"
    api = "https://api.binjie.fun"
    working = False
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream raw text chunks from the generateStream endpoint."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json",
            "Origin": "https://chat10.aichatos.xyz",
            "DNT": "1",
            "Sec-GPC": "1",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "cross-site",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            # Random per-request user id; the API only needs it to be unique.
            userId = random.randint(1000000000000, 9999999999999)
            # Fix: the original `system_message: str = "",` was an annotated
            # assignment with a trailing comma, which bound the 1-tuple ("",)
            # and serialized as a JSON array instead of a string.
            system_message = ""
            data = {
                "prompt": prompt,
                # Fix: the original literal was missing the f-prefix, so the
                # text "#/chat/{userId}" was sent verbatim instead of the id.
                "userId": f"#/chat/{userId}",
                "network": True,
                "system": system_message,
                "withoutContext": False,
                "stream": True,
            }
            async with session.post(f"{cls.api}/api/generateStream", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk:
                        yield chunk.decode()
|
@ -1,56 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from ...typing import Messages
|
||||
from ..base_provider import BaseProvider, CreateResult
|
||||
from ...requests import get_session_from_browser
|
||||
from uuid import uuid4
|
||||
|
||||
class Bestim(BaseProvider):
    """Provider for chatgpt.bestim.org (disabled: ``working = False``)."""

    url = "https://chatgpt.bestim.org"
    working = False
    supports_gpt_35_turbo = True
    supports_message_history = True
    supports_stream = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        **kwargs
    ) -> CreateResult:
        """Send the full message history and yield SSE data lines."""
        session = get_session_from_browser(cls.url, proxy=proxy)

        # Translate OpenAI-style messages into the site's history format.
        history = [
            {
                "id": str(uuid4()),
                "content": message["content"],
                "from": "you" if message["role"] == "user" else "bot",
            }
            for message in messages
        ]
        response = session.post(
            url="https://chatgpt.bestim.org/chat/send2/",
            json={"messagesHistory": history, "type": "chat"},
            headers={'Accept': 'application/json, text/event-stream'},
            stream=True,
        )
        response.raise_for_status()
        for line in response.iter_lines():
            # Rate-limit notifications are control events, not content.
            if line.startswith(b"event: trylimit"):
                continue
            yield line.decode().removeprefix("data: ")
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -1,66 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
import hashlib
|
||||
import uuid
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ...requests import StreamSession, raise_for_status
|
||||
from ...errors import RateLimitError
|
||||
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
|
||||
class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin):
    """Legacy provider for chatforai.store (disabled: ``working = False``).

    Requests must carry a SHA-256 signature computed from the conversation
    id and timestamp (see ``generate_signature`` below).
    """
    url = "https://chatforai.store"
    working = False
    default_model = "gpt-3.5-turbo"
    supports_message_history = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        temperature: float = 0.7,
        top_p: float = 1,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            "Content-Type": "text/plain;charset=UTF-8",
            "Origin": cls.url,
            "Referer": f"{cls.url}/?r=b",
        }
        # impersonate="chrome" is needed to pass the site's TLS fingerprinting.
        async with StreamSession(impersonate="chrome", headers=headers, proxies={"https": proxy}, timeout=timeout) as session:
            # Millisecond timestamp; must match the one used in the signature.
            timestamp = int(time.time() * 1e3)
            conversation_id = str(uuid.uuid4())
            data = {
                "conversationId": conversation_id,
                "conversationType": "chat_continuous",
                "botId": "chat_continuous",
                # Extra **kwargs are forwarded into the per-request settings.
                "globalSettings":{
                    "baseUrl": "https://api.openai.com",
                    "model": model,
                    "messageHistorySize": 5,
                    "temperature": temperature,
                    "top_p": top_p,
                    **kwargs
                },
                "prompt": "",
                "messages": messages,
                "timestamp": timestamp,
                # Signature over (conversation_id, timestamp, empty prompt).
                "sign": generate_signature(timestamp, "", conversation_id)
            }
            async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
                await raise_for_status(response)
                async for chunk in response.iter_content():
                    # The site echoes its own URL in the body when it rejects
                    # the request (e.g. bad signature / rate limiting).
                    if b"https://chatforai.store" in chunk:
                        raise RuntimeError(f"Response: {chunk.decode(errors='ignore')}")
                    yield chunk.decode(errors="ignore")
|
||||
|
||||
|
||||
def generate_signature(timestamp: int, message: str, id: str):
    """Return the hex SHA-256 request signature expected by chatforai.store.

    The signed payload is ``"<id>:<timestamp>:<message>:h496Jd6b"`` where
    the trailing component is the site's fixed secret salt.
    """
    payload = f"{id}:{timestamp}:{message}:h496Jd6b"
    digest = hashlib.sha256(payload.encode())
    return digest.hexdigest()
|
@ -1,88 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re, html, json, string, random
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import Messages, AsyncResult
|
||||
from ...errors import RateLimitError
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import get_random_string
|
||||
|
||||
class ChatgptAi(AsyncGeneratorProvider):
    """Legacy provider for chatgpt.ai (disabled: ``working = False``).

    Scrapes the site config (bot id, session, nonce) from the landing page
    once, caches it on the class, then streams mwai-ui chat events.
    """
    url = "https://chatgpt.ai"
    working = False
    supports_message_history = True
    # Fix: both flags below originally ended with a stray trailing comma,
    # which made the values the 1-tuple (True,) instead of the bool True.
    supports_system_message = True
    supports_gpt_4 = True
    _system = None  # cached site config scraped from the landing page

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "authority": "chatgpt.ai",
            "accept": "*/*",
            "accept-language": "en-US",
            "cache-control": "no-cache",
            "origin": cls.url,
            "pragma": "no-cache",
            "referer": f"{cls.url}/",
            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            # Scrape and cache the site config on first use.
            if not cls._system:
                async with session.get(cls.url, proxy=proxy) as response:
                    response.raise_for_status()
                    text = await response.text()
                    result = re.search(r"data-system='(.*?)'", text)
                    if result:
                        cls._system = json.loads(html.unescape(result.group(1)))
            if not cls._system:
                raise RuntimeError("System args not found")

            data = {
                "botId": cls._system["botId"],
                "customId": cls._system["customId"],
                "session": cls._system["sessionId"],
                "chatId": get_random_string(),
                "contextId": cls._system["contextId"],
                "messages": messages[:-1],
                "newMessage": messages[-1]["content"],
                "newFileId": None,
                "stream":True
            }
            async with session.post(
                "https://chatgate.ai/wp-json/mwai-ui/v1/chats/submit",
                proxy=proxy,
                json=data,
                headers={"X-Wp-Nonce": cls._system["restNonce"]}
            ) as response:
                response.raise_for_status()
                async for raw in response.content:
                    if raw.startswith(b"data: "):
                        # Fix: the original used a bare `except:` around both the
                        # parse and an assert; when the assert failed it called
                        # .decode() on the already-parsed dict and crashed with
                        # AttributeError. Keep the raw bytes for the error text
                        # and catch only the actual parse failure.
                        try:
                            line = json.loads(raw[6:])
                        except json.JSONDecodeError:
                            raise RuntimeError(f"Broken line: {raw.decode()}")
                        if "type" not in line:
                            raise RuntimeError(f"Broken line: {raw.decode()}")
                        if line["type"] == "error":
                            # A login redirect in the error payload signals
                            # rate limiting rather than a hard failure.
                            if "https://chatgate.ai/login" in line["data"]:
                                raise RateLimitError("Rate limit reached")
                            raise RuntimeError(line["data"])
                        if line["type"] == "live":
                            yield line["data"]
                        elif line["type"] == "end":
                            break
|
@ -1,70 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time, json, re, asyncio
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ...errors import RateLimitError
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import format_prompt
|
||||
|
||||
class ChatgptDemo(AsyncGeneratorProvider):
    """Legacy provider for chatgptdemo.info (disabled: ``working = False``).

    Flow: scrape an anonymous user id from the chat page, create a chat
    session for it, then stream OpenAI-style SSE deltas.
    """
    url = "https://chatgptdemo.info/chat"
    working = False
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "authority": "chatgptdemo.info",
            "accept-language": "en-US",
            "origin": "https://chatgptdemo.info",
            "referer": "https://chatgptdemo.info/chat/",
            "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
        }
        async with ClientSession(headers=headers) as session:
            # Step 1: the chat page embeds an anonymous user id in a hidden div.
            async with session.get(f"{cls.url}/", proxy=proxy) as response:
                response.raise_for_status()
                text = await response.text()
                result = re.search(
                    r'<div id="USERID" style="display: none">(.*?)<\/div>',
                    text,
                )
                if result:
                    user_id = result.group(1)
                else:
                    raise RuntimeError("No user id found")
            # Step 2: open a chat session for that user id.
            async with session.post(f"https://chatgptdemo.info/chat/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
                response.raise_for_status()
                chat_id = (await response.json())["id_"]
            if not chat_id:
                raise RuntimeError("Could not create new chat")
            # The site rejects requests sent immediately after chat creation.
            await asyncio.sleep(10)
            data = {
                "question": format_prompt(messages),
                "chat_id": chat_id,
                # Millisecond timestamp, as expected by the endpoint.
                "timestamp": int((time.time())*1e3),
            }
            # Step 3: stream the answer as SSE delta chunks.
            async with session.post(f"https://chatgptdemo.info/chat/chat_api_stream", json=data, proxy=proxy) as response:
                if response.status == 429:
                    raise RateLimitError("Rate limit reached")
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: "):
                        # Trailing byte is the newline; strip it before parsing.
                        line = json.loads(line[6:-1])

                        chunk = line["choices"][0]["delta"].get("content")
                        if chunk:
                            yield chunk
|
@ -1,56 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import get_random_string
|
||||
|
||||
class ChatgptDemoAi(AsyncGeneratorProvider):
    """Legacy provider for chat.chatgptdemo.ai (disabled: ``working = False``).

    Posts the full message history to the mwai-ui endpoint and yields the
    "live" SSE events as they stream in.
    """
    url = "https://chat.chatgptdemo.ai"
    working = False
    supports_gpt_35_turbo = True
    supports_message_history = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "botId": "default",
                "customId": "8824fe9bdb323a5d585a3223aaa0cb6e",
                "session": "N/A",
                "chatId": get_random_string(12),
                "contextId": 2,
                "messages": messages,
                "newMessage": messages[-1]["content"],
                "stream": True
            }
            async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    # Fix: the original re-called response.raise_for_status()
                    # on every chunk inside this loop; the status has already
                    # been checked once above and cannot change mid-stream.
                    if chunk.startswith(b"data: "):
                        data = json.loads(chunk[6:])
                        if data["type"] == "live":
                            yield data["data"]
|
@ -1,78 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import time
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import format_prompt
|
||||
|
||||
|
||||
class ChatgptLogin(AsyncGeneratorProvider):
    """Legacy provider for chatgptlogin.ai (disabled: ``working = False``).

    Flow: scrape an anonymous user id from the chat page (cached on the
    class), create a chat, stream SSE deltas, then delete the chat.
    """
    url = "https://chatgptlogin.ai"
    working = False
    supports_gpt_35_turbo = True
    _user_id = None  # cached anonymous user id scraped from the chat page

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/chat/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Alt-Used": "chatgptlogin.ai",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache"
        }
        async with ClientSession(headers=headers) as session:
            # The user id only has to be scraped once per process.
            if not cls._user_id:
                async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
                    response.raise_for_status()
                    response = await response.text()
                    result = re.search(
                        r'<div id="USERID" style="display: none">(.*?)<\/div>',
                        response,
                    )

                    if result:
                        cls._user_id = result.group(1)
                    else:
                        raise RuntimeError("No user id found")
            # Create a fresh chat session for this request.
            async with session.post(f"{cls.url}/chat/new_chat", json={"user_id": cls._user_id}, proxy=proxy) as response:
                response.raise_for_status()
                chat_id = (await response.json())["id_"]
            if not chat_id:
                raise RuntimeError("Could not create new chat")
            prompt = format_prompt(messages)
            data = {
                "question": prompt,
                "chat_id": chat_id,
                # Millisecond timestamp, as expected by the endpoint.
                "timestamp": int(time.time() * 1e3),
            }
            # Stream the answer as OpenAI-style SSE delta chunks.
            async with session.post(f"{cls.url}/chat/chat_api_stream", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: "):

                        content = json.loads(line[6:])["choices"][0]["delta"].get("content")
                        if content:
                            yield content

            # Clean up the chat session once the stream is exhausted.
            async with session.post(f"{cls.url}/chat/delete_chat", json={"chat_id": chat_id}, proxy=proxy) as response:
                response.raise_for_status()
|
@ -1,66 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
|
||||
class ChatgptNext(AsyncGeneratorProvider):
    """Provider for chatgpt-free.cc, backed by an OpenAI-compatible endpoint
    at chat.fstha.com (SSE chat completions)."""
    url = "https://www.chatgpt-free.cc"
    working = False
    supports_gpt_35_turbo = True
    supports_message_history = True
    supports_system_message = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        max_tokens: int = None,
        temperature: float = 0.7,
        top_p: float = 1,
        presence_penalty: float = 0,
        frequency_penalty: float = 0,
        **kwargs
    ) -> AsyncResult:
        """Yield content deltas of the streamed chat completion."""
        model = model or "gpt-3.5-turbo"
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
            "Accept": "text/event-stream",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json",
            "Referer": "https://chat.fstha.com/",
            "x-requested-with": "XMLHttpRequest",
            "Origin": "https://chat.fstha.com",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            # Static token accepted by this deployment.
            "Authorization": "Bearer ak-chatgpt-nice",
            "Connection": "keep-alive",
            "Alt-Used": "chat.fstha.com",
        }
        payload = {
            "messages": messages,
            "stream": True,
            "model": model,
            "temperature": temperature,
            "presence_penalty": presence_penalty,
            "frequency_penalty": frequency_penalty,
            "top_p": top_p,
            "max_tokens": max_tokens,
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(
                "https://chat.fstha.com/api/openai/v1/chat/completions",
                json=payload,
                proxy=proxy,
            ) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk.startswith(b"data: [DONE]"):
                        break
                    if chunk.startswith(b"data: "):
                        delta = json.loads(chunk[6:])["choices"][0]["delta"].get("content")
                        if delta:
                            yield delta
|
@ -1,106 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import json
|
||||
|
||||
from aiohttp import ClientSession
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import format_prompt
|
||||
from ...errors import RateLimitError
|
||||
|
||||
class ChatgptX(AsyncGeneratorProvider):
    """Provider for chatgptx.de.

    Flow: scrape CSRF token, chat id and user id from the landing page,
    submit the prompt via ``/sendchat``, then stream the answer from
    ``/chats_stream`` as server-sent events.
    """
    url = "https://chatgptx.de"
    supports_gpt_35_turbo = True
    working = False

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response text chunks for *messages*.

        Raises:
            RuntimeError: if page tokens cannot be scraped or a stream line
                is malformed.
            RateLimitError: if the service reports its request limit
                ("Anfragelimit").
        """
        headers = {
            'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
            'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': 'Linux',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
        }
        # Fix: initialize the scraped values so a failed regex lookup reaches
        # the explicit RuntimeError below instead of raising NameError on an
        # unbound local.
        csrf_token = chat_id = user_id = None
        async with ClientSession(headers=headers) as session:
            async with session.get(f"{cls.url}/", proxy=proxy) as response:
                response = await response.text()

            result = re.search(
                r'<meta name="csrf-token" content="(.*?)"', response
            )
            if result:
                csrf_token = result.group(1)

            result = re.search(r"openconversions\('(.*?)'\)", response)
            if result:
                chat_id = result.group(1)

            result = re.search(
                r'<input type="hidden" id="user_id" value="(.*?)"', response
            )
            if result:
                user_id = result.group(1)

            if not csrf_token or not chat_id or not user_id:
                raise RuntimeError("Missing csrf_token, chat_id or user_id")

            data = {
                '_token': csrf_token,
                'user_id': user_id,
                'chats_id': chat_id,
                'prompt': format_prompt(messages),
                'current_model': "gpt3"
            }
            headers = {
                'authority': 'chatgptx.de',
                'accept': 'application/json, text/javascript, */*; q=0.01',
                'origin': cls.url,
                'referer': f'{cls.url}/',
                'x-csrf-token': csrf_token,
                'x-requested-with': 'XMLHttpRequest'
            }
            async with session.post(f'{cls.url}/sendchat', data=data, headers=headers, proxy=proxy) as response:
                response.raise_for_status()
                chat = await response.json()
                # "Anfragelimit" is the German rate-limit notice.
                if "messages" in chat and "Anfragelimit" in chat["messages"]:
                    raise RateLimitError("Rate limit reached")
                if "response" not in chat or not chat["response"]:
                    raise RuntimeError(f'Response: {chat}')
            headers = {
                'authority': 'chatgptx.de',
                'accept': 'text/event-stream',
                'referer': f'{cls.url}/',
                'x-csrf-token': csrf_token,
                'x-requested-with': 'XMLHttpRequest'
            }
            data = {
                "user_id": user_id,
                "chats_id": chat_id,
                "current_model": "gpt3",
                "conversions_id": chat["conversions_id"],
                "ass_conversions_id": chat["ass_conversions_id"],
            }
            async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: "):
                        row = line[6:-1]
                        if row == b"[DONE]":
                            break
                        try:
                            content = json.loads(row)["choices"][0]["delta"].get("content")
                        except Exception:
                            raise RuntimeError(f"Broken line: {line.decode()}")
                        if content:
                            yield content
|
@ -1,60 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
|
||||
class Chatxyz(AsyncGeneratorProvider):
    """Provider for chat.3211000.xyz (OpenAI-compatible SSE endpoint)."""
    url = "https://chat.3211000.xyz"
    working = False
    supports_gpt_35_turbo = True
    supports_message_history = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield content deltas of the streamed completion."""
        headers = {
            'Accept': 'text/event-stream',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.5',
            'Alt-Used': 'chat.3211000.xyz',
            'Content-Type': 'application/json',
            'Host': 'chat.3211000.xyz',
            'Origin': 'https://chat.3211000.xyz',
            'Referer': 'https://chat.3211000.xyz/',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'TE': 'trailers',
            'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0',
            'x-requested-with': 'XMLHttpRequest'
        }
        # Fixed defaults; extra keyword arguments override/extend the payload.
        payload = {
            "messages": messages,
            "stream": True,
            "model": "gpt-3.5-turbo",
            "temperature": 0.5,
            "presence_penalty": 0,
            "frequency_penalty": 0,
            "top_p": 1,
            **kwargs
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=payload, proxy=proxy) as response:
                response.raise_for_status()
                async for raw in response.content:
                    text = raw.decode()
                    if text.startswith("data: [DONE]"):
                        break
                    if text.startswith("data: "):
                        event = json.loads(text[6:])
                        delta = event["choices"][0]["delta"].get("content")
                        if delta:
                            yield delta
|
@ -1,58 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import format_prompt
|
||||
|
||||
|
||||
class Cnote(AsyncGeneratorProvider):
    """Provider for f1.cnote.top, backed by the xjai.pro free chat-process API."""
    url = "https://f1.cnote.top"
    api_url = "https://p1api.xjai.pro/freeapi/chat-process"
    working = False
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield text chunks parsed from the chat-process stream."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "DNT": "1",
            "Sec-GPC": "1",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "cross-site",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            # Fix: the original statement `system_message: str = "",` (note the
            # trailing comma) bound the tuple ("",), which serialized as [""]
            # in the JSON payload instead of a string. Also allow callers to
            # pass their own system message (backward-compatible default).
            system_message = kwargs.get("system_message", "")
            data = {
                "prompt": prompt,
                "systemMessage": system_message,
                "temperature": 0.8,
                "top_p": 1,
            }
            async with session.post(cls.api_url, json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk:
                        try:
                            # Payload fragments are delimited by a fixed magic
                            # marker; the JSON of interest follows the last one.
                            data = json.loads(chunk.decode().split("&KFw6loC9Qvy&")[-1])
                            text = data.get("text", "")
                            yield text
                        except (json.JSONDecodeError, IndexError):
                            # Ignore non-JSON keep-alive/framing fragments.
                            pass
|
@ -1,78 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import asyncio
|
||||
from aiohttp import ClientSession, TCPConnector
|
||||
from urllib.parse import urlencode
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from ..helper import format_prompt
|
||||
|
||||
|
||||
class Feedough(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for feedough.com's AI prompt generator (WordPress admin-ajax)."""
    url = "https://www.feedough.com"
    api_endpoint = "/wp-admin/admin-ajax.php"
    working = False
    default_model = ''

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the generated text (single chunk) for *messages*."""
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
            "dnt": "1",
            "origin": cls.url,
            "referer": f"{cls.url}/ai-prompt-generator/",
            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
        }

        # NOTE(review): TLS verification is disabled for this host — confirm it
        # is still needed; it weakens transport security.
        connector = TCPConnector(ssl=False)

        async with ClientSession(headers=headers, connector=connector) as session:
            data = {
                "action": "aixg_generate",
                "prompt": format_prompt(messages),
                # NOTE(review): hard-coded WordPress nonce; presumably expires
                # server-side and may need re-scraping to stay valid.
                "aixg_generate_nonce": "110c021031"
            }

            try:
                async with session.post(
                    f"{cls.url}{cls.api_endpoint}",
                    data=urlencode(data),
                    proxy=proxy
                ) as response:
                    response.raise_for_status()
                    response_text = await response.text()
                    try:
                        response_json = json.loads(response_text)
                        if response_json.get("success") and "data" in response_json:
                            message = response_json["data"].get("message", "")
                            yield message
                    except json.JSONDecodeError:
                        # Not JSON: pass the raw body through unchanged.
                        yield response_text
            except Exception as e:
                # Best-effort provider: swallow transport errors with a diagnostic.
                print(f"An error occurred: {e}")

    @classmethod
    async def run(cls, *args, **kwargs):
        """Relay items from :meth:`create_async_generator`.

        Fix: the original additionally awaited every task returned by
        ``asyncio.all_tasks()`` — including the currently running one, which
        raises ``RuntimeError("Task cannot await on itself")`` — and would
        block on unrelated tasks. That loop is removed.
        """
        async for item in cls.create_async_generator(*args, **kwargs):
            yield item
|
@ -1,54 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
|
||||
class Gpt6(AsyncGeneratorProvider):
    """Provider for gpt6.ai, served via a DigitalOcean-hosted query API."""
    url = "https://gpt6.ai"
    working = False
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield content deltas of the streamed completion.

        Fix: removed a leftover debug ``print(line)`` that dumped every raw
        SSE line to stdout.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json",
            "Origin": "https://gpt6.ai",
            "Connection": "keep-alive",
            "Referer": "https://gpt6.ai/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "cross-site",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "prompts": messages,
                # Static decoy geo payload expected by the backend.
                "geoInfo": {"ip": "100.90.100.222", "hostname": "ip-100-090-100-222.um36.pools.vodafone-ip.de", "city": "Muenchen", "region": "North Rhine-Westphalia", "country": "DE", "loc": "44.0910,5.5827", "org": "AS3209 Vodafone GmbH", "postal": "41507", "timezone": "Europe/Berlin"},
                "paid": False,
                "character": {"textContent": "", "id": "52690ad6-22e4-4674-93d4-1784721e9944", "name": "GPT6", "htmlContent": ""}
            }
            async with session.post("https://seahorse-app-d29hu.ondigitalocean.app/api/v1/query", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: [DONE]"):
                        break
                    elif line.startswith(b"data: "):
                        # Strip the "data: " prefix and the trailing newline.
                        event = json.loads(line[6:-1])
                        chunk = event["choices"][0]["delta"].get("content")
                        if chunk:
                            yield chunk
|
@ -1,35 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from ...requests import Session, get_session_from_browser
|
||||
from ...typing import Messages
|
||||
from ..base_provider import AsyncProvider
|
||||
|
||||
|
||||
class GptChatly(AsyncProvider):
    """Provider for gptchatly.com (single-shot, non-streaming responses)."""
    url = "https://gptchatly.com"
    working = False
    supports_message_history = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        session: Session = None,
        **kwargs
    ) -> str:
        """Return the full response text for *messages*."""
        if session is None:
            # A browser-derived session is required to pass the site's checks.
            session = get_session_from_browser(cls.url, proxy=proxy, timeout=timeout)
        # The "felch-response" spelling is the site's actual endpoint name.
        chat_url = (
            f"{cls.url}/fetch-gpt4-response"
            if model.startswith("gpt-4")
            else f"{cls.url}/felch-response"
        )
        response = session.post(chat_url, json={"past_conversations": messages})
        response.raise_for_status()
        return response.json()["chatGPTResponse"]
|
@ -1,91 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
import os
|
||||
import json
|
||||
try:
|
||||
import execjs
|
||||
has_requirements = True
|
||||
except ImportError:
|
||||
has_requirements = False
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import format_prompt
|
||||
from ...errors import MissingRequirementsError
|
||||
|
||||
class GptForLove(AsyncGeneratorProvider):
    """Provider for ai18.gptforlove.com via the api.gptplus.one chat-process API.

    Requires PyExecJS (for :func:`get_secret`'s crypto-js call).
    """
    url = "https://ai18.gptforlove.com"
    working = False
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield content deltas of the streamed completion.

        Raises:
            MissingRequirementsError: if PyExecJS is not installed.
            RuntimeError: on malformed lines, rate limiting, or unexpected
                responses.
        """
        if not has_requirements:
            raise MissingRequirementsError('Install "PyExecJS" package')
        if not model:
            model = "gpt-3.5-turbo"
        headers = {
            "authority": "api.gptplus.one",
            "accept": "application/json, text/plain, */*",
            "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US;q=0.6,nl;q=0.5,zh-CN;q=0.4,zh-TW;q=0.3,zh;q=0.2",
            "content-type": "application/json",
            "origin": cls.url,
            "referer": f"{cls.url}/",
            "sec-ch-ua": "\"Google Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "Linux",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "cross-site",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "options": {},
                "systemMessage": kwargs.get("system_message", "You are ChatGPT, the version is GPT3.5, a large language model trained by OpenAI. Follow the user's instructions carefully."),
                "temperature": kwargs.get("temperature", 0.8),
                "top_p": kwargs.get("top_p", 1),
                # Anti-abuse token computed client-side; see get_secret().
                "secret": get_secret(),
            }
            async with session.post("https://api.gptplus.one/chat-process", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    # Fix: keep the raw text around — the original rebound
                    # `line` to the parsed dict and then tested the Chinese
                    # rate-limit message with `in line`, which checks dict
                    # *keys* and could never match.
                    text = line.decode(errors="replace")
                    try:
                        parsed = json.loads(line)
                    except Exception:
                        raise RuntimeError(f"Broken line: {text}")
                    if "detail" in parsed:
                        content = parsed["detail"]["choices"][0]["delta"].get("content")
                        if content:
                            yield content
                    elif "10分钟内提问超过了5次" in text:
                        # "More than 5 questions within 10 minutes."
                        raise RuntimeError("Rate limit reached")
                    else:
                        raise RuntimeError(f"Response: {parsed}")
|
||||
|
||||
|
||||
def get_secret() -> str:
    """Return the anti-abuse ``secret`` token for the gptplus.one API.

    Encrypts the current unix timestamp with AES-ECB/PKCS7 under a
    hard-coded key, using the bundled crypto-js library through PyExecJS.
    """
    module_dir = os.path.dirname(__file__)
    crypto_js_path = f'{module_dir}/npm/node_modules/crypto-js/crypto-js'
    script = """
CryptoJS = require({include})
var k = 'fjfsdwiuhfwf'
    , e = Math.floor(new Date().getTime() / 1e3);
var t = CryptoJS.enc.Utf8.parse(e)
    , o = CryptoJS.AES.encrypt(t, k, {
    mode: CryptoJS.mode.ECB,
    padding: CryptoJS.pad.Pkcs7
});
return o.toString()
"""
    # json.dumps produces a correctly escaped JS string literal for the path.
    script = script.replace('{include}', json.dumps(crypto_js_path))
    return execjs.compile(script).call('')
|
@ -1,66 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from aiohttp import ClientSession
|
||||
import json
|
||||
import base64
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider, format_prompt
|
||||
|
||||
|
||||
class GptGo(AsyncGeneratorProvider):
    """Provider for gptgo.ai: fetch an opaque token for the prompt, then
    stream the answer from the web API."""
    url = "https://gptgo.ai"
    working = False
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield content deltas for *messages*."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-language": "en-US",
            "Origin": cls.url,
            "Referer": f"{cls.url}/",
            "sec-ch-ua": '"Google Chrome";v="116", "Chromium";v="116", "Not?A_Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(
                "https://gptgo.ai/get_token.php",
                data={"ask": format_prompt(messages)},
                proxy=proxy
            ) as response:
                response.raise_for_status()
                token = await response.text()
                if token == "error token":
                    raise RuntimeError(f"Response: {token}")
                # The usable token is base64 inside a fixed-width wrapper
                # (10 junk chars before, 20 after).
                token = base64.b64decode(token[10:-20]).decode()

            async with session.get(
                "https://api.gptgo.ai/web.php",
                params={"array_chat": token},
                proxy=proxy
            ) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: [DONE]"):
                        break
                    if line.startswith(b"data: "):
                        event = json.loads(line[6:])
                        if "choices" not in event:
                            raise RuntimeError(f"Response: {event}")
                        content = event["choices"][0]["delta"].get("content")
                        # Skip the service's advertising suffix chunk.
                        if content and content != "\n#GPTGO ":
                            yield content
|
@ -1,61 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import secrets
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import format_prompt
|
||||
|
||||
class GptGod(AsyncGeneratorProvider):
    """Provider for gptgod.site's free GPT-3.5 SSE endpoint."""
    url = "https://gptgod.site"
    working = False
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response chunks from the event stream."""
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "text/event-stream",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Alt-Used": "gptgod.site",
            "Connection": "keep-alive",
            "Referer": f"{cls.url}/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
        }
        async with ClientSession(headers=headers) as session:
            params = {
                "content": format_prompt(messages),
                # Random 32-hex-digit conversation id.
                "id": secrets.token_hex(16).zfill(32)
            }
            async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=params, proxy=proxy) as response:
                response.raise_for_status()
                # SSE framing: an "event:" line names the following "data:" line.
                current_event = None
                async for line in response.content:
                    if line.startswith(b'event: '):
                        current_event = line[7:-1]
                    elif current_event == b"data" and line.startswith(b"data: "):
                        payload = json.loads(line[6:-1])
                        if payload:
                            yield payload
                    elif current_event == b"done":
                        break
|
@ -1,57 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ...typing import AsyncResult, Messages
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
from ..helper import get_random_string
|
||||
|
||||
class OnlineGpt(AsyncGeneratorProvider):
    """Provider for onlinegpt.org (mwai WordPress chat plugin endpoint)."""
    url = "https://onlinegpt.org"
    working = False
    supports_gpt_35_turbo = True
    supports_message_history = False

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield live text chunks for the last user message."""
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "text/event-stream",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/chat/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Alt-Used": "onlinegpt.org",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "TE": "trailers"
        }
        # Randomized session/chat identifiers for each request.
        payload = {
            "botId": "default",
            "customId": None,
            "session": get_random_string(12),
            "chatId": get_random_string(),
            "contextId": 9,
            "messages": messages,
            "newMessage": messages[-1]["content"],
            "newImageId": None,
            "stream": True
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(f"{cls.url}/chatgpt/wp-json/mwai-ui/v1/chats/submit", json=payload, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content:
                    if chunk.startswith(b"data: "):
                        event = json.loads(chunk[6:])
                        # Only "live" events carry incremental text.
                        if event["type"] == "live":
                            yield event["data"]
|
@ -1,21 +0,0 @@
|
||||
|
||||
from .AItianhu import AItianhu
|
||||
from .Aichatos import Aichatos
|
||||
from .Bestim import Bestim
|
||||
from .ChatBase import ChatBase
|
||||
from .ChatForAi import ChatForAi
|
||||
from .ChatgptAi import ChatgptAi
|
||||
from .ChatgptDemo import ChatgptDemo
|
||||
from .ChatgptDemoAi import ChatgptDemoAi
|
||||
from .ChatgptLogin import ChatgptLogin
|
||||
from .ChatgptNext import ChatgptNext
|
||||
from .ChatgptX import ChatgptX
|
||||
from .Chatxyz import Chatxyz
|
||||
from .Cnote import Cnote
|
||||
from .Feedough import Feedough
|
||||
from .Gpt6 import Gpt6
|
||||
from .GptChatly import GptChatly
|
||||
from .GptForLove import GptForLove
|
||||
from .GptGo import GptGo
|
||||
from .GptGod import GptGod
|
||||
from .OnlineGpt import OnlineGpt
|
Loading…
Reference in New Issue