mirror of https://github.com/xtekky/gpt4free
~ | Merge pull request #985 from hlohaus/bom
Add AiAsk, Chatgpt4Online, ChatgptDemo big updatepull/991/head
commit
a59dc2fb36
@ -0,0 +1,43 @@
|
||||
from aiohttp import ClientSession
|
||||
from ..typing import AsyncGenerator
|
||||
from .base_provider import AsyncGeneratorProvider
|
||||
|
||||
class AiAsk(AsyncGeneratorProvider):
    """Provider for e.aiask.me: streams raw response text over chunked HTTP."""
    url = "https://e.aiask.me"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        timeout: int = 30,
        **kwargs
    ) -> AsyncGenerator:
        """Yield response text chunks for the given chat messages.

        Args:
            model: ignored by this backend.
            messages: chat history as role/content dicts.
            timeout: total request timeout in seconds.

        Raises:
            RuntimeError: when the service answers with its rate-limit banner.
        """
        headers = {
            "accept": "application/json, text/plain, */*",
            "origin": cls.url,
            "referer": f"{cls.url}/chat",
        }
        # Fix: aiohttp expects a ClientTimeout object here; passing the bare
        # int is rejected/misinterpreted by current aiohttp versions.
        from aiohttp import ClientTimeout
        async with ClientSession(
            headers=headers, timeout=ClientTimeout(total=timeout)
        ) as session:
            data = {
                "continuous": True,
                "id": "fRMSQtuHl91A4De9cCvKD",
                "list": messages,
                "models": "0",
                "prompt": "",
                "temperature": kwargs.get("temperature", 0.5),
                "title": "",
            }
            buffer = ""
            rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
            async with session.post(f"{cls.url}/v1/chat/gpt/", json=data) as response:
                response.raise_for_status()
                # Buffer chunks until we can rule out that the text is the
                # rate-limit banner (it may arrive split across chunks).
                async for chunk in response.content.iter_any():
                    buffer += chunk.decode()
                    if not rate_limit.startswith(buffer):
                        yield buffer
                        buffer = ""
                    elif buffer == rate_limit:
                        raise RuntimeError("Rate limit reached")
                # Fix: flush a trailing partial prefix of the banner at end of
                # stream — previously this tail was silently dropped.
                if buffer and buffer != rate_limit:
                    yield buffer
|
@ -0,0 +1,39 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from ..typing import AsyncGenerator
|
||||
from .base_provider import AsyncGeneratorProvider
|
||||
|
||||
|
||||
class Chatgpt4Online(AsyncGeneratorProvider):
    """Provider for chatgpt4online.org (WordPress "mwai-ui" chat endpoint)."""
    url = "https://chatgpt4online.org"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Stream the "live" text fragments emitted by the chat endpoint."""
        payload = {
            "botId": "default",
            "customId": None,
            "session": "N/A",
            "chatId": "",
            "contextId": 58,
            "messages": messages,
            "newMessage": messages[-1]["content"],
            "stream": True
        }
        endpoint = cls.url + "/wp-json/mwai-ui/v1/chats/submit"
        prefix = b"data: "
        async with ClientSession() as session:
            async with session.post(endpoint, json=payload) as response:
                response.raise_for_status()
                async for raw in response.content:
                    # Skip anything that is not an SSE data line.
                    if not raw.startswith(prefix):
                        continue
                    event = json.loads(raw[len(prefix):])
                    if event["type"] == "live":
                        yield event["data"]
|
@ -0,0 +1,62 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time, json, re
|
||||
from aiohttp import ClientSession
|
||||
from typing import AsyncGenerator
|
||||
|
||||
from .base_provider import AsyncGeneratorProvider
|
||||
from .helper import format_prompt
|
||||
|
||||
class ChatgptDemo(AsyncGeneratorProvider):
    """Provider for chat.chatgptdemo.net: scrapes a user id from the landing
    page, creates a chat, then streams OpenAI-style SSE content deltas."""
    url = "https://chat.chatgptdemo.net"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        timeout: int = 30,
        **kwargs
    ) -> AsyncGenerator:
        """Yield content deltas for the formatted prompt.

        Raises:
            RuntimeError: if no user id can be scraped or no chat is created.
        """
        headers = {
            "authority": "chat.chatgptdemo.net",
            "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
            "origin": "https://chat.chatgptdemo.net",
            "referer": "https://chat.chatgptdemo.net/",
            "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
        }
        # Fix: aiohttp requires a ClientTimeout object, not a bare int.
        from aiohttp import ClientTimeout
        async with ClientSession(
            headers=headers, timeout=ClientTimeout(total=timeout)
        ) as session:
            async with session.get(f"{cls.url}/", proxy=proxy) as response:
                response.raise_for_status()
                page = await response.text()  # renamed: don't shadow `response`
            result = re.search(r'<div id="USERID" style="display: none">(.*?)<\/div>', page)
            if not result:
                raise RuntimeError("No user id found")
            user_id = result.group(1)
            async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
                response.raise_for_status()
                chat_id = (await response.json())["id_"]
            if not chat_id:
                raise RuntimeError("Could not create new chat")
            data = {
                "question": format_prompt(messages),
                "chat_id": chat_id,
                "timestamp": int(time.time()*1000),
            }
            async with session.post(f"{cls.url}/chat_api_stream", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: "):
                        # [6:-1] strips the "data: " prefix and trailing newline.
                        line = json.loads(line[6:-1])
                        chunk = line["choices"][0]["delta"].get("content")
                        if chunk:
                            yield chunk
|
@ -0,0 +1,66 @@
|
||||
import re
|
||||
from aiohttp import ClientSession
|
||||
|
||||
from .base_provider import AsyncProvider
|
||||
from .helper import format_prompt
|
||||
|
||||
class ChatgptX(AsyncProvider):
    """Provider for chatgptx.de: scrapes the CSRF token, chat id and user id
    from the landing page, then posts the prompt to /sendchat."""
    url = "https://chatgptx.de"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> str:
        """Return the complete response text for the formatted prompt.

        Raises:
            RuntimeError: when required page tokens are missing or the
                service reports an error.
        """
        headers = {
            'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
            'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': 'Linux',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
        }
        async with ClientSession(headers=headers) as session:
            async with session.get(f"{cls.url}/") as response:
                # Robustness: fail fast on a bad landing-page response
                # (previously errors surfaced as missing-token failures).
                response.raise_for_status()
                page = await response.text()
            # Fix: initialize before the lookups — previously a failed regex
            # left these names unbound and the check below raised NameError
            # instead of the intended RuntimeError.
            csrf_token = chat_id = user_id = None
            result = re.search(r'<meta name="csrf-token" content="(.*?)"', page)
            if result:
                csrf_token = result.group(1)
            result = re.search(r"openconversions\('(.*?)'\)", page)
            if result:
                chat_id = result.group(1)
            result = re.search(r'<input type="hidden" id="user_id" value="(.*?)"', page)
            if result:
                user_id = result.group(1)

            if not csrf_token or not chat_id or not user_id:
                raise RuntimeError("Missing csrf_token, chat_id or user_id")

            data = {
                '_token': csrf_token,
                'user_id': user_id,
                'chats_id': chat_id,
                'prompt': format_prompt(messages),
                'current_model': "gpt3"
            }
            headers = {
                'authority': 'chatgptx.de',
                'accept': 'application/json, text/javascript, */*; q=0.01',
                'origin': cls.url,
                'referer': f'{cls.url}/',
                'x-csrf-token': csrf_token,
                'x-requested-with': 'XMLHttpRequest'
            }
            async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
                response.raise_for_status()
                data = await response.json()
                if "message" in data:
                    return data["message"]
                elif "messages" in data:
                    raise RuntimeError(f'Response: {data["messages"]}')
                # Fix: previously fell through and silently returned None.
                raise RuntimeError(f"Unexpected response: {data}")
|
@ -0,0 +1,44 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
|
||||
from ...requests import StreamSession
|
||||
from ...typing import AsyncGenerator
|
||||
from ..base_provider import AsyncGeneratorProvider, format_prompt
|
||||
|
||||
class Komo(AsyncGeneratorProvider):
    """Provider for komo.ai's server-sent-events search endpoint."""
    url = "https://komo.ai/api/ask"
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Yield the JSON-decoded payloads that follow "event: line" events."""
        params = {
            "query": format_prompt(messages),
            "FLAG_URLEXTRACT": "false",
            "token": "",
            "FLAG_MODELA": "1",
        }
        headers = {
            'authority': 'komo.ai',
            'accept': 'text/event-stream',
            'cache-control': 'no-cache',
            'referer': 'https://komo.ai/',
        }

        async with StreamSession(impersonate="chrome107") as session:
            async with session.get(cls.url, params=params, headers=headers) as response:
                response.raise_for_status()
                # True exactly when the previous SSE line was "event: line";
                # only then is the next "data:" payload one we should emit.
                # (Renamed from `next`, which shadowed the builtin.)
                expect_data = False
                async for raw in response.iter_lines():
                    if raw == b"event: line":
                        expect_data = True
                    elif expect_data and raw.startswith(b"data: "):
                        yield json.loads(raw[6:])
                        expect_data = False
|
||||
|
@ -0,0 +1,97 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import random, json
|
||||
from datetime import datetime
|
||||
from ...requests import StreamSession
|
||||
|
||||
from ...typing import AsyncGenerator
|
||||
from ..base_provider import AsyncGeneratorProvider
|
||||
|
||||
|
||||
class MikuChat(AsyncGeneratorProvider):
    """Provider for ai.okmiku.com, backed by the api.catgpt.cc OpenAI-style
    streaming endpoint. Requires fingerprint/date request headers."""
    url = "https://ai.okmiku.com"
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Yield content deltas from the streaming chat completion."""
        if not model:
            model = "gpt-3.5-turbo"
        headers = {
            "authority": "api.catgpt.cc",
            "accept": "application/json",
            "origin": cls.url,
            "referer": f"{cls.url}/chat/",
            'x-app-version': 'undefined',
            'x-date': get_datetime(),
            'x-fingerprint': get_fingerprint(),
            'x-platform': 'web'
        }
        async with StreamSession(headers=headers, impersonate="chrome107") as session:
            data = {
                "model": model,
                "top_p": 0.8,
                "temperature": 0.5,
                "presence_penalty": 1,
                "frequency_penalty": 0,
                "max_tokens": 2000,
                "stream": True,
                "messages": messages,
            }
            async with session.post("https://api.catgpt.cc/ai/v1/chat/completions", json=data) as response:
                # Fix: removed leftover debug `print(await response.text())` —
                # it dumped responses to stdout and consumed the body, leaving
                # nothing for the line iterator below.
                response.raise_for_status()
                async for line in response.iter_lines():
                    if line.startswith(b"data: "):
                        line = json.loads(line[6:])
                        chunk = line["choices"][0]["delta"].get("content")
                        if chunk:
                            yield chunk
|
||||
|
||||
def k(e: str, t: int):
    # MurmurHash3-style (x86, 32-bit) string hash ported from the site's
    # JavaScript; `e` is the input string, `t` the seed. Returns an unsigned
    # 32-bit integer. Used only to build the "x-fingerprint" header value.
    a = len(e) & 3   # number of trailing bytes after the 4-byte-aligned body
    s = len(e) - a   # length of the 4-byte-aligned body
    i = t            # running hash state (h1 in murmur terms)
    c = 3432918353   # 0xcc9e2d51 (murmur constant c1)
    o = 461845907    # 0x1b873593 (murmur constant c2)
    n = 0            # read position
    r = 0            # current 4-byte block (k1)
    while n < s:
        # Assemble a little-endian 32-bit block from four characters.
        r = (ord(e[n]) & 255) | ((ord(e[n + 1]) & 255) << 8) | ((ord(e[n + 2]) & 255) << 16) | ((ord(e[n + 3]) & 255) << 24)
        n += 4
        # 16-bit split multiply emulates JS 32-bit overflow multiplication.
        r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
        # NOTE(review): rotation is not masked to 32 bits, so bits above
        # 2**32 leak into later steps — diverges from canonical murmur3.
        r = (r << 15) | (r >> 17)
        r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
        i ^= r
        i = (i << 13) | (i >> 19)
        l = (i & 65535) * 5 + (((i >> 16) * 5 & 65535) << 16) & 4294967295
        i = (l & 65535) + 27492 + (((l >> 16) + 58964 & 65535) << 16)

    # Tail bytes. NOTE(review): the JS original uses a fall-through switch;
    # this if/elif port runs only one branch, so 2- and 3-byte tails are
    # mixed differently than canonical murmur3 — presumably acceptable since
    # only a deterministic-looking value is needed. Confirm before "fixing".
    if a == 3:
        r ^= (ord(e[n + 2]) & 255) << 16
    elif a == 2:
        r ^= (ord(e[n + 1]) & 255) << 8
    elif a == 1:
        r ^= ord(e[n]) & 255
        r = (r & 65535) * c + (((r >> 16) * c & 65535) << 16) & 4294967295
        r = (r << 15) | (r >> 17)
        r = (r & 65535) * o + (((r >> 16) * o & 65535) << 16) & 4294967295
        i ^= r

    # Finalization (fmix32-style avalanche), clamped to 32 bits at the end.
    i ^= len(e)
    i ^= i >> 16
    i = (i & 65535) * 2246822507 + (((i >> 16) * 2246822507 & 65535) << 16) & 4294967295
    i ^= i >> 13
    i = (i & 65535) * 3266489909 + (((i >> 16) * 3266489909 & 65535) << 16) & 4294967295
    i ^= i >> 16
    return i & 0xFFFFFFFF
|
||||
|
||||
def get_fingerprint() -> str:
    """Build a pseudo-random browser-fingerprint string via the `k` hash."""
    seed = str(int(random.random() * 100000))
    return str(k(seed, 256))
|
||||
|
||||
def get_datetime() -> str:
    """Return the current local time formatted as "YYYY-MM-DD HH:MM:SS"."""
    return f"{datetime.now():%Y-%m-%d %H:%M:%S}"
|
@ -0,0 +1,3 @@
|
||||
from .MikuChat import MikuChat
|
||||
from .PerplexityAi import PerplexityAi
|
||||
from .Komo import Komo
|
@ -0,0 +1 @@
|
||||
# Module-level debug flag; presumably enables verbose/debug output when set
# to True by callers — confirm usage at the flag's read sites.
logging = False
|
@ -0,0 +1,114 @@
|
||||
|
||||
import sys, re
|
||||
from pathlib import Path
|
||||
from os import path
|
||||
|
||||
sys.path.append(str(Path(__file__).parent.parent))
|
||||
|
||||
import g4f
|
||||
|
||||
def read_code(text):
    """Extract the first fenced code block from *text*.

    Accepts ```python, ```py, or bare ``` fences. Returns the code without
    the fences, or None when no block is present.
    """
    pattern = r"```(python|py|)\n(?P<code>[\S\s]+?)\n```"
    found = re.search(pattern, text)
    return found.group("code") if found else None
|
||||
|
||||
def read_result(result):
    """Split a model reply into (explanation, code).

    The explanation is every non-empty line before the first code fence,
    joined with newlines; the code is whatever read_code() extracts from
    the full reply (None when absent).
    """
    explanation_lines = []
    for raw in result.split("\n"):
        if raw.startswith("```"):
            break  # explanation ends at the first fence
        if raw:
            explanation_lines.append(raw)
    return "\n".join(explanation_lines), read_code(result)
|
||||
|
||||
def input_command():
    """Read a multi-line cURL command from stdin until end-of-input.

    Returns:
        The pasted lines joined with newlines.
    """
    print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.")
    contents = []
    while True:
        try:
            line = input()
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt,
        # SystemExit and genuine errors. Only EOF should end the paste.
        except EOFError:
            break
        contents.append(line)
    return "\n".join(contents)
|
||||
|
||||
# --- Interactive tool: generate a g4f provider module from a cURL command ---

name = input("Name: ")  # class/module name for the new provider
provider_path = f"g4f/Provider/{name}.py"

# One-shot example provider embedded in the prompt sent to the model.
# NOTE(review): inside the example, `prompt = format_prompt(messages),` has a
# trailing comma and would build a tuple — looks like a typo in the template;
# confirm before teaching it to the model.
example = """
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class ChatgptDuo(AsyncGeneratorProvider):
    url = "https://chat-gpt.com"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        headers = {
            "authority": "chat-gpt.com",
            "accept": "application/json",
            "origin": cls.url,
            "referer": f"{cls.url}/chat",
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages),
            data = {
                "prompt": prompt,
                "purpose": "ask",
            }
            async with session.post(cls.url + "/api/chat", json=data) as response:
                response.raise_for_status()
                async for stream in response.content:
                    if stream:
                        yield stream.decode()
"""

# Only generate code when the provider file does not exist yet.
if not path.isfile(provider_path):
    command = input_command()

    # Prompt: the pasted cURL command plus the example, asking the model to
    # produce a provider class named `name`.
    prompt = f"""
Create a provider from a cURL command. The command is:
```bash
{command}
```
A example for a provider:
```py
{example}
```
The name for the provider class:
{name}
Replace "hello" with `format_prompt(messages)`.
And replace "gpt-3.5-turbo" with `model`.
"""

    print("Create code...")
    response = g4f.ChatCompletion.create(
        model=g4f.models.gpt_35_long,
        messages=[{"role": "user", "content": prompt}],
        auth=True,
        timeout=120,
    )
    print(response)
    explanation, code = read_result(response)
    if code:
        # Write the generated provider and register it in the package index.
        with open(provider_path, "w") as file:
            file.write(code)
        with open(f"g4f/Provider/__init__.py", "a") as file:
            file.write(f"\nfrom .{name} import {name}")
else:
    # Provider already exists: load its source — presumably for a follow-up
    # step (e.g. an improvement pass) that continues beyond this chunk.
    with open(provider_path, "r") as file:
        code = file.read()
|
Loading…
Reference in New Issue