Add GptGod Provider

Remove timeout from aiohttp providers
Disable Opchatgpts and ChatgptLogin providers
Heiner Lohaus 2023-10-07 09:02:48 +02:00
parent 1238d9a638
commit 4fa6e9c0f5
15 changed files with 74 additions and 29 deletions
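
With the per-provider timeout gone, the aiohttp providers below no longer accept a timeout keyword or build a ClientTimeout themselves. A caller that still wants an upper bound can enforce one from the outside. A minimal sketch with a hypothetical helper, assuming AiAsk is exported from g4f.Provider and picking an arbitrary 30-second limit:

import asyncio
from g4f.Provider import AiAsk

async def ask_with_timeout(prompt: str, timeout: float = 30) -> str:
    # The provider no longer takes timeout=, so the caller bounds the
    # whole request with asyncio.wait_for instead.
    async def collect() -> str:
        chunks = []
        async for chunk in AiAsk.create_async_generator(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
        ):
            chunks.append(chunk)
        return "".join(chunks)

    return await asyncio.wait_for(collect(), timeout=timeout)

print(asyncio.run(ask_with_timeout("Hello")))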

View File

@@ -3,6 +3,7 @@ from pathlib import Path
import asyncio
sys.path.append(str(Path(__file__).parent.parent))
sys.path.append(str(Path(__file__).parent.parent.parent))
import g4f
from testing.test_providers import get_providers

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
from aiohttp import ClientSession, ClientTimeout
from aiohttp import ClientSession
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
@@ -14,7 +14,6 @@ class AiAsk(AsyncGeneratorProvider):
cls,
model: str,
messages: list[dict[str, str]],
timeout: int = 30,
**kwargs
) -> AsyncGenerator:
headers = {
@@ -22,7 +21,7 @@ class AiAsk(AsyncGeneratorProvider):
"origin": cls.url,
"referer": f"{cls.url}/chat",
}
async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
async with ClientSession(headers=headers) as session:
data = {
"continuous": True,
"id": "fRMSQtuHl91A4De9cCvKD",

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
from aiohttp import ClientSession, ClientTimeout
from aiohttp import ClientSession
from .base_provider import AsyncProvider, format_prompt
@@ -15,7 +15,6 @@ class Aichat(AsyncProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
timeout: int = 30,
**kwargs
) -> str:
headers = {
@@ -34,7 +33,7 @@ class Aichat(AsyncProvider):
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
}
async with ClientSession(
headers=headers, timeout=ClientTimeout(timeout)
headers=headers
) as session:
json_data = {
"message": format_prompt(messages),

View File

@@ -1,7 +1,7 @@
from __future__ import annotations
import re
from aiohttp import ClientSession, ClientTimeout
from aiohttp import ClientSession
from .base_provider import AsyncProvider, format_prompt
@@ -20,7 +20,6 @@ class ChatgptAi(AsyncProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
timeout: int = 30,
**kwargs
) -> str:
headers = {
@@ -40,7 +39,7 @@ class ChatgptAi(AsyncProvider):
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
}
async with ClientSession(
headers=headers, timeout=ClientTimeout(timeout)
headers=headers
) as session:
if not cls._nonce:
async with session.get(cls.url, proxy=proxy) as response:

View File

@@ -1,7 +1,7 @@
from __future__ import annotations
import time, json, re
from aiohttp import ClientSession, ClientTimeout
from aiohttp import ClientSession
from typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
@@ -18,7 +18,6 @@ class ChatgptDemo(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
timeout: int = 30,
**kwargs
) -> AsyncGenerator:
headers = {
@@ -34,7 +33,7 @@ class ChatgptDemo(AsyncGeneratorProvider):
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
async with ClientSession(headers=headers) as session:
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
from aiohttp import ClientSession, ClientTimeout
from aiohttp import ClientSession
import json
from ..typing import AsyncGenerator
@@ -32,7 +32,7 @@ class GptGo(AsyncGeneratorProvider):
"Sec-Fetch-Site" : "same-origin",
}
async with ClientSession(
headers=headers, timeout=ClientTimeout(timeout)
headers=headers
) as session:
async with session.get(
"https://gptgo.ai/action_get_token.php",

g4f/Provider/GptGod.py (new file, 51 lines)
View File

@@ -0,0 +1,51 @@
from __future__ import annotations
import secrets, json
from aiohttp import ClientSession
from typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
class GptGod(AsyncGeneratorProvider):
url = "https://gptgod.site"
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
"Accept": "text/event-stream",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Alt-Used": "gptgod.site",
"Connection": "keep-alive",
"Referer": "https://gptgod.site/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"content": prompt,
"id": secrets.token_hex(16).zfill(32)
}
async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data) as response:
response.raise_for_status()
event = None
async for line in response.content:
if line.startswith(b'event: '):
event = line[7:-1]
elif event == b"data" and line.startswith(b"data: "):
data = json.loads(line[6:-1])
if data:
yield data
elif event == b"done":
break
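
The new provider streams its answer over server-sent events: it sends the formatted prompt as a GET request to /api/session/free/gpt3p5 and yields the JSON payload of each data event until a done event arrives. A minimal usage sketch (model and prompt are placeholder values; assumes GptGod is importable from g4f.Provider, as the __init__ change further below adds):

import asyncio
from g4f.Provider import GptGod

async def main() -> None:
    # Each yielded chunk is the decoded payload of one "data" event.
    async for chunk in GptGod.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())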

View File

@@ -2,7 +2,7 @@ from __future__ import annotations
import uuid
from aiohttp import ClientSession, ClientTimeout
from aiohttp import ClientSession
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
@@ -42,7 +42,6 @@ class Liaobots(AsyncGeneratorProvider):
messages: list[dict[str, str]],
auth: str = None,
proxy: str = None,
timeout: int = 30,
**kwargs
) -> AsyncGenerator:
model = model if model in models else "gpt-3.5-turbo"
@@ -54,7 +53,7 @@
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
}
async with ClientSession(
headers=headers, timeout=ClientTimeout(timeout)
headers=headers
) as session:
cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
if not cls._auth_code:

View File

@@ -1,7 +1,7 @@
from __future__ import annotations
import json
from aiohttp import ClientSession, ClientTimeout
from aiohttp import ClientSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
@@ -18,7 +18,6 @@ class Vitalentum(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
timeout: int = 30,
**kwargs
) -> AsyncGenerator:
headers = {
@@ -41,7 +40,7 @@ class Vitalentum(AsyncGeneratorProvider):
**kwargs
}
async with ClientSession(
headers=headers, timeout=ClientTimeout(timeout)
headers=headers
) as session:
async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
response.raise_for_status()

View File

@@ -16,11 +16,10 @@ class Yqcloud(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
timeout: int = 30,
**kwargs,
) -> AsyncGenerator:
async with ClientSession(
headers=_create_header(), timeout=timeout
headers=_create_header()
) as session:
payload = _create_payload(messages)
async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:

View File

@@ -14,13 +14,13 @@ from .Chatgpt4Online import Chatgpt4Online
from .ChatgptAi import ChatgptAi
from .ChatgptDemo import ChatgptDemo
from .ChatgptDuo import ChatgptDuo
from .ChatgptLogin import ChatgptLogin
from .ChatgptX import ChatgptX
from .DeepAi import DeepAi
from .FreeGpt import FreeGpt
from .GPTalk import GPTalk
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
from .H2o import H2o
from .Liaobots import Liaobots
from .Myshell import Myshell
@@ -71,6 +71,7 @@ __all__ = [
'GptForLove',
'GetGpt',
'GptGo',
'GptGod',
'H2o',
'HuggingChat',
'Liaobots',

View File

@@ -3,7 +3,7 @@ from __future__ import annotations
import os, re
from aiohttp import ClientSession
from .base_provider import AsyncProvider, format_prompt
from ..base_provider import AsyncProvider, format_prompt
class ChatgptLogin(AsyncProvider):

View File

@@ -1,8 +1,7 @@
from __future__ import annotations
from ..ChatgptLogin import ChatgptLogin
from .ChatgptLogin import ChatgptLogin
class Opchatgpts(ChatgptLogin):
url = "https://opchatgpts.net"
working = True

View File

@@ -11,3 +11,4 @@ from .Equing import Equing
from .Wuguokai import Wuguokai
from .V50 import V50
from .FastGpt import FastGpt
from .ChatgptLogin import ChatgptLogin
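
Since ChatgptLogin's base_provider import now reaches one package level up and the deprecated package's __init__ gains a matching export, the class is reachable from the deprecated namespace rather than from the top-level provider list. A minimal sketch, assuming the subpackage path is g4f.Provider.deprecated as the relative imports above suggest:

# ChatgptLogin is now re-exported from the deprecated subpackage;
# Opchatgpts (shown above) subclasses it from there.
from g4f.Provider.deprecated import ChatgptLogin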

View File

@@ -6,7 +6,6 @@ from .typing import CreateResult, Union
from .debug import logging
from requests import get
logging = False
version = '0.1.5.4'
def check_pypi_version():