Mirror of https://github.com/xtekky/gpt4free.git (synced 2024-11-15 06:13:01 +00:00)
Add GptGod Provider
Remove the timeout parameter from the aiohttp providers; disable the Opchatgpts and ChatgptLogin providers
Parent: 1238d9a638 · Commit: 4fa6e9c0f5
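Because the per-provider `timeout` parameter and the aiohttp `ClientTimeout` are removed in the hunks below, a caller that still wants a hard deadline has to impose one itself. A minimal sketch, not part of this commit; the provider and method names are placeholders for any of the async providers touched here:

import asyncio

async def call_with_deadline(coro, seconds: float = 30):
    # Cancel the awaited provider call if it runs longer than `seconds`.
    return await asyncio.wait_for(coro, timeout=seconds)

# Hypothetical usage with any coroutine-returning provider method:
# answer = await call_with_deadline(SomeAsyncProvider.create_async(model, messages), seconds=30)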
@@ -3,6 +3,7 @@ from pathlib import Path
 import asyncio
 
 sys.path.append(str(Path(__file__).parent.parent))
+sys.path.append(str(Path(__file__).parent.parent.parent))
 
 import g4f
 from testing.test_providers import get_providers
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from aiohttp import ClientSession, ClientTimeout
+from aiohttp import ClientSession
 from ..typing import AsyncGenerator
 from .base_provider import AsyncGeneratorProvider
 
@@ -14,7 +14,6 @@ class AiAsk(AsyncGeneratorProvider):
         cls,
         model: str,
         messages: list[dict[str, str]],
-        timeout: int = 30,
         **kwargs
     ) -> AsyncGenerator:
         headers = {
@@ -22,7 +21,7 @@ class AiAsk(AsyncGeneratorProvider):
             "origin": cls.url,
             "referer": f"{cls.url}/chat",
         }
-        async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
+        async with ClientSession(headers=headers) as session:
             data = {
                 "continuous": True,
                 "id": "fRMSQtuHl91A4De9cCvKD",
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from aiohttp import ClientSession, ClientTimeout
+from aiohttp import ClientSession
 
 from .base_provider import AsyncProvider, format_prompt
 
@@ -15,7 +15,6 @@ class Aichat(AsyncProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        timeout: int = 30,
         **kwargs
     ) -> str:
         headers = {
@@ -34,7 +33,7 @@ class Aichat(AsyncProvider):
             "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
         }
         async with ClientSession(
-            headers=headers, timeout=ClientTimeout(timeout)
+            headers=headers
         ) as session:
             json_data = {
                 "message": format_prompt(messages),
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import re
-from aiohttp import ClientSession, ClientTimeout
+from aiohttp import ClientSession
 
 from .base_provider import AsyncProvider, format_prompt
 
@@ -20,7 +20,6 @@ class ChatgptAi(AsyncProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        timeout: int = 30,
         **kwargs
     ) -> str:
         headers = {
@@ -40,7 +39,7 @@ class ChatgptAi(AsyncProvider):
             "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
         }
         async with ClientSession(
-            headers=headers, timeout=ClientTimeout(timeout)
+            headers=headers
         ) as session:
             if not cls._nonce:
                 async with session.get(cls.url, proxy=proxy) as response:
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import time, json, re
-from aiohttp import ClientSession, ClientTimeout
+from aiohttp import ClientSession
 from typing import AsyncGenerator
 
 from .base_provider import AsyncGeneratorProvider
@@ -18,7 +18,6 @@ class ChatgptDemo(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        timeout: int = 30,
         **kwargs
     ) -> AsyncGenerator:
         headers = {
@@ -34,7 +33,7 @@ class ChatgptDemo(AsyncGeneratorProvider):
             "sec-fetch-site": "same-origin",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
         }
-        async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
+        async with ClientSession(headers=headers) as session:
             async with session.get(f"{cls.url}/", proxy=proxy) as response:
                 response.raise_for_status()
                 response = await response.text()
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from aiohttp import ClientSession, ClientTimeout
+from aiohttp import ClientSession
 import json
 
 from ..typing import AsyncGenerator
@@ -32,7 +32,7 @@ class GptGo(AsyncGeneratorProvider):
             "Sec-Fetch-Site" : "same-origin",
         }
         async with ClientSession(
-            headers=headers, timeout=ClientTimeout(timeout)
+            headers=headers
         ) as session:
             async with session.get(
                 "https://gptgo.ai/action_get_token.php",
g4f/Provider/GptGod.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+from __future__ import annotations
+import secrets, json
+from aiohttp import ClientSession
+from typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+class GptGod(AsyncGeneratorProvider):
+    url = "https://gptgod.site"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "text/event-stream",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Alt-Used": "gptgod.site",
+            "Connection": "keep-alive",
+            "Referer": "https://gptgod.site/",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache",
+        }
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "content": prompt,
+                "id": secrets.token_hex(16).zfill(32)
+            }
+            async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data) as response:
+                response.raise_for_status()
+                event = None
+                async for line in response.content:
+                    if line.startswith(b'event: '):
+                        event = line[7:-1]
+                    elif event == b"data" and line.startswith(b"data: "):
+                        data = json.loads(line[6:-1])
+                        if data:
+                            yield data
+                    elif event == b"done":
+                        break
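A quick way to exercise the new provider directly, sketched from the `create_async_generator` signature above; the model name and message content are only illustrative:

import asyncio
from g4f.Provider import GptGod

async def demo():
    messages = [{"role": "user", "content": "Hello"}]
    # Chunks are yielded as the SSE "data" events arrive from gptgod.site.
    async for chunk in GptGod.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="")

asyncio.run(demo())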
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import uuid
 
-from aiohttp import ClientSession, ClientTimeout
+from aiohttp import ClientSession
 
 from ..typing import AsyncGenerator
 from .base_provider import AsyncGeneratorProvider
@@ -42,7 +42,6 @@ class Liaobots(AsyncGeneratorProvider):
         messages: list[dict[str, str]],
         auth: str = None,
         proxy: str = None,
-        timeout: int = 30,
         **kwargs
     ) -> AsyncGenerator:
         model = model if model in models else "gpt-3.5-turbo"
@@ -54,7 +53,7 @@ class Liaobots(AsyncGeneratorProvider):
             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
         }
         async with ClientSession(
-            headers=headers, timeout=ClientTimeout(timeout)
+            headers=headers
         ) as session:
             cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
             if not cls._auth_code:
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import json
-from aiohttp import ClientSession, ClientTimeout
+from aiohttp import ClientSession
 
 from .base_provider import AsyncGeneratorProvider
 from ..typing import AsyncGenerator
@@ -18,7 +18,6 @@ class Vitalentum(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        timeout: int = 30,
         **kwargs
     ) -> AsyncGenerator:
         headers = {
@@ -41,7 +40,7 @@ class Vitalentum(AsyncGeneratorProvider):
             **kwargs
         }
         async with ClientSession(
-            headers=headers, timeout=ClientTimeout(timeout)
+            headers=headers
         ) as session:
             async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
                 response.raise_for_status()
@@ -16,11 +16,10 @@ class Yqcloud(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        timeout: int = 30,
         **kwargs,
     ) -> AsyncGenerator:
         async with ClientSession(
-            headers=_create_header(), timeout=timeout
+            headers=_create_header()
         ) as session:
             payload = _create_payload(messages)
             async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
@@ -14,13 +14,13 @@ from .Chatgpt4Online import Chatgpt4Online
 from .ChatgptAi import ChatgptAi
 from .ChatgptDemo import ChatgptDemo
 from .ChatgptDuo import ChatgptDuo
-from .ChatgptLogin import ChatgptLogin
 from .ChatgptX import ChatgptX
 from .DeepAi import DeepAi
 from .FreeGpt import FreeGpt
 from .GPTalk import GPTalk
 from .GptForLove import GptForLove
 from .GptGo import GptGo
+from .GptGod import GptGod
 from .H2o import H2o
 from .Liaobots import Liaobots
 from .Myshell import Myshell
@@ -71,6 +71,7 @@ __all__ = [
     'GptForLove',
     'GetGpt',
     'GptGo',
+    'GptGod',
     'H2o',
     'HuggingChat',
     'Liaobots',
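With the provider imported and listed in `__all__`, it can also be selected through the package-level API; a minimal sketch, assuming the usual `g4f.ChatCompletion.create` entry point:

import g4f

# Pin the GptGod backend instead of letting g4f choose a provider automatically.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    provider=g4f.Provider.GptGod,
)
print(response)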
@@ -3,7 +3,7 @@ from __future__ import annotations
 import os, re
 from aiohttp import ClientSession
 
-from .base_provider import AsyncProvider, format_prompt
+from ..base_provider import AsyncProvider, format_prompt
 
 
 class ChatgptLogin(AsyncProvider):
@@ -1,8 +1,7 @@
 from __future__ import annotations
 
-from ..ChatgptLogin import ChatgptLogin
+from .ChatgptLogin import ChatgptLogin
 
 
 class Opchatgpts(ChatgptLogin):
     url = "https://opchatgpts.net"
-    working = True
@@ -10,4 +10,5 @@ from .Wewordle import Wewordle
 from .Equing import Equing
 from .Wuguokai import Wuguokai
 from .V50 import V50
 from .FastGpt import FastGpt
+from .ChatgptLogin import ChatgptLogin
@@ -6,7 +6,6 @@ from .typing import CreateResult, Union
 from .debug import logging
 from requests import get
 
-logging = False
 version = '0.1.5.4'
 
 def check_pypi_version():