~ | major refactoring + new providers | v0.0.2.0

g4f.Provider.FastGpt & g4f.Provider.Equing

gpt-3.5-turbo-0613
This commit is contained in:
abc 2023-08-17 15:31:01 +02:00
parent 8ddaa2c496
commit 882910c1d5
5 changed files with 164 additions and 3 deletions

74
g4f/Provider/Equing.py Normal file
View File

@ -0,0 +1,74 @@
import requests, json
from abc import ABC, abstractmethod
from ..typing import Any, CreateResult
class Equing(ABC):
    """Provider backed by next.eqing.tech's OpenAI-compatible chat API.

    Streams completion tokens from the provider's server-sent-events (SSE)
    endpoint. Mirrors the BaseProvider interface: capability flags as class
    attributes plus a ``create_completion`` generator.
    """

    url: str = 'https://next.eqing.tech/'
    working = True
    needs_auth = False
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    @staticmethod
    @abstractmethod
    def create_completion(
            model: str,
            messages: list[dict[str, str]],
            stream: bool,
            **kwargs: Any) -> 'CreateResult':
        """Yield completion tokens for *messages* from the Equing endpoint.

        Args:
            model: Model identifier passed through to the API.
            messages: OpenAI-style chat messages (``role``/``content`` dicts).
            stream: Whether to request a streamed (SSE) response.
            **kwargs: Optional sampling overrides: ``temperature`` (0.5),
                ``presence_penalty`` (0), ``frequency_penalty`` (0),
                ``top_p`` (1).

        Yields:
            str: Content deltas as they arrive from the server.
        """
        # Browser-impersonating headers the endpoint expects.
        headers = {
            'authority': 'next.eqing.tech',
            'accept': 'text/event-stream',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'origin': 'https://next.eqing.tech',
            'plugins': '0',
            'pragma': 'no-cache',
            'referer': 'https://next.eqing.tech/',
            'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch': 'false',
            'x-requested-with': 'XMLHttpRequest',
        }

        json_data = {
            'messages': messages,
            'stream': stream,
            'model': model,
            'temperature': kwargs.get('temperature', 0.5),
            'presence_penalty': kwargs.get('presence_penalty', 0),
            'frequency_penalty': kwargs.get('frequency_penalty', 0),
            'top_p': kwargs.get('top_p', 1),
        }

        response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
            headers=headers, json=json_data, stream=stream)

        # FIX: iterate complete SSE lines rather than iter_content(chunk_size=1024).
        # Fixed-size chunks can split a "data: {...}" event at an arbitrary byte
        # boundary, making split('data: ')[1] / json.loads raise and kill the
        # generator mid-stream. iter_lines() yields whole events; malformed or
        # partial lines are skipped best-effort, matching the FastGpt provider.
        for line in response.iter_lines():
            if line and b'content' in line:
                try:
                    line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                    token = line_json['choices'][0]['delta'].get('content')
                except (IndexError, KeyError, UnicodeDecodeError, json.JSONDecodeError):
                    continue
                if token:
                    yield token

    @classmethod
    @property
    def params(cls):
        # Same shape as BaseProvider.params: human-readable signature summary.
        # NOTE(review): stacking @classmethod on @property is deprecated in
        # Python 3.11 and removed in 3.13 — kept for parity with BaseProvider,
        # but should migrate together with it. TODO confirm target Python.
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

83
g4f/Provider/FastGpt.py Normal file
View File

@ -0,0 +1,83 @@
import requests, json, random
from abc import ABC, abstractmethod
from ..typing import Any, CreateResult
class FastGpt(ABC):
    """Provider backed by fastgpt.me's OpenAI-compatible chat API.

    Streams completion tokens from the provider's server-sent-events (SSE)
    endpoint, load-balancing requests across two known subdomains. Mirrors
    the BaseProvider interface: capability flags as class attributes plus a
    ``create_completion`` generator.
    """

    url: str = 'https://chat9.fastgpt.me/'
    working = True
    needs_auth = False
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    @staticmethod
    @abstractmethod
    def create_completion(
            model: str,
            messages: list[dict[str, str]],
            stream: bool,
            **kwargs: Any) -> 'CreateResult':
        """Yield completion tokens for *messages* from the FastGpt endpoint.

        Args:
            model: Model identifier passed through to the API.
            messages: OpenAI-style chat messages (``role``/``content`` dicts).
            stream: Whether to request a streamed (SSE) response.
            **kwargs: Optional sampling overrides: ``temperature`` (0.5),
                ``presence_penalty`` (0), ``frequency_penalty`` (0),
                ``top_p`` (1).

        Yields:
            str: Content deltas as they arrive from the server.
        """
        # Browser-impersonating headers the endpoint expects.
        headers = {
            'authority': 'chat9.fastgpt.me',
            'accept': 'text/event-stream',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            # 'cookie': 'cf_clearance=idIAwtoSCn0uCzcWLGuD.KtiAJv9a1GsPduEOqIkyHU-1692278595-0-1-cb11fd7a.ab1546d4.ccf35fd7-0.2.1692278595; Hm_lvt_563fb31e93813a8a7094966df6671d3f=1691966491,1692278597; Hm_lpvt_563fb31e93813a8a7094966df6671d3f=1692278597',
            'origin': 'https://chat9.fastgpt.me',
            'plugins': '0',
            'pragma': 'no-cache',
            'referer': 'https://chat9.fastgpt.me/',
            'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch': 'false',
            'x-requested-with': 'XMLHttpRequest',
        }

        json_data = {
            'messages': messages,
            'stream': stream,
            'model': model,
            'temperature': kwargs.get('temperature', 0.5),
            'presence_penalty': kwargs.get('presence_penalty', 0),
            'frequency_penalty': kwargs.get('frequency_penalty', 0),
            'top_p': kwargs.get('top_p', 1),
        }

        # Spread load across the two known mirrors of the same backend.
        subdomain = random.choice([
            'jdaen979ew',
            'chat9'
        ])

        response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
            headers=headers, json=json_data, stream=stream)

        for line in response.iter_lines():
            if line and b'content' in line:
                # FIX: narrowed the bare `except:` — it also swallowed
                # KeyboardInterrupt/SystemExit, making the stream impossible
                # to interrupt. Only the expected decode/parse failures of a
                # malformed SSE line are skipped (best-effort, as before).
                try:
                    line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                    token = line_json['choices'][0]['delta'].get('content')
                except (IndexError, KeyError, UnicodeDecodeError, json.JSONDecodeError):
                    continue
                if token:
                    yield token

    @classmethod
    @property
    def params(cls):
        # Same shape as BaseProvider.params: human-readable signature summary.
        # NOTE(review): stacking @classmethod on @property is deprecated in
        # Python 3.11 and removed in 3.13 — kept for parity with BaseProvider,
        # but should migrate together with it. TODO confirm target Python.
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

View File

@ -23,6 +23,8 @@ from .Vercel import Vercel
from .Wewordle import Wewordle from .Wewordle import Wewordle
from .You import You from .You import You
from .Yqcloud import Yqcloud from .Yqcloud import Yqcloud
from .Equing import Equing
from .FastGpt import FastGpt
__all__ = [ __all__ = [
"BaseProvider", "BaseProvider",
@ -50,4 +52,6 @@ __all__ = [
"Wewordle", "Wewordle",
"You", "You",
"Yqcloud", "Yqcloud",
"Equing",
"FastGpt"
] ]

View File

@ -30,4 +30,4 @@ class BaseProvider(ABC):
("stream", "bool"), ("stream", "bool"),
] ]
param = ", ".join([": ".join(p) for p in params]) param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})" return f"g4f.provider.{cls.__name__} supports: ({param})"

View File

@ -1,6 +1,6 @@
from dataclasses import dataclass from dataclasses import dataclass
from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel, Equing
@dataclass @dataclass
@ -131,7 +131,7 @@ gpt_35_turbo_16k = Model(
gpt_35_turbo_16k_0613 = Model( gpt_35_turbo_16k_0613 = Model(
name="openai:gpt-3.5-turbo-16k-0613", name="openai:gpt-3.5-turbo-16k-0613",
base_provider="openai", base_provider="openai",
best_provider=Vercel, best_provider=Equing,
) )
gpt_4_0613 = Model( gpt_4_0613 = Model(