~ | Merge pull request #914 from hlohaus/lesh

~ | Add GptGo Provider, Fix AItianhu Provider
pull/917/head
Tekky 10 months ago committed by GitHub
commit a54291cb7c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -1,61 +1,38 @@
from __future__ import annotations
import json
from aiohttp import ClientSession, http
from curl_cffi.requests import AsyncSession
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt
from .base_provider import AsyncProvider, format_prompt
class AItianhu(AsyncGeneratorProvider):
class AItianhu(AsyncProvider):
url = "https://www.aitianhu.com"
working = True
supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
async def create_async(
cls,
model: str,
messages: list[dict[str, str]],
proxy: str = None,
**kwargs
) -> AsyncGenerator:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
"Accept": "application/json, text/plain, */*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Content-Type": "application/json",
"Origin": cls.url,
"Connection": "keep-alive",
"Referer": cls.url + "/",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
) -> str:
data = {
"prompt": format_prompt(messages),
"options": {},
"systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
"temperature": 0.8,
"top_p": 1,
**kwargs
}
async with ClientSession(
headers=headers,
version=http.HttpVersion10
) as session:
data = {
"prompt": format_prompt(messages),
"options": {},
"systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
"temperature": 0.8,
"top_p": 1,
**kwargs
}
async with session.post(
cls.url + "/api/chat-process",
proxy=proxy,
json=data,
ssl=False,
) as response:
response.raise_for_status()
async for line in response.content:
line = json.loads(line.decode('utf-8'))
token = line["detail"]["choices"][0]["delta"].get("content")
if token:
yield token
async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
response = await session.post(cls.url + "/api/chat-process", json=data)
response.raise_for_status()
line = response.text.splitlines()[-1]
line = json.loads(line)
return line["text"]
@classmethod

@ -0,0 +1,78 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt
class GptGo(AsyncGeneratorProvider):
    """Async provider for gptgo.ai.

    Flow: GET ``action_get_token.php`` with the formatted prompt to obtain
    an access token, then GET ``action_ai_gpt.php`` with that token and
    stream the server-sent-events response, yielding content deltas.
    """
    url = "https://gptgo.ai"        # base URL of the backing service
    supports_gpt_35_turbo = True    # backend serves a gpt-3.5-turbo-style model
    working = True                  # provider considered functional at commit time

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        """Yield response text chunks for the given chat *messages*.

        Args:
            model: unused; the backend chooses the model itself.
            messages: chat history as ``{"role": ..., "content": ...}`` dicts,
                flattened into a single prompt by ``format_prompt``.
            proxy: optional proxy URL forwarded to both HTTP requests.
            **kwargs: accepted for interface compatibility; unused here.

        Raises:
            aiohttp.ClientResponseError: on a non-2xx HTTP status
                (via ``raise_for_status``).
        """
        # Browser-like headers; Origin/Referer mirror the site so the
        # endpoints see what looks like a same-origin browser request.
        headers = {
            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept" : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin" : cls.url,
            "Referer" : cls.url + "/",
            "Sec-Fetch-Dest" : "empty",
            "Sec-Fetch-Mode" : "cors",
            "Sec-Fetch-Site" : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            # Step 1: exchange the prompt for an access token.
            async with session.get(
                "https://gptgo.ai/action_get_token.php",
                params={
                    "q": format_prompt(messages),
                    "hlgpt": "default",
                    "hl": "en"
                },
                proxy=proxy
            ) as response:
                response.raise_for_status()
                # content_type=None: accept the body as JSON even if the
                # server does not send an application/json content type.
                token = (await response.json(content_type=None))["token"]
            # Step 2: stream the completion as SSE "data: ..." lines.
            async with session.get(
                "https://gptgo.ai/action_ai_gpt.php",
                params={
                    "token": token,
                },
                proxy=proxy
            ) as response:
                response.raise_for_status()
                start = "data: "
                async for line in response.content:
                    line = line.decode()
                    if line.startswith("data: "):
                        # "[DONE]" sentinel terminates the stream.
                        if line.startswith("data: [DONE]"):
                            break
                        # Drop the "data: " prefix and the trailing newline,
                        # then parse the OpenAI-style delta payload.
                        line = json.loads(line[len(start):-1])
                        content = line["choices"][0]["delta"].get("content")
                        if content:
                            yield content

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the parameters this provider supports."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

@ -51,7 +51,9 @@ class Ylokh(AsyncGeneratorProvider):
if stream:
async for line in response.content:
line = line.decode()
if line.startswith("data: ") and not line.startswith("data: [DONE]"):
if line.startswith("data: "):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
content = line["choices"][0]["delta"].get("content")
if content:
@ -71,6 +73,7 @@ class Ylokh(AsyncGeneratorProvider):
("stream", "bool"),
("proxy", "str"),
("temperature", "float"),
("top_p", "float"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"

@ -16,6 +16,7 @@ from .DfeHub import DfeHub
from .EasyChat import EasyChat
from .Forefront import Forefront
from .GetGpt import GetGpt
from .GptGo import GptGo
from .H2o import H2o
from .HuggingChat import HuggingChat
from .Liaobots import Liaobots
@ -57,6 +58,7 @@ __all__ = [
'EasyChat',
'Forefront',
'GetGpt',
'GptGo',
'H2o',
'HuggingChat',
'Liaobots',

@ -35,30 +35,6 @@ class BaseProvider(ABC):
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
_cookies = {}
def get_cookies(cookie_domain: str) -> dict:
if cookie_domain not in _cookies:
_cookies[cookie_domain] = {}
try:
for cookie in browser_cookie3.load(cookie_domain):
_cookies[cookie_domain][cookie.name] = cookie.value
except:
pass
return _cookies[cookie_domain]
def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
if add_special_tokens or len(messages) > 1:
formatted = "\n".join(
["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
)
return f"{formatted}\nAssistant:"
else:
return messages.pop()["content"]
class AsyncProvider(BaseProvider):
@ -67,8 +43,9 @@ class AsyncProvider(BaseProvider):
cls,
model: str,
messages: list[dict[str, str]],
stream: bool = False, **kwargs: Any) -> CreateResult:
stream: bool = False,
**kwargs
) -> CreateResult:
yield asyncio.run(cls.create_async(model, messages, **kwargs))
@staticmethod
@ -90,7 +67,20 @@ class AsyncGeneratorProvider(AsyncProvider):
stream: bool = True,
**kwargs
) -> CreateResult:
yield from run_generator(cls.create_async_generator(model, messages, stream=stream, **kwargs))
loop = asyncio.new_event_loop()
try:
asyncio.set_event_loop(loop)
generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
gen = generator.__aiter__()
while True:
try:
yield loop.run_until_complete(gen.__anext__())
except StopAsyncIteration:
break
finally:
asyncio.set_event_loop(None)
loop.close()
@classmethod
async def create_async(
@ -99,27 +89,36 @@ class AsyncGeneratorProvider(AsyncProvider):
messages: list[dict[str, str]],
**kwargs
) -> str:
chunks = [chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)]
if chunks:
return "".join(chunks)
return "".join([chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)])
@staticmethod
@abstractmethod
def create_async_generator(
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
model: str,
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
raise NotImplementedError()
def run_generator(generator: AsyncGenerator[Union[Any, str], Any]):
loop = asyncio.new_event_loop()
gen = generator.__aiter__()
_cookies = {}
while True:
def get_cookies(cookie_domain: str) -> dict:
if cookie_domain not in _cookies:
_cookies[cookie_domain] = {}
try:
yield loop.run_until_complete(gen.__anext__())
for cookie in browser_cookie3.load(cookie_domain):
_cookies[cookie_domain][cookie.name] = cookie.value
except:
pass
return _cookies[cookie_domain]
except StopAsyncIteration:
break
def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
if add_special_tokens or len(messages) > 1:
formatted = "\n".join(
["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
)
return f"{formatted}\nAssistant:"
else:
return messages[0]["content"]

@ -16,14 +16,17 @@ class ChatCompletion:
auth : Union[str, None] = None, **kwargs: Any) -> Union[CreateResult, str]:
if isinstance(model, str):
try:
if model in models.ModelUtils.convert:
model = models.ModelUtils.convert[model]
except KeyError:
else:
raise Exception(f'The model: {model} does not exist')
if not provider:
if isinstance(model.best_provider, list):
provider = random.choice(model.best_provider)
if stream:
provider = random.choice([p for p in model.best_provider if p.supports_stream])
else:
provider = random.choice(model.best_provider)
else:
provider = model.best_provider

@ -7,7 +7,6 @@ from .Provider import (
CodeLinkAva,
ChatgptAi,
ChatBase,
Yqcloud,
Vercel,
DeepAi,
Aivvm,
@ -32,7 +31,7 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = [
Yqcloud, DeepAi, CodeLinkAva, ChatgptLogin, ChatgptAi, ChatBase, Aivvm
DeepAi, CodeLinkAva, ChatgptLogin, ChatgptAi, ChatBase, Aivvm
]
)

@ -17,7 +17,7 @@ _providers = [
g4f.Provider.Bard
]
_instruct = "Hello, tell about you in one sentence."
_instruct = "Hello, are you GPT 4?"
_example = """
OpenaiChat: Hello! How can I assist you today? 2.0 secs
@ -39,14 +39,14 @@ No Stream Total: 10.14 secs
print("Bing: ", end="")
for response in log_time_yield(
g4f.ChatCompletion.create,
model=g4f.models.gpt_35_turbo,
model=g4f.models.default,
messages=[{"role": "user", "content": _instruct}],
provider=g4f.Provider.Bing,
#cookies=g4f.get_cookies(".huggingface.co"),
#stream=True,
stream=True,
auth=True
):
print(response, end="")
print(response, end="", flush=True)
print()
print()
@ -75,7 +75,7 @@ def run_stream():
model=None,
messages=[{"role": "user", "content": _instruct}],
):
print(response, end="")
print(response, end="", flush=True)
print()
print("Stream Total:", log_time(run_stream))
print()

Loading…
Cancel
Save