Merge pull request #914 from hlohaus/lesh

Add GptGo Provider, Fix AItianhu Provider
Tekky committed a54291cb7c

@@ -1,61 +1,38 @@
 from __future__ import annotations

 import json
-from aiohttp import ClientSession, http
+from curl_cffi.requests import AsyncSession

-from ..typing import AsyncGenerator
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from .base_provider import AsyncProvider, format_prompt


-class AItianhu(AsyncGeneratorProvider):
+class AItianhu(AsyncProvider):
     url = "https://www.aitianhu.com"
     working = True
     supports_gpt_35_turbo = True

     @classmethod
-    async def create_async_generator(
+    async def create_async(
         cls,
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
-            "Accept": "application/json, text/plain, */*",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Connection": "keep-alive",
-            "Referer": cls.url + "/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-        }
-        async with ClientSession(
-            headers=headers,
-            version=http.HttpVersion10
-        ) as session:
-            data = {
-                "prompt": format_prompt(messages),
-                "options": {},
-                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
-                "temperature": 0.8,
-                "top_p": 1,
-                **kwargs
-            }
-            async with session.post(
-                cls.url + "/api/chat-process",
-                proxy=proxy,
-                json=data,
-                ssl=False,
-            ) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    line = json.loads(line.decode('utf-8'))
-                    token = line["detail"]["choices"][0]["delta"].get("content")
-                    if token:
-                        yield token
+    ) -> str:
+        data = {
+            "prompt": format_prompt(messages),
+            "options": {},
+            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
+            "temperature": 0.8,
+            "top_p": 1,
+            **kwargs
+        }
+        async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
+            response = await session.post(cls.url + "/api/chat-process", json=data)
+            response.raise_for_status()
+            line = response.text.splitlines()[-1]
+            line = json.loads(line)
+            return line["text"]

     @classmethod
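The rewrite above swaps aiohttp for curl_cffi so the request carries a Chrome TLS fingerprint. A minimal standalone sketch of that pattern, with an illustrative URL and payload (not part of this commit):

```python
# Sketch of the curl_cffi pattern used in the new AItianhu code: impersonate
# Chrome 107's TLS handshake so the endpoint does not reject the client.
# The URL and payload are illustrative, not from the commit.
import asyncio
from curl_cffi.requests import AsyncSession

async def post_json(url: str, proxy: str = None) -> str:
    async with AsyncSession(
        proxies={"https": proxy},  # optional HTTPS proxy, as in create_async
        impersonate="chrome107",   # mimic Chrome's TLS/JA3 fingerprint
        verify=False,              # the provider skips certificate checks
    ) as session:
        response = await session.post(url, json={"prompt": "Hello"})
        response.raise_for_status()
        return response.text

if __name__ == "__main__":
    print(asyncio.run(post_json("https://example.com/api/chat-process")))
```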

@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt
+
+
+class GptGo(AsyncGeneratorProvider):
+    url                   = "https://gptgo.ai"
+    supports_gpt_35_turbo = True
+    working               = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        headers = {
+            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept"          : "*/*",
+            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "Origin"          : cls.url,
+            "Referer"         : cls.url + "/",
+            "Sec-Fetch-Dest"  : "empty",
+            "Sec-Fetch-Mode"  : "cors",
+            "Sec-Fetch-Site"  : "same-origin",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            async with session.get(
+                "https://gptgo.ai/action_get_token.php",
+                params={
+                    "q": format_prompt(messages),
+                    "hlgpt": "default",
+                    "hl": "en"
+                },
+                proxy=proxy
+            ) as response:
+                response.raise_for_status()
+                token = (await response.json(content_type=None))["token"]
+
+            async with session.get(
+                "https://gptgo.ai/action_ai_gpt.php",
+                params={
+                    "token": token,
+                },
+                proxy=proxy
+            ) as response:
+                response.raise_for_status()
+                start = "data: "
+                async for line in response.content:
+                    line = line.decode()
+                    if line.startswith("data: "):
+                        if line.startswith("data: [DONE]"):
+                            break
+                        line = json.loads(line[len(start):-1])
+                        content = line["choices"][0]["delta"].get("content")
+                        if content:
+                            yield content
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -51,7 +51,9 @@ class Ylokh(AsyncGeneratorProvider):
         if stream:
             async for line in response.content:
                 line = line.decode()
-                if line.startswith("data: ") and not line.startswith("data: [DONE]"):
+                if line.startswith("data: "):
+                    if line.startswith("data: [DONE]"):
+                        break
                     line = json.loads(line[6:-1])
                     content = line["choices"][0]["delta"].get("content")
                     if content:
@@ -71,6 +73,7 @@ class Ylokh(AsyncGeneratorProvider):
             ("stream", "bool"),
             ("proxy", "str"),
             ("temperature", "float"),
+            ("top_p", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
         return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -16,6 +16,7 @@ from .DfeHub import DfeHub
 from .EasyChat import EasyChat
 from .Forefront import Forefront
 from .GetGpt import GetGpt
+from .GptGo import GptGo
 from .H2o import H2o
 from .HuggingChat import HuggingChat
 from .Liaobots import Liaobots
@@ -57,6 +58,7 @@ __all__ = [
     'EasyChat',
     'Forefront',
     'GetGpt',
+    'GptGo',
     'H2o',
     'HuggingChat',
     'Liaobots',

@@ -35,30 +35,6 @@ class BaseProvider(ABC):
         ]
         param = ", ".join([": ".join(p) for p in params])
         return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
-_cookies = {}
-
-def get_cookies(cookie_domain: str) -> dict:
-    if cookie_domain not in _cookies:
-        _cookies[cookie_domain] = {}
-        try:
-            for cookie in browser_cookie3.load(cookie_domain):
-                _cookies[cookie_domain][cookie.name] = cookie.value
-        except:
-            pass
-    return _cookies[cookie_domain]
-
-
-def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
-    if add_special_tokens or len(messages) > 1:
-        formatted = "\n".join(
-            ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
-        )
-        return f"{formatted}\nAssistant:"
-    else:
-        return messages.pop()["content"]
-
-
 class AsyncProvider(BaseProvider):
@@ -67,8 +43,9 @@ class AsyncProvider(BaseProvider):
         cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool = False, **kwargs: Any) -> CreateResult:
+        stream: bool = False,
+        **kwargs
+    ) -> CreateResult:
         yield asyncio.run(cls.create_async(model, messages, **kwargs))

     @staticmethod
@@ -90,7 +67,20 @@ class AsyncGeneratorProvider(AsyncProvider):
         stream: bool = True,
         **kwargs
     ) -> CreateResult:
-        yield from run_generator(cls.create_async_generator(model, messages, stream=stream, **kwargs))
+        loop = asyncio.new_event_loop()
+        try:
+            asyncio.set_event_loop(loop)
+            generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
+            gen = generator.__aiter__()
+            while True:
+                try:
+                    yield loop.run_until_complete(gen.__anext__())
+                except StopAsyncIteration:
+                    break
+        finally:
+            asyncio.set_event_loop(None)
+            loop.close()

     @classmethod
     async def create_async(
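The inlined bridge replaces the old module-level `run_generator`, which created an event loop it never closed. A self-contained illustration of the same pattern (toy generator, not from the commit):

```python
import asyncio
from typing import AsyncGenerator, Generator

async def produce() -> AsyncGenerator[str, None]:
    for token in ("Hel", "lo"):
        await asyncio.sleep(0)  # stand-in for awaiting a network chunk
        yield token

def consume() -> Generator[str, None, None]:
    loop = asyncio.new_event_loop()
    try:
        asyncio.set_event_loop(loop)
        gen = produce().__aiter__()
        while True:
            try:
                yield loop.run_until_complete(gen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        asyncio.set_event_loop(None)  # don't leave a stale current loop behind
        loop.close()                  # the old run_generator leaked its loop

print("".join(consume()))  # -> Hello
```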
@@ -99,27 +89,36 @@ class AsyncGeneratorProvider(AsyncProvider):
         messages: list[dict[str, str]],
         **kwargs
     ) -> str:
-        chunks = [chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)]
-        if chunks:
-            return "".join(chunks)
+        return "".join([chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)])

     @staticmethod
     @abstractmethod
     def create_async_generator(
         model: str,
         messages: list[dict[str, str]],
         **kwargs
     ) -> AsyncGenerator:
         raise NotImplementedError()


-def run_generator(generator: AsyncGenerator[Union[Any, str], Any]):
-    loop = asyncio.new_event_loop()
-    gen = generator.__aiter__()
-
-    while True:
-        try:
-            yield loop.run_until_complete(gen.__anext__())
-
-        except StopAsyncIteration:
-            break
+_cookies = {}
+
+def get_cookies(cookie_domain: str) -> dict:
+    if cookie_domain not in _cookies:
+        _cookies[cookie_domain] = {}
+        try:
+            for cookie in browser_cookie3.load(cookie_domain):
+                _cookies[cookie_domain][cookie.name] = cookie.value
+        except:
+            pass
+    return _cookies[cookie_domain]
+
+
+def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
+    if add_special_tokens or len(messages) > 1:
+        formatted = "\n".join(
+            ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
+        )
+        return f"{formatted}\nAssistant:"
+    else:
+        return messages[0]["content"]
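Besides moving below the provider classes, `format_prompt` now reads `messages[0]` instead of `pop()`-ing, so a single-message list is no longer mutated by the call. Illustrative behavior of both branches (assumes g4f is importable):

```python
from g4f.Provider.base_provider import format_prompt

single = [{"role": "user", "content": "Hi"}]
multi  = [
    {"role": "system", "content": "Be brief."},
    {"role": "user",   "content": "Hi"},
]

print(format_prompt(single))  # "Hi" -- and `single` is left intact now
print(format_prompt(multi))   # "System: Be brief.\nUser: Hi\nAssistant:"
```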

@@ -16,14 +16,17 @@ class ChatCompletion:
         auth     : Union[str, None] = None, **kwargs: Any) -> Union[CreateResult, str]:
         if isinstance(model, str):
-            try:
-                model = models.ModelUtils.convert[model]
-            except KeyError:
+            if model in models.ModelUtils.convert:
+                model = models.ModelUtils.convert[model]
+            else:
                 raise Exception(f'The model: {model} does not exist')

         if not provider:
             if isinstance(model.best_provider, list):
-                provider = random.choice(model.best_provider)
+                if stream:
+                    provider = random.choice([p for p in model.best_provider if p.supports_stream])
+                else:
+                    provider = random.choice(model.best_provider)
             else:
                 provider = model.best_provider
@@ -7,7 +7,6 @@ from .Provider import (
     CodeLinkAva,
     ChatgptAi,
     ChatBase,
-    Yqcloud,
     Vercel,
     DeepAi,
     Aivvm,
@@ -32,7 +31,7 @@ gpt_35_turbo = Model(
     name          = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider = [
-        Yqcloud, DeepAi, CodeLinkAva, ChatgptLogin, ChatgptAi, ChatBase, Aivvm
+        DeepAi, CodeLinkAva, ChatgptLogin, ChatgptAi, ChatBase, Aivvm
     ]
 )

@@ -17,7 +17,7 @@ _providers = [
     g4f.Provider.Bard
 ]

-_instruct = "Hello, tell about you in one sentence."
+_instruct = "Hello, are you GPT 4?."

 _example = """
 OpenaiChat: Hello! How can I assist you today? 2.0 secs
@@ -39,14 +39,14 @@ No Stream Total: 10.14 secs
 print("Bing: ", end="")
 for response in log_time_yield(
     g4f.ChatCompletion.create,
-    model=g4f.models.gpt_35_turbo,
+    model=g4f.models.default,
     messages=[{"role": "user", "content": _instruct}],
     provider=g4f.Provider.Bing,
     #cookies=g4f.get_cookies(".huggingface.co"),
-    #stream=True,
+    stream=True,
     auth=True
 ):
-    print(response, end="")
+    print(response, end="", flush=True)
 print()
 print()
@@ -75,7 +75,7 @@ def run_stream():
         model=None,
         messages=[{"role": "user", "content": _instruct}],
     ):
-        print(response, end="")
+        print(response, end="", flush=True)
     print()

 print("Stream Total:", log_time(run_stream))
 print()
