Merge pull request #844

Add OpenAssistant and HuggingChat provider, Remove HugChat
Tekky committed 1 year ago (via GitHub)
commit 7e687b3d17
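For orientation: the two new providers plug into the same `AsyncProvider` / `AsyncGeneratorProvider` base classes used throughout this diff. A minimal usage sketch (not part of the commit; it assumes a logged-in huggingface.co browser session so `get_cookies` can find one):

    import asyncio
    import g4f

    async def main():
        # With cookies=None, HuggingChat reads a ".huggingface.co" session
        # from the local browser via get_cookies().
        answer = await g4f.Provider.HuggingChat.create_async(
            model=None,  # falls back to the provider's default model
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(answer)

    asyncio.run(main())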

@@ -1,12 +1,9 @@
 import json
 import random
 import re
 from aiohttp import ClientSession
-import asyncio
-from ..typing import Any, CreateResult
-from .base_provider import AsyncProvider, get_cookies
+from .base_provider import AsyncProvider, get_cookies, format_prompt

 class Bard(AsyncProvider):
     url = "https://bard.google.com"
@@ -19,15 +16,14 @@ class Bard(AsyncProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        cookies: dict = get_cookies(".google.com"), **kwargs: Any,) -> str:
-
-        formatted = "\n".join(
-            ["%s: %s" % (message["role"], message["content"]) for message in messages]
-        )
-        prompt = f"{formatted}\nAssistant:"
+        cookies: dict = None,
+        **kwargs
+    ) -> str:
+        prompt = format_prompt(messages)
         if proxy and "://" not in proxy:
             proxy = f"http://{proxy}"
+        if not cookies:
+            cookies = get_cookies(".google.com")

         headers = {
             'authority': 'bard.google.com',
@@ -44,10 +40,11 @@ class Bard(AsyncProvider):
         ) as session:
             async with session.get(cls.url, proxy=proxy) as response:
                 text = await response.text()

             match = re.search(r'SNlM0e\":\"(.*?)\"', text)
-            if match:
-                snlm0e = match.group(1)
+            if not match:
+                raise RuntimeError("No snlm0e value.")
+            snlm0e = match.group(1)

             params = {
                 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',

@@ -15,8 +15,11 @@ class Bing(AsyncGeneratorProvider):
     def create_async_generator(
         model: str,
         messages: list[dict[str, str]],
-        cookies: dict = get_cookies(".bing.com"), **kwargs) -> AsyncGenerator:
+        cookies: dict = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        if not cookies:
+            cookies = get_cookies(".bing.com")
         if len(messages) < 2:
             prompt = messages[0]["content"]
             context = None
@@ -308,4 +311,5 @@ def run(generator: AsyncGenerator[Union[Any, str], Any]):
             yield loop.run_until_complete(gen.__anext__())
         except StopAsyncIteration:
             break

@@ -1,78 +1,85 @@
-import json, uuid, requests
+import json
+import uuid
+
+from aiohttp import ClientSession

-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt

-class H2o(BaseProvider):
+class H2o(AsyncGeneratorProvider):
     url = "https://gpt-gm.h2o.ai"
     working = True
     supports_stream = True
     model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"

-    @staticmethod
-    def create_completion(
+    @classmethod
+    async def create_async_generator(
+        cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
-
-        conversation = ""
-        for message in messages:
-            conversation += "%s: %s\n" % (message["role"], message["content"])
-        conversation += "assistant: "
-
-        session = requests.Session()
-
-        headers = {"Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"}
-        data = {
-            "ethicsModalAccepted"               : "true",
-            "shareConversationsWithModelAuthors": "true",
-            "ethicsModalAcceptedAt"             : "",
-            "activeModel"                       : model,
-            "searchEnabled"                     : "true",
-        }
-        session.post("https://gpt-gm.h2o.ai/settings",
-            headers=headers, data=data)
-
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        model = model if model else cls.model
         headers = {"Referer": "https://gpt-gm.h2o.ai/"}
-        data    = {"model": model}
-
-        response = session.post("https://gpt-gm.h2o.ai/conversation",
-            headers=headers, json=data).json()
-        if "conversationId" not in response:
-            return
-
-        data = {
-            "inputs": conversation,
-            "parameters": {
-                "temperature"       : kwargs.get("temperature", 0.4),
-                "truncate"          : kwargs.get("truncate", 2048),
-                "max_new_tokens"    : kwargs.get("max_new_tokens", 1024),
-                "do_sample"         : kwargs.get("do_sample", True),
-                "repetition_penalty": kwargs.get("repetition_penalty", 1.2),
-                "return_full_text"  : kwargs.get("return_full_text", False),
-            },
-            "stream" : True,
-            "options": {
-                "id"           : kwargs.get("id", str(uuid.uuid4())),
-                "response_id"  : kwargs.get("response_id", str(uuid.uuid4())),
-                "is_retry"     : False,
-                "use_cache"    : False,
-                "web_search_id": "",
-            },
-        }
-
-        response = session.post(f"https://gpt-gm.h2o.ai/conversation/{response['conversationId']}",
-            headers=headers, json=data)
-        response.raise_for_status()
-        response.encoding = "utf-8"
-        generated_text = response.text.replace("\n", "").split("data:")
-        generated_text = json.loads(generated_text[-1])
-
-        yield generated_text["generated_text"]
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            data = {
+                "ethicsModalAccepted": "true",
+                "shareConversationsWithModelAuthors": "true",
+                "ethicsModalAcceptedAt": "",
+                "activeModel": model,
+                "searchEnabled": "true",
+            }
+            async with session.post(
+                "https://gpt-gm.h2o.ai/settings",
+                proxy=proxy,
+                data=data
+            ) as response:
+                response.raise_for_status()
+
+            async with session.post(
+                "https://gpt-gm.h2o.ai/conversation",
+                proxy=proxy,
+                json={"model": model},
+            ) as response:
+                response.raise_for_status()
+                conversationId = (await response.json())["conversationId"]
+
+            data = {
+                "inputs": format_prompt(messages),
+                "parameters": {
+                    "temperature": 0.4,
+                    "truncate": 2048,
+                    "max_new_tokens": 1024,
+                    "do_sample": True,
+                    "repetition_penalty": 1.2,
+                    "return_full_text": False,
+                    **kwargs
+                },
+                "stream": True,
+                "options": {
+                    "id": str(uuid.uuid4()),
+                    "response_id": str(uuid.uuid4()),
+                    "is_retry": False,
+                    "use_cache": False,
+                    "web_search_id": "",
+                },
+            }
+            async with session.post(
+                f"https://gpt-gm.h2o.ai/conversation/{conversationId}",
+                proxy=proxy,
+                json=data
+            ) as response:
+                start = "data:"
+                async for line in response.content:
+                    line = line.decode("utf-8")
+                    if line and line.startswith(start):
+                        line = json.loads(line[len(start):-1])
+                        if not line["token"]["special"]:
+                            yield line["token"]["text"]

     @classmethod
     @property
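To exercise the rewritten H2o provider, the synchronous `create_completion` wrapper inherited from the base class can drive the async generator; a hedged sketch, not part of this commit:

    import g4f

    # model=None selects H2o's default falcon-40b model; tokens stream as they arrive.
    for token in g4f.Provider.H2o.create_completion(
        model=None,
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(token, end="")
    print()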

@@ -1,65 +0,0 @@
-has_module = False
-try:
-    from hugchat.hugchat import ChatBot
-except ImportError:
-    has_module = False
-
-from .base_provider import BaseProvider, get_cookies
-from g4f.typing import CreateResult
-
-class Hugchat(BaseProvider):
-    url = "https://huggingface.co/chat/"
-    needs_auth = True
-    working = has_module
-    llms = ['OpenAssistant/oasst-sft-6-llama-30b-xor', 'meta-llama/Llama-2-70b-chat-hf']
-
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        stream: bool = False,
-        proxy: str = None,
-        cookies: str = get_cookies(".huggingface.co"), **kwargs) -> CreateResult:
-
-        bot = ChatBot(
-            cookies=cookies)
-
-        if proxy and "://" not in proxy:
-            proxy = f"http://{proxy}"
-            bot.session.proxies = {"http": proxy, "https": proxy}
-
-        if model:
-            try:
-                if not isinstance(model, int):
-                    model = cls.llms.index(model)
-                bot.switch_llm(model)
-            except:
-                raise RuntimeError(f"Model are not supported: {model}")
-
-        if len(messages) > 1:
-            formatted = "\n".join(
-                ["%s: %s" % (message["role"], message["content"]) for message in messages]
-            )
-            prompt = f"{formatted}\nAssistant:"
-        else:
-            prompt = messages.pop()["content"]
-
-        try:
-            yield bot.chat(prompt, **kwargs)
-        finally:
-            bot.delete_conversation(bot.current_conversation)
-            bot.current_conversation = ""
-            pass
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("proxy", "str"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -0,0 +1,107 @@
+import json
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, get_cookies, format_prompt
+
+
+class HuggingChat(AsyncGeneratorProvider):
+    url = "https://huggingface.co/chat/"
+    needs_auth = True
+    working = True
+    model = "OpenAssistant/oasst-sft-6-llama-30b-xor"
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool = True,
+        proxy: str = None,
+        cookies: dict = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        if not cookies:
+            cookies = get_cookies(".huggingface.co")
+        model = model if model else cls.model
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
+        headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+        }
+        async with ClientSession(
+            cookies=cookies,
+            headers=headers
+        ) as session:
+            async with session.post("https://huggingface.co/chat/conversation", proxy=proxy, json={"model": model}) as response:
+                conversation_id = (await response.json())["conversationId"]
+
+            send = {
+                "inputs": format_prompt(messages),
+                "parameters": {
+                    "temperature": 0.2,
+                    "truncate": 1000,
+                    "max_new_tokens": 1024,
+                    "stop": ["</s>"],
+                    "top_p": 0.95,
+                    "repetition_penalty": 1.2,
+                    "top_k": 50,
+                    "return_full_text": False,
+                    **kwargs
+                },
+                "stream": stream,
+                "options": {
+                    "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
+                    "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
+                    "is_retry": False,
+                    "use_cache": False,
+                    "web_search_id": ""
+                }
+            }
+            start = "data:"
+            first = True
+            async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response:
+                async for line in response.content:
+                    line = line.decode("utf-8")
+                    if not line:
+                        continue
+                    if not stream:
+                        try:
+                            data = json.loads(line)
+                        except json.decoder.JSONDecodeError:
+                            raise RuntimeError(f"No json: {line}")
+                        if "error" in data:
+                            raise RuntimeError(data["error"])
+                        elif isinstance(data, list):
+                            yield data[0]["generated_text"]
+                        else:
+                            raise RuntimeError(f"Response: {line}")
+                    elif line.startswith(start):
+                        line = json.loads(line[len(start):-1])
+                        if not line:
+                            continue
+                        if "token" not in line:
+                            raise RuntimeError(f"Response: {line}")
+                        if not line["token"]["special"]:
+                            if first:
+                                yield line["token"]["text"].lstrip()
+                                first = False
+                            else:
+                                yield line["token"]["text"]
+
+            async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response:
+                response.raise_for_status()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -1,59 +1,77 @@
-import uuid, requests
+import uuid
+import json
+
+from aiohttp import ClientSession

-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider

+models = {
+    "gpt-4": {
+        "id": "gpt-4",
+        "name": "GPT-4",
+        "maxLength": 24000,
+        "tokenLimit": 8000,
+    },
+    "gpt-3.5-turbo": {
+        "id": "gpt-3.5-turbo",
+        "name": "GPT-3.5",
+        "maxLength": 12000,
+        "tokenLimit": 4000,
+    },
+    "gpt-3.5-turbo-16k": {
+        "id": "gpt-3.5-turbo-16k",
+        "name": "GPT-3.5-16k",
+        "maxLength": 48000,
+        "tokenLimit": 16000,
+    },
+}
+
-class Liaobots(BaseProvider):
-    url: str = "https://liaobots.com"
+class Liaobots(AsyncGeneratorProvider):
+    url = "https://liaobots.com"
     supports_stream = True
-    needs_auth = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
+    _auth_code = None

-    @staticmethod
-    def create_completion(
+    @classmethod
+    async def create_async_generator(
+        cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
+        auth: str = None,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
         headers = {
-            "authority"   : "liaobots.com",
-            "content-type": "application/json",
-            "origin"      : "https://liaobots.com",
-            "referer"     : "https://liaobots.com/",
-            "user-agent"  : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
-            "x-auth-code" : str(kwargs.get("auth")),
-        }
-        models = {
-            "gpt-4": {
-                "id": "gpt-4",
-                "name": "GPT-4",
-                "maxLength": 24000,
-                "tokenLimit": 8000,
-            },
-            "gpt-3.5-turbo": {
-                "id": "gpt-3.5-turbo",
-                "name": "GPT-3.5",
-                "maxLength": 12000,
-                "tokenLimit": 4000,
-            },
-        }
-        json_data = {
-            "conversationId": str(uuid.uuid4()),
-            "model"         : models[model],
-            "messages"      : messages,
-            "key"           : "",
-            "prompt"        : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+            "authority": "liaobots.com",
+            "content-type": "application/json",
+            "origin": "https://liaobots.com",
+            "referer": "https://liaobots.com/",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
         }
-
-        response = requests.post("https://liaobots.com/api/chat",
-            headers=headers, json=json_data, stream=True)
-        response.raise_for_status()
-        for token in response.iter_content(chunk_size=2046):
-            yield token.decode("utf-8")
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            model = model if model in models else "gpt-3.5-turbo"
+            auth_code = auth if isinstance(auth, str) else cls._auth_code
+            if not auth_code:
+                async with session.post("https://liaobots.com/api/user", proxy=proxy, json={"authcode": ""}) as response:
+                    response.raise_for_status()
+                    auth_code = cls._auth_code = json.loads(await response.text())["authCode"]
+            data = {
+                "conversationId": str(uuid.uuid4()),
+                "model": models[model],
+                "messages": messages,
+                "key": "",
+                "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
+            }
+            async with session.post("https://liaobots.com/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    yield line.decode("utf-8")

     @classmethod
     @property
@@ -62,6 +80,7 @@ class Liaobots(BaseProvider):
         ("model", "str"),
         ("messages", "list[dict[str, str]]"),
         ("stream", "bool"),
+        ("proxy", "str"),
         ("auth", "str"),
     ]
     param = ", ".join([": ".join(p) for p in params])

@@ -0,0 +1,98 @@
+import json
+
+from aiohttp import ClientSession
+
+from ..typing import Any, AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, get_cookies, format_prompt
+
+
+class OpenAssistant(AsyncGeneratorProvider):
+    url = "https://open-assistant.io/chat"
+    needs_auth = True
+    working = True
+    model = "OA_SFT_Llama_30B_6"
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        cookies: dict = None,
+        **kwargs: Any
+    ) -> AsyncGenerator:
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
+        if not cookies:
+            cookies = get_cookies("open-assistant.io")
+
+        headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
+        }
+        async with ClientSession(
+            cookies=cookies,
+            headers=headers
+        ) as session:
+            async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
+                chat_id = (await response.json())["id"]
+
+            data = {
+                "chat_id": chat_id,
+                "content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
+                "parent_id": None
+            }
+            async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
+                parent_id = (await response.json())["id"]
+
+            data = {
+                "chat_id": chat_id,
+                "parent_id": parent_id,
+                "model_config_name": model if model else cls.model,
+                "sampling_parameters": {
+                    "top_k": 50,
+                    "top_p": None,
+                    "typical_p": None,
+                    "temperature": 0.35,
+                    "repetition_penalty": 1.1111111111111112,
+                    "max_new_tokens": 1024,
+                    **kwargs
+                },
+                "plugins": []
+            }
+            async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
+                data = await response.json()
+                if "id" in data:
+                    message_id = data["id"]
+                elif "message" in data:
+                    raise RuntimeError(data["message"])
+                else:
+                    response.raise_for_status()
+
+            params = {
+                'chat_id': chat_id,
+                'message_id': message_id,
+            }
+            async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
+                start = "data: "
+                async for line in response.content:
+                    line = line.decode("utf-8")
+                    if line and line.startswith(start):
+                        line = json.loads(line[len(start):])
+                        if line["event_type"] == "token":
+                            yield line["text"]
+
+            params = {
+                'chat_id': chat_id,
+            }
+            async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
+                response.raise_for_status()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -4,8 +4,11 @@ try:
 except ImportError:
     has_module = False

-from .base_provider import AsyncGeneratorProvider, get_cookies
+from .base_provider import AsyncGeneratorProvider, get_cookies, format_prompt
 from ..typing import AsyncGenerator
+from httpx import AsyncClient
+import json

 class OpenaiChat(AsyncGeneratorProvider):
     url = "https://chat.openai.com"
@@ -14,6 +17,7 @@ class OpenaiChat(AsyncGeneratorProvider):
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
     supports_stream = True
+    _access_token = None

     @classmethod
     async def create_async_generator(
@@ -21,9 +25,9 @@ class OpenaiChat(AsyncGeneratorProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        access_token: str = None,
+        access_token: str = _access_token,
         cookies: dict = None,
-        **kwargs
+        **kwargs: dict
     ) -> AsyncGenerator:
         config = {"access_token": access_token, "model": model}
@@ -37,21 +41,12 @@ class OpenaiChat(AsyncGeneratorProvider):
         )
         if not access_token:
             cookies = cookies if cookies else get_cookies("chat.openai.com")
-            response = await bot.session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
-            access_token = response.json()["accessToken"]
-            bot.set_access_token(access_token)
-
-        if len(messages) > 1:
-            formatted = "\n".join(
-                ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
-            )
-            prompt = f"{formatted}\nAssistant:"
-        else:
-            prompt = messages.pop()["content"]
+            cls._access_token = await get_access_token(bot.session, cookies)
+            bot.set_access_token(cls._access_token)

         returned = None
-        async for message in bot.ask(prompt):
+        async for message in bot.ask(format_prompt(messages)):
             message = message["message"]
             if returned:
                 if message.startswith(returned):
@@ -61,6 +56,9 @@ class OpenaiChat(AsyncGeneratorProvider):
             else:
                 yield message
             returned = message

+        await bot.delete_conversation(bot.conversation_id)
+
     @classmethod
     @property
@@ -73,3 +71,12 @@ class OpenaiChat(AsyncGeneratorProvider):
     ]
     param = ", ".join([": ".join(p) for p in params])
     return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+async def get_access_token(session: AsyncClient, cookies: dict):
+    response = await session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
+    response.raise_for_status()
+    try:
+        return response.json()["accessToken"]
+    except json.decoder.JSONDecodeError:
+        raise RuntimeError(f"Response: {response.text}")

@@ -1,55 +1,37 @@
-import urllib.parse, json
+from aiohttp import ClientSession
+import json

-from curl_cffi import requests
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies

-class You(BaseProvider):
+class You(AsyncGeneratorProvider):
     url = "https://you.com"
     working = True
     supports_gpt_35_turbo = True
-    supports_stream = True

     @staticmethod
-    def create_completion(
+    async def create_async_generator(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
-
-        url_param = _create_url_param(messages, kwargs.get("history", []))
-        headers = _create_header()
-
-        response = requests.get(f"https://you.com/api/streamingSearch?{url_param}",
-            headers=headers, impersonate="chrome107")
-        response.raise_for_status()
-
-        start = 'data: {"youChatToken": '
-        for line in response.content.splitlines():
-            line = line.decode('utf-8')
-            if line.startswith(start):
-                yield json.loads(line[len(start): -1])
-
-def _create_url_param(messages: list[dict[str, str]], history: list[dict[str, str]]):
-    prompt = ""
-    for message in messages:
-        prompt += "%s: %s\n" % (message["role"], message["content"])
-    prompt += "assistant:"
-    chat = _convert_chat(history)
-    param = {"q": prompt, "domain": "youchat", "chat": chat}
-    return urllib.parse.urlencode(param)
-
-def _convert_chat(messages: list[dict[str, str]]):
-    message_iter = iter(messages)
-    return [
-        {"question": user["content"], "answer": assistant["content"]}
-        for user, assistant in zip(message_iter, message_iter)
-    ]
-
-def _create_header():
-    return {
-        "accept": "text/event-stream",
-        "referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
-    }
+        cookies: dict = None,
+        **kwargs,
+    ) -> AsyncGenerator:
+        if not cookies:
+            cookies = get_cookies("you.com")
+        headers = {
+            "Accept": "text/event-stream",
+            "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0"
+        }
+        async with ClientSession(headers=headers, cookies=cookies) as session:
+            async with session.get(
+                "https://you.com/api/streamingSearch",
+                params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
+            ) as response:
+                start = 'data: {"youChatToken": '
+                async for line in response.content:
+                    line = line.decode('utf-8')
+                    if line.startswith(start):
+                        yield json.loads(line[len(start): -2])

@@ -1,29 +1,27 @@
-import requests
+from aiohttp import ClientSession

-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from .base_provider import AsyncProvider, format_prompt

-class Yqcloud(BaseProvider):
+class Yqcloud(AsyncProvider):
     url = "https://chat9.yqcloud.top/"
     working = True
     supports_gpt_35_turbo = True

     @staticmethod
-    def create_completion(
+    async def create_async(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool, **kwargs: Any) -> CreateResult:
-
-        headers = _create_header()
-        payload = _create_payload(messages)
-
-        response = requests.post("https://api.aichatos.cloud/api/generateStream",
-            headers=headers, json=payload)
-        response.raise_for_status()
-        response.encoding = 'utf-8'
-        yield response.text
+        proxy: str = None,
+        **kwargs,
+    ) -> str:
+        async with ClientSession(
+            headers=_create_header()
+        ) as session:
+            payload = _create_payload(messages)
+            async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
+                response.raise_for_status()
+                return await response.text()

 def _create_header():
@@ -35,15 +33,11 @@ def _create_header():

 def _create_payload(messages: list[dict[str, str]]):
-    prompt = ""
-    for message in messages:
-        prompt += "%s: %s\n" % (message["role"], message["content"])
-    prompt += "assistant:"
-
     return {
-        "prompt"        : prompt,
-        "network"       : True,
-        "system"        : "",
+        "prompt": format_prompt(messages),
+        "network": True,
+        "system": "",
         "withoutContext": False,
-        "stream"        : False,
-    }
+        "stream": False,
+        "userId": "#/chat/1693025544336"
+    }

@@ -13,11 +13,12 @@ from .EasyChat import EasyChat
 from .Forefront import Forefront
 from .GetGpt import GetGpt
 from .H2o import H2o
-from .Hugchat import Hugchat
+from .HuggingChat import HuggingChat
 from .Liaobots import Liaobots
 from .Lockchat import Lockchat
 from .Opchatgpts import Opchatgpts
 from .OpenaiChat import OpenaiChat
+from .OpenAssistant import OpenAssistant
 from .Raycast import Raycast
 from .Theb import Theb
 from .Vercel import Vercel
@@ -48,12 +49,13 @@ __all__ = [
     'Forefront',
     'GetGpt',
     'H2o',
-    'Hugchat',
+    'HuggingChat',
     'Liaobots',
     'Lockchat',
     'Opchatgpts',
     'Raycast',
     'OpenaiChat',
+    'OpenAssistant',
     'Theb',
     'Vercel',
     'Wewordle',

@@ -4,8 +4,7 @@ from ..typing import Any, CreateResult, AsyncGenerator, Union

 import browser_cookie3
 import asyncio
-from time import time
-import math

 class BaseProvider(ABC):
     url: str
@@ -48,6 +47,17 @@ def get_cookies(cookie_domain: str) -> dict:
     return _cookies[cookie_domain]

+def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
+    if add_special_tokens or len(messages) > 1:
+        formatted = "\n".join(
+            ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
+        )
+        return f"{formatted}\nAssistant:"
+    else:
+        return messages.pop()["content"]
+
+
 class AsyncProvider(BaseProvider):
     @classmethod
     def create_completion(
@@ -72,20 +82,19 @@ class AsyncGeneratorProvider(AsyncProvider):
         cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool = True, **kwargs: Any) -> CreateResult:
-
-        if stream:
-            yield from run_generator(cls.create_async_generator(model, messages, **kwargs))
-        else:
-            yield from AsyncProvider.create_completion(cls=cls, model=model, messages=messages, **kwargs)
+        stream: bool = True,
+        **kwargs
+    ) -> CreateResult:
+        yield from run_generator(cls.create_async_generator(model, messages, stream=stream, **kwargs))

     @classmethod
     async def create_async(
         cls,
         model: str,
-        messages: list[dict[str, str]], **kwargs: Any) -> str:
-
-        chunks = [chunk async for chunk in cls.create_async_generator(model, messages, **kwargs)]
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> str:
+        chunks = [chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)]
         if chunks:
             return "".join(chunks)
@@ -93,8 +102,9 @@ class AsyncGeneratorProvider(AsyncProvider):
     @abstractmethod
     def create_async_generator(
         model: str,
-        messages: list[dict[str, str]]) -> AsyncGenerator:
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
         raise NotImplementedError()
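`format_prompt` is the helper most providers in this diff now share: multi-message chats are flattened into a role-prefixed transcript ending in "Assistant:", while a single message passes through as-is. An illustration of its behavior, derived directly from the code above (note that the single-message path pops from the caller's list):

    messages = [
        {"role": "system", "content": "Be brief."},
        {"role": "user", "content": "Hi"},
    ]
    # format_prompt(messages) returns:
    #   "System: Be brief.\nUser: Hi\nAssistant:"
    # A single message returns just its content (and mutates the list):
    #   format_prompt([{"role": "user", "content": "Hi"}]) == "Hi"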

@@ -7,6 +7,13 @@ class Model:
     base_provider: str
     best_provider: type[BaseProvider]

+# Config for HuggingChat, OpenAssistant
+# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
+default = Model(
+    name="",
+    base_provider="huggingface",
+    best_provider=H2o,
+)
+
 # GPT-3.5 / GPT-4
 gpt_35_turbo = Model(
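The new `default` entry gives model-less calls something to resolve to; the comment lists which providers it is known to work with. A hedged example of selecting it explicitly:

    import g4f

    # default carries no model name, so the chosen provider falls back to its own default.
    response = g4f.ChatCompletion.create(
        model=g4f.models.default,
        provider=g4f.Provider.H2o,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)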

@@ -0,0 +1,25 @@
+from time import time
+
+
+async def log_time_async(method: callable, **kwargs):
+    start = time()
+    result = await method(**kwargs)
+    secs = f"{round(time() - start, 2)} secs"
+    if result:
+        return " ".join([result, secs])
+    return secs
+
+
+def log_time_yield(method: callable, **kwargs):
+    start = time()
+    result = yield from method(**kwargs)
+    yield f" {round(time() - start, 2)} secs"
+
+
+def log_time(method: callable, **kwargs):
+    start = time()
+    result = method(**kwargs)
+    secs = f"{round(time() - start, 2)} secs"
+    if result:
+        return " ".join([result, secs])
+    return secs
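These helpers time any provider call; `log_time_yield` wraps a generator and appends the elapsed time as a final yielded chunk. For instance (the `shout` generator is a made-up stand-in for a provider call):

    from testing.log_time import log_time_yield

    def shout(text: str):
        yield text.upper()

    for chunk in log_time_yield(shout, text="hello"):
        print(chunk, end="")  # prints "HELLO" followed by " 0.0 secs"
    print()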

@@ -1,95 +1,96 @@
 import sys
 from pathlib import Path
 import asyncio
-from time import time

 sys.path.append(str(Path(__file__).parent.parent))

 import g4f
+from testing.log_time import log_time, log_time_async, log_time_yield

-providers = [g4f.Provider.OpenaiChat, g4f.Provider.Bard, g4f.Provider.Bing]
-
-# Async support
-async def log_time_async(method: callable, **kwargs):
-    start = time()
-    result = await method(**kwargs)
-    secs = f"{round(time() - start, 2)} secs"
-    if result:
-        return " ".join([result, secs])
-    return secs
-
-def log_time_yield(method: callable, **kwargs):
-    start = time()
-    result = yield from method(**kwargs)
-    yield f" {round(time() - start, 2)} secs"
-
-def log_time(method: callable, **kwargs):
-    start = time()
-    result = method(**kwargs)
-    secs = f"{round(time() - start, 2)} secs"
-    if result:
-        return " ".join([result, secs])
-    return secs
+_providers = [
+    g4f.Provider.H2o,
+    g4f.Provider.You,
+    g4f.Provider.HuggingChat,
+    g4f.Provider.OpenAssistant,
+    g4f.Provider.Bing,
+    g4f.Provider.Bard
+]
+
+_instruct = "Hello, tell about you in one sentence."
+
+_example = """
+OpenaiChat: Hello! How can I assist you today? 2.0 secs
+Bard: Hello! How can I help you today? 3.44 secs
+Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
+Async Total: 4.25 secs
+
+OpenaiChat: Hello! How can I assist you today? 1.85 secs
+Bard: Hello! How can I help you today? 3.38 secs
+Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
+Stream Total: 11.37 secs
+
+OpenaiChat: Hello! How can I help you today? 3.28 secs
+Bard: Hello there! How can I help you today? 3.58 secs
+Bing: Hello! How can I help you today? 3.28 secs
+No Stream Total: 10.14 secs
+"""
+
+print("Yqcloud:", end="")
+for response in log_time_yield(
+    g4f.ChatCompletion.create,
+    model=g4f.models.gpt_35_turbo,
+    messages=[{"role": "user", "content": _instruct}],
+    provider=g4f.Provider.Yqcloud,
+    #cookies=g4f.get_cookies(".huggingface.co"),
+    stream=True,
+    auth=True
+):
+    print(response, end="")
+print()
+print()

 async def run_async():
-    responses = []
-    for provider in providers:
-        responses.append(log_time_async(
+    responses = [
+        log_time_async(
             provider.create_async,
             model=None,
-            messages=[{"role": "user", "content": "Hello"}],
-            log_time=True
-        ))
+            messages=[{"role": "user", "content": _instruct}],
+        )
+        for provider in _providers
+    ]
     responses = await asyncio.gather(*responses)
-    for idx, provider in enumerate(providers):
+    for idx, provider in enumerate(_providers):
         print(f"{provider.__name__}:", responses[idx])
 print("Async Total:", asyncio.run(log_time_async(run_async)))
+print()

-# Streaming support:
 def run_stream():
-    for provider in providers:
+    for provider in _providers:
         print(f"{provider.__name__}: ", end="")
         for response in log_time_yield(
             provider.create_completion,
             model=None,
-            messages=[{"role": "user", "content": "Hello"}],
+            messages=[{"role": "user", "content": _instruct}],
         ):
             print(response, end="")
         print()
 print("Stream Total:", log_time(run_stream))
+print()

-# No streaming support:
-def create_completion():
-    for provider in providers:
+def create_no_stream():
+    for provider in _providers:
         print(f"{provider.__name__}:", end=" ")
         for response in log_time_yield(
-            g4f.Provider.Bard.create_completion,
+            provider.create_completion,
             model=None,
-            messages=[{"role": "user", "content": "Hello"}],
+            messages=[{"role": "user", "content": _instruct}],
+            stream=False
         ):
             print(response, end="")
         print()
-print("No Stream Total:", log_time(create_completion))
-
-for response in g4f.Provider.Hugchat.create_completion(
-    model=None,
-    messages=[{"role": "user", "content": "Hello, tell about you."}],
-):
-    print("Hugchat:", response)
-
-"""
-OpenaiChat: Hello! How can I assist you today? 2.0 secs
-Bard: Hello! How can I help you today? 3.44 secs
-Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
-Async Total: 4.25 secs
-
-OpenaiChat: Hello! How can I assist you today? 1.85 secs
-Bard: Hello! How can I help you today? 3.38 secs
-Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
-Stream Total: 11.37 secs
-
-OpenaiChat: Hello! How can I help you today? 3.28 secs
-Bard: Hello there! How can I help you today? 3.58 secs
-Bing: Hello! How can I help you today? 3.28 secs
-No Stream Total: 10.14 secs
-"""
+print("No Stream Total:", log_time(create_no_stream))
+print()