Merge pull request #844

Add OpenAssistant and HuggingChat providers; remove Hugchat
Tekky committed 10 months ago (via GitHub)
commit 7e687b3d17
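For context, a minimal usage sketch of the new providers through the public g4f API, mirroring the pattern from the updated test file below. This is not part of the diff, and it assumes a local browser session is logged in to huggingface.co so that get_cookies(".huggingface.co") can find auth cookies:

import g4f

# Stream tokens from the new HuggingChat provider (sketch, not part of the diff).
for token in g4f.ChatCompletion.create(
    model=g4f.models.gpt_35_turbo,
    provider=g4f.Provider.HuggingChat,
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(token, end="")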

@@ -1,12 +1,9 @@
import json
import random
import re
from aiohttp import ClientSession
import asyncio
from ..typing import Any, CreateResult
from .base_provider import AsyncProvider, get_cookies
from .base_provider import AsyncProvider, get_cookies, format_prompt
class Bard(AsyncProvider):
url = "https://bard.google.com"
@@ -19,15 +16,14 @@ class Bard(AsyncProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
cookies: dict = get_cookies(".google.com"), **kwargs: Any,) -> str:
formatted = "\n".join(
["%s: %s" % (message["role"], message["content"]) for message in messages]
)
prompt = f"{formatted}\nAssistant:"
cookies: dict = None,
**kwargs
) -> str:
prompt = format_prompt(messages)
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
if not cookies:
cookies = get_cookies(".google.com")
headers = {
'authority': 'bard.google.com',
@@ -44,10 +40,11 @@ class Bard(AsyncProvider):
) as session:
async with session.get(cls.url, proxy=proxy) as response:
text = await response.text()
match = re.search(r'SNlM0e\":\"(.*?)\"', text)
if match:
snlm0e = match.group(1)
if not match:
raise RuntimeError("No snlm0e value.")
snlm0e = match.group(1)
params = {
'bl': 'boq_assistant-bard-web-server_20230326.21_p0',

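The signature change above also fixes an eager-default pitfall: a default such as cookies=get_cookies(".google.com") is evaluated once, at import time, so stale or missing cookies get frozen into the function. A minimal sketch of the before/after pattern the diff adopts (function name illustrative):

# Before: the default is computed once, when the module is imported.
def fetch(cookies: dict = get_cookies(".google.com")):
    ...

# After: resolve the default lazily, on every call.
def fetch(cookies: dict = None):
    if not cookies:
        cookies = get_cookies(".google.com")
    ...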
@@ -15,8 +15,11 @@ class Bing(AsyncGeneratorProvider):
def create_async_generator(
model: str,
messages: list[dict[str, str]],
cookies: dict = get_cookies(".bing.com"), **kwargs) -> AsyncGenerator:
cookies: dict = None,
**kwargs
) -> AsyncGenerator:
if not cookies:
cookies = get_cookies(".bing.com")
if len(messages) < 2:
prompt = messages[0]["content"]
context = None
@@ -308,4 +311,5 @@ def run(generator: AsyncGenerator[Union[Any, str], Any]):
yield loop.run_until_complete(gen.__anext__())
except StopAsyncIteration:
break
break

@@ -1,78 +1,85 @@
import json, uuid, requests
import json
import uuid
from aiohttp import ClientSession
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt
class H2o(BaseProvider):
url = "https://gpt-gm.h2o.ai"
working = True
class H2o(AsyncGeneratorProvider):
url = "https://gpt-gm.h2o.ai"
working = True
supports_stream = True
model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
@staticmethod
def create_completion(
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
conversation = ""
for message in messages:
conversation += "%s: %s\n" % (message["role"], message["content"])
conversation += "assistant: "
session = requests.Session()
headers = {"Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"}
data = {
"ethicsModalAccepted" : "true",
"shareConversationsWithModelAuthors": "true",
"ethicsModalAcceptedAt" : "",
"activeModel" : model,
"searchEnabled" : "true",
}
session.post("https://gpt-gm.h2o.ai/settings",
headers=headers, data=data)
proxy: str = None,
**kwargs
) -> AsyncGenerator:
model = model if model else cls.model
headers = {"Referer": "https://gpt-gm.h2o.ai/"}
data = {"model": model}
response = session.post("https://gpt-gm.h2o.ai/conversation",
headers=headers, json=data).json()
if "conversationId" not in response:
return
data = {
"inputs": conversation,
"parameters": {
"temperature" : kwargs.get("temperature", 0.4),
"truncate" : kwargs.get("truncate", 2048),
"max_new_tokens" : kwargs.get("max_new_tokens", 1024),
"do_sample" : kwargs.get("do_sample", True),
"repetition_penalty": kwargs.get("repetition_penalty", 1.2),
"return_full_text" : kwargs.get("return_full_text", False),
},
"stream" : True,
"options": {
"id" : kwargs.get("id", str(uuid.uuid4())),
"response_id" : kwargs.get("response_id", str(uuid.uuid4())),
"is_retry" : False,
"use_cache" : False,
"web_search_id": "",
},
}
async with ClientSession(
headers=headers
) as session:
data = {
"ethicsModalAccepted": "true",
"shareConversationsWithModelAuthors": "true",
"ethicsModalAcceptedAt": "",
"activeModel": model,
"searchEnabled": "true",
}
async with session.post(
"https://gpt-gm.h2o.ai/settings",
proxy=proxy,
data=data
) as response:
response.raise_for_status()
response = session.post(f"https://gpt-gm.h2o.ai/conversation/{response['conversationId']}",
headers=headers, json=data)
response.raise_for_status()
response.encoding = "utf-8"
generated_text = response.text.replace("\n", "").split("data:")
generated_text = json.loads(generated_text[-1])
async with session.post(
"https://gpt-gm.h2o.ai/conversation",
proxy=proxy,
json={"model": model},
) as response:
response.raise_for_status()
conversationId = (await response.json())["conversationId"]
yield generated_text["generated_text"]
data = {
"inputs": format_prompt(messages),
"parameters": {
"temperature": 0.4,
"truncate": 2048,
"max_new_tokens": 1024,
"do_sample": True,
"repetition_penalty": 1.2,
"return_full_text": False,
**kwargs
},
"stream": True,
"options": {
"id": str(uuid.uuid4()),
"response_id": str(uuid.uuid4()),
"is_retry": False,
"use_cache": False,
"web_search_id": "",
},
}
async with session.post(
f"https://gpt-gm.h2o.ai/conversation/{conversationId}",
proxy=proxy,
json=data
) as response:
start = "data:"
async for line in response.content:
line = line.decode("utf-8")
if line and line.startswith(start):
line = json.loads(line[len(start):-1])
if not line["token"]["special"]:
yield line["token"]["text"]
@classmethod
@property

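The new H2o streaming loop reads server-sent-event style lines of the form data:{json} and skips "special" tokens. A standalone sketch of that parsing, with a hypothetical sample line:

import json

sample = 'data:{"token": {"text": "Hi", "special": false}}\n'  # hypothetical SSE line
start = "data:"
if sample.startswith(start):
    payload = json.loads(sample[len(start):-1])  # drop the prefix and trailing newline
    if not payload["token"]["special"]:
        print(payload["token"]["text"])  # -> Hi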
@@ -1,65 +0,0 @@
has_module = True
try:
from hugchat.hugchat import ChatBot
except ImportError:
has_module = False
from .base_provider import BaseProvider, get_cookies
from g4f.typing import CreateResult
class Hugchat(BaseProvider):
url = "https://huggingface.co/chat/"
needs_auth = True
working = has_module
llms = ['OpenAssistant/oasst-sft-6-llama-30b-xor', 'meta-llama/Llama-2-70b-chat-hf']
@classmethod
def create_completion(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool = False,
proxy: str = None,
cookies: str = get_cookies(".huggingface.co"), **kwargs) -> CreateResult:
bot = ChatBot(
cookies=cookies)
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
bot.session.proxies = {"http": proxy, "https": proxy}
if model:
try:
if not isinstance(model, int):
model = cls.llms.index(model)
bot.switch_llm(model)
except:
raise RuntimeError(f"Model are not supported: {model}")
if len(messages) > 1:
formatted = "\n".join(
["%s: %s" % (message["role"], message["content"]) for message in messages]
)
prompt = f"{formatted}\nAssistant:"
else:
prompt = messages.pop()["content"]
try:
yield bot.chat(prompt, **kwargs)
finally:
bot.delete_conversation(bot.current_conversation)
bot.current_conversation = ""
pass
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"

@@ -0,0 +1,107 @@
import json
from aiohttp import ClientSession
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, get_cookies, format_prompt
class HuggingChat(AsyncGeneratorProvider):
url = "https://huggingface.co/chat/"
needs_auth = True
working = True
model = "OpenAssistant/oasst-sft-6-llama-30b-xor"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool = True,
proxy: str = None,
cookies: dict = None,
**kwargs
) -> AsyncGenerator:
if not cookies:
cookies = get_cookies(".huggingface.co")
model = model if model else cls.model
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
}
async with ClientSession(
cookies=cookies,
headers=headers
) as session:
async with session.post("https://huggingface.co/chat/conversation", proxy=proxy, json={"model": model}) as response:
conversation_id = (await response.json())["conversationId"]
send = {
"inputs": format_prompt(messages),
"parameters": {
"temperature": 0.2,
"truncate": 1000,
"max_new_tokens": 1024,
"stop": ["</s>"],
"top_p": 0.95,
"repetition_penalty": 1.2,
"top_k": 50,
"return_full_text": False,
**kwargs
},
"stream": stream,
"options": {
"id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
"response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
"is_retry": False,
"use_cache": False,
"web_search_id": ""
}
}
start = "data:"
first = True
async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response:
async for line in response.content:
line = line.decode("utf-8")
if not line:
continue
if not stream:
try:
data = json.loads(line)
except json.decoder.JSONDecodeError:
raise RuntimeError(f"No json: {line}")
if "error" in data:
raise RuntimeError(data["error"])
elif isinstance(data, list):
yield data[0]["generated_text"]
else:
raise RuntimeError(f"Response: {line}")
elif line.startswith(start):
line = json.loads(line[len(start):-1])
if not line:
continue
if "token" not in line:
raise RuntimeError(f"Response: {line}")
if not line["token"]["special"]:
if first:
yield line["token"]["text"].lstrip()
first = False
else:
yield line["token"]["text"]
async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response:
response.raise_for_status()
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"

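The new providers can also be driven directly as async generators; a minimal sketch, again assuming huggingface.co auth cookies are available locally:

import asyncio
import g4f

async def main():
    async for token in g4f.Provider.HuggingChat.create_async_generator(
        model="",  # empty string falls back to cls.model
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(token, end="")

asyncio.run(main())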
@@ -1,59 +1,77 @@
import uuid, requests
import uuid
import json
from aiohttp import ClientSession
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
models = {
"gpt-4": {
"id": "gpt-4",
"name": "GPT-4",
"maxLength": 24000,
"tokenLimit": 8000,
},
"gpt-3.5-turbo": {
"id": "gpt-3.5-turbo",
"name": "GPT-3.5",
"maxLength": 12000,
"tokenLimit": 4000,
},
"gpt-3.5-turbo-16k": {
"id": "gpt-3.5-turbo-16k",
"name": "GPT-3.5-16k",
"maxLength": 48000,
"tokenLimit": 16000,
},
}
class Liaobots(BaseProvider):
url: str = "https://liaobots.com"
supports_stream = True
needs_auth = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
class Liaobots(AsyncGeneratorProvider):
url = "https://liaobots.com"
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
_auth_code = None
@staticmethod
def create_completion(
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
auth: str = None,
proxy: str = None,
**kwargs
) -> AsyncGenerator:
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
headers = {
"authority" : "liaobots.com",
"content-type" : "application/json",
"origin" : "https://liaobots.com",
"referer" : "https://liaobots.com/",
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
"x-auth-code" : str(kwargs.get("auth")),
}
models = {
"gpt-4": {
"id": "gpt-4",
"name": "GPT-4",
"maxLength": 24000,
"tokenLimit": 8000,
},
"gpt-3.5-turbo": {
"id": "gpt-3.5-turbo",
"name": "GPT-3.5",
"maxLength": 12000,
"tokenLimit": 4000,
},
}
json_data = {
"conversationId": str(uuid.uuid4()),
"model" : models[model],
"messages" : messages,
"key" : "",
"prompt" : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
"authority": "liaobots.com",
"content-type": "application/json",
"origin": "https://liaobots.com",
"referer": "https://liaobots.com/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
}
async with ClientSession(
headers=headers
) as session:
model = model if model in models else "gpt-3.5-turbo"
auth_code = auth if isinstance(auth, str) else cls._auth_code
if not auth_code:
async with session.post("https://liaobots.com/api/user", proxy=proxy, json={"authcode": ""}) as response:
response.raise_for_status()
auth_code = cls._auth_code = json.loads((await response.text()))["authCode"]
data = {
"conversationId": str(uuid.uuid4()),
"model": models[model],
"messages": messages,
"key": "",
"prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
}
async with session.post("https://liaobots.com/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
response.raise_for_status()
async for line in response.content:
yield line.decode("utf-8")
response = requests.post("https://liaobots.com/api/chat",
headers=headers, json=json_data, stream=True)
response.raise_for_status()
for token in response.iter_content(chunk_size=2046):
yield token.decode("utf-8")
@classmethod
@property
@@ -62,6 +80,7 @@ class Liaobots(BaseProvider):
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
("auth", "str"),
]
param = ", ".join([": ".join(p) for p in params])

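Note how the Liaobots rewrite caches the fetched authCode on the class: cls._auth_code is populated on the first call and reused afterwards, so the /api/user round-trip happens only once per process. The pattern, reduced to a schematic (fetch_auth_code is a hypothetical coroutine):

class AuthCache:
    _auth_code = None  # shared across all calls

    @classmethod
    async def get_code(cls, auth: str = None) -> str:
        code = auth if isinstance(auth, str) else cls._auth_code
        if not code:
            code = cls._auth_code = await fetch_auth_code()  # hypothetical fetch
        return code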
@@ -0,0 +1,98 @@
import json
from aiohttp import ClientSession
from ..typing import Any, AsyncGenerator
from .base_provider import AsyncGeneratorProvider, get_cookies, format_prompt
class OpenAssistant(AsyncGeneratorProvider):
url = "https://open-assistant.io/chat"
needs_auth = True
working = True
model = "OA_SFT_Llama_30B_6"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
proxy: str = None,
cookies: dict = None,
**kwargs: Any
) -> AsyncGenerator:
if proxy and "://" not in proxy:
proxy = f"http://{proxy}"
if not cookies:
cookies = get_cookies("open-assistant.io")
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
}
async with ClientSession(
cookies=cookies,
headers=headers
) as session:
async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
chat_id = (await response.json())["id"]
data = {
"chat_id": chat_id,
"content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
"parent_id": None
}
async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
parent_id = (await response.json())["id"]
data = {
"chat_id": chat_id,
"parent_id": parent_id,
"model_config_name": model if model else cls.model,
"sampling_parameters":{
"top_k": 50,
"top_p": None,
"typical_p": None,
"temperature": 0.35,
"repetition_penalty": 1.1111111111111112,
"max_new_tokens": 1024,
**kwargs
},
"plugins":[]
}
async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
data = await response.json()
if "id" in data:
message_id = data["id"]
elif "message" in data:
raise RuntimeError(data["message"])
else:
response.raise_for_status()
params = {
'chat_id': chat_id,
'message_id': message_id,
}
async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
start = "data: "
async for line in response.content:
line = line.decode("utf-8")
if line and line.startswith(start):
line = json.loads(line[len(start):])
if line["event_type"] == "token":
yield line["text"]
params = {
'chat_id': chat_id,
}
async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
response.raise_for_status()
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"

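# Condensed outline of the OpenAssistant exchange implemented above
# (endpoints as in the diff; request bodies trimmed):
# 1. POST /api/chat                            -> {"id": chat_id}
# 2. POST /api/chat/prompter_message           -> {"id": parent_id}   (sends the prompt)
# 3. POST /api/chat/assistant_message          -> {"id": message_id}  (requests a reply)
# 4. POST /api/chat/events?chat_id&message_id  -> "data: " stream of token events
# 5. DELETE /api/chat?chat_id                  (cleanup)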
@@ -4,8 +4,11 @@ try:
except ImportError:
has_module = False
from .base_provider import AsyncGeneratorProvider, get_cookies
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, get_cookies, format_prompt
from ..typing import AsyncGenerator
from httpx import AsyncClient
import json
class OpenaiChat(AsyncGeneratorProvider):
url = "https://chat.openai.com"
@@ -14,6 +17,7 @@ class OpenaiChat(AsyncGeneratorProvider):
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_stream = True
_access_token = None
@classmethod
async def create_async_generator(
@@ -21,9 +25,9 @@ class OpenaiChat(AsyncGeneratorProvider):
model: str,
messages: list[dict[str, str]],
proxy: str = None,
access_token: str = None,
access_token: str = _access_token,
cookies: dict = None,
**kwargs
**kwargs: dict
) -> AsyncGenerator:
config = {"access_token": access_token, "model": model}
@@ -37,21 +41,12 @@ class OpenaiChat(AsyncGeneratorProvider):
)
if not access_token:
cookies = cookies if cookies else get_cookies("chat.openai.com")
response = await bot.session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
access_token = response.json()["accessToken"]
bot.set_access_token(access_token)
if len(messages) > 1:
formatted = "\n".join(
["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
)
prompt = f"{formatted}\nAssistant:"
else:
prompt = messages.pop()["content"]
cookies = cookies if cookies else get_cookies("chat.openai.com")
cls._access_token = await get_access_token(bot.session, cookies)
bot.set_access_token(cls._access_token)
returned = None
async for message in bot.ask(prompt):
async for message in bot.ask(format_prompt(messages)):
message = message["message"]
if returned:
if message.startswith(returned):
@@ -61,6 +56,9 @@ class OpenaiChat(AsyncGeneratorProvider):
else:
yield message
returned = message
await bot.delete_conversation(bot.conversation_id)
@classmethod
@property
@@ -73,3 +71,12 @@ class OpenaiChat(AsyncGeneratorProvider):
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
async def get_access_token(session: AsyncClient, cookies: dict):
response = await session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
response.raise_for_status()
try:
return response.json()["accessToken"]
except json.decoder.JSONDecodeError:
raise RuntimeError(f"Response: {response.text}")

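The returned/startswith bookkeeping in the OpenaiChat loop above converts cumulative messages (each one repeating everything streamed so far) into incremental deltas. The same logic, reduced to plain strings as a standalone sketch:

def to_deltas(cumulative):
    # Turns ["He", "Hello", "Hello!"] into "He", "llo", "!".
    returned = None
    for message in cumulative:
        if returned and message.startswith(returned):
            yield message[len(returned):]
        else:
            yield message
        returned = message

print(list(to_deltas(["He", "Hello", "Hello!"])))  # ['He', 'llo', '!']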
@@ -1,55 +1,37 @@
import urllib.parse, json
from aiohttp import ClientSession
import json
from curl_cffi import requests
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
class You(BaseProvider):
url = "https://you.com"
working = True
class You(AsyncGeneratorProvider):
url = "https://you.com"
working = True
supports_gpt_35_turbo = True
supports_stream = True
@staticmethod
def create_completion(
async def create_async_generator(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
url_param = _create_url_param(messages, kwargs.get("history", []))
headers = _create_header()
response = requests.get(f"https://you.com/api/streamingSearch?{url_param}",
headers=headers, impersonate="chrome107")
response.raise_for_status()
start = 'data: {"youChatToken": '
for line in response.content.splitlines():
line = line.decode('utf-8')
if line.startswith(start):
yield json.loads(line[len(start): -1])
def _create_url_param(messages: list[dict[str, str]], history: list[dict[str, str]]):
prompt = ""
for message in messages:
prompt += "%s: %s\n" % (message["role"], message["content"])
prompt += "assistant:"
chat = _convert_chat(history)
param = {"q": prompt, "domain": "youchat", "chat": chat}
return urllib.parse.urlencode(param)
def _convert_chat(messages: list[dict[str, str]]):
message_iter = iter(messages)
return [
{"question": user["content"], "answer": assistant["content"]}
for user, assistant in zip(message_iter, message_iter)
]
def _create_header():
return {
"accept": "text/event-stream",
"referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
}
cookies: dict = None,
**kwargs,
) -> AsyncGenerator:
if not cookies:
cookies = get_cookies("you.com")
headers = {
"Accept": "text/event-stream",
"Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0"
}
async with ClientSession(headers=headers, cookies=cookies) as session:
async with session.get(
"https://you.com/api/streamingSearch",
params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
) as response:
start = 'data: {"youChatToken": '
async for line in response.content:
line = line.decode('utf-8')
if line.startswith(start):
yield json.loads(line[len(start): -2])

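You.com streams lines shaped like data: {"youChatToken": "..."}, so the loop slices out just the token's JSON string. Illustrated with a hypothetical stream line:

import json

line = 'data: {"youChatToken": "Hi"}\n'  # hypothetical stream line
start = 'data: {"youChatToken": '
if line.startswith(start):
    print(json.loads(line[len(start):-2]))  # -> Hi (drops the trailing }\n)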
@@ -1,29 +1,27 @@
import requests
from aiohttp import ClientSession
from ..typing import Any, CreateResult
from .base_provider import BaseProvider
from .base_provider import AsyncProvider, format_prompt
class Yqcloud(BaseProvider):
url = "https://chat9.yqcloud.top/"
working = True
supports_gpt_35_turbo = True
class Yqcloud(AsyncProvider):
url = "https://chat9.yqcloud.top/"
working = True
supports_gpt_35_turbo = True
@staticmethod
def create_completion(
async def create_async(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
headers = _create_header()
payload = _create_payload(messages)
response = requests.post("https://api.aichatos.cloud/api/generateStream",
headers=headers, json=payload)
response.raise_for_status()
response.encoding = 'utf-8'
yield response.text
proxy: str = None,
**kwargs,
) -> str:
async with ClientSession(
headers=_create_header()
) as session:
payload = _create_payload(messages)
async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
response.raise_for_status()
return await response.text()
def _create_header():
@@ -35,15 +33,11 @@ def _create_header():
def _create_payload(messages: list[dict[str, str]]):
prompt = ""
for message in messages:
prompt += "%s: %s\n" % (message["role"], message["content"])
prompt += "assistant:"
return {
"prompt" : prompt,
"network" : True,
"system" : "",
"prompt": format_prompt(messages),
"network": True,
"system": "",
"withoutContext": False,
"stream" : False,
}
"stream": False,
"userId": "#/chat/1693025544336"
}

@@ -13,11 +13,12 @@ from .EasyChat import EasyChat
from .Forefront import Forefront
from .GetGpt import GetGpt
from .H2o import H2o
from .Hugchat import Hugchat
from .HuggingChat import HuggingChat
from .Liaobots import Liaobots
from .Lockchat import Lockchat
from .Opchatgpts import Opchatgpts
from .OpenaiChat import OpenaiChat
from .OpenAssistant import OpenAssistant
from .Raycast import Raycast
from .Theb import Theb
from .Vercel import Vercel
@@ -48,12 +49,13 @@ __all__ = [
'Forefront',
'GetGpt',
'H2o',
'Hugchat',
'HuggingChat',
'Liaobots',
'Lockchat',
'Opchatgpts',
'Raycast',
'OpenaiChat',
'OpenAssistant',
'Theb',
'Vercel',
'Wewordle',

@@ -4,8 +4,7 @@ from ..typing import Any, CreateResult, AsyncGenerator, Union
import browser_cookie3
import asyncio
from time import time
import math
class BaseProvider(ABC):
url: str
@@ -48,6 +47,17 @@ def get_cookies(cookie_domain: str) -> dict:
return _cookies[cookie_domain]
def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
if add_special_tokens or len(messages) > 1:
formatted = "\n".join(
["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
)
return f"{formatted}\nAssistant:"
else:
return messages.pop()["content"]
class AsyncProvider(BaseProvider):
@classmethod
def create_completion(
@@ -72,20 +82,19 @@ class AsyncGeneratorProvider(AsyncProvider):
cls,
model: str,
messages: list[dict[str, str]],
stream: bool = True, **kwargs: Any) -> CreateResult:
if stream:
yield from run_generator(cls.create_async_generator(model, messages, **kwargs))
else:
yield from AsyncProvider.create_completion(cls=cls, model=model, messages=messages, **kwargs)
stream: bool = True,
**kwargs
) -> CreateResult:
yield from run_generator(cls.create_async_generator(model, messages, stream=stream, **kwargs))
@classmethod
async def create_async(
cls,
model: str,
messages: list[dict[str, str]], **kwargs: Any) -> str:
chunks = [chunk async for chunk in cls.create_async_generator(model, messages, **kwargs)]
messages: list[dict[str, str]],
**kwargs
) -> str:
chunks = [chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)]
if chunks:
return "".join(chunks)
@@ -93,8 +102,9 @@ class AsyncGeneratorProvider(AsyncProvider):
@abstractmethod
def create_async_generator(
model: str,
messages: list[dict[str, str]]) -> AsyncGenerator:
messages: list[dict[str, str]],
**kwargs
) -> AsyncGenerator:
raise NotImplementedError()

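The new format_prompt helper centralizes the prompt flattening that each provider previously reimplemented inline. Its behavior, illustrated:

messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user", "content": "How are you?"},
]
print(format_prompt(messages))
# User: Hi
# Assistant: Hello!
# User: How are you?
# Assistant:

print(format_prompt([{"role": "user", "content": "Hi"}]))
# Hi  (a single message passes through unchanged)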
@@ -7,6 +7,13 @@ class Model:
base_provider: str
best_provider: type[BaseProvider]
# Config for HuggingChat, OpenAssistant
# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
default = Model(
name="",
base_provider="huggingface",
best_provider=H2o,
)
# GPT-3.5 / GPT-4
gpt_35_turbo = Model(

@@ -0,0 +1,25 @@
from time import time
async def log_time_async(method: callable, **kwargs):
start = time()
result = await method(**kwargs)
secs = f"{round(time() - start, 2)} secs"
if result:
return " ".join([result, secs])
return secs
def log_time_yield(method: callable, **kwargs):
start = time()
result = yield from method(**kwargs)
yield f" {round(time() - start, 2)} secs"
def log_time(method: callable, **kwargs):
start = time()
result = method(**kwargs)
secs = f"{round(time() - start, 2)} secs"
if result:
return " ".join([result, secs])
return secs

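Each helper runs the wrapped callable and appends its wall-clock duration; a quick illustration of log_time:

from time import sleep

def slow_hello() -> str:
    sleep(0.1)
    return "Hello"

print(log_time(slow_hello))  # e.g. "Hello 0.1 secs"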
@@ -1,95 +1,96 @@
import sys
from pathlib import Path
import asyncio
from time import time
sys.path.append(str(Path(__file__).parent.parent))
import g4f
from testing.log_time import log_time, log_time_async, log_time_yield
providers = [g4f.Provider.OpenaiChat, g4f.Provider.Bard, g4f.Provider.Bing]
# Async support
async def log_time_async(method: callable, **kwargs):
start = time()
result = await method(**kwargs)
secs = f"{round(time() - start, 2)} secs"
if result:
return " ".join([result, secs])
return secs
_providers = [
g4f.Provider.H2o,
g4f.Provider.You,
g4f.Provider.HuggingChat,
g4f.Provider.OpenAssistant,
g4f.Provider.Bing,
g4f.Provider.Bard
]
def log_time_yield(method: callable, **kwargs):
start = time()
result = yield from method(**kwargs)
yield f" {round(time() - start, 2)} secs"
_instruct = "Hello, tell about you in one sentence."
_example = """
OpenaiChat: Hello! How can I assist you today? 2.0 secs
Bard: Hello! How can I help you today? 3.44 secs
Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
Async Total: 4.25 secs
OpenaiChat: Hello! How can I assist you today? 1.85 secs
Bard: Hello! How can I help you today? 3.38 secs
Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
Stream Total: 11.37 secs
OpenaiChat: Hello! How can I help you today? 3.28 secs
Bard: Hello there! How can I help you today? 3.58 secs
Bing: Hello! How can I help you today? 3.28 secs
No Stream Total: 10.14 secs
"""
print("Yqcloud:", end="")
for response in log_time_yield(
g4f.ChatCompletion.create,
model=g4f.models.gpt_35_turbo,
messages=[{"role": "user", "content": _instruct}],
provider=g4f.Provider.Yqcloud,
#cookies=g4f.get_cookies(".huggingface.co"),
stream=True,
auth=True
):
print(response, end="")
print()
print()
def log_time(method: callable, **kwargs):
start = time()
result = method(**kwargs)
secs = f"{round(time() - start, 2)} secs"
if result:
return " ".join([result, secs])
return secs
async def run_async():
responses = []
for provider in providers:
responses.append(log_time_async(
responses = [
log_time_async(
provider.create_async,
model=None,
messages=[{"role": "user", "content": "Hello"}],
log_time=True
))
messages=[{"role": "user", "content": _instruct}],
)
for provider in _providers
]
responses = await asyncio.gather(*responses)
for idx, provider in enumerate(providers):
for idx, provider in enumerate(_providers):
print(f"{provider.__name__}:", responses[idx])
print("Async Total:", asyncio.run(log_time_async(run_async)))
print()
# Streaming support:
def run_stream():
for provider in providers:
for provider in _providers:
print(f"{provider.__name__}: ", end="")
for response in log_time_yield(
provider.create_completion,
model=None,
messages=[{"role": "user", "content": "Hello"}],
messages=[{"role": "user", "content": _instruct}],
):
print(response, end="")
print()
print("Stream Total:", log_time(run_stream))
print()
# No streaming support:
def create_completion():
for provider in providers:
def create_no_stream():
for provider in _providers:
print(f"{provider.__name__}:", end=" ")
for response in log_time_yield(
g4f.Provider.Bard.create_completion,
provider.create_completion,
model=None,
messages=[{"role": "user", "content": "Hello"}],
messages=[{"role": "user", "content": _instruct}],
stream=False
):
print(response, end="")
print()
print("No Stream Total:", log_time(create_completion))
for response in g4f.Provider.Hugchat.create_completion(
model=None,
messages=[{"role": "user", "content": "Hello, tell about you."}],
):
print("Hugchat:", response)
"""
OpenaiChat: Hello! How can I assist you today? 2.0 secs
Bard: Hello! How can I help you today? 3.44 secs
Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
Async Total: 4.25 secs
OpenaiChat: Hello! How can I assist you today? 1.85 secs
Bard: Hello! How can I help you today? 3.38 secs
Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
Stream Total: 11.37 secs
OpenaiChat: Hello! How can I help you today? 3.28 secs
Bard: Hello there! How can I help you today? 3.58 secs
Bing: Hello! How can I help you today? 3.28 secs
No Stream Total: 10.14 secs
"""
print("No Stream Total:", log_time(create_no_stream))
print()