mirror of https://github.com/xtekky/gpt4free
Merge branch 'main' into feature/docker-setup
commit f81e618958
g4f/Provider/HuggingChat.py
@@ -0,0 +1,110 @@
from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies


class HuggingChat(AsyncGeneratorProvider):
    url = "https://huggingface.co/chat/"
    needs_auth = True
    working = True
    model = "OpenAssistant/oasst-sft-6-llama-30b-xor"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        proxy: str = None,
        cookies: dict = None,
        **kwargs
    ) -> AsyncGenerator:
        if not cookies:
            cookies = get_cookies(".huggingface.co")
        model = model if model else cls.model
        if proxy and "://" not in proxy:
            proxy = f"http://{proxy}"

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            async with session.post("https://huggingface.co/chat/conversation", proxy=proxy, json={"model": model}) as response:
                conversation_id = (await response.json())["conversationId"]

            send = {
                "inputs": format_prompt(messages),
                "parameters": {
                    "temperature": 0.2,
                    "truncate": 1000,
                    "max_new_tokens": 1024,
                    "stop": ["</s>"],
                    "top_p": 0.95,
                    "repetition_penalty": 1.2,
                    "top_k": 50,
                    "return_full_text": False,
                    **kwargs
                },
                "stream": stream,
                "options": {
                    "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
                    "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
                    "is_retry": False,
                    "use_cache": False,
                    "web_search_id": ""
                }
            }
            start = "data:"
            first = True
            async with session.post(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy, json=send) as response:
                async for line in response.content:
                    line = line.decode("utf-8")
                    if not line:
                        continue
                    if not stream:
                        try:
                            data = json.loads(line)
                        except json.decoder.JSONDecodeError:
                            raise RuntimeError(f"No json: {line}")
                        if "error" in data:
                            raise RuntimeError(data["error"])
                        elif isinstance(data, list):
                            yield data[0]["generated_text"]
                        else:
                            raise RuntimeError(f"Response: {line}")
                    elif line.startswith(start):
                        line = json.loads(line[len(start):-1])
                        if not line:
                            continue
                        if "token" not in line:
                            raise RuntimeError(f"Response: {line}")
                        if not line["token"]["special"]:
                            if first:
                                yield line["token"]["text"].lstrip()
                                first = False
                            else:
                                yield line["token"]["text"]

            async with session.delete(f"https://huggingface.co/chat/conversation/{conversation_id}", proxy=proxy) as response:
                response.raise_for_status()

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
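A minimal driver for this provider, added here for illustration only (not part of the commit): it assumes the repository is importable as g4f and that logged-in huggingface.co session cookies are discoverable by get_cookies(), since needs_auth is True.

import asyncio
import g4f

async def main():
    # Streams tokens from HuggingChat; assumes valid huggingface.co
    # cookies are available in a local browser profile.
    async for token in g4f.Provider.HuggingChat.create_async_generator(
        model="OpenAssistant/oasst-sft-6-llama-30b-xor",
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(token, end="", flush=True)

asyncio.run(main())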
g4f/Provider/OpenAssistant.py
@@ -0,0 +1,102 @@
from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import Any, AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies


class OpenAssistant(AsyncGeneratorProvider):
    url = "https://open-assistant.io/chat"
    needs_auth = True
    working = True
    model = "OA_SFT_Llama_30B_6"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        cookies: dict = None,
        **kwargs: Any
    ) -> AsyncGenerator:
        if proxy and "://" not in proxy:
            proxy = f"http://{proxy}"
        if not cookies:
            cookies = get_cookies("open-assistant.io")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
                chat_id = (await response.json())["id"]

            data = {
                "chat_id": chat_id,
                "content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
                "parent_id": None
            }
            async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
                parent_id = (await response.json())["id"]

            data = {
                "chat_id": chat_id,
                "parent_id": parent_id,
                "model_config_name": model if model else cls.model,
                "sampling_parameters": {
                    "top_k": 50,
                    "top_p": None,
                    "typical_p": None,
                    "temperature": 0.35,
                    "repetition_penalty": 1.1111111111111112,
                    "max_new_tokens": 1024,
                    **kwargs
                },
                "plugins": []
            }
            async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
                data = await response.json()
                if "id" in data:
                    message_id = data["id"]
                elif "message" in data:
                    raise RuntimeError(data["message"])
                else:
                    response.raise_for_status()

            params = {
                'chat_id': chat_id,
                'message_id': message_id,
            }
            async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
                start = "data: "
                async for line in response.content:
                    line = line.decode("utf-8")
                    if line and line.startswith(start):
                        line = json.loads(line[len(start):])
                        if line["event_type"] == "token":
                            yield line["text"]

            params = {
                'chat_id': chat_id,
            }
            async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
                response.raise_for_status()

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
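Because extra **kwargs are spread into "sampling_parameters" above, decoding settings can be overridden per call. A hedged sketch (assumes g4f is importable and open-assistant.io login cookies exist locally):

import asyncio
import g4f

async def main():
    # temperature and top_k here replace the defaults via the **kwargs merge.
    async for token in g4f.Provider.OpenAssistant.create_async_generator(
        model="OA_SFT_Llama_30B_6",
        messages=[{"role": "user", "content": "Hello"}],
        temperature=0.2,
        top_k=20,
    ):
        print(token, end="")

asyncio.run(main())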
g4f/Provider/OpenaiChat.py
@@ -0,0 +1,86 @@
from __future__ import annotations

has_module = True
try:
    from revChatGPT.V1 import AsyncChatbot
except ImportError:
    has_module = False

import json

from httpx import AsyncClient

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies


class OpenaiChat(AsyncGeneratorProvider):
    url = "https://chat.openai.com"
    needs_auth = True
    working = has_module
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_stream = True
    _access_token = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        access_token: str = _access_token,
        cookies: dict = None,
        **kwargs: dict
    ) -> AsyncGenerator:

        config = {"access_token": access_token, "model": model}
        if proxy:
            if "://" not in proxy:
                proxy = f"http://{proxy}"
            config["proxy"] = proxy

        bot = AsyncChatbot(
            config=config
        )

        if not access_token:
            cookies = cookies if cookies else get_cookies("chat.openai.com")
            cls._access_token = await get_access_token(bot.session, cookies)
            bot.set_access_token(cls._access_token)

        returned = None
        async for message in bot.ask(format_prompt(messages)):
            message = message["message"]
            if returned:
                if message.startswith(returned):
                    new = message[len(returned):]
                    if new:
                        yield new
            else:
                yield message
            returned = message

        await bot.delete_conversation(bot.conversation_id)

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


async def get_access_token(session: AsyncClient, cookies: dict):
    response = await session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
    response.raise_for_status()
    try:
        return response.json()["accessToken"]
    except json.decoder.JSONDecodeError:
        raise RuntimeError(f"Response: {response.text}")
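The try/except import at the top is the availability gate: working is False when revChatGPT is missing. A small sketch of the check a caller might make (illustration, assumes g4f is importable):

import g4f

# OpenaiChat advertises itself as unusable when revChatGPT is absent,
# because the class sets working = has_module.
if not g4f.Provider.OpenaiChat.working:
    print("revChatGPT is not installed; OpenaiChat is disabled")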
g4f/Provider/Wuguokai.py
@@ -0,0 +1,68 @@
from __future__ import annotations

import random

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Wuguokai(BaseProvider):
    url = 'https://chat.wuguokai.xyz'
    supports_gpt_35_turbo = True
    working = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        base = ''
        for message in messages:
            base += '%s: %s\n' % (message['role'], message['content'])
        base += 'assistant:'

        headers = {
            'authority': 'ai-api.wuguokai.xyz',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'content-type': 'application/json',
            'origin': 'https://chat.wuguokai.xyz',
            'referer': 'https://chat.wuguokai.xyz/',
            'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
        }
        data = {
            "prompt": base,
            "options": {},
            "userId": f"#/chat/{random.randint(1, 99999999)}",
            "usingContext": True
        }
        response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})
        # The marker below is a site notice meaning roughly
        # "if the answer fails, retry or refresh the page a few times".
        _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
        if response.status_code == 200:
            if len(_split) > 1:
                yield _split[1].strip()
            else:
                yield _split[0].strip()
        else:
            raise Exception(f"Error: {response.status_code} {response.reason}")

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool")
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
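This is a synchronous provider, so it can be driven through the blocking ChatCompletion.create entry point shown later in this diff. A sketch, assuming g4f is importable:

import g4f

# Wuguokai blocks on requests.post, so no event loop is needed.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    provider=g4f.Provider.Wuguokai,
)
print(response)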
g4f/Provider/You.py
@@ -1,59 +1,40 @@
-import re
-import urllib.parse
+from __future__ import annotations
 
-from curl_cffi import requests
+import json
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from aiohttp import ClientSession
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
 
 
-class You(BaseProvider):
+class You(AsyncGeneratorProvider):
     url = "https://you.com"
     working = True
     supports_gpt_35_turbo = True
     supports_stream = True
 
     @staticmethod
-    def create_completion(
+    async def create_async_generator(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        url_param = _create_url_param(messages)
-        headers = _create_header()
-        url = f"https://you.com/api/streamingSearch?{url_param}"
-        response = requests.get(
-            url,
-            headers=headers,
-            impersonate="chrome107",
-        )
-        response.raise_for_status()
-        yield _parse_output(response.text)
-
-
-def _create_url_param(messages: list[dict[str, str]]):
-    prompt = messages.pop()["content"]
-    chat = _convert_chat(messages)
-    param = {"q": prompt, "domain": "youchat", "chat": chat}
-    return urllib.parse.urlencode(param)
-
-
-def _convert_chat(messages: list[dict[str, str]]):
-    message_iter = iter(messages)
-    return [
-        {"question": user["content"], "answer": assistant["content"]}
-        for user, assistant in zip(message_iter, message_iter)
-    ]
-
-
-def _create_header():
-    return {
-        "accept": "text/event-stream",
-        "referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
-    }
-
-
-def _parse_output(output: str) -> str:
-    regex = r"^data:\s{\"youChatToken\": \"(.*)\"}$"
-    tokens = [token for token in re.findall(regex, output, re.MULTILINE)]
-    return "".join(tokens)
+        cookies: dict = None,
+        **kwargs,
+    ) -> AsyncGenerator:
+        if not cookies:
+            cookies = get_cookies("you.com")
+        headers = {
+            "Accept": "text/event-stream",
+            "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0"
+        }
+        async with ClientSession(headers=headers, cookies=cookies) as session:
+            async with session.get(
+                "https://you.com/api/streamingSearch",
+                params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
+            ) as response:
+                start = 'data: {"youChatToken": '
+                async for line in response.content:
+                    line = line.decode('utf-8')
+                    if line.startswith(start):
+                        yield json.loads(line[len(start): -2])
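The slice arithmetic in the last added line is easy to misread, so here is a standalone snippet (illustration only) showing what it does to one server-sent-event line:

import json

start = 'data: {"youChatToken": '
line = 'data: {"youChatToken": "Hello"}\n'  # one SSE line as received
if line.startswith(start):
    # [len(start):-2] keeps just the JSON string literal,
    # dropping the closing '}' and the trailing newline.
    token = json.loads(line[len(start):-2])
    assert token == "Hello"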
g4f/Provider/Yqcloud.py
@@ -1,45 +1,45 @@
-import requests
+from __future__ import annotations
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from aiohttp import ClientSession
+
+from .base_provider import AsyncProvider, format_prompt
 
 
-class Yqcloud(BaseProvider):
+class Yqcloud(AsyncProvider):
     url = "https://chat9.yqcloud.top/"
     working = True
     supports_gpt_35_turbo = True
 
     @staticmethod
-    def create_completion(
+    async def create_async(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        headers = _create_header()
-        payload = _create_payload(messages)
-
-        url = "https://api.aichatos.cloud/api/generateStream"
-        response = requests.post(url=url, headers=headers, json=payload)
-        response.raise_for_status()
-        response.encoding = 'utf-8'
-        yield response.text
+        proxy: str = None,
+        **kwargs,
+    ) -> str:
+        async with ClientSession(
+            headers=_create_header()
+        ) as session:
+            payload = _create_payload(messages)
+            async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
+                response.raise_for_status()
+                return await response.text()
 
 
 def _create_header():
     return {
-        "accept": "application/json, text/plain, */*",
-        "content-type": "application/json",
-        "origin": "https://chat9.yqcloud.top",
+        "accept" : "application/json, text/plain, */*",
+        "content-type" : "application/json",
+        "origin" : "https://chat9.yqcloud.top",
     }
 
 
 def _create_payload(messages: list[dict[str, str]]):
-    prompt = messages[-1]["content"]
     return {
-        "prompt": prompt,
+        "prompt": format_prompt(messages),
         "network": True,
         "system": "",
         "withoutContext": False,
         "stream": False,
         "userId": "#/chat/1693025544336"
     }
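Unlike the generator providers above, an AsyncProvider resolves to the whole completion in one await. A hedged usage sketch (assumes g4f is importable):

import asyncio
import g4f

# create_async returns the full response text at once, not a stream.
text = asyncio.run(g4f.Provider.Yqcloud.create_async(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
))
print(text)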
g4f/Provider/__init__.py
@@ -1,59 +1,69 @@
-from .Acytoo import Acytoo
-from .Aichat import Aichat
-from .Ails import Ails
-from .AiService import AiService
-from .AItianhu import AItianhu
-from .Bard import Bard
-from .base_provider import BaseProvider
-from .Bing import Bing
-from .ChatgptAi import ChatgptAi
-from .ChatgptLogin import ChatgptLogin
-from .DeepAi import DeepAi
-from .DfeHub import DfeHub
-from .EasyChat import EasyChat
-from .Forefront import Forefront
-from .GetGpt import GetGpt
-from .H2o import H2o
-from .Liaobots import Liaobots
-from .Lockchat import Lockchat
-from .Opchatgpts import Opchatgpts
-from .Raycast import Raycast
-from .Theb import Theb
-from .Vercel import Vercel
-from .Wewordle import Wewordle
-from .You import You
-from .Yqcloud import Yqcloud
-from .Equing import Equing
-from .FastGpt import FastGpt
-from .V50 import V50
+from __future__ import annotations
+from .Acytoo import Acytoo
+from .Aichat import Aichat
+from .Ails import Ails
+from .AiService import AiService
+from .AItianhu import AItianhu
+from .Bard import Bard
+from .Bing import Bing
+from .ChatgptAi import ChatgptAi
+from .ChatgptLogin import ChatgptLogin
+from .DeepAi import DeepAi
+from .DfeHub import DfeHub
+from .EasyChat import EasyChat
+from .Forefront import Forefront
+from .GetGpt import GetGpt
+from .H2o import H2o
+from .HuggingChat import HuggingChat
+from .Liaobots import Liaobots
+from .Lockchat import Lockchat
+from .Opchatgpts import Opchatgpts
+from .OpenaiChat import OpenaiChat
+from .OpenAssistant import OpenAssistant
+from .Raycast import Raycast
+from .Theb import Theb
+from .Vercel import Vercel
+from .Wewordle import Wewordle
+from .You import You
+from .Yqcloud import Yqcloud
+from .Equing import Equing
+from .FastGpt import FastGpt
+from .V50 import V50
+from .Wuguokai import Wuguokai
+
+from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
 
 __all__ = [
-    "BaseProvider",
-    "Acytoo",
-    "Aichat",
-    "Ails",
-    "AiService",
-    "AItianhu",
-    "Bard",
-    "Bing",
-    "ChatgptAi",
-    "ChatgptLogin",
-    "DeepAi",
-    "DfeHub",
-    "EasyChat",
-    "Forefront",
-    "GetGpt",
-    "H2o",
-    "Liaobots",
-    "Lockchat",
-    "Opchatgpts",
-    "Raycast",
-    "Theb",
-    "Vercel",
-    "Wewordle",
-    "You",
-    "Yqcloud",
-    "Equing",
-    "FastGpt",
-    "V50"
+    'BaseProvider',
+    'Acytoo',
+    'Aichat',
+    'Ails',
+    'AiService',
+    'AItianhu',
+    'Bard',
+    'Bing',
+    'ChatgptAi',
+    'ChatgptLogin',
+    'DeepAi',
+    'DfeHub',
+    'EasyChat',
+    'Forefront',
+    'GetGpt',
+    'H2o',
+    'HuggingChat',
+    'Liaobots',
+    'Lockchat',
+    'Opchatgpts',
+    'Raycast',
+    'OpenaiChat',
+    'OpenAssistant',
+    'Theb',
+    'Vercel',
+    'Wewordle',
+    'You',
+    'Yqcloud',
+    'Equing',
+    'FastGpt',
+    'Wuguokai',
+    'V50'
 ]
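With the new base classes exported alongside the providers, the package can be inventoried at runtime. A sketch (assumes g4f is importable; getattr defaults cover classes that do not set a flag):

import g4f

# Enumerate everything exported above and report each class's status flags.
for name in g4f.Provider.__all__:
    cls = getattr(g4f.Provider, name)
    print(f"{name}: working={getattr(cls, 'working', False)}, "
          f"needs_auth={getattr(cls, 'needs_auth', False)}")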
g4f/__init__.py
@@ -1,45 +1,43 @@
-from . import models
-from .Provider import BaseProvider
-from .typing import Any, CreateResult, Union
+from __future__ import annotations
+from . import models
+from .Provider import BaseProvider
+from .typing import Any, CreateResult, Union
 
 logging = False
 
 
 class ChatCompletion:
     @staticmethod
     def create(
-        model: Union[models.Model, str],
-        messages: list[dict[str, str]],
-        provider: Union[type[BaseProvider], None] = None,
-        stream: bool = False,
-        auth: Union[str, None] = None,
-        **kwargs: Any,
-    ) -> Union[CreateResult, str]:
+        model    : Union[models.Model, str],
+        messages : list[dict[str, str]],
+        provider : Union[type[BaseProvider], None] = None,
+        stream   : bool = False,
+        auth     : Union[str, None] = None, **kwargs: Any) -> Union[CreateResult, str]:
+
         if isinstance(model, str):
             try:
                 model = models.ModelUtils.convert[model]
             except KeyError:
-                raise Exception(f"The model: {model} does not exist")
+                raise Exception(f'The model: {model} does not exist')
 
         provider = model.best_provider if provider == None else provider
 
         if not provider.working:
-            raise Exception(f"{provider.__name__} is not working")
+            raise Exception(f'{provider.__name__} is not working')
 
         if provider.needs_auth and not auth:
             raise Exception(
-                f'ValueError: {provider.__name__} requires authentication (use auth="cookie or token or jwt ..." param)'
-            )
+                f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
 
         if provider.needs_auth:
-            kwargs["auth"] = auth
+            kwargs['auth'] = auth
 
         if not provider.supports_stream and stream:
             raise Exception(
-                f"ValueError: {provider.__name__} does not support 'stream' argument"
-            )
+                f'ValueError: {provider.__name__} does not support "stream" argument')
 
         if logging:
-            print(f"Using {provider.__name__} provider")
+            print(f'Using {provider.__name__} provider')
 
         result = provider.create_completion(model.name, messages, stream, **kwargs)
-        return result if stream else "".join(result)
+        return result if stream else ''.join(result)
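The last line is the key behavioral detail: create() always gets a generator back from the provider and either returns it (stream=True) or joins it into one string. A usage sketch, mirroring the test script at the end of this diff:

import g4f

# Non-streaming: create() joins the provider's generator into one string.
print(g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
))

# Streaming: chunks arrive as the provider yields them
# (the provider must set supports_stream, or create() raises).
for chunk in g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(chunk, end="")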
g4f/models.py
@@ -1,225 +1,207 @@
 from __future__ import annotations
 from dataclasses import dataclass
 
 from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel, Equing
 
 
 @dataclass
 class Model:
     name: str
     base_provider: str
     best_provider: type[BaseProvider]
 
+# Config for HuggingChat, OpenAssistant
+# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
+default = Model(
+    name="",
+    base_provider="huggingface",
+    best_provider=H2o,
+)
+
 # GPT-3.5 / GPT-4
 gpt_35_turbo = Model(
-    name="gpt-3.5-turbo",
-    base_provider="openai",
-    best_provider=GetGpt,
-)
+    name = 'gpt-3.5-turbo',
+    base_provider = 'openai',
+    best_provider = GetGpt)
 
 gpt_4 = Model(
-    name="gpt-4",
-    base_provider="openai",
-    best_provider=Liaobots,
-)
+    name = 'gpt-4',
+    base_provider = 'openai',
+    best_provider = Liaobots)
 
 # Bard
 palm = Model(
-    name="palm",
-    base_provider="google",
-    best_provider=Bard,
-)
+    name = 'palm',
+    base_provider = 'google',
+    best_provider = Bard)
 
 # H2o
 falcon_7b = Model(
-    name="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
-    base_provider="huggingface",
-    best_provider=H2o,
-)
+    name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
+    base_provider = 'huggingface',
+    best_provider = H2o)
 
 falcon_40b = Model(
-    name="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
-    base_provider="huggingface",
-    best_provider=H2o,
-)
+    name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
+    base_provider = 'huggingface',
+    best_provider = H2o)
 
 llama_13b = Model(
-    name="h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b",
-    base_provider="huggingface",
-    best_provider=H2o,
-)
+    name = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
+    base_provider = 'huggingface',
+    best_provider = H2o)
 
 # Vercel
 claude_instant_v1 = Model(
-    name="anthropic:claude-instant-v1",
-    base_provider="anthropic",
-    best_provider=Vercel,
-)
+    name = 'anthropic:claude-instant-v1',
+    base_provider = 'anthropic',
+    best_provider = Vercel)
 
 claude_v1 = Model(
-    name="anthropic:claude-v1",
-    base_provider="anthropic",
-    best_provider=Vercel,
-)
+    name = 'anthropic:claude-v1',
+    base_provider = 'anthropic',
+    best_provider = Vercel)
 
 claude_v2 = Model(
-    name="anthropic:claude-v2",
-    base_provider="anthropic",
-    best_provider=Vercel,
-)
+    name = 'anthropic:claude-v2',
+    base_provider = 'anthropic',
+    best_provider = Vercel)
 
 command_light_nightly = Model(
-    name="cohere:command-light-nightly",
-    base_provider="cohere",
-    best_provider=Vercel,
-)
+    name = 'cohere:command-light-nightly',
+    base_provider = 'cohere',
+    best_provider = Vercel)
 
 command_nightly = Model(
-    name="cohere:command-nightly",
-    base_provider="cohere",
-    best_provider=Vercel,
-)
+    name = 'cohere:command-nightly',
+    base_provider = 'cohere',
+    best_provider = Vercel)
 
 gpt_neox_20b = Model(
-    name="huggingface:EleutherAI/gpt-neox-20b",
-    base_provider="huggingface",
-    best_provider=Vercel,
-)
+    name = 'huggingface:EleutherAI/gpt-neox-20b',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 oasst_sft_1_pythia_12b = Model(
-    name="huggingface:OpenAssistant/oasst-sft-1-pythia-12b",
-    base_provider="huggingface",
-    best_provider=Vercel,
-)
+    name = 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 oasst_sft_4_pythia_12b_epoch_35 = Model(
-    name="huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
-    base_provider="huggingface",
-    best_provider=Vercel,
-)
+    name = 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 santacoder = Model(
-    name="huggingface:bigcode/santacoder",
-    base_provider="huggingface",
-    best_provider=Vercel,
-)
+    name = 'huggingface:bigcode/santacoder',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 bloom = Model(
-    name="huggingface:bigscience/bloom",
-    base_provider="huggingface",
-    best_provider=Vercel,
-)
+    name = 'huggingface:bigscience/bloom',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 flan_t5_xxl = Model(
-    name="huggingface:google/flan-t5-xxl",
-    base_provider="huggingface",
-    best_provider=Vercel,
-)
+    name = 'huggingface:google/flan-t5-xxl',
+    base_provider = 'huggingface',
+    best_provider = Vercel)
 
 code_davinci_002 = Model(
-    name="openai:code-davinci-002",
-    base_provider="openai",
-    best_provider=Vercel,
-)
+    name = 'openai:code-davinci-002',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 gpt_35_turbo_16k = Model(
-    name="openai:gpt-3.5-turbo-16k",
-    base_provider="openai",
-    best_provider=Vercel,
-)
+    name = 'openai:gpt-3.5-turbo-16k',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 gpt_35_turbo_16k_0613 = Model(
-    name="openai:gpt-3.5-turbo-16k-0613",
-    base_provider="openai",
-    best_provider=Equing,
-)
+    name = 'openai:gpt-3.5-turbo-16k-0613',
+    base_provider = 'openai',
+    best_provider = Equing)
 
 gpt_4_0613 = Model(
-    name="openai:gpt-4-0613",
-    base_provider="openai",
-    best_provider=Vercel,
-)
+    name = 'openai:gpt-4-0613',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 text_ada_001 = Model(
-    name="openai:text-ada-001",
-    base_provider="openai",
-    best_provider=Vercel,
-)
+    name = 'openai:text-ada-001',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 text_babbage_001 = Model(
-    name="openai:text-babbage-001",
-    base_provider="openai",
-    best_provider=Vercel,
-)
+    name = 'openai:text-babbage-001',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 text_curie_001 = Model(
-    name="openai:text-curie-001",
-    base_provider="openai",
-    best_provider=Vercel,
-)
+    name = 'openai:text-curie-001',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 text_davinci_002 = Model(
-    name="openai:text-davinci-002",
-    base_provider="openai",
-    best_provider=Vercel,
-)
+    name = 'openai:text-davinci-002',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 text_davinci_003 = Model(
-    name="openai:text-davinci-003",
-    base_provider="openai",
-    best_provider=Vercel,
-)
+    name = 'openai:text-davinci-003',
+    base_provider = 'openai',
+    best_provider = Vercel)
 
 llama13b_v2_chat = Model(
-    name="replicate:a16z-infra/llama13b-v2-chat",
-    base_provider="replicate",
-    best_provider=Vercel,
-)
+    name = 'replicate:a16z-infra/llama13b-v2-chat',
+    base_provider = 'replicate',
+    best_provider = Vercel)
 
 llama7b_v2_chat = Model(
-    name="replicate:a16z-infra/llama7b-v2-chat",
-    base_provider="replicate",
-    best_provider=Vercel,
-)
+    name = 'replicate:a16z-infra/llama7b-v2-chat',
+    base_provider = 'replicate',
+    best_provider = Vercel)
 
 
 class ModelUtils:
     convert: dict[str, Model] = {
         # GPT-3.5 / GPT-4
-        "gpt-3.5-turbo": gpt_35_turbo,
-        "gpt-4": gpt_4,
+        'gpt-3.5-turbo' : gpt_35_turbo,
+        'gpt-4' : gpt_4,
 
         # Bard
-        "palm2": palm,
-        "palm": palm,
-        "google": palm,
-        "google-bard": palm,
-        "google-palm": palm,
-        "bard": palm,
+        'palm2' : palm,
+        'palm' : palm,
+        'google' : palm,
+        'google-bard' : palm,
+        'google-palm' : palm,
+        'bard' : palm,
 
         # H2o
-        "falcon-40b": falcon_40b,
-        "falcon-7b": falcon_7b,
-        "llama-13b": llama_13b,
+        'falcon-40b' : falcon_40b,
+        'falcon-7b' : falcon_7b,
+        'llama-13b' : llama_13b,
 
         # Vercel
-        "claude-instant-v1": claude_instant_v1,
-        "claude-v1": claude_v1,
-        "claude-v2": claude_v2,
-        "command-light-nightly": command_light_nightly,
-        "command-nightly": command_nightly,
-        "gpt-neox-20b": gpt_neox_20b,
-        "oasst-sft-1-pythia-12b": oasst_sft_1_pythia_12b,
-        "oasst-sft-4-pythia-12b-epoch-3.5": oasst_sft_4_pythia_12b_epoch_35,
-        "santacoder": santacoder,
-        "bloom": bloom,
-        "flan-t5-xxl": flan_t5_xxl,
-        "code-davinci-002": code_davinci_002,
-        "gpt-3.5-turbo-16k": gpt_35_turbo_16k,
-        "gpt-3.5-turbo-16k-0613": gpt_35_turbo_16k_0613,
-        "gpt-4-0613": gpt_4_0613,
-        "text-ada-001": text_ada_001,
-        "text-babbage-001": text_babbage_001,
-        "text-curie-001": text_curie_001,
-        "text-davinci-002": text_davinci_002,
-        "text-davinci-003": text_davinci_003,
-        "llama13b-v2-chat": llama13b_v2_chat,
-        "llama7b-v2-chat": llama7b_v2_chat,
-    }
+        'claude-instant-v1' : claude_instant_v1,
+        'claude-v1' : claude_v1,
+        'claude-v2' : claude_v2,
+        'command-nightly' : command_nightly,
+        'gpt-neox-20b' : gpt_neox_20b,
+        'santacoder' : santacoder,
+        'bloom' : bloom,
+        'flan-t5-xxl' : flan_t5_xxl,
+        'code-davinci-002' : code_davinci_002,
+        'gpt-3.5-turbo-16k' : gpt_35_turbo_16k,
+        'gpt-4-0613' : gpt_4_0613,
+        'text-ada-001' : text_ada_001,
+        'text-babbage-001' : text_babbage_001,
+        'text-curie-001' : text_curie_001,
+        'text-davinci-002' : text_davinci_002,
+        'text-davinci-003' : text_davinci_003,
+        'llama13b-v2-chat' : llama13b_v2_chat,
+        'llama7b-v2-chat' : llama7b_v2_chat,
+
+        'oasst-sft-1-pythia-12b' : oasst_sft_1_pythia_12b,
+        'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35,
+        'command-light-nightly' : command_light_nightly,
+        'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613,
+    }
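The convert table is what turns string model names into Model records; ChatCompletion.create does exactly this lookup for str inputs. A small sketch (assumes g4f is importable):

from g4f import models

# Resolve a string name to its Model record, as create() does internally.
model = models.ModelUtils.convert['gpt-3.5-turbo']
print(model.name, model.base_provider, model.best_provider.__name__)
# expected: gpt-3.5-turbo openai GetGpt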
g4f/typing.py
@@ -1,15 +1,14 @@
 from typing import Any, AsyncGenerator, Generator, NewType, Tuple, TypedDict, Union
 
-SHA256 = NewType("sha_256_hash", str)
+SHA256 = NewType('sha_256_hash', str)
 CreateResult = Generator[str, None, None]
 
 
 __all__ = [
-    "Any",
-    "AsyncGenerator",
-    "Generator",
-    "Tuple",
-    "TypedDict",
-    "SHA256",
-    "CreateResult",
-]
+    'Any',
+    'AsyncGenerator',
+    'Generator',
+    'Tuple',
+    'TypedDict',
+    'SHA256',
+    'CreateResult',
+]
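CreateResult is just a generator-of-strings alias, which is why ChatCompletion.create can join it with ''.join(result). An illustrative sketch of how a provider's return type is meant to be consumed:

from typing import Generator

CreateResult = Generator[str, None, None]  # same alias as in g4f/typing.py

def fake_completion() -> CreateResult:
    # Stands in for a provider's create_completion output.
    yield "Hello "
    yield "world"

print("".join(fake_completion()))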
testing/log_time.py
@@ -0,0 +1,25 @@
from time import time


async def log_time_async(method: callable, **kwargs):
    start = time()
    result = await method(**kwargs)
    secs = f"{round(time() - start, 2)} secs"
    if result:
        return " ".join([result, secs])
    return secs


def log_time_yield(method: callable, **kwargs):
    start = time()
    result = yield from method(**kwargs)
    yield f" {round(time() - start, 2)} secs"


def log_time(method: callable, **kwargs):
    start = time()
    result = method(**kwargs)
    secs = f"{round(time() - start, 2)} secs"
    if result:
        return " ".join([result, secs])
    return secs
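A quick sketch of how these helpers behave (assumes it is run from the repository root so that testing.log_time is importable):

from time import sleep
from testing.log_time import log_time, log_time_yield

def slow_stream():
    yield "done "
    sleep(0.5)

# log_time_yield re-yields everything from slow_stream(),
# then appends a final " 0.5 secs" chunk.
for chunk in log_time_yield(slow_stream):
    print(chunk, end="")
print()

# For a plain callable, the timing is returned
# (joined with the result, if the method returned one).
print(log_time(lambda: sleep(0.25)))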
@@ -0,0 +1,96 @@
import sys
from pathlib import Path
import asyncio

sys.path.append(str(Path(__file__).parent.parent))

import g4f
from testing.log_time import log_time, log_time_async, log_time_yield


_providers = [
    g4f.Provider.H2o,
    g4f.Provider.You,
    g4f.Provider.HuggingChat,
    g4f.Provider.OpenAssistant,
    g4f.Provider.Bing,
    g4f.Provider.Bard
]

_instruct = "Hello, tell about you in one sentence."

_example = """
OpenaiChat: Hello! How can I assist you today? 2.0 secs
Bard: Hello! How can I help you today? 3.44 secs
Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
Async Total: 4.25 secs

OpenaiChat: Hello! How can I assist you today? 1.85 secs
Bard: Hello! How can I help you today? 3.38 secs
Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
Stream Total: 11.37 secs

OpenaiChat: Hello! How can I help you today? 3.28 secs
Bard: Hello there! How can I help you today? 3.58 secs
Bing: Hello! How can I help you today? 3.28 secs
No Stream Total: 10.14 secs
"""

print("Bing: ", end="")
for response in log_time_yield(
    g4f.ChatCompletion.create,
    model=g4f.models.gpt_35_turbo,
    messages=[{"role": "user", "content": _instruct}],
    provider=g4f.Provider.Bing,
    #cookies=g4f.get_cookies(".huggingface.co"),
    #stream=True,
    auth=True
):
    print(response, end="")
print()
print()


async def run_async():
    responses = [
        log_time_async(
            provider.create_async,
            model=None,
            messages=[{"role": "user", "content": _instruct}],
        )
        for provider in _providers
    ]
    responses = await asyncio.gather(*responses)
    for idx, provider in enumerate(_providers):
        print(f"{provider.__name__}:", responses[idx])
print("Async Total:", asyncio.run(log_time_async(run_async)))
print()


def run_stream():
    for provider in _providers:
        print(f"{provider.__name__}: ", end="")
        for response in log_time_yield(
            provider.create_completion,
            model=None,
            messages=[{"role": "user", "content": _instruct}],
        ):
            print(response, end="")
        print()
print("Stream Total:", log_time(run_stream))
print()


def create_no_stream():
    for provider in _providers:
        print(f"{provider.__name__}:", end=" ")
        for response in log_time_yield(
            provider.create_completion,
            model=None,
            messages=[{"role": "user", "content": _instruct}],
            stream=False
        ):
            print(response, end="")
        print()
print("No Stream Total:", log_time(create_no_stream))
print()