gpt4free/g4f/Provider/ChatBase.py

56 lines
2.0 KiB
Python
Raw Normal View History

2023-09-10 17:06:46 +00:00
from __future__ import annotations
from aiohttp import ClientSession
2023-10-09 08:22:17 +00:00
from ..typing import AsyncResult, Messages
2023-09-10 17:06:46 +00:00
from .base_provider import AsyncGeneratorProvider
class ChatBase(AsyncGeneratorProvider):
    """Provider that streams completions from the public chatbase.co chat API."""
    url = "https://www.chatbase.co"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the model's reply as decoded text fragments.

        Args:
            model: Requested model name (not sent to the endpoint; the
                chatbase bot is fixed by ``chat_id``).
            messages: Conversation history in OpenAI message format.
            proxy: Optional proxy URL forwarded to aiohttp's ``post``.
            **kwargs: Ignored; accepted for interface compatibility.

        Yields:
            str: Text chunks as they arrive from the streaming response.

        Raises:
            aiohttp.ClientResponseError: If the server returns an error status.
        """
        import codecs

        # Fixed public bot id used by this free endpoint.
        chat_id = 'z2c2HSfKnCTh5J4650V0I'
        headers = {
            "User-Agent"        : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"            : "*/*",
            "Accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"            : cls.url,
            "Referer"           : cls.url + "/",
            "Sec-Fetch-Dest"    : "empty",
            "Sec-Fetch-Mode"    : "cors",
            "Sec-Fetch-Site"    : "same-origin",
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "messages": messages,
                # The endpoint accepts any non-empty captcha token.
                "captchaCode": "hadsa",
                "chatId": chat_id,
                "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
            }
            # Derive the URL from cls.url so it stays consistent with headers.
            async with session.post(f"{cls.url}/api/fe/chat", json=data, proxy=proxy) as response:
                response.raise_for_status()
                # iter_any() can split a multi-byte UTF-8 sequence across
                # chunks; a plain bytes.decode() would raise UnicodeDecodeError
                # at such a boundary. An incremental decoder buffers the
                # partial sequence and completes it on the next chunk.
                decoder = codecs.getincrementaldecoder("utf-8")(errors="ignore")
                async for chunk in response.content.iter_any():
                    if text := decoder.decode(chunk):
                        yield text

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the supported call parameters.

        NOTE(review): stacking ``@classmethod`` over ``@property`` is
        deprecated in Python 3.11 and removed in 3.13; kept as-is because
        callers access ``ChatBase.params`` without calling it.
        """
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"