gpt4free/g4f/Provider/FreeGpt.py

from __future__ import annotations

import time, hashlib, random

from ..typing import AsyncResult, Messages
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider

domains = [
    'https://s.aifree.site'
]

class FreeGpt(AsyncGeneratorProvider):
    url = "https://freegpts1.aifree.site/"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
    ) -> AsyncResult:
        async with StreamSession(
            impersonate="chrome107",
            timeout=timeout,
            proxies={"https": proxy}
        ) as session:
            prompt = messages[-1]["content"]
            timestamp = int(time.time())
            data = {
                "messages": messages,
                "time": timestamp,
                "pass": None,
                "sign": generate_signature(timestamp, prompt)
            }
            url = random.choice(domains)
            async with session.post(f"{url}/api/generate", json=data) as response:
                response.raise_for_status()
                async for chunk in response.iter_content():
                    chunk = chunk.decode()
                    # The endpoint returns this Chinese message ("the daily quota
                    # for the current region has been used up") when rate limited.
                    if chunk == "当前地区当日额度已消耗完":
                        raise RuntimeError("Rate limit reached")
                    yield chunk

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


# Request signature: SHA-256 over "<timestamp>:<message>:<secret>" (secret is empty here).
def generate_signature(timestamp: int, message: str, secret: str = ""):
    data = f"{timestamp}:{message}:{secret}"
    return hashlib.sha256(data.encode()).hexdigest()
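
A minimal usage sketch (not part of the file above): it assumes the repository is installed and importable as the g4f package, that an aifree.site endpoint is still reachable, and that curl_cffi, which backs StreamSession's Chrome impersonation, is available. The model name passed in is an assumption; the provider does not actually use it.

# Hypothetical usage sketch for the FreeGpt provider.
import asyncio

from g4f.Provider.FreeGpt import FreeGpt, generate_signature

async def main():
    messages = [{"role": "user", "content": "Say hello in one sentence."}]
    # create_async_generator is an async generator; iterate it to stream the reply.
    async for chunk in FreeGpt.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)

# The "sign" field in the request payload is just a SHA-256 digest over
# "<timestamp>:<last message>:<secret>" with an empty secret, e.g.:
#   generate_signature(1696848137, "Say hello in one sentence.")

if __name__ == "__main__":
    asyncio.run(main())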