mirror of https://github.com/xtekky/gpt4free
commit cf7ba76e7e

@@ -0,0 +1,78 @@
from __future__ import annotations

from aiohttp import ClientSession

from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator

# Model descriptors; the selected entry is sent as the "model" field of the request body.
models = {
    "gpt-4": {
        "id": "gpt-4",
        "name": "GPT-4",
    },
    "gpt-3.5-turbo": {
        "id": "gpt-3.5-turbo",
        "name": "GPT-3.5",
    },
    "gpt-3.5-turbo-16k": {
        "id": "gpt-3.5-turbo-16k",
        "name": "GPT-3.5-16k",
    },
}

class Aivvm(AsyncGeneratorProvider):
    url                   = "https://chat.aivvm.com"
    working               = True
    supports_gpt_35_turbo = True
    supports_gpt_4        = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        model = model if model else "gpt-3.5-turbo"
        if model not in models:
            raise ValueError(f"Model is not supported: {model}")
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "temperature": 1,
                "key": "",
                "messages": messages,
                "model": models[model],
                "prompt": "",
                **kwargs
            }
            async with session.post(cls.url + "/api/chat", json=data, proxy=proxy) as response:
                response.raise_for_status()
                # The endpoint streams plain text, so raw chunks are decoded and yielded as-is.
                async for stream in response.content.iter_any():
                    yield stream.decode()

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

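A minimal usage sketch (not part of the commit): how the async generator above might be consumed. The import path is an assumption based on the package layout implied by the relative imports; the message format follows the messages parameter used above.

# Hypothetical usage sketch, assuming the provider is importable from the g4f package.
import asyncio

from g4f.Provider import Aivvm  # assumed import path

async def demo():
    messages = [{"role": "user", "content": "Hello"}]
    async for chunk in Aivvm.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)

asyncio.run(demo())
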
@@ -0,0 +1,66 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator

class Vitalentum(AsyncGeneratorProvider):
    url                   = "https://app.vitalentum.io"
    working               = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "text/event-stream",
            "Accept-language" : "de,en-US;q=0.7,en;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        # Convert the OpenAI-style messages into the "history" format this API expects.
        conversation = json.dumps({"history": [{
            "speaker": "human" if message["role"] == "user" else "bot",
            "text": message["content"],
        } for message in messages]})
        data = {
            "conversation": conversation,
            "temperature": 0.7,
            **kwargs
        }
        async with ClientSession(
            headers=headers
        ) as session:
            async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    line = line.decode()
                    # Server-sent events: strip the "data: " prefix and the trailing newline.
                    if line.startswith("data: ") and not line.startswith("data: [DONE]"):
                        line = json.loads(line[6:-1])
                        content = line["choices"][0]["delta"].get("content")
                        if content:
                            yield content

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

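For illustration (not part of the commit), a sketch of how the conversation field above serializes a sample messages list; the sample values are made up and only mirror the json.dumps call in create_async_generator.

# Illustrative only: the "history" payload built from OpenAI-style messages.
import json

sample_messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
]
conversation = json.dumps({"history": [{
    "speaker": "human" if m["role"] == "user" else "bot",
    "text": m["content"],
} for m in sample_messages]})
print(conversation)
# {"history": [{"speaker": "human", "text": "Hi"}, {"speaker": "bot", "text": "Hello!"}]}
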
@@ -0,0 +1,76 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator

class Ylokh(AsyncGeneratorProvider):
    url                   = "https://chat.ylokh.xyz"
    working               = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        model = model if model else "gpt-3.5-turbo"
        headers = {
            "User-Agent"      : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
            "Accept"          : "*/*",
            "Accept-language" : "de,en-US;q=0.7,en;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        data = {
            "messages": messages,
            "model": model,
            "temperature": 1,
            "presence_penalty": 0,
            "top_p": 1,
            "frequency_penalty": 0,
            "allow_fallback": True,
            "stream": stream,
            **kwargs
        }
        async with ClientSession(
            headers=headers
        ) as session:
            async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data, proxy=proxy) as response:
                response.raise_for_status()
                if stream:
                    # Streamed responses arrive as server-sent events with OpenAI-style deltas.
                    async for line in response.content:
                        line = line.decode()
                        if line.startswith("data: ") and not line.startswith("data: [DONE]"):
                            line = json.loads(line[6:-1])
                            content = line["choices"][0]["delta"].get("content")
                            if content:
                                yield content
                else:
                    # Non-streamed responses return a complete OpenAI-style chat completion.
                    chat = await response.json()
                    yield chat["choices"][0]["message"].get("content")

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
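A small sketch (not part of the commit) of the server-sent-events parsing used by Vitalentum and Ylokh above, with a hard-coded sample line; the payload shape is inferred from the code, not from upstream API documentation.

# Illustrative only: turning one "data: ..." line into a text delta,
# matching the line[6:-1] slice used above ("data: " prefix + trailing newline).
import json

raw = b'data: {"choices": [{"delta": {"content": "Hel"}}]}\n'
line = raw.decode()
if line.startswith("data: ") and not line.startswith("data: [DONE]"):
    payload = json.loads(line[6:-1])
    content = payload["choices"][0]["delta"].get("content")
    if content:
        print(content)  # -> Hel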