mirror of https://github.com/xtekky/gpt4free
new providers

Somewhat fix Aivvm provider, which looks to have a working gpt-4, kinda unstable
pull/933/head
parent 17b3eb1bba
commit 42a02c3d2d
@@ -1,78 +1,71 @@
 from __future__ import annotations

-from aiohttp import ClientSession
+import requests

-from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncGenerator
+from .base_provider import BaseProvider
+from ..typing import Any, CreateResult

 models = {
-    "gpt-4": {
-        "id": "gpt-4",
-        "name": "GPT-4",
-    },
-    "gpt-3.5-turbo": {
-        "id": "gpt-3.5-turbo",
-        "name": "GPT-3.5",
-    },
-    "gpt-3.5-turbo-16k": {
-        "id": "gpt-3.5-turbo-16k",
-        "name": "GPT-3.5-16k",
-    },
+    'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
+    'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
+    'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
+    'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
+    'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
+    'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
+    'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
+    'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
 }

-class Aivvm(AsyncGeneratorProvider):
-    url = "https://chat.aivvm.com"
+class Aivvm(BaseProvider):
+    url = 'https://chat.aivvm.com'
+    supports_stream = True
+    working = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True

     @classmethod
-    async def create_async_generator(
-        cls,
+    def create_completion(cls,
         model: str,
         messages: list[dict[str, str]],
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncGenerator:
-        model = model if model else "gpt-3.5-turbo"
-        if model not in models:
-            raise ValueError(f"Model are not supported: {model}")
+        stream: bool, **kwargs: Any) -> CreateResult:

         headers = {
-            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
-            "Accept" : "*/*",
-            "Accept-Language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "Origin" : cls.url,
-            "Referer" : cls.url + "/",
-            "Sec-Fetch-Dest" : "empty",
-            "Sec-Fetch-Mode" : "cors",
-            "Sec-Fetch-Site" : "same-origin",
+            "authority" : "chat.aivvm.com",
+            "accept" : "*/*",
+            "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "content-type" : "application/json",
+            "origin" : "https://chat.aivvm.com",
+            "referer" : "https://chat.aivvm.com/",
+            "sec-ch-ua" : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
+            "sec-ch-ua-mobile" : "?0",
+            "sec-ch-ua-platform" : '"macOS"',
+            "sec-fetch-dest" : "empty",
+            "sec-fetch-mode" : "cors",
+            "sec-fetch-site" : "same-origin",
+            "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
         }
-        async with ClientSession(
-            headers=headers
-        ) as session:
-            data = {
-                "temperature": 1,
-                "key": "",
-                "messages": messages,
-                "model": models[model],
-                "prompt": "",
-                **kwargs
-            }
-            async with session.post(cls.url + "/api/chat", json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for stream in response.content.iter_any():
-                    yield stream.decode()
+
+        json_data = {
+            "model" : models[model],
+            "messages" : messages,
+            "key" : "",
+            "prompt" : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+            "temperature" : kwargs.get("temperature", 0.7)
+        }
+
+        response = requests.post(
+            "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
+
+        for line in response.iter_content(chunk_size=1048):
+            yield line

     @classmethod
     @property
     def params(cls):
         params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
+            ('model', 'str'),
+            ('messages', 'list[dict[str, str]]'),
+            ('stream', 'bool'),
+            ('temperature', 'float'),
         ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+        param = ', '.join([': '.join(p) for p in params])
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
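For reference, a minimal sketch of driving the rewritten provider directly. The import path is an assumption about the surrounding g4f package (the commit only shows this module); the call itself follows the new create_completion signature and the params list in the diff.

    # Minimal usage sketch, not part of the commit. Assumes the class above is
    # exported by the surrounding package as g4f.Provider.Aivvm.
    from g4f.Provider import Aivvm

    messages = [{"role": "user", "content": "Say hello in one sentence."}]

    # create_completion is a generator; the requests-based rewrite yields raw
    # byte chunks from iter_content, so the caller decodes them.
    for chunk in Aivvm.create_completion(
        model="gpt-4",        # must be a key of the models dict above
        messages=messages,
        stream=True,
        temperature=0.7,      # read via kwargs.get("temperature", 0.7)
    ):
        print(chunk.decode(errors="ignore"), end="", flush=True)

Note that the rewrite drops the old default-model fallback and ValueError guard, so a model name outside the models dict now surfaces as a KeyError at models[model].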