from __future__ import annotations

from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator

# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
# Supported model ids mapped to the payload objects the Aivvm API expects:
# each entry is {"id": <model id>, "name": <display name>}.
models = {
    model_id: {"id": model_id, "name": display_name}
    for model_id, display_name in [
        ("gpt-3.5-turbo",          "GPT-3.5"),
        ("gpt-3.5-turbo-0613",     "GPT-3.5-0613"),
        ("gpt-3.5-turbo-16k",      "GPT-3.5-16K"),
        ("gpt-3.5-turbo-16k-0613", "GPT-3.5-16K-0613"),
        ("gpt-4",                  "GPT-4"),
        ("gpt-4-0613",             "GPT-4-0613"),
        ("gpt-4-32k",              "GPT-4-32K"),
        ("gpt-4-32k-0613",         "GPT-4-32K-0613"),
    ]
}
class Aivvm(AsyncGeneratorProvider):
    """Async streaming provider backed by https://chat.aivvm.com.

    Sends chat completions to the site's `/api/chat` endpoint and yields
    the response body incrementally as decoded text chunks.
    """
    url                   = "https://chat.aivvm.com"
    supports_stream       = True
    working               = True
    supports_gpt_35_turbo = True
    supports_gpt_4        = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        timeout: int = 30,
        **kwargs
    ) -> AsyncGenerator:
        """Yield decoded response chunks for a chat completion.

        Args:
            model: one of the keys of ``models``; falls back to
                "gpt-3.5-turbo" when empty/falsy.
            messages: conversation history as role/content dicts.
            stream: accepted for interface compatibility; the endpoint
                always streams, so the flag is not forwarded.
            timeout: request timeout in seconds.
            **kwargs: optional ``system_message`` and ``temperature``.

        Raises:
            ValueError: if ``model`` is non-empty but not in ``models``.
        """
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in models:
            raise ValueError(f"Model is not supported: {model}")

        json_data = {
            "model": models[model],
            "messages": messages,
            "key": "",
            "prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
            "temperature": kwargs.get("temperature", 0.7)
        }
        headers = {
            "Accept": "*/*",
            "Origin": cls.url,
            "Referer": f"{cls.url}/",
        }
        async with StreamSession(impersonate="chrome107", headers=headers, timeout=timeout) as session:
            async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
                response.raise_for_status()
                async for chunk in response.iter_content():
                    yield chunk.decode()

    # NOTE(review): the original used `@classmethod` stacked on `@property`;
    # that chaining was deprecated in Python 3.11 and removed in 3.13.  This
    # minimal descriptor preserves the identical class-attribute access
    # (`Aivvm.params` returns the string) on all supported Python versions.
    class _ClassProperty:
        """Read-only class-level property (private helper)."""
        def __init__(self, fget):
            self._fget = fget

        def __get__(self, instance, owner):
            # Always evaluate against the class, mirroring classmethod+property.
            return self._fget(owner)

    @_ClassProperty
    def params(cls):
        """Human-readable summary of the keyword parameters this provider supports."""
        params = [
            ('model', 'str'),
            ('messages', 'list[dict[str, str]]'),
            ('stream', 'bool'),
            ('temperature', 'float'),
        ]
        param = ', '.join([':'.join(p) for p in params])
        return f'g4f.provider.{cls.__name__} supports: ({param})'