Merge pull request #1465 from hlohaus/upp

Improve FreeChatgpt Provider
H Lohaus 2024-01-13 16:20:09 +01:00 committed by GitHub
commit 1459475384
2 changed files with 49 additions and 39 deletions

g4f/Provider/FreeChatgpt.py

@@ -7,12 +7,8 @@ from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 models = {
+    "claude-v1":"claude-2.1",
+    "claude-v2": "claude-2.0",
+    "gpt_35_turbo":"gpt-3.5-turbo-1106",
+    "gpt-4":"gpt-4",
+    "gemini-pro": "google-gemini-pro"
 }
@@ -31,7 +27,10 @@ class FreeChatgpt(AsyncGeneratorProvider):
         proxy: str = None,
         **kwargs
     ) -> AsyncResult:
-        model = models[model] if model in models else "gpt-3.5-turbo-1106"
+        if model in models:
+            model = models[model]
+        elif not model:
+            model = "gpt-3.5-turbo"
         headers = {
             "Accept": "application/json, text/event-stream",
             "Content-Type":"application/json",
@@ -46,16 +45,29 @@ class FreeChatgpt(AsyncGeneratorProvider):
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
         }
         async with ClientSession(headers=headers) as session:
-            data = {"messages":messages,"stream":True,"model":model,"temperature":0.5,"presence_penalty":0,"frequency_penalty":0,"top_p":1}
-            async with session.post(f'{cls.url}/api/openai/v1/chat/completions',json=data) as result:
-                async for chunk in result.content:
-                    line = chunk.decode()
-                    if line.startswith("data: [DONE]"):
+            data = {
+                "messages":messages,
+                "stream":True,
+                "model":model,
+                "temperature":0.5,
+                "presence_penalty":0,
+                "frequency_penalty":0,
+                "top_p":1,
+                **kwargs
+            }
+            async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                started = False
+                async for line in response.content:
+                    if line.startswith(b"data: [DONE]"):
                         break
-                    elif line.startswith("data: "):
+                    elif line.startswith(b"data: "):
                         line = json.loads(line[6:])
                         if(line["choices"]==[]):
                             continue
-                        if(line["choices"][0]["delta"].get("content") and line["choices"][0]["delta"]["content"]!=None):
-                            yield line["choices"][0]["delta"]["content"]
+                        chunk = line["choices"][0]["delta"].get("content")
+                        if chunk:
+                            started = True
+                            yield chunk
+                if not started:
+                    raise RuntimeError("Empty response")

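Net effect of this file: the request body is built as a readable multi-line dict that merges **kwargs, the proxy argument is actually passed to session.post(), the SSE stream is matched against bytes prefixes (aiohttp yields bytes, so comparing directly avoids the old per-chunk decode step), and an all-empty stream now raises instead of silently ending. A minimal driver sketch; the create_async_generator classmethod and the g4f.Provider import path are assumed from the AsyncGeneratorProvider convention rather than shown in this diff:

import asyncio
from g4f.Provider import FreeChatgpt

async def main():
    # Assumed entry point: AsyncGeneratorProvider subclasses expose
    # create_async_generator(model, messages, proxy=None, **kwargs).
    async for chunk in FreeChatgpt.create_async_generator(
        model="gpt_35_turbo",
        messages=[{"role": "user", "content": "Say hello"}],
        proxy=None,   # forwarded to session.post() by the new code
        top_p=0.9,    # extra kwargs land in the request body via **kwargs
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())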
g4f/models.py

@@ -4,6 +4,7 @@ from .Provider import RetryProvider, ProviderType
 from .Provider import (
     Chatgpt4Online,
     ChatgptDemoAi,
+    GeminiProChat,
     ChatgptNext,
     HuggingChat,
     ChatgptDemo,
@@ -61,7 +62,6 @@ gpt_35_long = Model(
         ChatgptNext,
         ChatgptDemo,
         Gpt6,
-        FreeChatgpt,
     ])
 )
@@ -70,7 +70,6 @@ gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider=RetryProvider([
-        FreeChatgpt,
         GptGo, You,
         GptForLove, ChatBase,
         Chatgpt4Online,
@@ -81,7 +80,7 @@ gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        Bing, FreeChatgpt, Phind, Liaobots,
+        Bing, Phind, Liaobots,
     ])
 )
@@ -159,7 +158,7 @@ claude_instant_v1 = Model(
 claude_v1 = Model(
     name = 'claude-v1',
     base_provider = 'anthropic',
-    best_provider = RetryProvider([FreeChatgpt,Vercel]))
+    best_provider = Vercel)

 claude_v2 = Model(
     name = 'claude-v2',
@@ -246,11 +245,10 @@ gpt_4_32k_0613 = Model(
     best_provider = gpt_4.best_provider
 )

 #Gemini
 gemini_pro = Model(
     name = 'gemini-pro',
     base_provider = 'google',
-    best_provider = FreeChatgpt
+    best_provider = RetryProvider([FreeChatgpt, GeminiProChat])
 )

 text_ada_001 = Model(
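
After this file's edits, FreeChatgpt no longer backs any of the GPT or Claude models; gemini-pro is its only remaining route, now wrapped in a RetryProvider so GeminiProChat can take over when it fails. The new "Empty response" RuntimeError in the provider matters here: without it, an empty stream would look like success and the retry would never trigger. A sketch of the resulting call, assuming g4f's top-level ChatCompletion API:

import g4f

# Assumption: g4f.ChatCompletion.create() resolves "gemini-pro" through
# the Model registry above; RetryProvider then works through
# [FreeChatgpt, GeminiProChat] until one of them yields a response.
response = g4f.ChatCompletion.create(
    model="gemini-pro",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)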