Merge pull request #1012 from hlohaus/hey

Add Cromicle to provider list
This commit is contained in:
Tekky 2023-10-08 13:21:21 +01:00 committed by GitHub
commit d804b20694
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 105 additions and 54 deletions

View File

@@ -1,25 +1,26 @@
from __future__ import annotations
import re
import json
from aiohttp import ClientSession
from .base_provider import AsyncProvider
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
class ChatgptX(AsyncProvider):
class ChatgptX(AsyncGeneratorProvider):
url = "https://chatgptx.de"
supports_gpt_35_turbo = True
working = True
@classmethod
async def create_async(
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
messages: Messages,
**kwargs
) -> str:
) -> AsyncResult:
headers = {
'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
@@ -63,8 +64,34 @@ class ChatgptX(AsyncProvider):
}
async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
response.raise_for_status()
data = await response.json()
if "message" in data:
return data["message"]
elif "messages" in data:
raise RuntimeError(f'Response: {data["messages"]}')
chat = await response.json()
if "response" not in chat or not chat["response"]:
raise RuntimeError(f'Response: {chat}')
headers = {
'authority': 'chatgptx.de',
'accept': 'text/event-stream',
'referer': f'{cls.url}/',
'x-csrf-token': csrf_token,
'x-requested-with': 'XMLHttpRequest'
}
data = {
"user_id": user_id,
"chats_id": chat_id,
"prompt": format_prompt(messages),
"current_model": "gpt3",
"conversions_id": chat["conversions_id"],
"ass_conversions_id": chat["ass_conversions_id"],
}
async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers) as response:
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
row = line[6:-1]
if row == b"[DONE]":
break
try:
content = json.loads(row)["choices"][0]["delta"].get("content")
except:
raise RuntimeError(f"Broken line: {line.decode()}")
if content:
yield content

View File

@@ -2,32 +2,32 @@ from __future__ import annotations
from aiohttp import ClientSession
from hashlib import sha256
from typing import AsyncGenerator, Dict, List
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
class Cromicle(AsyncGeneratorProvider):
url = 'https://cromicle.top'
working = True
supports_gpt_35_turbo = True
url: str = 'https://cromicle.top'
working: bool = True
supports_gpt_35_turbo: bool = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
messages: List[Dict[str, str]],
proxy: str = None,
**kwargs
) -> AsyncGenerator:
message = messages[-1]["content"]
) -> AsyncGenerator[str, None]:
async with ClientSession(
headers=_create_header()
) as session:
async with session.post(
cls.url + '/chat',
f'{cls.url}/chat',
proxy=proxy,
json=_create_payload(message, **kwargs)
json=_create_payload(format_prompt(messages))
) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
@@ -35,16 +35,16 @@ class Cromicle(AsyncGeneratorProvider):
yield stream.decode()
def _create_header():
def _create_header() -> Dict[str, str]:
return {
'accept': '*/*',
'content-type': 'application/json',
}
def _create_payload(message: str):
def _create_payload(message: str) -> Dict[str, str]:
return {
'message' : message,
'token' : 'abc',
'hash' : sha256('abc'.encode() + message.encode()).hexdigest()
}
'message': message,
'token': 'abc',
'hash': sha256('abc'.encode() + message.encode()).hexdigest()
}

View File

@@ -50,7 +50,10 @@ class GptForLove(AsyncGeneratorProvider):
async with session.post("https://api.gptplus.one/chat-process", json=data) as response:
response.raise_for_status()
async for line in response.content:
line = json.loads(line)
try:
line = json.loads(line)
except:
raise RuntimeError(f"Broken line: {line}")
if "detail" in line:
content = line["detail"]["choices"][0]["delta"].get("content")
if content:

View File

@@ -4,7 +4,7 @@ import json
from aiohttp import ClientSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
from ..typing import AsyncResult, Messages
class Vitalentum(AsyncGeneratorProvider):
url = "https://app.vitalentum.io"
@@ -16,10 +16,10 @@ class Vitalentum(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncGenerator:
) -> AsyncResult:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "text/event-stream",
@@ -62,6 +62,7 @@ class Vitalentum(AsyncGeneratorProvider):
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])

View File

@@ -4,7 +4,7 @@ import json
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator
from ..typing import AsyncResult, Messages
class Ylokh(AsyncGeneratorProvider):
url = "https://chat.ylokh.xyz"
@@ -16,16 +16,16 @@ class Ylokh(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
messages: Messages,
stream: bool = True,
proxy: str = None,
timeout: int = 30,
timeout: int = 120,
**kwargs
) -> AsyncGenerator:
) -> AsyncResult:
model = model if model else "gpt-3.5-turbo"
headers = {
"Origin" : cls.url,
"Referer" : cls.url + "/",
"Origin" : cls.url,
"Referer": cls.url + "/",
}
data = {
"messages": messages,
@@ -69,6 +69,7 @@ class Ylokh(AsyncGeneratorProvider):
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
("timeout", "int"),
("temperature", "float"),
("top_p", "float"),
]

View File

@@ -3,7 +3,7 @@ from __future__ import annotations
import json
from ..requests import StreamSession
from ..typing import AsyncGenerator
from ..typing import AsyncGenerator, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -17,19 +17,20 @@ class You(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
messages: list[dict[str, str]],
messages: Messages,
proxy: str = None,
timeout: int = 30,
timeout: int = 120,
**kwargs,
) -> AsyncGenerator:
async with StreamSession(proxies={"https": proxy}, impersonate="chrome107", timeout=timeout) as session:
headers = {
"Accept": "text/event-stream",
"Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
"Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
}
data = {"q": format_prompt(messages), "domain": "youchat", "chat": ""}
async with session.get(
"https://you.com/api/streamingSearch",
params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
f"{cls.url}/api/streamingSearch",
params=data,
headers=headers
) as response:
response.raise_for_status()

View File

@@ -1,8 +1,9 @@
from __future__ import annotations
import random
from aiohttp import ClientSession
from ..typing import AsyncGenerator
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -14,19 +15,22 @@ class Yqcloud(AsyncGeneratorProvider):
@staticmethod
async def create_async_generator(
model: str,
messages: list[dict[str, str]],
messages: Messages,
proxy: str = None,
**kwargs,
) -> AsyncGenerator:
) -> AsyncResult:
async with ClientSession(
headers=_create_header()
) as session:
payload = _create_payload(messages)
payload = _create_payload(messages, **kwargs)
async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
response.raise_for_status()
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
async for chunk in response.content.iter_any():
if chunk:
chunk = chunk.decode()
if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
raise RuntimeError("IP address is blocked by abuse detection.")
yield chunk
def _create_header():
@@ -37,12 +41,19 @@ def _create_header():
}
def _create_payload(messages: list[dict[str, str]]):
def _create_payload(
messages: Messages,
system_message: str = "",
user_id: int = None,
**kwargs
):
if not user_id:
user_id = random.randint(1690000544336, 2093025544336)
return {
"prompt": format_prompt(messages),
"network": True,
"system": "",
"system": system_message,
"withoutContext": False,
"stream": True,
"userId": "#/chat/1693025544336"
"userId": f"#/chat/{user_id}"
}

View File

@@ -15,6 +15,7 @@ from .ChatgptAi import ChatgptAi
from .ChatgptDemo import ChatgptDemo
from .ChatgptDuo import ChatgptDuo
from .ChatgptX import ChatgptX
from .Cromicle import Cromicle
from .DeepAi import DeepAi
from .FreeGpt import FreeGpt
from .GPTalk import GPTalk
@@ -62,6 +63,7 @@ __all__ = [
'ChatgptDuo',
'ChatgptLogin',
'ChatgptX',
'Cromicle',
'CodeLinkAva',
'DeepAi',
'DfeHub',

View File

@@ -19,6 +19,8 @@ from .Provider import (
Vercel,
DeepAi,
Aichat,
GPTalk,
GptGod,
AiAsk,
GptGo,
Ylokh,
@@ -53,7 +55,8 @@ gpt_35_long = Model(
base_provider = 'openai',
best_provider = RetryProvider([
AiAsk, Aibn, Aichat, ChatForAi, ChatgptAi, ChatgptDemo, ChatgptDuo,
FreeGpt, GptGo, Liaobots, Myshell, Vitalentum, Ylokh, You, Yqcloud
FreeGpt, GptGo, Liaobots, Myshell, Vitalentum, Ylokh, You, Yqcloud,
GPTalk, GptGod
])
)

View File

@@ -1,5 +1,5 @@
import sys
from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union
from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict
if sys.version_info >= (3, 8):
from typing import TypedDict
@@ -8,6 +8,8 @@ else:
SHA256 = NewType('sha_256_hash', str)
CreateResult = Generator[str, None, None]
AsyncResult = AsyncGenerator[str]
Messages = List[Dict[str, str]]
__all__ = [
'Any',