Update some providers

Improve reading of the access_token in OpenaiChat
Add IterProvider
Add system message support in FlowGpt
Filter none values in new Client
pull/1637/head
Heiner Lohaus 3 months ago
parent 862e5ef16d
commit 84812b9632

@ -9,7 +9,7 @@ from .helper import get_random_string
class AiChatOnline(AsyncGeneratorProvider):
url = "https://aichatonline.org"
working = True
working = False
supports_gpt_35_turbo = True
supports_message_history = False

@ -6,9 +6,8 @@ from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
class Aura(AsyncGeneratorProvider):
url = "https://openchat.team"
working = True
supports_gpt_35_turbo = True
url = "https://openchat.team"
working = True
@classmethod
async def create_async_generator(

@ -9,7 +9,7 @@ from .base_provider import AsyncGeneratorProvider
class ChatgptAi(AsyncGeneratorProvider):
url = "https://chatgpt.ai"
working = True
working = False
supports_message_history = True
supports_gpt_35_turbo = True
_system = None

@ -10,7 +10,7 @@ from .helper import format_prompt
class ChatgptDemo(AsyncGeneratorProvider):
url = "https://chat.chatgptdemo.net"
supports_gpt_35_turbo = True
working = True
working = False
@classmethod
async def create_async_generator(

@ -4,8 +4,7 @@ import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
from ..providers.base_provider import AsyncGeneratorProvider
class ChatgptNext(AsyncGeneratorProvider):
@ -24,7 +23,7 @@ class ChatgptNext(AsyncGeneratorProvider):
if not model:
model = "gpt-3.5-turbo"
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "text/event-stream",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",

@ -8,7 +8,7 @@ from .base_provider import AsyncGeneratorProvider
class Chatxyz(AsyncGeneratorProvider):
url = "https://chat.3211000.xyz"
working = True
working = False
supports_gpt_35_turbo = True
supports_message_history = True

@ -51,12 +51,16 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
"TE": "trailers"
}
async with ClientSession(headers=headers) as session:
history = [message for message in messages[:-1] if message["role"] != "system"]
system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
if not system_message:
system_message = "You are helpful assistant. Follow the user's instructions carefully."
data = {
"model": model,
"nsfw": False,
"question": messages[-1]["content"],
"history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *messages[:-1]],
"system": kwargs.get("system_message", "You are helpful assistant. Follow the user's instructions carefully."),
"history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *history],
"system": system_message,
"temperature": kwargs.get("temperature", 0.7),
"promptId": f"model-{model}",
"documentIds": [],

@ -3,9 +3,9 @@ from __future__ import annotations
import json
import base64
import uuid
from aiohttp import ClientSession, FormData
from aiohttp import ClientSession, FormData, BaseConnector
from ..typing import AsyncGenerator, Messages, ImageType, Cookies
from ..typing import AsyncResult, Messages, ImageType, Cookies
from .base_provider import AsyncGeneratorProvider
from ..providers.helper import get_connector, format_prompt
from ..image import to_bytes
@ -26,12 +26,13 @@ class You(AsyncGeneratorProvider):
messages: Messages,
image: ImageType = None,
image_name: str = None,
connector: BaseConnector = None,
proxy: str = None,
chat_mode: str = "default",
**kwargs,
) -> AsyncGenerator:
) -> AsyncResult:
async with ClientSession(
connector=get_connector(kwargs.get("connector"), proxy),
connector=get_connector(connector, proxy),
headers=DEFAULT_HEADERS
) as client:
if image:
@ -72,13 +73,13 @@ class You(AsyncGeneratorProvider):
response.raise_for_status()
async for line in response.content:
if line.startswith(b'event: '):
event = line[7:-1]
event = line[7:-1].decode()
elif line.startswith(b'data: '):
if event == b"youChatUpdate" or event == b"youChatToken":
if event in ["youChatUpdate", "youChatToken"]:
data = json.loads(line[6:-1])
if event == b"youChatToken" and "youChatToken" in data:
yield data["youChatToken"]
elif event == b"youChatUpdate" and "t" in data:
if event == "youChatToken" and event in data:
yield data[event]
elif event == "youChatUpdate" and "t" in data:
yield data["t"]
@classmethod

@ -1,7 +1,7 @@
from __future__ import annotations
from ..providers.types import BaseProvider, ProviderType
from ..providers.retry_provider import RetryProvider
from ..providers.retry_provider import RetryProvider, IterProvider
from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
from ..providers.create_images import CreateImagesProvider

@ -334,39 +334,49 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
# Read api_key from args
api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
# If no cached args
if cls._args is None:
if api_key is None:
# Read api_key from cookies
cookies = get_cookies("chat.openai.com", False) if cookies is None else cookies
api_key = cookies["access_token"] if "access_token" in cookies else api_key
cls._args = cls._create_request_args(cookies)
else:
# Read api_key from cache
api_key = cls._args["headers"]["Authorization"] if "Authorization" in cls._args["headers"] else None
async with StreamSession(
proxies={"https": proxy},
impersonate="chrome",
timeout=timeout
) as session:
# Read api_key from session cookies
if api_key is None and cookies:
# Read api_key from session
api_key = await cls.fetch_access_token(session, cls._args["headers"])
if api_key is not None:
cls._args["headers"]["Authorization"] = f"Bearer {api_key}"
# Load default model
if cls.default_model is None:
try:
cls.default_model = await cls.get_default_model(session, cls._args["headers"])
if cookies and not model and api_key is not None:
cls._args["headers"]["Authorization"] = api_key
cls.default_model = cls.get_model(await cls.get_default_model(session, cls._args["headers"]))
elif api_key:
cls.default_model = cls.get_model(model or "gpt-3.5-turbo")
except Exception as e:
if debug.logging:
print("OpenaiChat: Load default_model failed")
print(f"{e.__class__.__name__}: {e}")
if cls.default_model is None:
# Browse api_key and update default model
if api_key is None or cls.default_model is None:
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
yield f"Please login: [ChatGPT]({login_url})\n\n"
try:
cls._args = cls.browse_access_token(proxy)
except MissingRequirementsError:
raise MissingAuthError(f'Missing or invalid "access_token". Add a new "api_key" please')
cls.default_model = await cls.get_default_model(session, cls._args["headers"])
raise MissingAuthError(f'Missing "access_token". Add a "api_key" please')
cls.default_model = cls.get_model(await cls.get_default_model(session, cls._args["headers"]))
else:
cls._args["headers"]["Authorization"] = api_key
try:
image_response = await cls.upload_image(
@ -409,7 +419,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
) as response:
cls._update_request_args(session)
if not response.ok:
raise RuntimeError(f"Response {response.status}: {await response.text()}")
message = f"{await response.text()} headers:\n{json.dumps(cls._args['headers'], indent=4)}"
raise RuntimeError(f"Response {response.status}: {message}")
last_message: int = 0
async for line in response.iter_lines():
if not line.startswith(b"data: "):

@ -10,7 +10,7 @@ from .cookies import get_cookies, set_cookies
from . import debug, version
from .providers.types import BaseRetryProvider, ProviderType
from .providers.base_provider import ProviderModelMixin
from .providers.retry_provider import RetryProvider
from .providers.retry_provider import IterProvider
def get_model_and_provider(model : Union[Model, str],
provider : Union[ProviderType, str, None],
@ -48,7 +48,7 @@ def get_model_and_provider(model : Union[Model, str],
provider_list = [ProviderUtils.convert[p] for p in provider.split() if p in ProviderUtils.convert]
if not provider_list:
raise ProviderNotFoundError(f'Providers not found: {provider}')
provider = RetryProvider(provider_list, False)
provider = IterProvider(provider_list)
elif provider in ProviderUtils.convert:
provider = ProviderUtils.convert[provider]
else:

@ -7,7 +7,7 @@ import random
import string
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .typing import Union, Generator, Messages, ImageType
from .typing import Union, Iterator, Messages, ImageType
from .providers.types import BaseProvider, ProviderType
from .image import ImageResponse as ImageProviderResponse
from .Provider.BingCreateImages import BingCreateImages
@ -17,7 +17,7 @@ from . import get_model_and_provider, get_last_provider
ImageProvider = Union[BaseProvider, object]
Proxies = Union[dict, str]
IterResponse = Generator[Union[ChatCompletion, ChatCompletionChunk], None, None]
IterResponse = Iterator[Union[ChatCompletion, ChatCompletionChunk]]
def read_json(text: str) -> dict:
"""
@ -110,6 +110,12 @@ class Client():
elif "https" in self.proxies:
return self.proxies["https"]
def filter_none(**kwargs):
    """Return the keyword arguments as a dict with every None-valued entry dropped."""
    return {name: value for name, value in kwargs.items() if value is not None}
class Completions():
def __init__(self, client: Client, provider: ProviderType = None):
self.client: Client = client
@ -126,7 +132,7 @@ class Completions():
stop: Union[list[str], str] = None,
api_key: str = None,
**kwargs
) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
model, provider = get_model_and_provider(
model,
self.provider if provider is None else provider,
@ -135,11 +141,13 @@ class Completions():
)
stop = [stop] if isinstance(stop, str) else stop
response = provider.create_completion(
model, messages, stream,
proxy=self.client.get_proxy(),
max_tokens=max_tokens,
stop=stop,
api_key=self.client.api_key if api_key is None else api_key,
model, messages, stream,
**filter_none(
proxy=self.client.get_proxy(),
max_tokens=max_tokens,
stop=stop,
api_key=self.client.api_key if api_key is None else api_key
),
**kwargs
)
response = iter_response(response, stream, response_format, max_tokens, stop)

@ -3,22 +3,37 @@ from __future__ import annotations
import asyncio
import random
from ..typing import CreateResult, Messages
from .types import BaseRetryProvider
from ..typing import Type, List, CreateResult, Messages, Iterator
from .types import BaseProvider, BaseRetryProvider
from .. import debug
from ..errors import RetryProviderError, RetryNoProviderError
class RetryProvider(BaseRetryProvider):
def __init__(
self,
providers: List[Type[BaseProvider]],
shuffle: bool = True
) -> None:
"""
Initialize the BaseRetryProvider.
Args:
providers (List[Type[BaseProvider]]): List of providers to use.
shuffle (bool): Whether to shuffle the providers list.
"""
self.providers = providers
self.shuffle = shuffle
self.working = True
self.last_provider: Type[BaseProvider] = None
"""
A provider class to handle retries for creating completions with different providers.
Attributes:
providers (list): A list of provider instances.
shuffle (bool): A flag indicating whether to shuffle providers before use.
exceptions (dict): A dictionary to store exceptions encountered during retries.
last_provider (BaseProvider): The last provider that was used.
"""
def create_completion(
self,
model: str,
@ -44,7 +59,7 @@ class RetryProvider(BaseRetryProvider):
if self.shuffle:
random.shuffle(providers)
self.exceptions = {}
exceptions = {}
started: bool = False
for provider in providers:
self.last_provider = provider
@ -57,13 +72,13 @@ class RetryProvider(BaseRetryProvider):
if started:
return
except Exception as e:
self.exceptions[provider.__name__] = e
exceptions[provider.__name__] = e
if debug.logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
if started:
raise e
self.raise_exceptions()
raise_exceptions(exceptions)
async def create_async(
self,
@ -88,7 +103,7 @@ class RetryProvider(BaseRetryProvider):
if self.shuffle:
random.shuffle(providers)
self.exceptions = {}
exceptions = {}
for provider in providers:
self.last_provider = provider
try:
@ -97,23 +112,94 @@ class RetryProvider(BaseRetryProvider):
timeout=kwargs.get("timeout", 60)
)
except Exception as e:
self.exceptions[provider.__name__] = e
exceptions[provider.__name__] = e
if debug.logging:
print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
self.raise_exceptions()
raise_exceptions(exceptions)
def raise_exceptions(self) -> None:
"""
Raise a combined exception if any occurred during retries.
class IterProvider(BaseRetryProvider):
__name__ = "IterProvider"
Raises:
RetryProviderError: If any provider encountered an exception.
RetryNoProviderError: If no provider is found.
"""
if self.exceptions:
raise RetryProviderError("RetryProvider failed:\n" + "\n".join([
f"{p}: {exception.__class__.__name__}: {exception}" for p, exception in self.exceptions.items()
]))
def __init__(
    self,
    providers: List[BaseProvider],
) -> None:
    """
    Initialize the IterProvider with an ordered list of providers.

    Args:
        providers (List[BaseProvider]): Providers to try, in priority order.

    NOTE(review): the list is reversed *in place*, so the caller's list object
    is mutated — confirm no caller reuses it afterwards. The reversal lets
    iter_providers() pop() from the end (O(1)) while still consuming
    providers in the original priority order.
    """
    providers.reverse()
    self.providers: List[BaseProvider] = providers
    # Mirrors the BaseRetryProvider contract: the aggregate provider is
    # always considered working; last_provider tracks whichever member
    # provider was attempted most recently.
    self.working: bool = True
    self.last_provider: BaseProvider = None
def create_completion(
    self,
    model: str,
    messages: Messages,
    stream: bool = False,
    **kwargs
) -> CreateResult:
    """
    Yield completion tokens from the first provider that succeeds.

    Providers are consumed one at a time via iter_providers(); each
    failure is recorded and the next provider is tried.

    Args:
        model (str): Model identifier passed through to each provider.
        messages (Messages): Conversation messages passed through.
        stream (bool): If True, skip providers that do not support streaming.
        **kwargs: Extra arguments forwarded to the provider.

    Raises:
        Exception: Re-raises the provider's error if output had already
            been yielded (we cannot silently switch providers mid-stream).
        Via raise_exceptions(): a combined error when every provider failed.
    """
    exceptions: dict = {}
    # Tracks whether any token has been yielded to the caller yet.
    started: bool = False
    for provider in self.iter_providers():
        # A streaming request cannot fall back to a non-streaming provider.
        if stream and not provider.supports_stream:
            continue
        try:
            for token in provider.create_completion(model, messages, stream, **kwargs):
                yield token
                started = True
            if started:
                return
        except Exception as e:
            exceptions[provider.__name__] = e
            if debug.logging:
                print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
            # Partial output was already delivered: propagate instead of
            # retrying, to avoid emitting a mixed/duplicated response.
            if started:
                raise e
    raise_exceptions(exceptions)
async def create_async(
    self,
    model: str,
    messages: Messages,
    **kwargs
) -> str:
    """
    Return the full completion from the first provider that succeeds.

    Each provider's create_async() is awaited with a timeout
    (kwargs["timeout"], defaulting to 60 seconds); on failure the error
    is recorded and the next provider is tried.

    Args:
        model (str): Model identifier passed through to each provider.
        messages (Messages): Conversation messages passed through.
        **kwargs: Extra arguments forwarded to the provider; "timeout"
            is also read here for asyncio.wait_for.

    Raises:
        Via raise_exceptions(): a combined error when every provider failed.
    """
    exceptions: dict = {}
    for provider in self.iter_providers():
        try:
            return await asyncio.wait_for(
                provider.create_async(model, messages, **kwargs),
                timeout=kwargs.get("timeout", 60)
            )
        except Exception as e:
            exceptions[provider.__name__] = e
            if debug.logging:
                print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
    raise_exceptions(exceptions)
def iter_providers(self) -> Iterator[BaseProvider]:
    """
    Lazily yield providers, popping each from self.providers.

    self.providers is stored reversed, so pop() returns providers in the
    original priority order. The finally block runs when the generator is
    closed or exhausted and puts every consumed provider back (re-reversed,
    ahead of any remaining ones), so the instance's provider list is
    restored for subsequent calls.
    """
    used_provider = []
    try:
        while self.providers:
            provider = self.providers.pop()
            used_provider.append(provider)
            # Record the attempt so get_last_provider()/debugging can see it.
            self.last_provider = provider
            if debug.logging:
                print(f"Using {provider.__name__} provider")
            yield provider
    finally:
        # Restore consumed providers in their stored (reversed) order,
        # in front of whatever was not consumed.
        used_provider.reverse()
        self.providers = [*used_provider, *self.providers]
def raise_exceptions(exceptions: dict) -> None:
    """
    Raise a combined exception if any occurred during retries.

    Args:
        exceptions (dict): Mapping of provider name to the exception it raised.

    Raises:
        RetryProviderError: If any provider encountered an exception,
            with every provider's error summarized in the message.
        RetryNoProviderError: If no provider was attempted at all.
    """
    if exceptions:
        raise RetryProviderError("RetryProvider failed:\n" + "\n".join([
            f"{p}: {exception.__class__.__name__}: {exception}" for p, exception in exceptions.items()
        ]))
    # Fix: the original ended with this raise statement duplicated; the
    # second copy was unreachable dead code and has been removed.
    raise RetryNoProviderError("No provider found")

@ -1,7 +1,7 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Union, List, Dict, Type
from typing import Union, Dict, Type
from ..typing import Messages, CreateResult
class BaseProvider(ABC):
@ -96,22 +96,4 @@ class BaseRetryProvider(BaseProvider):
__name__: str = "RetryProvider"
supports_stream: bool = True
def __init__(
self,
providers: List[Type[BaseProvider]],
shuffle: bool = True
) -> None:
"""
Initialize the BaseRetryProvider.
Args:
providers (List[Type[BaseProvider]]): List of providers to use.
shuffle (bool): Whether to shuffle the providers list.
"""
self.providers = providers
self.shuffle = shuffle
self.working = True
self.exceptions: Dict[str, Exception] = {}
self.last_provider: Type[BaseProvider] = None
ProviderType = Union[Type[BaseProvider], BaseRetryProvider]

@ -1,5 +1,5 @@
import sys
from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict, Type, IO, Optional
from typing import Any, AsyncGenerator, Generator, AsyncIterator, Iterator, NewType, Tuple, Union, List, Dict, Type, IO, Optional
try:
from PIL.Image import Image
@ -12,8 +12,8 @@ else:
from typing_extensions import TypedDict
SHA256 = NewType('sha_256_hash', str)
CreateResult = Generator[str, None, None]
AsyncResult = AsyncGenerator[str, None]
CreateResult = Iterator[str]
AsyncResult = AsyncIterator[str]
Messages = List[Dict[str, str]]
Cookies = Dict[str, str]
ImageType = Union[str, bytes, IO, Image, None]
@ -22,6 +22,8 @@ __all__ = [
'Any',
'AsyncGenerator',
'Generator',
'AsyncIterator',
'Iterator'
'Tuple',
'Union',
'List',

Loading…
Cancel
Save