Merge pull request #1609 from hlohaus/move

Move some modules, create providers dir, Use new client in API
H Lohaus 4 months ago committed by GitHub
commit cf87d467c9

@ -172,7 +172,7 @@ To start the web interface, run the following code in Python:
from g4f.gui import run_gui
run_gui()
```
or type in command line:
or execute the following command:
```bash
python -m g4f.cli gui -port 8080 -debug
```
@ -187,7 +187,7 @@ See: [/docs/interference](/docs/interference.md)
##### Cookies / Access Token
For generating images with Bing and for the OpenAi Chat you need cookies or a token from your browser session. From Bing you need the "_U" cookie and from OpenAI you need the "access_token". You can pass the cookies / the access token in the create function or you use the `set_cookies` setter before you run G4F:
To generate images with Bing and to use OpenAI Chat, you need cookies or a token from your browser session. From Bing you need the "_U" cookie, and from OpenAI you need the "access_token". You can pass the cookies / the access token in the create function, or use the `set_cookies` setter before you run G4F:
```python
from g4f.cookies import set_cookies
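# A minimal sketch (not part of this diff) of the `set_cookies` calls
# described above; cookie values are placeholders:
set_cookies(".bing.com", {
    "_U": "cookie value"
})
set_cookies("chat.openai.com", {
    "access_token": "token value"
})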

@ -2,13 +2,13 @@
#### Introduction
The G4F Client API introduces a new way to integrate advanced AI functionalities into your Python applications. This guide will help you transition from using the OpenAI client to the new G4F Client, offering compatibility with the existing OpenAI API alongside additional features.
Welcome to the G4F Client API, a cutting-edge tool for seamlessly integrating advanced AI capabilities into your Python applications. This guide is designed to facilitate your transition from using the OpenAI client to the G4F Client, offering enhanced features while maintaining compatibility with the existing OpenAI API.
#### Getting Started
**Switching to G4F Client:**
Replace the OpenAI client import statement in your Python code as follows:
To begin using the G4F Client, simply update your import statement in your Python code:
Old Import:
```python
@ -20,11 +20,11 @@ New Import:
from g4f.client import Client as OpenAI
```
The G4F Client maintains the same API interface as OpenAI, ensuring a seamless transition.
The G4F Client preserves the same familiar API interface as OpenAI, ensuring a smooth transition process.
#### Initializing the Client
### Initializing the Client
To use the G4F Client, create an instance with customized providers:
To utilize the G4F Client, create a new instance. Below is an example showcasing custom providers:
```python
from g4f.client import Client
@ -33,7 +33,18 @@ from g4f.Provider import BingCreateImages, OpenaiChat, Gemini
client = Client(
provider=OpenaiChat,
image_provider=Gemini,
proxies=None
...
)
```
You also have the option to define a proxy in the client for all outgoing requests:
```python
from g4f.client import Client
client = Client(
proxies="http://user:pass@host",
...
)
```
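Since the client mirrors the OpenAI interface, a basic (non-streaming) call looks the same as before. A minimal sketch; the model name and prompt are illustrative:
```python
from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    messages=[{"role": "user", "content": "Hello"}],
    model="gpt-3.5-turbo",
)
# With stream=False (the default), create() returns a single ChatCompletion:
print(response.choices[0].message.content)
```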

@ -3,7 +3,7 @@ from urllib.parse import urlparse
import asyncio
from g4f import models, ChatCompletion
from g4f.base_provider import BaseProvider, BaseRetryProvider, ProviderType
from g4f.providers.types import BaseRetryProvider, ProviderType
from etc.testing._providers import get_providers
from g4f import debug

@ -3,5 +3,7 @@ from .asyncio import *
from .backend import *
from .main import *
from .model import *
from .client import *
from .include import *
unittest.main()

@ -1,13 +1,15 @@
import unittest
import asyncio
from unittest.mock import MagicMock
from .mocks import ProviderMock
import g4f
try:
from g4f.gui.server.backend import Backend_Api, get_error_message
has_requirements = True
except:
has_requirements = False
class TestBackendApi(unittest.TestCase):
def setUp(self):
@ -20,17 +22,23 @@ class TestBackendApi(unittest.TestCase):
response = self.api.get_version()
self.assertIn("version", response)
self.assertIn("latest_version", response)
def test_get_models(self):
response = self.api.get_models()
self.assertIsInstance(response, list)
self.assertTrue(len(response) > 0)
def test_get_providers(self):
response = self.api.get_providers()
self.assertIsInstance(response, list)
self.assertTrue(len(response) > 0)
def test_search(self):
# Task was destroyed but it is pending!
from g4f.gui.server.internet import search
result = asyncio.run(search("Hello"))
self.assertEqual(5, len(result))
class TestUtilityFunctions(unittest.TestCase):
def setUp(self):
@ -42,6 +50,6 @@ class TestUtilityFunctions(unittest.TestCase):
exception = Exception("Message")
result = get_error_message(exception)
self.assertEqual("ProviderMock: Exception: Message", result)
if __name__ == '__main__':
unittest.main()

@ -35,15 +35,17 @@ class TestPassModel(unittest.TestCase):
response = client.chat.completions.create(messages, "Hello", stream=True)
for chunk in response:
self.assertIsInstance(chunk, ChatCompletionChunk)
self.assertIsInstance(chunk.choices[0].delta.content, str)
if chunk.choices[0].delta.content is not None:
self.assertIsInstance(chunk.choices[0].delta.content, str)
messages = [{'role': 'user', 'content': chunk} for chunk in ["You ", "You ", "Other", "?"]]
response = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2)
response = list(response)
self.assertEqual(len(response), 2)
self.assertEqual(len(response), 3)
for chunk in response:
self.assertEqual(chunk.choices[0].delta.content, "You ")
if chunk.choices[0].delta.content is not None:
self.assertEqual(chunk.choices[0].delta.content, "You ")
def no_test_stop(self):
def test_stop(self):
client = Client(provider=YieldProviderMock)
messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
response = client.chat.completions.create(messages, "Hello", stop=["and"])

@ -1,11 +1,15 @@
import sys
import pathlib
import unittest
sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
class TestImport(unittest.TestCase):
import g4f
def test_get_cookies(self):
from g4f import get_cookies as get_cookies_alias
from g4f.cookies import get_cookies
self.assertEqual(get_cookies_alias, get_cookies)
g4f.debug.logging = False
g4f.debug.version_check = False
def test_requests(self):
from g4f.requests import StreamSession
self.assertIsInstance(StreamSession, type)
DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
if __name__ == '__main__':
unittest.main()

@ -1,4 +1,4 @@
from g4f.Provider.base_provider import AbstractProvider, AsyncProvider, AsyncGeneratorProvider
from g4f.providers.base_provider import AbstractProvider, AsyncProvider, AsyncGeneratorProvider
class ProviderMock(AbstractProvider):
working = True

@ -1,5 +1,7 @@
from __future__ import annotations
import re
import json
from urllib import parse
from datetime import datetime
@ -32,10 +34,18 @@ class Phind(AsyncGeneratorProvider):
"Sec-Fetch-Site": "same-origin",
}
async with StreamSession(
impersonate="chrome110",
headers=headers,
impersonate="chrome",
proxies={"https": proxy},
timeout=timeout
) as session:
url = "https://www.phind.com/search?home=true"
async with session.get(url) as response:
text = await response.text()
match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(?P<json>[\S\s]+?)</script>', text)
data = json.loads(match.group("json"))
challenge_seeds = data["props"]["pageProps"]["challengeSeeds"]
prompt = messages[-1]["content"]
data = {
"question": prompt,
@ -51,14 +61,13 @@ class Phind(AsyncGeneratorProvider):
"language": "en-US",
"detailed": True,
"anonUserId": "",
"answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind Model",
"answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind-34B",
"creativeMode": creative_mode,
"customLinks": []
},
"context": "\n".join([message["content"] for message in messages if message["role"] == "system"]),
}
data["challenge"] = generate_challenge(data)
data["challenge"] = generate_challenge(data, **challenge_seeds)
async with session.post(f"https://https.api.phind.com/infer/", headers=headers, json=data) as response:
new_line = False
async for line in response.iter_lines():
@ -101,6 +110,18 @@ def deterministic_stringify(obj):
items = sorted(obj.items(), key=lambda x: x[0])
return ','.join([f'{k}:{handle_value(v)}' for k, v in items if handle_value(v) is not None])
def prng_general(seed, multiplier, addend, modulus):
a = seed * multiplier + addend
if a < 0:
return ((a%modulus)-modulus)/modulus
else:
return a%modulus/modulus
def generate_challenge_seed(l):
I = deterministic_stringify(l)
d = parse.quote(I, safe='')
return simple_hash(d)
def simple_hash(s):
d = 0
for char in s:
@ -111,16 +132,8 @@ def simple_hash(s):
d -= 0x100000000 # Subtract 2**32
return d
def generate_challenge(obj):
deterministic_str = deterministic_stringify(obj)
encoded_str = parse.quote(deterministic_str, safe='')
c = simple_hash(encoded_str)
a = (9301 * c + 49297)
b = 233280
# If negative, we need special logic
if a < 0:
return ((a%b)-b)/b
else:
return a%b/b
def generate_challenge(obj, **kwargs):
return prng_general(
seed=generate_challenge_seed(obj),
**kwargs
)
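For reference, `prng_general` is a linear congruential step normalized into [-1, 1). A toy invocation using the constants from the removed hardcoded version (the live values now come from the page's `challengeSeeds`; the import path assumes the module layout above):
```python
from g4f.Provider.Phind import prng_general

# multiplier/addend/modulus were hardcoded as 9301/49297/233280 before
# this change; Phind now serves them per page load:
print(prng_general(seed=12345, multiplier=9301, addend=49297, modulus=233280))
```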

@ -7,9 +7,9 @@ from aiohttp import ClientSession, FormData
from ..typing import AsyncGenerator, Messages, ImageType, Cookies
from .base_provider import AsyncGeneratorProvider
from .helper import get_connector, format_prompt
from ..providers.helper import get_connector, format_prompt
from ..image import to_bytes
from ..defaults import DEFAULT_HEADERS
from ..requests.defaults import DEFAULT_HEADERS
class You(AsyncGeneratorProvider):
url = "https://you.com"

@ -1,9 +1,10 @@
from __future__ import annotations
from ..base_provider import BaseProvider, ProviderType
from .retry_provider import RetryProvider
from .base_provider import AsyncProvider, AsyncGeneratorProvider
from .create_images import CreateImagesProvider
from ..providers.types import BaseProvider, ProviderType
from ..providers.retry_provider import RetryProvider
from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
from ..providers.create_images import CreateImagesProvider
from .deprecated import *
from .selenium import *
from .needs_auth import *
@ -15,6 +16,7 @@ from .AItianhu import AItianhu
from .Aura import Aura
from .Bestim import Bestim
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .ChatAnywhere import ChatAnywhere
from .ChatBase import ChatBase
from .ChatForAi import ChatForAi
@ -53,8 +55,6 @@ from .Vercel import Vercel
from .Ylokh import Ylokh
from .You import You
from .BingCreateImages import BingCreateImages
import sys
__modules__: list = [

@ -1,281 +1,2 @@
from __future__ import annotations
import sys
import asyncio
from asyncio import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
from abc import abstractmethod
from inspect import signature, Parameter
from .helper import get_cookies, format_prompt
from ..typing import CreateResult, AsyncResult, Messages, Union
from ..base_provider import BaseProvider
from ..errors import NestAsyncioError, ModelNotSupportedError
from .. import debug
if sys.version_info < (3, 10):
NoneType = type(None)
else:
from types import NoneType
# Set Windows event loop policy for better compatibility with asyncio and curl_cffi
if sys.platform == 'win32':
if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
def get_running_loop() -> Union[AbstractEventLoop, None]:
try:
loop = asyncio.get_running_loop()
if not hasattr(loop.__class__, "_nest_patched"):
raise NestAsyncioError(
'Use "create_async" instead of "create" function in a running event loop. Or use "nest_asyncio" package.'
)
return loop
except RuntimeError:
pass
class AbstractProvider(BaseProvider):
"""
Abstract class for providing asynchronous functionality to derived classes.
"""
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
*,
loop: AbstractEventLoop = None,
executor: ThreadPoolExecutor = None,
**kwargs
) -> str:
"""
Asynchronously creates a result based on the given model and messages.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
executor (ThreadPoolExecutor, optional): The executor for running async tasks. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
str: The created result as a string.
"""
loop = loop or asyncio.get_running_loop()
def create_func() -> str:
return "".join(cls.create_completion(model, messages, False, **kwargs))
return await asyncio.wait_for(
loop.run_in_executor(executor, create_func),
timeout=kwargs.get("timeout")
)
@classmethod
@property
def params(cls) -> str:
"""
Returns the parameters supported by the provider.
Args:
cls (type): The class on which this property is called.
Returns:
str: A string listing the supported parameters.
"""
sig = signature(
cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
cls.create_async if issubclass(cls, AsyncProvider) else
cls.create_completion
)
def get_type_name(annotation: type) -> str:
return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
args = ""
for name, param in sig.parameters.items():
if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
continue
args += f"\n {name}"
args += f": {get_type_name(param.annotation)}" if param.annotation is not Parameter.empty else ""
args += f' = "{param.default}"' if param.default == "" else f" = {param.default}" if param.default is not Parameter.empty else ""
return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
class AsyncProvider(AbstractProvider):
"""
Provides asynchronous functionality for creating completions.
"""
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool = False,
**kwargs
) -> CreateResult:
"""
Creates a completion result synchronously.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to False.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
CreateResult: The result of the completion creation.
"""
get_running_loop()
yield asyncio.run(cls.create_async(model, messages, **kwargs))
@staticmethod
@abstractmethod
async def create_async(
model: str,
messages: Messages,
**kwargs
) -> str:
"""
Abstract method for creating asynchronous results.
Args:
model (str): The model to use for creation.
messages (Messages): The messages to process.
**kwargs: Additional keyword arguments.
Raises:
NotImplementedError: If this method is not overridden in derived classes.
Returns:
str: The created result as a string.
"""
raise NotImplementedError()
class AsyncGeneratorProvider(AsyncProvider):
"""
Provides asynchronous generator functionality for streaming results.
"""
supports_stream = True
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool = True,
**kwargs
) -> CreateResult:
"""
Creates a streaming completion result synchronously.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to True.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
CreateResult: The result of the streaming completion creation.
"""
loop = get_running_loop()
new_loop = False
if not loop:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
new_loop = True
generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
gen = generator.__aiter__()
# Fix for RuntimeError: async generator ignored GeneratorExit
async def await_callback(callback):
return await callback()
try:
while True:
yield loop.run_until_complete(await_callback(gen.__anext__))
except StopAsyncIteration:
...
# Fix for: ResourceWarning: unclosed event loop
finally:
if new_loop:
loop.close()
asyncio.set_event_loop(None)
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
**kwargs
) -> str:
"""
Asynchronously creates a result from a generator.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
**kwargs: Additional keyword arguments.
Returns:
str: The created result as a string.
"""
return "".join([
chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)
if not isinstance(chunk, Exception)
])
@staticmethod
@abstractmethod
async def create_async_generator(
model: str,
messages: Messages,
stream: bool = True,
**kwargs
) -> AsyncResult:
"""
Abstract method for creating an asynchronous generator.
Args:
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to True.
**kwargs: Additional keyword arguments.
Raises:
NotImplementedError: If this method is not overridden in derived classes.
Returns:
AsyncResult: An asynchronous generator yielding results.
"""
raise NotImplementedError()
class ProviderModelMixin:
default_model: str
models: list[str] = []
model_aliases: dict[str, str] = {}
@classmethod
def get_models(cls) -> list[str]:
return cls.models
@classmethod
def get_model(cls, model: str) -> str:
if not model:
model = cls.default_model
elif model in cls.model_aliases:
model = cls.model_aliases[model]
elif model not in cls.get_models():
raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
debug.last_model = model
return model
from ..providers.base_provider import *
from .helper import get_cookies, format_prompt

@ -17,9 +17,9 @@ try:
except ImportError:
has_requirements = False
from ..create_images import CreateImagesProvider
from ...providers.create_images import CreateImagesProvider
from ..helper import get_connector
from ...base_provider import ProviderType
from ...providers.types import ProviderType
from ...errors import MissingRequirementsError
from ...webdriver import WebDriver, get_driver_cookies, get_browser

@ -1,62 +1,2 @@
from __future__ import annotations
import random
import secrets
import string
from aiohttp import BaseConnector
from ..typing import Messages, Optional
from ..errors import MissingRequirementsError
from ..cookies import get_cookies
def format_prompt(messages: Messages, add_special_tokens=False) -> str:
"""
Format a series of messages into a single string, optionally adding special tokens.
Args:
messages (Messages): A list of message dictionaries, each containing 'role' and 'content'.
add_special_tokens (bool): Whether to add special formatting tokens.
Returns:
str: A formatted string containing all messages.
"""
if not add_special_tokens and len(messages) <= 1:
return messages[0]["content"]
formatted = "\n".join([
f'{message["role"].capitalize()}: {message["content"]}'
for message in messages
])
return f"{formatted}\nAssistant:"
def get_random_string(length: int = 10) -> str:
"""
Generate a random string of specified length, containing lowercase letters and digits.
Args:
length (int, optional): Length of the random string to generate. Defaults to 10.
Returns:
str: A random string of the specified length.
"""
return ''.join(
random.choice(string.ascii_lowercase + string.digits)
for _ in range(length)
)
def get_random_hex() -> str:
"""
Generate a random hexadecimal string of a fixed length.
Returns:
str: A random hexadecimal string of 32 characters (16 bytes).
"""
return secrets.token_hex(16).zfill(32)
def get_connector(connector: BaseConnector = None, proxy: str = None) -> Optional[BaseConnector]:
if proxy and not connector:
try:
from aiohttp_socks import ProxyConnector
connector = ProxyConnector.from_url(proxy)
except ImportError:
raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
return connector
from ..providers.helper import *
from ..cookies import get_cookies

@ -8,8 +8,8 @@ from .Provider import AsyncGeneratorProvider, ProviderUtils
from .typing import Messages, CreateResult, AsyncResult, Union
from .cookies import get_cookies, set_cookies
from . import debug, version
from .base_provider import BaseRetryProvider, ProviderType
from .Provider.base_provider import ProviderModelMixin
from .providers.types import BaseRetryProvider, ProviderType
from .providers.base_provider import ProviderModelMixin
def get_model_and_provider(model : Union[Model, str],
provider : Union[ProviderType, str, None],

@ -1,21 +1,27 @@
import ast
import logging
import time
import json
import random
import string
import uvicorn
import nest_asyncio
from fastapi import FastAPI, Response, Request
from fastapi.responses import StreamingResponse
from typing import List, Union, Any, Dict, AnyStr
#from ._tokenizer import tokenize
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
from pydantic import BaseModel
from typing import List, Union
import g4f
from .. import debug
debug.logging = True
import g4f.debug
from g4f.client import Client
from g4f.typing import Messages
class ChatCompletionsConfig(BaseModel):
messages: Messages
model: str
provider: Union[str, None]
stream: bool = False
temperature: Union[float, None]
max_tokens: int = None
stop: Union[list[str], str, None]
access_token: Union[str, None]
class Api:
def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
@ -25,169 +31,82 @@ class Api:
self.sentry = sentry
self.list_ignored_providers = list_ignored_providers
self.app = FastAPI()
if debug:
g4f.debug.logging = True
self.client = Client()
nest_asyncio.apply()
self.app = FastAPI()
JSONObject = Dict[AnyStr, Any]
JSONArray = List[Any]
JSONStructure = Union[JSONArray, JSONObject]
self.routes()
def routes(self):
@self.app.get("/")
async def read_root():
return Response(content=json.dumps({"info": "g4f API"}, indent=4), media_type="application/json")
return RedirectResponse("/v1", 302)
@self.app.get("/v1")
async def read_root_v1():
return Response(content=json.dumps({"info": "Go to /v1/chat/completions or /v1/models."}, indent=4), media_type="application/json")
return HTMLResponse('g4f API: Go to '
'<a href="/v1/chat/completions">chat/completions</a> '
'or <a href="/v1/models">models</a>.')
@self.app.get("/v1/models")
async def models():
model_list = []
for model in g4f.Model.__all__():
model_info = (g4f.ModelUtils.convert[model])
model_list.append({
'id': model,
model_list = dict(
(model, g4f.ModelUtils.convert[model])
for model in g4f.Model.__all__()
)
model_list = [{
'id': model_id,
'object': 'model',
'created': 0,
'owned_by': model_info.base_provider}
)
return Response(content=json.dumps({
'object': 'list',
'data': model_list}, indent=4), media_type="application/json")
'owned_by': model.base_provider
} for model_id, model in model_list.items()]
return JSONResponse(model_list)
@self.app.get("/v1/models/{model_name}")
async def model_info(model_name: str):
try:
model_info = (g4f.ModelUtils.convert[model_name])
return Response(content=json.dumps({
model_info = g4f.ModelUtils.convert[model_name]
return JSONResponse({
'id': model_name,
'object': 'model',
'created': 0,
'owned_by': model_info.base_provider
}, indent=4), media_type="application/json")
})
except:
return Response(content=json.dumps({"error": "The model does not exist."}, indent=4), media_type="application/json")
return JSONResponse({"error": "The model does not exist."})
@self.app.post("/v1/chat/completions")
async def chat_completions(request: Request, item: JSONStructure = None):
item_data = {
'model': 'gpt-3.5-turbo',
'stream': False,
}
# item contains byte keys, and dict.get suppresses error
item_data.update({
key.decode('utf-8') if isinstance(key, bytes) else key: str(value)
for key, value in (item or {}).items()
})
# messages is str, need dict
if isinstance(item_data.get('messages'), str):
item_data['messages'] = ast.literal_eval(item_data.get('messages'))
model = item_data.get('model')
stream = True if item_data.get("stream") == "True" else False
messages = item_data.get('messages')
provider = item_data.get('provider', '').replace('g4f.Provider.', '')
provider = provider if provider and provider != "Auto" else None
temperature = item_data.get('temperature')
async def chat_completions(config: ChatCompletionsConfig = None, request: Request = None, provider: str = None):
try:
response = g4f.ChatCompletion.create(
model=model,
stream=stream,
messages=messages,
temperature = temperature,
provider = provider,
config.provider = provider if config.provider is None else config.provider
if config.access_token is None and request is not None:
auth_header = request.headers.get("Authorization")
if auth_header is not None:
config.access_token = auth_header.split(None, 1)[-1]
response = self.client.chat.completions.create(
**dict(config),
ignored=self.list_ignored_providers
)
except Exception as e:
logging.exception(e)
content = json.dumps({
"error": {"message": f"An error occurred while generating the response:\n{e}"},
"model": model,
"provider": g4f.get_last_provider(True)
})
return Response(content=content, status_code=500, media_type="application/json")
completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
completion_timestamp = int(time.time())
if not stream:
#prompt_tokens, _ = tokenize(''.join([message['content'] for message in messages]))
#completion_tokens, _ = tokenize(response)
json_data = {
'id': f'chatcmpl-{completion_id}',
'object': 'chat.completion',
'created': completion_timestamp,
'model': model,
'provider': g4f.get_last_provider(True),
'choices': [
{
'index': 0,
'message': {
'role': 'assistant',
'content': response,
},
'finish_reason': 'stop',
}
],
'usage': {
'prompt_tokens': 0, #prompt_tokens,
'completion_tokens': 0, #completion_tokens,
'total_tokens': 0, #prompt_tokens + completion_tokens,
},
}
return Response(content=json.dumps(json_data, indent=4), media_type="application/json")
return Response(content=format_exception(e, config), status_code=500, media_type="application/json")
if not config.stream:
return JSONResponse(response.to_json())
def streaming():
try:
for chunk in response:
completion_data = {
'id': f'chatcmpl-{completion_id}',
'object': 'chat.completion.chunk',
'created': completion_timestamp,
'model': model,
'provider': g4f.get_last_provider(True),
'choices': [
{
'index': 0,
'delta': {
'role': 'assistant',
'content': chunk,
},
'finish_reason': None,
}
],
}
yield f'data: {json.dumps(completion_data)}\n\n'
time.sleep(0.03)
end_completion_data = {
'id': f'chatcmpl-{completion_id}',
'object': 'chat.completion.chunk',
'created': completion_timestamp,
'model': model,
'provider': g4f.get_last_provider(True),
'choices': [
{
'index': 0,
'delta': {},
'finish_reason': 'stop',
}
],
}
yield f'data: {json.dumps(end_completion_data)}\n\n'
yield f"data: {json.dumps(chunk.to_json())}\n\n"
except GeneratorExit:
pass
except Exception as e:
logging.exception(e)
content = json.dumps({
"error": {"message": f"An error occurred while generating the response:\n{e}"},
"model": model,
"provider": g4f.get_last_provider(True),
})
yield f'data: {content}'
yield f'data: {format_exception(e, config)}'
return StreamingResponse(streaming(), media_type="text/event-stream")
@ -198,3 +117,11 @@ class Api:
def run(self, ip):
split_ip = ip.split(":")
uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False)
def format_exception(e: Exception, config: ChatCompletionsConfig) -> str:
last_provider = g4f.get_last_provider(True)
return json.dumps({
"error": {"message": f"ChatCompletionsError: {e.__class__.__name__}: {e}"},
"model": last_provider.get("model") if last_provider else config.model,
"provider": last_provider.get("name") if last_provider else config.provider
})
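With the endpoint backed by the new client, a request body simply matches the `ChatCompletionsConfig` model above. A minimal sketch of a call, assuming a locally running instance (host and port are illustrative):
```python
import requests

response = requests.post(
    "http://localhost:1337/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,
    },
)
# Non-streaming responses are the JSON form of ChatCompletion.to_json():
print(response.json()["choices"][0]["message"]["content"])
```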

@ -1,17 +1,23 @@
from __future__ import annotations
import re
import os
import time
import random
import string
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .typing import Union, Generator, Messages, ImageType
from .base_provider import BaseProvider, ProviderType
from .providers.types import BaseProvider, ProviderType
from .image import ImageResponse as ImageProviderResponse
from .Provider import BingCreateImages, Gemini, OpenaiChat
from .Provider.BingCreateImages import BingCreateImages
from .Provider.needs_auth import Gemini, OpenaiChat
from .errors import NoImageResponseError
from . import get_model_and_provider
from . import get_model_and_provider, get_last_provider
ImageProvider = Union[BaseProvider, object]
Proxies = Union[dict, str]
IterResponse = Generator[Union[ChatCompletion, ChatCompletionChunk], None, None]
def read_json(text: str) -> dict:
"""
@ -29,21 +35,19 @@ def read_json(text: str) -> dict:
return text
def iter_response(
response: iter,
response: iter[str],
stream: bool,
response_format: dict = None,
max_tokens: int = None,
stop: list = None
) -> Generator:
) -> IterResponse:
content = ""
finish_reason = None
last_chunk = None
completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
for idx, chunk in enumerate(response):
if last_chunk is not None:
yield ChatCompletionChunk(last_chunk, finish_reason)
content += str(chunk)
if max_tokens is not None and idx + 1 >= max_tokens:
finish_reason = "max_tokens"
finish_reason = "length"
first = -1
word = None
if stop is not None:
@ -61,16 +65,25 @@ def iter_response(
if first != -1:
finish_reason = "stop"
if stream:
last_chunk = chunk
yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
if finish_reason is not None:
break
if last_chunk is not None:
yield ChatCompletionChunk(last_chunk, finish_reason)
if not stream:
finish_reason = "stop" if finish_reason is None else finish_reason
if stream:
yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
else:
if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object":
response = read_json(response)
yield ChatCompletion(content, finish_reason)
content = read_json(content)
yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
last_provider = None
for chunk in response:
last_provider = get_last_provider(True) if last_provider is None else last_provider
chunk.model = last_provider.get("model")
chunk.provider = last_provider.get("name")
yield chunk
class Client():
proxies: Proxies = None
@ -89,13 +102,14 @@ class Client():
self.proxies: Proxies = proxies
def get_proxy(self) -> Union[str, None]:
if isinstance(self.proxies, str) or self.proxies is None:
if isinstance(self.proxies, str):
return self.proxies
elif self.proxies is None:
return os.environ.get("G4F_PROXY")
elif "all" in self.proxies:
return self.proxies["all"]
elif "https" in self.proxies:
return self.proxies["https"]
return None
class Completions():
def __init__(self, client: Client, provider: ProviderType = None):
@ -110,7 +124,7 @@ class Completions():
stream: bool = False,
response_format: dict = None,
max_tokens: int = None,
stop: Union[list. str] = None,
stop: Union[list[str], str] = None,
**kwargs
) -> Union[ChatCompletion, Generator[ChatCompletionChunk]]:
if max_tokens is not None:
@ -123,9 +137,9 @@ class Completions():
stream,
**kwargs
)
response = provider.create_completion(model, messages, stream=stream, **kwargs)
response = provider.create_completion(model, messages, stream=stream, proxy=self.client.get_proxy(), **kwargs)
stop = [stop] if isinstance(stop, str) else stop
response = iter_response(response, stream, response_format, max_tokens, stop)
response = iter_append_model_and_provider(iter_response(response, stream, response_format, max_tokens, stop))
return response if stream else next(response)
class Chat():
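Given the new chunk semantics (a content-less final chunk carries the `finish_reason`, as the updated unit tests above reflect), streaming consumers should guard against `delta.content` being `None`. A minimal sketch:
```python
from g4f.client import Client

client = Client()
stream = client.chat.completions.create(
    messages=[{"role": "user", "content": "Hello"}],
    model="gpt-3.5-turbo",
    stream=True,
)
for chunk in stream:
    # The final chunk has no content, only a finish_reason:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
```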

@ -1,4 +1,4 @@
from .base_provider import ProviderType
from .providers.types import ProviderType
logging: bool = False
version_check: bool = True

@ -8,8 +8,6 @@ from g4f.image import is_allowed_extension, to_image
from g4f.errors import VersionNotFoundError
from g4f.Provider import __providers__
from g4f.Provider.bing.create_images import patch_provider
from .internet import get_search_message
class Backend_Api:
"""
@ -97,7 +95,7 @@ class Backend_Api:
current_version = None
return {
"version": current_version,
"latest_version": version.get_latest_version(),
"latest_version": version.utils.latest_version,
}
def generate_title(self):
@ -157,6 +155,8 @@ class Backend_Api:
if provider == "Bing":
kwargs['web_search'] = True
else:
# ResourceWarning: unclosed event loop
from .internet import get_search_message
messages[-1]["content"] = get_search_message(messages[-1]["content"])
model = json_data.get('model')

@ -2,7 +2,7 @@ from __future__ import annotations
from aiohttp import ClientSession, ClientTimeout
try:
from duckduckgo_search import DDGS
from duckduckgo_search.duckduckgo_search_async import AsyncDDGS
from bs4 import BeautifulSoup
has_requirements = True
except ImportError:
@ -30,7 +30,10 @@ class SearchResults():
search += result.snippet
search += f"\n\nSource: [[{idx}]]({result.url})"
return search
def __len__(self) -> int:
return len(self.results)
class SearchResultEntry():
def __init__(self, title: str, url: str, snippet: str, text: str = None):
self.title = title
@ -96,21 +99,20 @@ async def fetch_and_scrape(session: ClientSession, url: str, max_words: int = No
async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text: bool = True) -> SearchResults:
if not has_requirements:
raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package')
with DDGS() as ddgs:
async with AsyncDDGS() as ddgs:
results = []
for result in ddgs.text(
async for result in ddgs.text(
query,
region="wt-wt",
safesearch="moderate",
timelimit="y",
max_results=n_results
):
results.append(SearchResultEntry(
result["title"],
result["href"],
result["body"]
))
if len(results) >= n_results:
break
if add_text:
requests = []
@ -136,7 +138,6 @@ async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text
return SearchResults(formatted_results)
def get_search_message(prompt) -> str:
try:
search_results = asyncio.run(search(prompt))
@ -146,7 +147,6 @@ def get_search_message(prompt) -> str:
Instruction: Use the provided web search results to write a comprehensive reply to the user request.
Make sure to add the sources of cites using [[Number]](Url) notation after the reference. Example: [[0]](http://google.com)
If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
User request:
{prompt}
@ -154,4 +154,4 @@ User request:
return message
except Exception as e:
print("Couldn't do web search:", e)
return prompt
return prompt
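The switch to `AsyncDDGS` makes the DuckDuckGo query fully async. A standalone sketch of the same pattern, assuming `duckduckgo-search>=4.4.3` as pinned in the requirements below; query and result count are illustrative:
```python
import asyncio
from duckduckgo_search.duckduckgo_search_async import AsyncDDGS

async def demo(query: str, n_results: int = 5) -> list:
    async with AsyncDDGS() as ddgs:
        # ddgs.text is an async generator honoring max_results directly,
        # so the manual length check from the old code is no longer needed:
        return [result async for result in ddgs.text(query, max_results=n_results)]

print(len(asyncio.run(demo("Hello"))))
```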

@ -11,7 +11,7 @@ try:
has_requirements = True
except ImportError:
has_requirements = False
from .errors import MissingRequirementsError
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}
@ -28,9 +28,11 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
"""
if not has_requirements:
raise MissingRequirementsError('Install "pillow" package for images')
if isinstance(image, str):
is_data_uri_an_image(image)
image = extract_data_uri(image)
if is_svg:
try:
import cairosvg
@ -41,6 +43,7 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
buffer = BytesIO()
cairosvg.svg2png(image, write_to=buffer)
return open_image(buffer)
if isinstance(image, bytes):
is_accepted_format(image)
return open_image(BytesIO(image))
@ -48,6 +51,7 @@ def to_image(image: ImageType, is_svg: bool = False) -> Image:
image = open_image(image)
image.load()
return image
return image
def is_allowed_extension(filename: str) -> bool:
@ -200,17 +204,16 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st
str: The formatted markdown string.
"""
if isinstance(images, str):
images = f"[![{alt}]({preview.replace('{image}', images) if preview else images})]({images})"
result = f"[![{alt}]({preview.replace('{image}', images) if preview else images})]({images})"
else:
if not isinstance(preview, list):
preview = [preview.replace('{image}', image) if preview else image for image in images]
images = [
result = "\n".join(
f"[![#{idx+1} {alt}]({preview[idx]})]({image})" for idx, image in enumerate(images)
]
images = "\n".join(images)
)
start_flag = "<!-- generated images start -->\n"
end_flag = "<!-- generated images end -->\n"
return f"\n{start_flag}{images}\n{end_flag}\n"
return f"\n{start_flag}{result}\n{end_flag}\n"
def to_bytes(image: ImageType) -> bytes:
"""
@ -245,19 +248,19 @@ class ImageResponse:
self.images = images
self.alt = alt
self.options = options
def __str__(self) -> str:
return format_images_markdown(self.images, self.alt, self.get("preview"))
def get(self, key: str):
return self.options.get(key)
class ImageRequest:
def __init__(
self,
options: dict = {}
):
self.options = options
def get(self, key: str):
return self.options.get(key)

@ -1,5 +1,7 @@
from __future__ import annotations
from dataclasses import dataclass
from .Provider import RetryProvider, ProviderType
from .Provider import (
Chatgpt4Online,

@ -0,0 +1,280 @@
from __future__ import annotations
import sys
import asyncio
from asyncio import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
from abc import abstractmethod
from inspect import signature, Parameter
from ..typing import CreateResult, AsyncResult, Messages, Union
from .types import BaseProvider
from ..errors import NestAsyncioError, ModelNotSupportedError
from .. import debug
if sys.version_info < (3, 10):
NoneType = type(None)
else:
from types import NoneType
# Set Windows event loop policy for better compatibility with asyncio and curl_cffi
if sys.platform == 'win32':
if isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
def get_running_loop() -> Union[AbstractEventLoop, None]:
try:
loop = asyncio.get_running_loop()
if not hasattr(loop.__class__, "_nest_patched"):
raise NestAsyncioError(
'Use "create_async" instead of "create" function in a running event loop. Or use "nest_asyncio" package.'
)
return loop
except RuntimeError:
pass
class AbstractProvider(BaseProvider):
"""
Abstract class for providing asynchronous functionality to derived classes.
"""
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
*,
loop: AbstractEventLoop = None,
executor: ThreadPoolExecutor = None,
**kwargs
) -> str:
"""
Asynchronously creates a result based on the given model and messages.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
executor (ThreadPoolExecutor, optional): The executor for running async tasks. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
str: The created result as a string.
"""
loop = loop or asyncio.get_running_loop()
def create_func() -> str:
return "".join(cls.create_completion(model, messages, False, **kwargs))
return await asyncio.wait_for(
loop.run_in_executor(executor, create_func),
timeout=kwargs.get("timeout")
)
@classmethod
@property
def params(cls) -> str:
"""
Returns the parameters supported by the provider.
Args:
cls (type): The class on which this property is called.
Returns:
str: A string listing the supported parameters.
"""
sig = signature(
cls.create_async_generator if issubclass(cls, AsyncGeneratorProvider) else
cls.create_async if issubclass(cls, AsyncProvider) else
cls.create_completion
)
def get_type_name(annotation: type) -> str:
return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
args = ""
for name, param in sig.parameters.items():
if name in ("self", "kwargs") or (name == "stream" and not cls.supports_stream):
continue
args += f"\n {name}"
args += f": {get_type_name(param.annotation)}" if param.annotation is not Parameter.empty else ""
args += f' = "{param.default}"' if param.default == "" else f" = {param.default}" if param.default is not Parameter.empty else ""
return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
class AsyncProvider(AbstractProvider):
"""
Provides asynchronous functionality for creating completions.
"""
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool = False,
**kwargs
) -> CreateResult:
"""
Creates a completion result synchronously.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to False.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
CreateResult: The result of the completion creation.
"""
get_running_loop()
yield asyncio.run(cls.create_async(model, messages, **kwargs))
@staticmethod
@abstractmethod
async def create_async(
model: str,
messages: Messages,
**kwargs
) -> str:
"""
Abstract method for creating asynchronous results.
Args:
model (str): The model to use for creation.
messages (Messages): The messages to process.
**kwargs: Additional keyword arguments.
Raises:
NotImplementedError: If this method is not overridden in derived classes.
Returns:
str: The created result as a string.
"""
raise NotImplementedError()
class AsyncGeneratorProvider(AsyncProvider):
"""
Provides asynchronous generator functionality for streaming results.
"""
supports_stream = True
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool = True,
**kwargs
) -> CreateResult:
"""
Creates a streaming completion result synchronously.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to True.
loop (AbstractEventLoop, optional): The event loop to use. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
CreateResult: The result of the streaming completion creation.
"""
loop = get_running_loop()
new_loop = False
if not loop:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
new_loop = True
generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
gen = generator.__aiter__()
# Fix for RuntimeError: async generator ignored GeneratorExit
async def await_callback(callback):
return await callback()
try:
while True:
yield loop.run_until_complete(await_callback(gen.__anext__))
except StopAsyncIteration:
...
# Fix for: ResourceWarning: unclosed event loop
finally:
if new_loop:
loop.close()
asyncio.set_event_loop(None)
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
**kwargs
) -> str:
"""
Asynchronously creates a result from a generator.
Args:
cls (type): The class on which this method is called.
model (str): The model to use for creation.
messages (Messages): The messages to process.
**kwargs: Additional keyword arguments.
Returns:
str: The created result as a string.
"""
return "".join([
chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)
if not isinstance(chunk, Exception)
])
@staticmethod
@abstractmethod
async def create_async_generator(
model: str,
messages: Messages,
stream: bool = True,
**kwargs
) -> AsyncResult:
"""
Abstract method for creating an asynchronous generator.
Args:
model (str): The model to use for creation.
messages (Messages): The messages to process.
stream (bool): Indicates whether to stream the results. Defaults to True.
**kwargs: Additional keyword arguments.
Raises:
NotImplementedError: If this method is not overridden in derived classes.
Returns:
AsyncResult: An asynchronous generator yielding results.
"""
raise NotImplementedError()
class ProviderModelMixin:
default_model: str
models: list[str] = []
model_aliases: dict[str, str] = {}
@classmethod
def get_models(cls) -> list[str]:
return cls.models
@classmethod
def get_model(cls, model: str) -> str:
if not model:
model = cls.default_model
elif model in cls.model_aliases:
model = cls.model_aliases[model]
elif model not in cls.get_models():
raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
debug.last_model = model
return model
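A toy subclass illustrating the model resolution implemented by `ProviderModelMixin.get_model`; the class and model names are hypothetical:
```python
from g4f.providers.base_provider import ProviderModelMixin

class DemoProvider(ProviderModelMixin):
    default_model = "demo-base"
    models = ["demo-base", "demo-pro"]
    model_aliases = {"demo": "demo-base"}

print(DemoProvider.get_model(""))          # falls back to default_model
print(DemoProvider.get_model("demo"))      # resolved via model_aliases
print(DemoProvider.get_model("demo-pro"))  # accepted, listed in models
# DemoProvider.get_model("other") raises ModelNotSupportedError
```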

@ -2,9 +2,10 @@ from __future__ import annotations
import re
import asyncio
from .. import debug
from ..typing import CreateResult, Messages
from ..base_provider import BaseProvider, ProviderType
from .types import BaseProvider, ProviderType
system_message = """
You can generate images, pictures, photos or img with the DALL-E 3 image generator.

@ -0,0 +1,61 @@
from __future__ import annotations
import random
import secrets
import string
from aiohttp import BaseConnector
from ..typing import Messages, Optional
from ..errors import MissingRequirementsError
def format_prompt(messages: Messages, add_special_tokens=False) -> str:
"""
Format a series of messages into a single string, optionally adding special tokens.
Args:
messages (Messages): A list of message dictionaries, each containing 'role' and 'content'.
add_special_tokens (bool): Whether to add special formatting tokens.
Returns:
str: A formatted string containing all messages.
"""
if not add_special_tokens and len(messages) <= 1:
return messages[0]["content"]
formatted = "\n".join([
f'{message["role"].capitalize()}: {message["content"]}'
for message in messages
])
return f"{formatted}\nAssistant:"
def get_random_string(length: int = 10) -> str:
"""
Generate a random string of specified length, containing lowercase letters and digits.
Args:
length (int, optional): Length of the random string to generate. Defaults to 10.
Returns:
str: A random string of the specified length.
"""
return ''.join(
random.choice(string.ascii_lowercase + string.digits)
for _ in range(length)
)
def get_random_hex() -> str:
"""
Generate a random hexadecimal string of a fixed length.
Returns:
str: A random hexadecimal string of 32 characters (16 bytes).
"""
return secrets.token_hex(16).zfill(32)
def get_connector(connector: BaseConnector = None, proxy: str = None) -> Optional[BaseConnector]:
if proxy and not connector:
try:
from aiohttp_socks import ProxyConnector
connector = ProxyConnector.from_url(proxy)
except ImportError:
raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
return connector
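For orientation, `format_prompt` flattens a conversation into a single plain-text prompt. A short usage sketch:
```python
from g4f.providers.helper import format_prompt

messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hi"},
]
print(format_prompt(messages))
# System: You are helpful.
# User: Hi
# Assistant:
```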

@ -2,8 +2,9 @@ from __future__ import annotations
import asyncio
import random
from ..typing import CreateResult, Messages
from ..base_provider import BaseRetryProvider
from .types import BaseRetryProvider
from .. import debug
from ..errors import RetryProviderError, RetryNoProviderError

@ -2,7 +2,7 @@ from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Union, List, Dict, Type
from .typing import Messages, CreateResult
from ..typing import Messages, CreateResult
class BaseProvider(ABC):
"""
@ -81,7 +81,7 @@ class BaseProvider(ABC):
Dict[str, str]: A dictionary with provider's details.
"""
return {'name': cls.__name__, 'url': cls.url}
class BaseRetryProvider(BaseProvider):
"""
Base class for a provider that implements retry logic.
@ -113,5 +113,5 @@ class BaseRetryProvider(BaseProvider):
self.working = True
self.exceptions: Dict[str, Exception] = {}
self.last_provider: Type[BaseProvider] = None
ProviderType = Union[Type[BaseProvider], BaseRetryProvider]

@ -4,15 +4,15 @@ from urllib.parse import urlparse
try:
from curl_cffi.requests import Session
from .requests_curl_cffi import StreamResponse, StreamSession
from .curl_cffi import StreamResponse, StreamSession
has_curl_cffi = True
except ImportError:
from typing import Type as Session
from .requests_aiohttp import StreamResponse, StreamSession
from .aiohttp import StreamResponse, StreamSession
has_curl_cffi = False
from .webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
from .errors import MissingRequirementsError
from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
from ..errors import MissingRequirementsError
from .defaults import DEFAULT_HEADERS
def get_args_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> dict:

@ -3,7 +3,7 @@ from __future__ import annotations
from aiohttp import ClientSession, ClientResponse, ClientTimeout
from typing import AsyncGenerator, Any
from .Provider.helper import get_connector
from ..providers.helper import get_connector
from .defaults import DEFAULT_HEADERS
class StreamResponse(ClientResponse):

@ -44,7 +44,7 @@ class StreamResponse:
inner: Response = await self.inner
self.inner = inner
self.request = inner.request
self.status_code: int = inner.status_code
self.status: int = inner.status_code
self.reason: str = inner.reason
self.ok: bool = inner.ok
self.headers = inner.headers

@ -1,35 +1,98 @@
from __future__ import annotations
from typing import Union
class Model():
def __getitem__(self, item):
return getattr(self, item)
...
class ChatCompletion(Model):
def __init__(self, content: str, finish_reason: str):
self.choices = [ChatCompletionChoice(ChatCompletionMessage(content, finish_reason))]
def __init__(
self,
content: str,
finish_reason: str,
completion_id: str = None,
created: int = None
):
self.id: str = f"chatcmpl-{completion_id}" if completion_id else None
self.object: str = "chat.completion"
self.created: int = created
self.model: str = None
self.provider: str = None
self.choices = [ChatCompletionChoice(ChatCompletionMessage(content), finish_reason)]
self.usage: dict[str, int] = {
"prompt_tokens": 0, #prompt_tokens,
"completion_tokens": 0, #completion_tokens,
"total_tokens": 0, #prompt_tokens + completion_tokens,
}
def to_json(self):
return {
**self.__dict__,
"choices": [choice.to_json() for choice in self.choices]
}
class ChatCompletionChunk(Model):
def __init__(self, content: str, finish_reason: str):
self.choices = [ChatCompletionDeltaChoice(ChatCompletionDelta(content, finish_reason))]
def __init__(
self,
content: str,
finish_reason: str,
completion_id: str = None,
created: int = None
):
self.id: str = f"chatcmpl-{completion_id}" if completion_id else None
self.object: str = "chat.completion.chunk"
self.created: int = created
self.model: str = None
self.provider: str = None
self.choices = [ChatCompletionDeltaChoice(ChatCompletionDelta(content), finish_reason)]
def to_json(self):
return {
**self.__dict__,
"choices": [choice.to_json() for choice in self.choices]
}
class ChatCompletionMessage(Model):
def __init__(self, content: str, finish_reason: str):
def __init__(self, content: Union[str, None]):
self.role = "assistant"
self.content = content
self.finish_reason = finish_reason
def to_json(self):
return self.__dict__
class ChatCompletionChoice(Model):
def __init__(self, message: ChatCompletionMessage):
def __init__(self, message: ChatCompletionMessage, finish_reason: str):
self.index = 0
self.message = message
self.finish_reason = finish_reason
def to_json(self):
return {
**self.__dict__,
"message": self.message.to_json()
}
class ChatCompletionDelta(Model):
def __init__(self, content: str, finish_reason: str):
self.content = content
self.finish_reason = finish_reason
content: Union[str, None] = None
def __init__(self, content: Union[str, None]):
if content is not None:
self.content = content
def to_json(self):
return self.__dict__
class ChatCompletionDeltaChoice(Model):
def __init__(self, delta: ChatCompletionDelta):
def __init__(self, delta: ChatCompletionDelta, finish_reason: Union[str, None]):
self.delta = delta
self.finish_reason = finish_reason
def to_json(self):
return {
**self.__dict__,
"delta": self.delta.to_json()
}
class Image(Model):
url: str
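The new `to_json()` helpers make the stubs directly serializable for the API layer. A quick sketch; field values are illustrative and the module path is assumed from the relative import in `client.py` above:
```python
from g4f.stubs import ChatCompletion  # path assumed from client.py's import

completion = ChatCompletion("Hi!", "stop", completion_id="abc123", created=0)
data = completion.to_json()
print(data["id"])                                # chatcmpl-abc123
print(data["choices"][0]["message"]["content"])  # Hi!
print(data["choices"][0]["finish_reason"])       # stop
```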

@ -7,6 +7,9 @@ from importlib.metadata import version as get_package_version, PackageNotFoundEr
from subprocess import check_output, CalledProcessError, PIPE
from .errors import VersionNotFoundError
PACKAGE_NAME = "g4f"
GITHUB_REPOSITORY = "xtekky/gpt4free"
def get_pypi_version(package_name: str) -> str:
"""
Retrieves the latest version of a package from PyPI.
@ -45,25 +48,6 @@ def get_github_version(repo: str) -> str:
except requests.RequestException as e:
raise VersionNotFoundError(f"Failed to get GitHub release version: {e}")
def get_latest_version() -> str:
"""
Retrieves the latest release version of the 'g4f' package from PyPI or GitHub.
Returns:
str: The latest release version of 'g4f'.
Note:
The function first tries to fetch the version from PyPI. If the package is not found,
it retrieves the version from the GitHub repository.
"""
try:
# Is installed via package manager?
get_package_version("g4f")
return get_pypi_version("g4f")
except PackageNotFoundError:
# Else use Github version:
return get_github_version("xtekky/gpt4free")
class VersionUtils:
"""
Utility class for managing and comparing package versions of 'g4f'.
@ -82,7 +66,7 @@ class VersionUtils:
"""
# Read from package manager
try:
return get_package_version("g4f")
return get_package_version(PACKAGE_NAME)
except PackageNotFoundError:
pass
@ -108,7 +92,12 @@ class VersionUtils:
Returns:
str: The latest version of 'g4f'.
"""
return get_latest_version()
# Is installed via package manager?
try:
get_package_version(PACKAGE_NAME)
except PackageNotFoundError:
return get_github_version(GITHUB_REPOSITORY)
return get_pypi_version(PACKAGE_NAME)
def check_version(self) -> None:
"""

@ -1,11 +1,11 @@
requests
pycryptodome
curl_cffi>=0.5.10
curl_cffi>=0.6.0b9
aiohttp
certifi
browser_cookie3
PyExecJS
duckduckgo-search
duckduckgo-search>=4.4.3
nest_asyncio
werkzeug
loguru

@ -21,7 +21,7 @@ EXTRA_REQUIRE = {
"py-arkose-generator", # openai
"browser_cookie3", # get_cookies
"PyExecJS", # GptForLove
"duckduckgo-search", # internet.search
"duckduckgo-search>=4.4.3",# internet.search
"beautifulsoup4", # internet.search and bing.create_images
"brotli", # openai
"platformdirs", # webdriver
@ -56,7 +56,7 @@ EXTRA_REQUIRE = {
"gui": [
"werkzeug", "flask",
"beautifulsoup4", "pillow",
"duckduckgo-search",
"duckduckgo-search>=4.4.3",
"browser_cookie3"
]
}
