New minimum requirements (#1515)

* New minimum requirements
* Add ConversationStyleOptionSets to Bing
* Add image.ImageRequest
* Improve python version support
* Improve unittests
H Lohaus 5 months ago committed by GitHub
parent 71d71b6512
commit feb83c168b

@ -15,10 +15,19 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
- name: Set up Python 3.8
uses: actions/setup-python@v4
with:
python-version: "3.x"
python-version: "3.8"
cache: 'pip'
- name: Install min requirements
run: pip install -r requirements-min.txt
- name: Run tests
run: python -m etc.unittest
- name: Set up Python 3.11
uses: actions/setup-python@v4
with:
python-version: "3.11"
cache: 'pip'
- name: Install requirements
run: pip install -r requirements.txt

@ -100,7 +100,7 @@ or set the api base in your client to: [http://localhost:1337/v1](http://localho
##### Install using pypi:
```
pip install -U g4f
pip install -U "g4f[all]"
```
##### or:
@ -134,13 +134,19 @@ python3 -m venv venv
```
source venv/bin/activate
```
5. Install the required Python packages from `requirements.txt`:
5. Install minimum requirements:
```
pip install -r requirements-min.txt
```
6. Or install the full set of Python packages from `requirements.txt`:
```
pip install -r requirements.txt
```
6. Create a `test.py` file in the root folder and start using the repo; further instructions are below
7. Create a `test.py` file in the root folder and start using the repo; further instructions are below
```py
import g4f
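# (Hedged continuation; the original README snippet is cut off here by the diff view.)
# A minimal chat completion call -- the model name and message shape below are
# assumptions based on the g4f API, not part of this commit:
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)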

@ -1,6 +1,10 @@
from .include import DEFAULT_MESSAGES
import asyncio
import nest_asyncio
try:
import nest_asyncio
has_nest_asyncio = True
except:
has_nest_asyncio = False
import unittest
import g4f
from g4f import ChatCompletion
@ -39,6 +43,8 @@ class TestChatCompletionAsync(unittest.IsolatedAsyncioTestCase):
class TestChatCompletionNestAsync(unittest.IsolatedAsyncioTestCase):
def setUp(self) -> None:
if not has_nest_asyncio:
self.skipTest('"nest_asyncio" not installed')
nest_asyncio.apply()
async def test_create(self):
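
This optional-import guard is the pattern the commit applies across the test suite: import the extra inside try/except and skip the test when it is missing. A minimal, self-contained sketch of the pattern (class and test names are illustrative):
```py
import unittest

try:
    import nest_asyncio   # optional extra
    has_nest_asyncio = True
except ImportError:
    has_nest_asyncio = False

class ExampleNestAsyncTest(unittest.IsolatedAsyncioTestCase):
    def setUp(self) -> None:
        # Skip instead of erroring when the optional package is absent.
        if not has_nest_asyncio:
            self.skipTest('"nest_asyncio" not installed')
        nest_asyncio.apply()

    async def test_noop(self):
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()
```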

@ -3,11 +3,17 @@ import unittest
from unittest.mock import MagicMock
from .mocks import ProviderMock
import g4f
from g4f.gui.server.backend import Backend_Api, get_error_message
try:
from g4f.gui.server.backend import Backend_Api, get_error_message
has_requirements = True
except:
has_requirements = False
class TestBackendApi(unittest.TestCase):
def setUp(self):
if not has_requirements:
self.skipTest('"flask" not installed')
self.app = MagicMock()
self.api = Backend_Api(self.app)
@ -28,6 +34,10 @@ class TestBackendApi(unittest.TestCase):
class TestUtilityFunctions(unittest.TestCase):
def setUp(self):
if not has_requirements:
self.skipTest('"flask" not installed')
def test_get_error_message(self):
g4f.debug.last_provider = ProviderMock
exception = Exception("Message")

@ -9,7 +9,7 @@ from urllib import parse
from aiohttp import ClientSession, ClientTimeout, BaseConnector
from ..typing import AsyncResult, Messages, ImageType
from ..image import ImageResponse
from ..image import ImageResponse, ImageRequest
from .base_provider import AsyncGeneratorProvider
from .helper import get_connector
from .bing.upload_image import upload_image
@ -154,6 +154,11 @@ class Defaults:
'SRCHHPGUSR' : f'HV={int(time.time())}',
}
class ConversationStyleOptionSets():
CREATIVE = ["h3imaginative", "clgalileo", "gencontentv3"]
BALANCED = ["galileo"]
PRECISE = ["h3precise", "clgalileo"]
def format_message(msg: dict) -> str:
"""
Formats a message dictionary into a JSON string with a delimiter.
@ -168,7 +173,7 @@ def create_message(
prompt: str,
tone: str,
context: str = None,
image_response: ImageResponse = None,
image_request: ImageRequest = None,
web_search: bool = False,
gpt4_turbo: bool = False
) -> str:
@ -179,7 +184,7 @@ def create_message(
:param prompt: The user's input prompt.
:param tone: The desired tone for the response.
:param context: Additional context for the prompt.
:param image_response: The response if an image is involved.
:param image_request: The image request with the uploaded image's URL.
:param web_search: Flag to enable web search.
:param gpt4_turbo: Flag to enable GPT-4 Turbo.
:return: A formatted string message for the Bing API.
@ -187,11 +192,11 @@ def create_message(
options_sets = Defaults.optionsSets
# Append tone-specific options
if tone == Tones.creative:
options_sets.append("h3imaginative")
options_sets.extend(ConversationStyleOptionSets.CREATIVE)
elif tone == Tones.precise:
options_sets.append("h3precise")
options_sets.extend(ConversationStyleOptionSets.PRECISE)
elif tone == Tones.balanced:
options_sets.append("galileo")
options_sets.extend(ConversationStyleOptionSets.BALANCED)
else:
options_sets.append("harmonyv3")
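
Each tone now extends the option set with a named group instead of a single appended flag. A hedged sketch of the resulting mapping (the tone strings and base options are assumptions; only the option lists come from the diff):
```py
CONVERSATION_STYLES = {
    "Creative": ["h3imaginative", "clgalileo", "gencontentv3"],
    "Balanced": ["galileo"],
    "Precise": ["h3precise", "clgalileo"],
}

def build_options(tone: str, base_options: list) -> list:
    # Unknown tones fall back to the generic "harmonyv3" flag, as in the diff.
    options = list(base_options)
    options.extend(CONVERSATION_STYLES.get(tone, ["harmonyv3"]))
    return options

print(build_options("Creative", []))
```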
@ -233,9 +238,9 @@ def create_message(
'type': 4
}
if image_response and image_response.get('imageUrl') and image_response.get('originalImageUrl'):
struct['arguments'][0]['message']['originalImageUrl'] = image_response.get('originalImageUrl')
struct['arguments'][0]['message']['imageUrl'] = image_response.get('imageUrl')
if image_request and image_request.get('imageUrl') and image_request.get('originalImageUrl'):
struct['arguments'][0]['message']['originalImageUrl'] = image_request.get('originalImageUrl')
struct['arguments'][0]['message']['imageUrl'] = image_request.get('imageUrl')
struct['arguments'][0]['experienceType'] = None
struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None}
@ -282,9 +287,9 @@ async def stream_generate(
timeout=ClientTimeout(total=timeout), headers=headers, connector=connector
) as session:
conversation = await create_conversation(session)
image_response = await upload_image(session, image, tone) if image else None
if image_response:
yield image_response
image_request = await upload_image(session, image, tone) if image else None
if image_request:
yield image_request
try:
async with session.ws_connect(
@ -294,7 +299,7 @@ async def stream_generate(
) as wss:
await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
await wss.receive(timeout=timeout)
await wss.send_str(create_message(conversation, prompt, tone, context, image_response, web_search, gpt4_turbo))
await wss.send_str(create_message(conversation, prompt, tone, context, image_request, web_search, gpt4_turbo))
response_txt = ''
returned_text = ''

@ -13,11 +13,12 @@ class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True
default_model = 'meta-llama/Llama-2-70b-chat-hf'
@staticmethod
def get_models():
url = 'https://api.deepinfra.com/models/featured'
models = requests.get(url).json()
return [model['model_name'] for model in models]
@classmethod
def get_models(cls):
if not cls.models:
url = 'https://api.deepinfra.com/models/featured'
cls.models = requests.get(url).json()
return cls.models
@classmethod
async def create_async_generator(
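
get_models is now a classmethod that fetches the featured list once and caches it on the class, so repeated model lookups avoid extra HTTP requests. A hedged sketch of the same caching idea (class name is illustrative; the endpoint is taken from the diff):
```py
import requests

class FeaturedModels:
    models: list = []

    @classmethod
    def get_models(cls) -> list:
        # Only hit the API on the first call; later calls return the cached list.
        if not cls.models:
            url = "https://api.deepinfra.com/models/featured"
            cls.models = requests.get(url).json()
        return cls.models

# First call fetches, second call reuses the cache.
print(len(FeaturedModels.get_models()) == len(FeaturedModels.get_models()))
```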

@ -1,11 +1,18 @@
from __future__ import annotations
from aiohttp import ClientSession
import execjs, os, json
import os
import json
try:
import execjs
has_requirements = True
except ImportError:
has_requirements = False
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
from ..errors import MissingRequirementsError
class GptForLove(AsyncGeneratorProvider):
url = "https://ai18.gptforlove.com"
@ -20,6 +27,8 @@ class GptForLove(AsyncGeneratorProvider):
proxy: str = None,
**kwargs
) -> AsyncResult:
if not has_requirements:
raise MissingRequirementsError('Install "PyExecJS" package')
if not model:
model = "gpt-3.5-turbo"
headers = {

@ -39,7 +39,7 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
) -> AsyncResult:
if not cookies:
cookies = get_cookies(".huggingface.co")
cookies = get_cookies(".huggingface.co", False)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',

@ -14,12 +14,12 @@ WS_URL = "wss://labs-api.perplexity.ai/socket.io/"
class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://labs.perplexity.ai"
working = True
default_model = 'pplx-70b-online'
models = [
'pplx-7b-online', 'pplx-70b-online', 'pplx-7b-chat', 'pplx-70b-chat', 'mistral-7b-instruct',
'codellama-34b-instruct', 'llama-2-70b-chat', 'llava-7b-chat', 'mixtral-8x7b-instruct',
'mistral-medium', 'related'
]
default_model = 'pplx-70b-online'
model_aliases = {
"mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
"meta-llama/Llama-2-70b-chat-hf": "llama-2-70b-chat",
@ -52,8 +52,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
t = format(random.getrandbits(32), '08x')
async with session.get(
f"{API_URL}?EIO=4&transport=polling&t={t}",
proxy=proxy
f"{API_URL}?EIO=4&transport=polling&t={t}"
) as response:
text = await response.text()
@ -61,8 +60,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
post_data = '40{"jwt":"anonymous-ask-user"}'
async with session.post(
f'{API_URL}?EIO=4&transport=polling&t={t}&sid={sid}',
data=post_data,
proxy=proxy
data=post_data
) as response:
assert await response.text() == 'OK'

@ -9,7 +9,6 @@ from ..requests import StreamSession
class Phind(AsyncGeneratorProvider):
url = "https://www.phind.com"
working = True
supports_gpt_4 = True
supports_stream = True
supports_message_history = True

@ -1,10 +1,16 @@
from __future__ import annotations
import json, base64, requests, execjs, random, uuid
import json, base64, requests, random, uuid
try:
import execjs
has_requirements = True
except ImportError:
has_requirements = False
from ..typing import Messages, TypedDict, CreateResult, Any
from .base_provider import AbstractProvider
from ..debug import logging
from ..errors import MissingRequirementsError
class Vercel(AbstractProvider):
url = 'https://sdk.vercel.ai'
@ -21,10 +27,11 @@ class Vercel(AbstractProvider):
proxy: str = None,
**kwargs
) -> CreateResult:
if not has_requirements:
raise MissingRequirementsError('Install "PyExecJS" package')
if not model:
model = "gpt-3.5-turbo"
elif model not in model_info:
raise ValueError(f"Vercel does not support {model}")

@ -1,4 +1,5 @@
from __future__ import annotations
import sys
import asyncio
from asyncio import AbstractEventLoop

@ -1,3 +1,5 @@
from __future__ import annotations
from aiohttp import ClientSession
class Conversation:

@ -2,21 +2,28 @@
This module provides functionalities for creating and managing images using Bing's service.
It includes functions for user login, session creation, image creation, and processing.
"""
from __future__ import annotations
import asyncio
import time
import json
import os
from aiohttp import ClientSession, BaseConnector
from bs4 import BeautifulSoup
from urllib.parse import quote
from typing import Generator, List, Dict
try:
from bs4 import BeautifulSoup
has_requirements = True
except ImportError:
has_requirements = False
from ..create_images import CreateImagesProvider
from ..helper import get_cookies, get_connector
from ...webdriver import WebDriver, get_driver_cookies, get_browser
from ...base_provider import ProviderType
from ...image import ImageResponse
from ...errors import MissingRequirementsError, MissingAccessToken
BING_URL = "https://www.bing.com"
TIMEOUT_LOGIN = 1200
@ -97,6 +104,8 @@ async def create_images(session: ClientSession, prompt: str, proxy: str = None,
Raises:
RuntimeError: If image creation fails or times out.
"""
if not has_requirements:
raise MissingRequirementsError('Install "beautifulsoup4" package')
url_encoded_prompt = quote(prompt)
payload = f"q={url_encoded_prompt}&rt=4&FORM=GENCRE"
url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=4&FORM=GENCRE"
@ -193,7 +202,11 @@ class CreateImagesBing:
Yields:
Generator[str, None, None]: The final output as markdown formatted string with images.
"""
cookies = self.cookies or get_cookies(".bing.com")
try:
cookies = self.cookies or get_cookies(".bing.com")
except MissingRequirementsError as e:
raise MissingAccessToken(f'Missing "_U" cookie. {e}')
if "_U" not in cookies:
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
@ -211,9 +224,12 @@ class CreateImagesBing:
Returns:
str: Markdown formatted string with images.
"""
cookies = self.cookies or get_cookies(".bing.com")
try:
cookies = self.cookies or get_cookies(".bing.com")
except MissingRequirementsError as e:
raise MissingAccessToken(f'Missing "_U" cookie. {e}')
if "_U" not in cookies:
raise RuntimeError('"_U" cookie is missing')
raise MissingAccessToken('Missing "_U" cookie')
proxy = os.environ.get("G4F_PROXY")
async with create_session(cookies, proxy) as session:
images = await create_images(session, prompt, self.proxy)

@ -1,17 +1,14 @@
"""
Module to handle image uploading and processing for Bing AI integrations.
"""
from __future__ import annotations
import string
import random
import json
import math
from aiohttp import ClientSession
from PIL import Image
from aiohttp import ClientSession, FormData
from ...typing import ImageType, Tuple
from ...image import to_image, process_image, to_base64, ImageResponse
from ...image import to_image, process_image, to_base64_jpg, ImageRequest, Image
IMAGE_CONFIG = {
"maxImagePixels": 360000,
@ -24,7 +21,7 @@ async def upload_image(
image_data: ImageType,
tone: str,
proxy: str = None
) -> ImageResponse:
) -> ImageRequest:
"""
Uploads an image to Bing's AI service and returns the image response.
@ -38,22 +35,22 @@ async def upload_image(
RuntimeError: If the image upload fails.
Returns:
ImageResponse: The response from the image upload.
ImageRequest: The response from the image upload.
"""
image = to_image(image_data)
new_width, new_height = calculate_new_dimensions(image)
processed_img = process_image(image, new_width, new_height)
img_binary_data = to_base64(processed_img, IMAGE_CONFIG['imageCompressionRate'])
image = process_image(image, new_width, new_height)
img_binary_data = to_base64_jpg(image, IMAGE_CONFIG['imageCompressionRate'])
data, boundary = build_image_upload_payload(img_binary_data, tone)
headers = prepare_headers(session, boundary)
data = build_image_upload_payload(img_binary_data, tone)
headers = prepare_headers(session)
async with session.post("https://www.bing.com/images/kblob", data=data, headers=headers, proxy=proxy) as response:
if response.status != 200:
raise RuntimeError("Failed to upload image.")
return parse_image_response(await response.json())
def calculate_new_dimensions(image: Image.Image) -> Tuple[int, int]:
def calculate_new_dimensions(image: Image) -> Tuple[int, int]:
"""
Calculates the new dimensions for the image based on the maximum allowed pixels.
@ -70,7 +67,7 @@ def calculate_new_dimensions(image: Image.Image) -> Tuple[int, int]:
return int(width * scale_factor), int(height * scale_factor)
return width, height
def build_image_upload_payload(image_bin: str, tone: str) -> Tuple[str, str]:
def build_image_upload_payload(image_bin: str, tone: str) -> FormData:
"""
Builds the payload for image uploading.
@ -81,18 +78,11 @@ def build_image_upload_payload(image_bin: str, tone: str) -> Tuple[str, str]:
Returns:
FormData: The multipart form data for the upload request.
"""
boundary = "----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
data = f"""--{boundary}
Content-Disposition: form-data; name="knowledgeRequest"
{json.dumps(build_knowledge_request(tone), ensure_ascii=False)}
--{boundary}
Content-Disposition: form-data; name="imageBase64"
{image_bin}
--{boundary}--
"""
return data, boundary
data = FormData()
knowledge_request = json.dumps(build_knowledge_request(tone), ensure_ascii=False)
data.add_field('knowledgeRequest', knowledge_request, content_type="application/json")
data.add_field('imageBase64', image_bin)
return data
def build_knowledge_request(tone: str) -> dict:
"""
@ -119,7 +109,7 @@ def build_knowledge_request(tone: str) -> dict:
}
}
def prepare_headers(session: ClientSession, boundary: str) -> dict:
def prepare_headers(session: ClientSession) -> dict:
"""
Prepares the headers for the image upload request.
@ -131,12 +121,11 @@ def prepare_headers(session: ClientSession, boundary: str) -> dict:
dict: The headers for the request.
"""
headers = session.headers.copy()
headers["Content-Type"] = f'multipart/form-data; boundary={boundary}'
headers["Referer"] = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx'
headers["Origin"] = 'https://www.bing.com'
return headers
def parse_image_response(response: dict) -> ImageResponse:
def parse_image_response(response: dict) -> ImageRequest:
"""
Parses the response from the image upload.
@ -147,7 +136,7 @@ def parse_image_response(response: dict) -> ImageResponse:
RuntimeError: If parsing the image info fails.
Returns:
ImageResponse: The parsed image response.
ImageRequest: The parsed image response.
"""
if not response.get('blobId'):
raise RuntimeError("Failed to parse image info.")
@ -160,4 +149,4 @@ def parse_image_response(response: dict) -> ImageResponse:
if IMAGE_CONFIG["enableFaceBlurDebug"] else
f"https://www.bing.com/images/blob?bcid={result['bcid']}"
)
return ImageResponse(result["imageUrl"], "", result)
return ImageRequest(result["imageUrl"], "", result)
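
Switching to aiohttp's `FormData` removes the hand-built multipart body above: aiohttp generates the boundary and the multipart Content-Type itself, which is why `prepare_headers` no longer receives a boundary. A hedged, self-contained sketch of the same upload shape (the URL and payload below are placeholders, not Bing's real endpoint values):
```py
import asyncio
import json
from aiohttp import ClientSession, FormData

async def post_image_form(url: str, image_b64: str, knowledge_request: dict) -> int:
    data = FormData()
    # aiohttp sets the multipart boundary and Content-Type header automatically.
    data.add_field("knowledgeRequest",
                   json.dumps(knowledge_request, ensure_ascii=False),
                   content_type="application/json")
    data.add_field("imageBase64", image_b64)
    async with ClientSession() as session:
        async with session.post(url, data=data) as response:
            return response.status

# Hypothetical call:
# asyncio.run(post_image_form("https://httpbin.org/post", "aGVsbG8=", {"imageInfo": {}}))
```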

@ -1,7 +1,7 @@
from __future__ import annotations
from ...typing import Messages
from curl_cffi.requests import AsyncSession
from ...requests import StreamSession
from ..base_provider import AsyncProvider, format_prompt
@ -19,7 +19,7 @@ class ChatgptDuo(AsyncProvider):
timeout: int = 120,
**kwargs
) -> str:
async with AsyncSession(
async with StreamSession(
impersonate="chrome107",
proxies={"https": proxy},
timeout=timeout

@ -5,10 +5,10 @@ import os
import uuid
import requests
try:
from Crypto.Cipher import AES
except ImportError:
from Cryptodome.Cipher import AES
# try:
# from Crypto.Cipher import AES
# except ImportError:
# from Cryptodome.Cipher import AES
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
@ -57,19 +57,21 @@ class GetGpt(AbstractProvider):
def _encrypt(e: str):
t = os.urandom(8).hex().encode('utf-8')
n = os.urandom(8).hex().encode('utf-8')
r = e.encode('utf-8')
# t = os.urandom(8).hex().encode('utf-8')
# n = os.urandom(8).hex().encode('utf-8')
# r = e.encode('utf-8')
cipher = AES.new(t, AES.MODE_CBC, n)
ciphertext = cipher.encrypt(_pad_data(r))
# cipher = AES.new(t, AES.MODE_CBC, n)
# ciphertext = cipher.encrypt(_pad_data(r))
return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
# return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
return
def _pad_data(data: bytes) -> bytes:
block_size = AES.block_size
padding_size = block_size - len(data) % block_size
padding = bytes([padding_size] * padding_size)
# block_size = AES.block_size
# padding_size = block_size - len(data) % block_size
# padding = bytes([padding_size] * padding_size)
return data + padding
# return data + padding
return

@ -1,57 +1,37 @@
from __future__ import annotations
import asyncio
import os
import random
import secrets
import string
from asyncio import AbstractEventLoop, BaseEventLoop
from aiohttp import BaseConnector
from platformdirs import user_config_dir
from browser_cookie3 import (
chrome, chromium, opera, opera_gx,
brave, edge, vivaldi, firefox,
_LinuxPasswordManager, BrowserCookieError
)
try:
from platformdirs import user_config_dir
has_platformdirs = True
except ImportError:
has_platformdirs = False
try:
from browser_cookie3 import (
chrome, chromium, opera, opera_gx,
brave, edge, vivaldi, firefox,
_LinuxPasswordManager, BrowserCookieError
)
has_browser_cookie3 = True
except ImportError:
has_browser_cookie3 = False
from ..typing import Dict, Messages, Optional
from ..errors import AiohttpSocksError
from ..errors import AiohttpSocksError, MissingRequirementsError
from .. import debug
# Global variable to store cookies
_cookies: Dict[str, Dict[str, str]] = {}
def get_event_loop() -> AbstractEventLoop:
"""
Get the current asyncio event loop. If the loop is closed or not set, create a new event loop.
If a loop is running, handle nested event loops. Patch the loop if 'nest_asyncio' is installed.
Returns:
AbstractEventLoop: The current or new event loop.
"""
try:
loop = asyncio.get_event_loop()
if isinstance(loop, BaseEventLoop):
loop._check_closed()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
asyncio.get_running_loop()
if not hasattr(loop.__class__, "_nest_patched"):
import nest_asyncio
nest_asyncio.apply(loop)
except RuntimeError:
pass
except ImportError:
raise RuntimeError(
'Use "create_async" instead of "create" function in a running event loop. Or install "nest_asyncio" package.'
)
return loop
if os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
_LinuxPasswordManager.get_password = lambda a, b: b"secret"
def get_cookies(domain_name: str = '') -> Dict[str, str]:
def get_cookies(domain_name: str = '', raise_requirements_error: bool = True) -> Dict[str, str]:
"""
Load cookies for a given domain from all supported browsers and cache the results.
@ -64,11 +44,11 @@ def get_cookies(domain_name: str = '') -> Dict[str, str]:
if domain_name in _cookies:
return _cookies[domain_name]
cookies = _load_cookies_from_browsers(domain_name)
cookies = load_cookies_from_browsers(domain_name, raise_requirements_error)
_cookies[domain_name] = cookies
return cookies
def _load_cookies_from_browsers(domain_name: str) -> Dict[str, str]:
def load_cookies_from_browsers(domain_name: str, raise_requirements_error: bool = True) -> Dict[str, str]:
"""
Helper function to load cookies from various browsers.
@ -78,6 +58,10 @@ def _load_cookies_from_browsers(domain_name: str) -> Dict[str, str]:
Returns:
Dict[str, str]: A dictionary of cookie names and values.
"""
if not has_browser_cookie3:
if raise_requirements_error:
raise MissingRequirementsError('Install "browser_cookie3" package')
return {}
cookies = {}
for cookie_fn in [_g4f, chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox]:
try:
@ -104,6 +88,8 @@ def _g4f(domain_name: str) -> list:
Returns:
list: List of cookies.
"""
if not has_platformdirs:
return []
user_data_dir = user_config_dir("g4f")
cookie_file = os.path.join(user_data_dir, "Default", "Cookies")
return [] if not os.path.exists(cookie_file) else chrome(cookie_file, domain_name)
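
get_cookies now takes a raise_requirements_error flag, so callers that can proceed without browser cookies (as HuggingChat and OpenaiChat do elsewhere in this diff) pass False and receive an empty dict instead of a MissingRequirementsError. A hedged usage sketch (import path as used elsewhere in the diff):
```py
from g4f.Provider.helper import get_cookies
from g4f.errors import MissingRequirementsError

# Tolerant call: returns {} when "browser_cookie3" is not installed.
cookies = get_cookies(".bing.com", False)

# Strict call: raises when the optional package is missing.
try:
    cookies = get_cookies(".bing.com")
except MissingRequirementsError as error:
    print(error)
```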

@ -2,10 +2,14 @@ from __future__ import annotations
import time
import os
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
try:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
except ImportError:
pass
from ...typing import CreateResult, Messages
from ..base_provider import AbstractProvider

@ -1,21 +1,32 @@
from __future__ import annotations
import asyncio
import uuid
import json
import os
from py_arkose_generator.arkose import get_values_for_request
from async_property import async_cached_property
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
try:
from py_arkose_generator.arkose import get_values_for_request
from async_property import async_cached_property
has_requirements = True
except ImportError:
async_cached_property = property
has_requirements = False
try:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
has_webdriver = True
except ImportError:
has_webdriver = False
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_cookies
from ...webdriver import get_browser, get_driver_cookies
from ...typing import AsyncResult, Messages
from ...typing import AsyncResult, Messages, Cookies, ImageType
from ...requests import StreamSession
from ...image import to_image, to_bytes, ImageType, ImageResponse
from ...image import to_image, to_bytes, ImageResponse, ImageRequest
from ...errors import MissingRequirementsError, MissingAccessToken
class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
@ -27,12 +38,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = None
models = ["text-davinci-002-render-sha", "gpt-4", "gpt-4-gizmo"]
model_aliases = {
"gpt-3.5-turbo": "text-davinci-002-render-sha",
}
models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo"]
_cookies: dict = {}
_default_model: str = None
@classmethod
async def create(
@ -94,7 +101,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
session: StreamSession,
headers: dict,
image: ImageType
) -> ImageResponse:
) -> ImageRequest:
"""
Upload an image to the service and get the download URL
@ -104,7 +111,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
image: The image to upload, either a PIL Image object or a bytes object
Returns:
An ImageResponse object that contains the download URL, file name, and other data
An ImageRequest object that contains the download URL, file name, and other data
"""
# Convert the image to a PIL Image object and get the extension
image = to_image(image)
@ -145,7 +152,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
) as response:
response.raise_for_status()
download_url = (await response.json())["download_url"]
return ImageResponse(download_url, image_data["file_name"], image_data)
return ImageRequest(download_url, image_data["file_name"], image_data)
@classmethod
async def get_default_model(cls, session: StreamSession, headers: dict):
@ -169,7 +176,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
return cls.default_model
@classmethod
def create_messages(cls, prompt: str, image_response: ImageResponse = None):
def create_messages(cls, prompt: str, image_response: ImageRequest = None):
"""
Create a list of messages for the user input
@ -282,7 +289,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
proxy: str = None,
timeout: int = 120,
access_token: str = None,
cookies: dict = None,
cookies: Cookies = None,
auto_continue: bool = False,
history_disabled: bool = True,
action: str = "next",
@ -317,12 +324,16 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
Raises:
RuntimeError: If an error occurs during processing.
"""
if not has_requirements:
raise MissingRequirementsError('Install "py-arkose-generator" and "async_property" package')
if not parent_id:
parent_id = str(uuid.uuid4())
if not cookies:
cookies = cls._cookies or get_cookies("chat.openai.com")
cookies = cls._cookies or get_cookies("chat.openai.com", False)
if not access_token and "access_token" in cookies:
access_token = cookies["access_token"]
if not access_token and not has_webdriver:
raise MissingAccessToken(f'Missing "access_token"')
if not access_token:
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
@ -331,7 +342,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
cls._cookies = cookies
headers = {"Authorization": f"Bearer {access_token}"}
async with StreamSession(
proxies={"https": proxy},
impersonate="chrome110",
@ -346,13 +356,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
except Exception as e:
yield e
end_turn = EndTurn()
model = cls.get_model(model or await cls.get_default_model(session, headers))
model = "text-davinci-002-render-sha" if model == "gpt-3.5-turbo" else model
while not end_turn.is_end:
data = {
"action": action,
"arkose_token": await cls.get_arkose_token(session),
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": cls.get_model(model or await cls.get_default_model(session, headers)),
"model": model,
"history_and_training_disabled": history_disabled and not auto_continue,
}
if action != "continue":
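
With the webdriver extras now optional, OpenaiChat expects an access token up front (either as an argument or from cached cookies) and raises MissingAccessToken otherwise. A hedged usage sketch; the token value is a placeholder and the keyword pass-through is assumed from the g4f API:
```py
import g4f
from g4f.Provider import OpenaiChat

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=OpenaiChat,
    messages=[{"role": "user", "content": "Hello"}],
    access_token="<chatgpt-web-session-token>",   # placeholder, not a real token
)
print(response)
```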

@ -1,10 +1,14 @@
from __future__ import annotations
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
try:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
except ImportError:
pass
from ...typing import CreateResult, Messages
from ..base_provider import AbstractProvider

@ -1,3 +1,5 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Union, List, Dict, Type
from .typing import Messages, CreateResult

@ -4,7 +4,6 @@ from enum import Enum
import g4f
from g4f import Provider
from g4f.api import Api
from g4f.gui.run import gui_parser, run_gui_args
def run_gui(args):
@ -23,6 +22,7 @@ def main():
args = parser.parse_args()
if args.mode == "api":
from g4f.api import Api
controller=Api(engine=g4f, debug=args.debug, list_ignored_providers=args.ignored_providers)
controller.run(args.bind)
elif args.mode == "gui":

@ -31,5 +31,11 @@ class NestAsyncioError(Exception):
class ModelNotSupportedError(Exception):
pass
class AiohttpSocksError(Exception):
class MissingRequirementsError(Exception):
pass
class AiohttpSocksError(MissingRequirementsError):
pass
class MissingAccessToken(Exception):
pass
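
MissingRequirementsError and MissingAccessToken back the optional-dependency handling throughout this commit; AiohttpSocksError now subclasses the former so existing handlers keep working. A hedged sketch of the intended call sites (the guard functions are illustrative; the messages mirror the diff):
```py
from g4f.errors import MissingRequirementsError, MissingAccessToken

try:
    import execjs                      # optional extra used by GptForLove and Vercel
    has_execjs = True
except ImportError:
    has_execjs = False

def require_execjs() -> None:
    if not has_execjs:
        raise MissingRequirementsError('Install "PyExecJS" package')

def require_bing_cookie(cookies: dict) -> None:
    if "_U" not in cookies:
        raise MissingAccessToken('Missing "_U" cookie')
```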

@ -1,6 +1,10 @@
from .server.app import app
from .server.website import Website
from .server.backend import Backend_Api
try:
from .server.app import app
from .server.website import Website
from .server.backend import Backend_Api
except ImportError:
from g4f.errors import MissingRequirementsError
raise MissingRequirementsError('Install "flask" and "werkzeug" package for gui')
def run_gui(host: str = '0.0.0.0', port: int = 80, debug: bool = False) -> None:
config = {

@ -1,8 +1,5 @@
from argparse import ArgumentParser
from g4f.gui import run_gui
def gui_parser():
parser = ArgumentParser(description="Run the GUI")
parser.add_argument("-host", type=str, default="0.0.0.0", help="hostname")
@ -10,15 +7,14 @@ def gui_parser():
parser.add_argument("-debug", action="store_true", help="debug mode")
return parser
def run_gui_args(args):
from g4f.gui import run_gui
host = args.host
port = args.port
debug = args.debug
run_gui(host, port, debug)
if __name__ == "__main__":
parser = gui_parser()
args = parser.parse_args()
run_gui_args(args)
run_gui_args(args)

@ -1,8 +1,14 @@
from __future__ import annotations
from bs4 import BeautifulSoup
from aiohttp import ClientSession, ClientTimeout
from duckduckgo_search import DDGS
try:
from duckduckgo_search import DDGS
from bs4 import BeautifulSoup
has_requirements = True
except ImportError:
has_requirements = False
from ...errors import MissingRequirementsError
import asyncio
class SearchResults():
@ -88,6 +94,8 @@ async def fetch_and_scrape(session: ClientSession, url: str, max_words: int = No
return
async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text: bool = True) -> SearchResults:
if not has_requirements:
raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package')
with DDGS() as ddgs:
results = []
for result in ddgs.text(
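
The web-search helper now degrades gracefully when its optional packages are missing. For reference, a hedged sketch of a plain duckduckgo-search query outside g4f (result keys assumed from that library's documented output):
```py
from duckduckgo_search import DDGS

with DDGS() as ddgs:
    for result in ddgs.text("gpt4free", max_results=3):
        print(result.get("title"), result.get("href"))
```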

@ -1,39 +1,52 @@
from __future__ import annotations
import re
from io import BytesIO
import base64
from .typing import ImageType, Union
from PIL import Image
try:
from PIL.Image import open as open_image, new as new_image, Image
from PIL.Image import FLIP_LEFT_RIGHT, ROTATE_180, ROTATE_270, ROTATE_90
has_requirements = True
except ImportError:
Image = type
has_requirements = False
from .errors import MissingRequirementsError
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}
def to_image(image: ImageType, is_svg: bool = False) -> Image.Image:
def to_image(image: ImageType, is_svg: bool = False) -> Image:
"""
Converts the input image to a PIL Image object.
Args:
image (Union[str, bytes, Image.Image]): The input image.
image (Union[str, bytes, Image]): The input image.
Returns:
Image.Image: The converted PIL Image object.
Image: The converted PIL Image object.
"""
if not has_requirements:
raise MissingRequirementsError('Install "pillow" package for images')
if is_svg:
try:
import cairosvg
except ImportError:
raise RuntimeError('Install "cairosvg" package for svg images')
raise MissingRequirementsError('Install "cairosvg" package for svg images')
if not isinstance(image, bytes):
image = image.read()
buffer = BytesIO()
cairosvg.svg2png(image, write_to=buffer)
return Image.open(buffer)
return open_image(buffer)
if isinstance(image, str):
is_data_uri_an_image(image)
image = extract_data_uri(image)
if isinstance(image, bytes):
is_accepted_format(image)
return Image.open(BytesIO(image))
elif not isinstance(image, Image.Image):
image = Image.open(image)
return open_image(BytesIO(image))
elif not isinstance(image, Image):
image = open_image(image)
copy = image.copy()
copy.format = image.format
return copy
@ -110,12 +123,12 @@ def extract_data_uri(data_uri: str) -> bytes:
data = base64.b64decode(data)
return data
def get_orientation(image: Image.Image) -> int:
def get_orientation(image: Image) -> int:
"""
Gets the orientation of the given image.
Args:
image (Image.Image): The image.
image (Image): The image.
Returns:
int: The orientation value.
@ -126,40 +139,40 @@ def get_orientation(image: Image.Image) -> int:
if orientation is not None:
return orientation
def process_image(img: Image.Image, new_width: int, new_height: int) -> Image.Image:
def process_image(img: Image, new_width: int, new_height: int) -> Image:
"""
Processes the given image by adjusting its orientation and resizing it.
Args:
img (Image.Image): The image to process.
img (Image): The image to process.
new_width (int): The new width of the image.
new_height (int): The new height of the image.
Returns:
Image.Image: The processed image.
Image: The processed image.
"""
# Fix orientation
orientation = get_orientation(img)
if orientation:
if orientation > 4:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
img = img.transpose(FLIP_LEFT_RIGHT)
if orientation in [3, 4]:
img = img.transpose(Image.ROTATE_180)
img = img.transpose(ROTATE_180)
if orientation in [5, 6]:
img = img.transpose(Image.ROTATE_270)
img = img.transpose(ROTATE_270)
if orientation in [7, 8]:
img = img.transpose(Image.ROTATE_90)
img = img.transpose(ROTATE_90)
# Resize image
img.thumbnail((new_width, new_height))
# Remove transparency
if img.mode != "RGB":
img.load()
white = Image.new('RGB', img.size, (255, 255, 255))
white = new_image('RGB', img.size, (255, 255, 255))
white.paste(img, mask=img.split()[3])
return white
return img
def to_base64(image: Image.Image, compression_rate: float) -> str:
def to_base64_jpg(image: Image, compression_rate: float) -> str:
"""
Converts the given image to a base64-encoded string.
@ -195,7 +208,7 @@ def format_images_markdown(images, alt: str, preview: str="{image}?w=200&h=200")
end_flag = "<!-- generated images end -->\n"
return f"\n{start_flag}{images}\n{end_flag}\n"
def to_bytes(image: Image.Image) -> bytes:
def to_bytes(image: Image) -> bytes:
"""
Converts the given image to bytes.
@ -225,4 +238,7 @@ class ImageResponse():
return format_images_markdown(self.images, self.alt)
def get(self, key: str):
return self.options.get(key)
return self.options.get(key)
class ImageRequest(ImageResponse):
pass
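
The renamed helpers keep the same flow: convert the input to a PIL image, normalize it, and wrap upload results in the new ImageRequest type, which reuses ImageResponse's interface. A hedged usage sketch (the file name and URLs are hypothetical):
```py
from g4f.image import to_image, process_image, to_bytes, ImageRequest

with open("photo.jpg", "rb") as f:          # hypothetical local file
    img = to_image(f.read())
img = process_image(img, 300, 300)          # fix orientation, resize, drop alpha
payload = to_bytes(img)

# ImageRequest carries the uploaded image's URLs for the provider to forward.
request = ImageRequest("https://www.bing.com/images/blob?bcid=example", "",
                       {"imageUrl": "https://www.bing.com/images/blob?bcid=example"})
print(request.get("imageUrl"), len(payload))
```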

@ -4,80 +4,124 @@ import json
from functools import partialmethod
from typing import AsyncGenerator
from urllib.parse import urlparse
from curl_cffi.requests import AsyncSession, Session, Response
from .webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
class StreamResponse:
"""
A wrapper class for handling asynchronous streaming responses.
Attributes:
inner (Response): The original Response object.
"""
def __init__(self, inner: Response) -> None:
"""Initialize the StreamResponse with the provided Response object."""
self.inner: Response = inner
async def text(self) -> str:
"""Asynchronously get the response text."""
return await self.inner.atext()
def raise_for_status(self) -> None:
"""Raise an HTTPError if one occurred."""
self.inner.raise_for_status()
async def json(self, **kwargs) -> dict:
"""Asynchronously parse the JSON response content."""
return json.loads(await self.inner.acontent(), **kwargs)
async def iter_lines(self) -> AsyncGenerator[bytes, None]:
"""Asynchronously iterate over the lines of the response."""
async for line in self.inner.aiter_lines():
yield line
async def iter_content(self) -> AsyncGenerator[bytes, None]:
"""Asynchronously iterate over the response content."""
async for chunk in self.inner.aiter_content():
yield chunk
async def __aenter__(self):
"""Asynchronously enter the runtime context for the response object."""
inner: Response = await self.inner
self.inner = inner
self.request = inner.request
self.status_code: int = inner.status_code
self.reason: str = inner.reason
self.ok: bool = inner.ok
self.headers = inner.headers
self.cookies = inner.cookies
return self
async def __aexit__(self, *args):
"""Asynchronously exit the runtime context for the response object."""
await self.inner.aclose()
class StreamSession(AsyncSession):
"""
An asynchronous session class for handling HTTP requests with streaming.
Inherits from AsyncSession.
"""
try:
from curl_cffi.requests import AsyncSession, Session, Response
has_curl_cffi = True
except ImportError:
Session = type
has_curl_cffi = False
def request(
self, method: str, url: str, **kwargs
) -> StreamResponse:
"""Create and return a StreamResponse object for the given HTTP request."""
return StreamResponse(super().request(method, url, stream=True, **kwargs))
# Defining HTTP methods as partial methods of the request method.
head = partialmethod(request, "HEAD")
get = partialmethod(request, "GET")
post = partialmethod(request, "POST")
put = partialmethod(request, "PUT")
patch = partialmethod(request, "PATCH")
delete = partialmethod(request, "DELETE")
from .webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
from .errors import MissingRequirementsError
if not has_curl_cffi:
from aiohttp import ClientSession, ClientResponse, ClientTimeout
from .Provider.helper import get_connector
class StreamResponse(ClientResponse):
async def iter_lines(self) -> iter[bytes, None]:
async for line in self.content:
yield line.rstrip(b"\r\n")
async def json(self):
return await super().json(content_type=None)
class StreamSession(ClientSession):
def __init__(self, headers: dict = {}, timeout: int = None, proxies: dict = {}, impersonate = None, **kwargs):
if impersonate:
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US',
'Connection': 'keep-alive',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
'Accept': '*/*',
'sec-ch-ua': '"Google Chrome";v="107", "Chromium";v="107", "Not?A_Brand";v="24"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
**headers
}
super().__init__(
**kwargs,
timeout=ClientTimeout(timeout) if timeout else None,
response_class=StreamResponse,
connector=get_connector(kwargs.get("connector"), proxies.get("https")),
headers=headers
)
else:
class StreamResponse:
"""
A wrapper class for handling asynchronous streaming responses.
Attributes:
inner (Response): The original Response object.
"""
def __init__(self, inner: Response) -> None:
"""Initialize the StreamResponse with the provided Response object."""
self.inner: Response = inner
async def text(self) -> str:
"""Asynchronously get the response text."""
return await self.inner.atext()
def raise_for_status(self) -> None:
"""Raise an HTTPError if one occurred."""
self.inner.raise_for_status()
async def json(self, **kwargs) -> dict:
"""Asynchronously parse the JSON response content."""
return json.loads(await self.inner.acontent(), **kwargs)
async def iter_lines(self) -> AsyncGenerator[bytes, None]:
"""Asynchronously iterate over the lines of the response."""
async for line in self.inner.aiter_lines():
yield line
async def iter_content(self) -> AsyncGenerator[bytes, None]:
"""Asynchronously iterate over the response content."""
async for chunk in self.inner.aiter_content():
yield chunk
async def __aenter__(self):
"""Asynchronously enter the runtime context for the response object."""
inner: Response = await self.inner
self.inner = inner
self.request = inner.request
self.status_code: int = inner.status_code
self.reason: str = inner.reason
self.ok: bool = inner.ok
self.headers = inner.headers
self.cookies = inner.cookies
return self
async def __aexit__(self, *args):
"""Asynchronously exit the runtime context for the response object."""
await self.inner.aclose()
class StreamSession(AsyncSession):
"""
An asynchronous session class for handling HTTP requests with streaming.
Inherits from AsyncSession.
"""
def request(
self, method: str, url: str, **kwargs
) -> StreamResponse:
"""Create and return a StreamResponse object for the given HTTP request."""
return StreamResponse(super().request(method, url, stream=True, **kwargs))
# Defining HTTP methods as partial methods of the request method.
head = partialmethod(request, "HEAD")
get = partialmethod(request, "GET")
post = partialmethod(request, "POST")
put = partialmethod(request, "PUT")
patch = partialmethod(request, "PATCH")
delete = partialmethod(request, "DELETE")
def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> Session:
@ -93,6 +137,8 @@ def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str =
Returns:
Session: A Session object configured with cookies and headers from the WebDriver.
"""
if not has_curl_cffi:
raise MissingRequirementsError('Install "curl_cffi" package')
with WebDriverSession(webdriver, "", proxy=proxy, virtual_display=True) as driver:
bypass_cloudflare(driver, url, timeout)
cookies = get_driver_cookies(driver)
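
requests.py now falls back to an aiohttp-based StreamSession when curl_cffi is not installed, keeping the same streaming interface either way; get_session_from_browser still needs curl_cffi and raises MissingRequirementsError without it. A hedged usage sketch that should work with either implementation:
```py
import asyncio
from g4f.requests import StreamSession

async def fetch(url: str) -> None:
    async with StreamSession(impersonate="chrome110", timeout=30) as session:
        async with session.get(url) as response:
            response.raise_for_status()
            async for line in response.iter_lines():
                print(line)

# Hypothetical target URL:
# asyncio.run(fetch("https://example.com"))
```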

@ -1,6 +1,9 @@
import sys
from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict, Type, IO, Optional
from PIL.Image import Image
try:
from PIL.Image import Image
except ImportError:
Image = type
if sys.version_info >= (3, 8):
from typing import TypedDict
@ -11,6 +14,7 @@ SHA256 = NewType('sha_256_hash', str)
CreateResult = Generator[str, None, None]
AsyncResult = AsyncGenerator[str, None]
Messages = List[Dict[str, str]]
Cookies = Dict[str, str]
ImageType = Union[str, bytes, IO, Image, None]
__all__ = [

@ -1,3 +1,5 @@
from __future__ import annotations
from os import environ
import requests
from functools import cached_property

@ -1,12 +1,20 @@
from __future__ import annotations
from platformdirs import user_config_dir
from selenium.webdriver.remote.webdriver import WebDriver
from undetected_chromedriver import Chrome, ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
try:
from platformdirs import user_config_dir
from selenium.webdriver.remote.webdriver import WebDriver
from undetected_chromedriver import Chrome, ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
has_requirements = True
except ImportError:
WebDriver = type
has_requirements = False
from os import path
from os import access, R_OK
from .errors import MissingRequirementsError
from . import debug
try:
@ -33,6 +41,8 @@ def get_browser(
Returns:
WebDriver: An instance of WebDriver configured with the specified options.
"""
if not has_requirements:
raise MissingRequirementsError('Install "undetected_chromedriver" and "platformdirs" package')
if user_data_dir is None:
user_data_dir = user_config_dir("g4f")
if user_data_dir and debug.logging:
@ -144,7 +154,7 @@ class WebDriverSession:
Returns:
WebDriver: The reopened WebDriver instance.
"""
user_data_dir = user_data_data_dir or self.user_data_dir
user_data_dir = user_data_dir or self.user_data_dir
if self.default_driver:
self.default_driver.quit()
if not virtual_display and self.virtual_display:

@ -0,0 +1,2 @@
requests
aiohttp

@ -4,7 +4,6 @@ curl_cffi>=0.5.10
aiohttp
certifi
browser_cookie3
typing-extensions
PyExecJS
duckduckgo-search
nest_asyncio
@ -16,7 +15,6 @@ fastapi
uvicorn
flask
py-arkose-generator
asyncstdlib
async-property
undetected-chromedriver
brotli

@ -8,33 +8,59 @@ here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as fh:
long_description = '\n' + fh.read()
install_requires = [
INSTALL_REQUIRE = [
"requests",
"pycryptodome",
"curl_cffi>=0.5.10",
"aiohttp",
"certifi",
"browser_cookie3",
"typing-extensions",
"PyExecJS",
"duckduckgo-search",
"nest_asyncio",
"werkzeug",
"loguru",
"pillow",
"platformdirs",
"fastapi",
"uvicorn",
"flask",
"py-arkose-generator",
"asyncstdlib",
"async-property",
"undetected-chromedriver",
"brotli",
"beautifulsoup4",
"setuptools",
]
EXTRA_REQUIRE = {
'all': [
"curl_cffi>=0.5.10",
"certifi",
"async-property", # openai
"py-arkose-generator", # openai
"browser_cookie3", # get_cookies
"PyExecJS", # GptForLove
"duckduckgo-search", # internet.search
"beautifulsoup4", # internet.search and bing.create_images
"brotli", # openai
"platformdirs", # webdriver
"undetected-chromedriver", # webdriver
"setuptools", # webdriver
"aiohttp_socks" # proxy
"pillow", # image
"cairosvg", # svg image
"werkzeug", "flask", # gui
"loguru", "fastapi",
"uvicorn", "nest_asyncio", # api
],
"image": [
"pillow",
"cairosvg",
"beautifulsoup4"
],
"webdriver": [
"platformdirs",
"undetected-chromedriver",
"setuptools"
],
"openai": [
"async-property",
"py-arkose-generator",
"brotli"
],
"api": [
"loguru", "fastapi",
"uvicorn", "nest_asyncio"
],
"gui": [
"werkzeug", "flask",
"beautifulsoup4", "pillow",
"duckduckgo-search",
"browser_cookie3"
]
}
DESCRIPTION = (
'The official gpt4free repository | various collection of powerful language models'
)
@ -53,7 +79,8 @@ setup(
'g4f': ['g4f/interference/*', 'g4f/gui/client/*', 'g4f/gui/server/*', 'g4f/Provider/npm/*']
},
include_package_data=True,
install_requires=install_requires,
install_requires=INSTALL_REQUIRE,
extras_require=EXTRA_REQUIRE,
entry_points={
'console_scripts': ['g4f=g4f.cli:main'],
},
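
With `extras_require` in place, users can install only the dependency groups they need; the extra names map one-to-one to the `EXTRA_REQUIRE` keys above, in the same style as the README's install commands:
```
pip install -U "g4f[image]"
pip install -U "g4f[webdriver]"
pip install -U "g4f[api]"
```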
