Add GeminiPro API provider

Set min version for undetected-chromedriver
Add api_key to the new client
pull/1616/head
Heiner Lohaus 4 months ago
parent 51b4aaae05
commit 51264fe20c

@ -103,7 +103,7 @@ or set the api base in your client to: [http://localhost:1337/v1](http://localho
1. [Download and install Python](https://www.python.org/downloads/) (Version 3.10+ is recommended). 1. [Download and install Python](https://www.python.org/downloads/) (Version 3.10+ is recommended).
2. [Install Google Chrome](https://www.google.com/chrome/) for providers with webdriver 2. [Install Google Chrome](https://www.google.com/chrome/) for providers with webdriver
##### Install using pypi: ##### Install using PyPI package:
``` ```
pip install -U g4f[all] pip install -U g4f[all]
@ -113,12 +113,12 @@ Or use partial requirements.
See: [/docs/requirements](/docs/requirements.md) See: [/docs/requirements](/docs/requirements.md)
##### Install from source: ##### Install from source using git:
See: [/docs/git](/docs/git.md) See: [/docs/git](/docs/git.md)
##### Install using Docker ##### Install using Docker for Developers:
See: [/docs/docker](/docs/docker.md) See: [/docs/docker](/docs/docker.md)
@ -126,7 +126,6 @@ See: [/docs/git](/docs/git.md)
## 💡 Usage ## 💡 Usage
#### Text Generation #### Text Generation
**with Python**
```python ```python
from g4f.client import Client from g4f.client import Client
@ -134,14 +133,13 @@ from g4f.client import Client
client = Client() client = Client()
response = client.chat.completions.create( response = client.chat.completions.create(
model="gpt-3.5-turbo", model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Say this is a test"}], messages=[{"role": "user", "content": "Hello"}],
... ...
) )
print(response.choices[0].message.content) print(response.choices[0].message.content)
``` ```
#### Image Generation #### Image Generation
**with Python**
```python ```python
from g4f.client import Client from g4f.client import Client
@ -154,14 +152,15 @@ response = client.images.generate(
) )
image_url = response.data[0].url image_url = response.data[0].url
``` ```
Result:
**Result:**
[![Image with cat](/docs/cat.jpeg)](/docs/client.md) [![Image with cat](/docs/cat.jpeg)](/docs/client.md)
**See also for Python:** **See also:**
- [Documentation for new Client](/docs/client.md) - Documentation for the new Client: [/docs/client](/docs/client.md)
- [Documentation for legacy API](/docs/leagcy.md) - Documentation for the legacy API: [docs/leagcy](/docs/leagcy.md)
#### Web UI #### Web UI

@ -37,12 +37,16 @@ client = Client(
) )
``` ```
You also have the option to define a proxy in the client for all outgoing requests: ## Configuration
You can set an "api_key" for your provider in client.
And you also have the option to define a proxy for all outgoing requests:
```python ```python
from g4f.client import Client from g4f.client import Client
client = Client( client = Client(
api_key="...",
proxies="http://user:pass@host", proxies="http://user:pass@host",
... ...
) )
@ -74,7 +78,7 @@ stream = client.chat.completions.create(
) )
for chunk in stream: for chunk in stream:
if chunk.choices[0].delta.content: if chunk.choices[0].delta.content:
print(chunk.choices[0].delta.content, end="") print(chunk.choices[0].delta.content or "", end="")
``` ```
**Image Generation:** **Image Generation:**
@ -109,7 +113,28 @@ image_url = response.data[0].url
Original / Variant: Original / Variant:
[![Original Image](/docs/cat.jpeg)](/docs/client.md) [![Original Image](/docs/cat.jpeg)](/docs/client.md) [![Variant Image](/docs/cat.webp)](/docs/client.md)
[![Variant Image](/docs/cat.webp)](/docs/client.md)
#### Advanced example using GeminiProVision
```python
from g4f.client import Client
from g4f.Provider.GeminiPro import GeminiPro
client = Client(
api_key="...",
provider=GeminiPro
)
response = client.chat.completions.create(
model="gemini-pro-vision",
messages=[{"role": "user", "content": "What are on this image?"}],
image=open("docs/cat.jpeg", "rb")
)
print(response.choices[0].message.content)
```
**Question:** What are on this image?
```
A cat is sitting on a window sill looking at a bird outside the window.
```
[Return to Home](/) [Return to Home](/)

@ -0,0 +1,86 @@
from __future__ import annotations
import base64
import json
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages, ImageType
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import to_bytes, is_accepted_format
class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for Google's Gemini Pro generative language REST API.

    Talks to ``generativelanguage.googleapis.com`` authenticated with an
    API key. Supports text chat ("gemini-pro"), image input
    ("gemini-pro-vision"), and optional server-side streaming.
    """
    url = "https://ai.google.dev"
    working = True
    supports_message_history = True
    default_model = "gemini-pro"
    models = ["gemini-pro", "gemini-pro-vision"]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = False,
        proxy: str = None,
        api_key: str = None,
        image: ImageType = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response text chunks from the Gemini API.

        Args:
            model: Model name; defaults to the vision model when an image
                is supplied without an explicit model.
            messages: Chat history; the "assistant" role is mapped to
                Gemini's "model" role.
            stream: Use the streaming endpoint when True.
            proxy: Optional proxy URL for the outgoing request.
            api_key: Google AI Studio API key (the legacy "access_token"
                kwarg is also accepted).
            image: Optional image attached to the last message.

        Raises:
            ValueError: If no API key was provided.
            RuntimeError: If the API reports an error or a streamed chunk
                cannot be decoded.
        """
        # Auto-select the vision model when an image is given without a model.
        model = "gemini-pro-vision" if not model and image else model
        model = cls.get_model(model)
        # Backwards compatibility: "access_token" was the previous kwarg name.
        api_key = api_key if api_key else kwargs.get("access_token")
        if not api_key:
            # Fail fast with a clear message instead of an opaque HTTP error.
            raise ValueError('Missing "api_key" for the GeminiPro provider')
        headers = {
            "Content-Type": "application/json",
        }
        async with ClientSession(headers=headers) as session:
            method = "streamGenerateContent" if stream else "generateContent"
            url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:{method}"
            contents = [
                {
                    # Gemini names its own turns "model", not "assistant".
                    "role": "model" if message["role"] == "assistant" else message["role"],
                    "parts": [{"text": message["content"]}]
                }
                for message in messages
            ]
            if image:
                image = to_bytes(image)
                contents[-1]["parts"].append({
                    "inline_data": {
                        "mime_type": is_accepted_format(image),
                        "data": base64.b64encode(image).decode()
                    }
                })
            data = {
                "contents": contents,
            }
            async with session.post(url, params={"key": api_key}, json=data, proxy=proxy) as response:
                if not response.ok:
                    data = await response.json()
                    # Error payloads may arrive as a dict or a one-element
                    # list of dicts, depending on the endpoint.
                    error = data[0]["error"] if isinstance(data, list) else data["error"]
                    raise RuntimeError(error["message"])
                if stream:
                    # The streaming endpoint emits a JSON array; objects are
                    # delimited by "[{", ",\r\n" and "]" chunks.
                    lines = []
                    async for chunk in response.content:
                        if chunk == b"[{\n":
                            lines = [b"{\n"]
                        elif chunk == b",\r\n" or chunk == b"]":
                            data = b"".join(lines)
                            try:
                                data = json.loads(data)
                                yield data["candidates"][0]["content"]["parts"][0]["text"]
                            except Exception as e:
                                # Surface the raw payload for debugging; chain
                                # the original decode error as the cause.
                                data = data.decode(errors="replace") if isinstance(data, bytes) else data
                                raise RuntimeError(f"Read text failed. data: {data}") from e
                            lines = []
                        else:
                            lines.append(chunk)
                else:
                    data = await response.json()
                    yield data["candidates"][0]["content"]["parts"][0]["text"]

@ -34,6 +34,7 @@ from .FakeGpt import FakeGpt
from .FreeChatgpt import FreeChatgpt from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt from .FreeGpt import FreeGpt
from .GeekGpt import GeekGpt from .GeekGpt import GeekGpt
from .GeminiPro import GeminiPro
from .GeminiProChat import GeminiProChat from .GeminiProChat import GeminiProChat
from .Gpt6 import Gpt6 from .Gpt6 import Gpt6
from .GPTalk import GPTalk from .GPTalk import GPTalk

@ -23,10 +23,11 @@ from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_cookies from ..helper import format_prompt, get_cookies
from ...webdriver import get_browser, get_driver_cookies from ...webdriver import get_browser, get_driver_cookies
from ...typing import AsyncResult, Messages, Cookies, ImageType from ...typing import AsyncResult, Messages, Cookies, ImageType
from ...requests import StreamSession from ...requests import get_args_from_browser
from ...requests.aiohttp import StreamSession
from ...image import to_image, to_bytes, ImageResponse, ImageRequest from ...image import to_image, to_bytes, ImageResponse, ImageRequest
from ...errors import MissingRequirementsError, MissingAuthError from ...errors import MissingRequirementsError, MissingAuthError
from ... import debug
class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin): class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"""A class for creating and managing conversations with OpenAI chat service""" """A class for creating and managing conversations with OpenAI chat service"""
@ -39,7 +40,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
default_model = None default_model = None
models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo"] models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo"]
model_aliases = {"text-davinci-002-render-sha": "gpt-3.5-turbo"} model_aliases = {"text-davinci-002-render-sha": "gpt-3.5-turbo"}
_cookies: dict = {} _args: dict = None
@classmethod @classmethod
async def create( async def create(
@ -169,11 +170,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
""" """
if not cls.default_model: if not cls.default_model:
async with session.get(f"{cls.url}/backend-api/models", headers=headers) as response: async with session.get(f"{cls.url}/backend-api/models", headers=headers) as response:
response.raise_for_status()
data = await response.json() data = await response.json()
if "categories" in data: if "categories" in data:
cls.default_model = data["categories"][-1]["default_model"] cls.default_model = data["categories"][-1]["default_model"]
else: return cls.default_model
raise RuntimeError(f"Response: {data}") raise RuntimeError(f"Response: {data}")
return cls.default_model return cls.default_model
@classmethod @classmethod
@ -249,8 +251,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
first_part = line["message"]["content"]["parts"][0] first_part = line["message"]["content"]["parts"][0]
if "asset_pointer" not in first_part or "metadata" not in first_part: if "asset_pointer" not in first_part or "metadata" not in first_part:
return return
file_id = first_part["asset_pointer"].split("file-service://", 1)[1] if first_part["metadata"] is None:
return
prompt = first_part["metadata"]["dalle"]["prompt"] prompt = first_part["metadata"]["dalle"]["prompt"]
file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
try: try:
async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response: async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
response.raise_for_status() response.raise_for_status()
@ -289,7 +293,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
messages: Messages, messages: Messages,
proxy: str = None, proxy: str = None,
timeout: int = 120, timeout: int = 120,
access_token: str = None, api_key: str = None,
cookies: Cookies = None, cookies: Cookies = None,
auto_continue: bool = False, auto_continue: bool = False,
history_disabled: bool = True, history_disabled: bool = True,
@ -308,7 +312,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
messages (Messages): The list of previous messages. messages (Messages): The list of previous messages.
proxy (str): Proxy to use for requests. proxy (str): Proxy to use for requests.
timeout (int): Timeout for requests. timeout (int): Timeout for requests.
access_token (str): Access token for authentication. api_key (str): Access token for authentication.
cookies (dict): Cookies to use for authentication. cookies (dict): Cookies to use for authentication.
auto_continue (bool): Flag to automatically continue the conversation. auto_continue (bool): Flag to automatically continue the conversation.
history_disabled (bool): Flag to disable history and training. history_disabled (bool): Flag to disable history and training.
@ -329,35 +333,47 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
raise MissingRequirementsError('Install "py-arkose-generator" and "async_property" package') raise MissingRequirementsError('Install "py-arkose-generator" and "async_property" package')
if not parent_id: if not parent_id:
parent_id = str(uuid.uuid4()) parent_id = str(uuid.uuid4())
if not cookies: if cls._args is None and cookies is None:
cookies = cls._cookies or get_cookies("chat.openai.com", False) cookies = get_cookies("chat.openai.com", False)
if not access_token and "access_token" in cookies: api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
access_token = cookies["access_token"] if api_key is None:
if not access_token: api_key = cookies["access_token"] if "access_token" in cookies else api_key
login_url = os.environ.get("G4F_LOGIN_URL") if cls._args is None:
if login_url: cls._args = {
yield f"Please login: [ChatGPT]({login_url})\n\n" "headers": {"Cookie": "; ".join(f"{k}={v}" for k, v in cookies.items() if k != "access_token")},
try: "cookies": {} if cookies is None else cookies
access_token, cookies = cls.browse_access_token(proxy) }
except MissingRequirementsError: if api_key is not None:
raise MissingAuthError(f'Missing "access_token"') cls._args["headers"]["Authorization"] = f"Bearer {api_key}"
cls._cookies = cookies
auth_headers = {"Authorization": f"Bearer {access_token}"}
async with StreamSession( async with StreamSession(
proxies={"https": proxy}, proxies={"https": proxy},
impersonate="chrome110", impersonate="chrome",
timeout=timeout, timeout=timeout,
headers={"Cookie": "; ".join(f"{k}={v}" for k, v in cookies.items())} headers=cls._args["headers"]
) as session: ) as session:
if api_key is not None:
try:
cls.default_model = await cls.get_default_model(session, cls._args["headers"])
except Exception as e:
if debug.logging:
print(f"{e.__class__.__name__}: {e}")
if cls.default_model is None:
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
yield f"Please login: [ChatGPT]({login_url})\n\n"
try:
cls._args = cls.browse_access_token(proxy)
except MissingRequirementsError:
raise MissingAuthError(f'Missing or invalid "access_token". Add a new "api_key" please')
cls.default_model = await cls.get_default_model(session, cls._args["headers"])
try: try:
image_response = None image_response = None
if image: if image:
image_response = await cls.upload_image(session, auth_headers, image, kwargs.get("image_name")) image_response = await cls.upload_image(session, cls._args["headers"], image, kwargs.get("image_name"))
except Exception as e: except Exception as e:
yield e yield e
end_turn = EndTurn() end_turn = EndTurn()
model = cls.get_model(model or await cls.get_default_model(session, auth_headers)) model = cls.get_model(model)
model = "text-davinci-002-render-sha" if model == "gpt-3.5-turbo" else model model = "text-davinci-002-render-sha" if model == "gpt-3.5-turbo" else model
while not end_turn.is_end: while not end_turn.is_end:
arkose_token = await cls.get_arkose_token(session) arkose_token = await cls.get_arkose_token(session)
@ -375,13 +391,19 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
if action != "continue": if action != "continue":
prompt = format_prompt(messages) if not conversation_id else messages[-1]["content"] prompt = format_prompt(messages) if not conversation_id else messages[-1]["content"]
data["messages"] = cls.create_messages(prompt, image_response) data["messages"] = cls.create_messages(prompt, image_response)
# Update cookies before next request
for c in session.cookie_jar if hasattr(session, "cookie_jar") else session.cookies.jar:
cls._args["cookies"][c.name if hasattr(c, "name") else c.key] = c.value
cls._args["headers"]["Cookie"] = "; ".join(f"{k}={v}" for k, v in cls._args["cookies"].items())
async with session.post( async with session.post(
f"{cls.url}/backend-api/conversation", f"{cls.url}/backend-api/conversation",
json=data, json=data,
headers={ headers={
"Accept": "text/event-stream", "Accept": "text/event-stream",
"OpenAI-Sentinel-Arkose-Token": arkose_token, "OpenAI-Sentinel-Arkose-Token": arkose_token,
**auth_headers **cls._args["headers"]
} }
) as response: ) as response:
if not response.ok: if not response.ok:
@ -403,8 +425,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
if "message_type" not in line["message"]["metadata"]: if "message_type" not in line["message"]["metadata"]:
continue continue
try: try:
image_response = await cls.get_generated_image(session, auth_headers, line) image_response = await cls.get_generated_image(session, cls._args["headers"], line)
if image_response: if image_response is not None:
yield image_response yield image_response
except Exception as e: except Exception as e:
yield e yield e
@ -432,7 +454,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
action = "continue" action = "continue"
await asyncio.sleep(5) await asyncio.sleep(5)
if history_disabled and auto_continue: if history_disabled and auto_continue:
await cls.delete_conversation(session, auth_headers, conversation_id) await cls.delete_conversation(session, cls._args["headers"], conversation_id)
@classmethod @classmethod
def browse_access_token(cls, proxy: str = None, timeout: int = 1200) -> tuple[str, dict]: def browse_access_token(cls, proxy: str = None, timeout: int = 1200) -> tuple[str, dict]:
@ -457,7 +479,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"document.cookie = 'access_token=' + accessToken + ';expires=' + expires.toUTCString() + ';path=/';" "document.cookie = 'access_token=' + accessToken + ';expires=' + expires.toUTCString() + ';path=/';"
"return accessToken;" "return accessToken;"
) )
return access_token, get_driver_cookies(driver) args = get_args_from_browser(f"{cls.url}/", driver, do_bypass_cloudflare=False)
args["headers"]["Authorization"] = f"Bearer {access_token}"
args["headers"]["Cookie"] = "; ".join(f"{k}={v}" for k, v in args["cookies"].items() if k != "access_token")
return args
finally: finally:
driver.close() driver.close()

@ -21,7 +21,7 @@ class ChatCompletionsConfig(BaseModel):
temperature: Union[float, None] temperature: Union[float, None]
max_tokens: int = None max_tokens: int = None
stop: Union[list[str], str, None] stop: Union[list[str], str, None]
access_token: Union[str, None] api_key: Union[str, None]
class Api: class Api:
def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False, def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
@ -82,10 +82,10 @@ class Api:
async def chat_completions(config: ChatCompletionsConfig = None, request: Request = None, provider: str = None): async def chat_completions(config: ChatCompletionsConfig = None, request: Request = None, provider: str = None):
try: try:
config.provider = provider if config.provider is None else config.provider config.provider = provider if config.provider is None else config.provider
if config.access_token is None and request is not None: if config.api_key is None and request is not None:
auth_header = request.headers.get("Authorization") auth_header = request.headers.get("Authorization")
if auth_header is not None: if auth_header is not None:
config.access_token = auth_header.split(None, 1)[-1] config.api_key = auth_header.split(None, 1)[-1]
response = self.client.chat.completions.create( response = self.client.chat.completions.create(
**dict(config), **dict(config),
@ -124,4 +124,9 @@ def format_exception(e: Exception, config: ChatCompletionsConfig) -> str:
"error": {"message": f"ChatCompletionsError: {e.__class__.__name__}: {e}"}, "error": {"message": f"ChatCompletionsError: {e.__class__.__name__}: {e}"},
"model": last_provider.get("model") if last_provider else config.model, "model": last_provider.get("model") if last_provider else config.model,
"provider": last_provider.get("name") if last_provider else config.provider "provider": last_provider.get("name") if last_provider else config.provider
}) })
def run_api(host: str = '0.0.0.0', port: int = 1337, debug: bool = False, use_colors=True) -> None:
print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]')
app = Api(engine=g4f, debug=debug)
uvicorn.run(app=app, host=host, port=port, use_colors=use_colors)

@ -86,20 +86,19 @@ def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
yield chunk yield chunk
class Client(): class Client():
proxies: Proxies = None
chat: Chat
images: Images
def __init__( def __init__(
self, self,
api_key: str = None,
proxies: Proxies = None,
provider: ProviderType = None, provider: ProviderType = None,
image_provider: ImageProvider = None, image_provider: ImageProvider = None,
proxies: Proxies = None,
**kwargs **kwargs
) -> None: ) -> None:
self.chat = Chat(self, provider) self.api_key: str = api_key
self.images = Images(self, image_provider)
self.proxies: Proxies = proxies self.proxies: Proxies = proxies
self.chat: Chat = Chat(self, provider)
self.images: Images = Images(self, image_provider)
def get_proxy(self) -> Union[str, None]: def get_proxy(self) -> Union[str, None]:
if isinstance(self.proxies, str): if isinstance(self.proxies, str):
@ -125,6 +124,7 @@ class Completions():
response_format: dict = None, response_format: dict = None,
max_tokens: int = None, max_tokens: int = None,
stop: Union[list[str], str] = None, stop: Union[list[str], str] = None,
api_key: str = None,
**kwargs **kwargs
) -> Union[ChatCompletion, Generator[ChatCompletionChunk]]: ) -> Union[ChatCompletion, Generator[ChatCompletionChunk]]:
if max_tokens is not None: if max_tokens is not None:
@ -137,9 +137,16 @@ class Completions():
stream, stream,
**kwargs **kwargs
) )
response = provider.create_completion(model, messages, stream=stream, proxy=self.client.get_proxy(), **kwargs)
stop = [stop] if isinstance(stop, str) else stop stop = [stop] if isinstance(stop, str) else stop
response = iter_append_model_and_provider(iter_response(response, stream, response_format, max_tokens, stop)) response = provider.create_completion(
model, messages, stream,
proxy=self.client.get_proxy(),
stop=stop,
api_key=self.client.api_key if api_key is None else api_key,
**kwargs
)
response = iter_response(response, stream, response_format, max_tokens, stop)
response = iter_append_model_and_provider(response)
return response if stream else next(response) return response if stream else next(response)
class Chat(): class Chat():

@ -97,17 +97,17 @@ def is_accepted_format(binary_data: bytes) -> bool:
ValueError: If the image format is not allowed. ValueError: If the image format is not allowed.
""" """
if binary_data.startswith(b'\xFF\xD8\xFF'): if binary_data.startswith(b'\xFF\xD8\xFF'):
pass # It's a JPEG image return "image/jpeg"
elif binary_data.startswith(b'\x89PNG\r\n\x1a\n'): elif binary_data.startswith(b'\x89PNG\r\n\x1a\n'):
pass # It's a PNG image return "image/png"
elif binary_data.startswith(b'GIF87a') or binary_data.startswith(b'GIF89a'): elif binary_data.startswith(b'GIF87a') or binary_data.startswith(b'GIF89a'):
pass # It's a GIF image return "image/gif"
elif binary_data.startswith(b'\x89JFIF') or binary_data.startswith(b'JFIF\x00'): elif binary_data.startswith(b'\x89JFIF') or binary_data.startswith(b'JFIF\x00'):
pass # It's a JPEG image return "image/jpeg"
elif binary_data.startswith(b'\xFF\xD8'): elif binary_data.startswith(b'\xFF\xD8'):
pass # It's a JPEG image return "image/jpeg"
elif binary_data.startswith(b'RIFF') and binary_data[8:12] == b'WEBP': elif binary_data.startswith(b'RIFF') and binary_data[8:12] == b'WEBP':
pass # It's a WebP image return "image/webp"
else: else:
raise ValueError("Invalid image format (from magic code).") raise ValueError("Invalid image format (from magic code).")

@ -15,7 +15,13 @@ from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driv
from ..errors import MissingRequirementsError from ..errors import MissingRequirementsError
from .defaults import DEFAULT_HEADERS from .defaults import DEFAULT_HEADERS
def get_args_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> dict: def get_args_from_browser(
url: str,
webdriver: WebDriver = None,
proxy: str = None,
timeout: int = 120,
do_bypass_cloudflare: bool = True
) -> dict:
""" """
Create a Session object using a WebDriver to handle cookies and headers. Create a Session object using a WebDriver to handle cookies and headers.
@ -29,7 +35,8 @@ def get_args_from_browser(url: str, webdriver: WebDriver = None, proxy: str = No
Session: A Session object configured with cookies and headers from the WebDriver. Session: A Session object configured with cookies and headers from the WebDriver.
""" """
with WebDriverSession(webdriver, "", proxy=proxy, virtual_display=False) as driver: with WebDriverSession(webdriver, "", proxy=proxy, virtual_display=False) as driver:
bypass_cloudflare(driver, url, timeout) if do_bypass_cloudflare:
bypass_cloudflare(driver, url, timeout)
cookies = get_driver_cookies(driver) cookies = get_driver_cookies(driver)
user_agent = driver.execute_script("return navigator.userAgent") user_agent = driver.execute_script("return navigator.userAgent")
parse = urlparse(url) parse = urlparse(url)

@ -16,7 +16,7 @@ uvicorn
flask flask
py-arkose-generator py-arkose-generator
async-property async-property
undetected-chromedriver undetected-chromedriver>=3.5.5
brotli brotli
beautifulsoup4 beautifulsoup4
setuptools setuptools

@ -25,7 +25,7 @@ EXTRA_REQUIRE = {
"beautifulsoup4", # internet.search and bing.create_images "beautifulsoup4", # internet.search and bing.create_images
"brotli", # openai "brotli", # openai
"platformdirs", # webdriver "platformdirs", # webdriver
"undetected-chromedriver", # webdriver "undetected-chromedriver>=3.5.5", # webdriver
"setuptools", # webdriver "setuptools", # webdriver
"aiohttp_socks", # proxy "aiohttp_socks", # proxy
"pillow", # image "pillow", # image

Loading…
Cancel
Save