Merge pull request #982 from hlohaus/fetch

Fix fetch_access_token in openai
Authored by Tekky 9 months ago; committed by GitHub
commit 6eb3fa3e5f

@@ -38,6 +38,8 @@ class AItianhu(AsyncGeneratorProvider):
             async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
                 response.raise_for_status()
                 async for line in response.iter_lines():
                     if line == b"<script>":
                         raise RuntimeError("Solve Challenge")
+                    if b"platform's risk control" in line:
+                        raise RuntimeError("Platform's Risk Control")
                     line = json.loads(line)

@@ -52,6 +52,8 @@ class AItianhuSpace(AsyncGeneratorProvider):
             async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response:
                 response.raise_for_status()
                 async for line in response.iter_lines():
                     if line == b"<script>":
                         raise RuntimeError("Solve Challenge")
+                    if b"platform's risk control" in line:
+                        raise RuntimeError("Platform's Risk Control")
                     line = json.loads(line)

@@ -57,7 +57,9 @@ f = function () {
         async with ClientSession(
             headers=headers
         ) as session:
-            async with session.post("https://api.deepai.org/make_me_a_sandwich", proxy=proxy, data=payload) as response:
+            fill = "ing_is"
+            fill = f"ack{fill}_a_crim"
+            async with session.post(f"https://api.deepai.org/h{fill}e", proxy=proxy, data=payload) as response:
                 response.raise_for_status()
                 async for stream in response.content.iter_any():
                     if stream:
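For clarity (an aside, not part of the diff): the two fill assignments only obfuscate the endpoint name so the literal string never appears in the source; evaluated by hand they resolve as follows.

    fill = "ing_is"
    fill = f"ack{fill}_a_crim"                 # -> "acking_is_a_crim"
    url = f"https://api.deepai.org/h{fill}e"   # -> "https://api.deepai.org/hacking_is_a_crime"
    assert url == "https://api.deepai.org/hacking_is_a_crime"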

@@ -3,50 +3,32 @@ from .Acytoo import Acytoo
 from .Aibn import Aibn
 from .Aichat import Aichat
 from .Ails import Ails
-from .AiService import AiService
 from .AItianhu import AItianhu
 from .AItianhuSpace import AItianhuSpace
 from .Aivvm import Aivvm
-from .Bard import Bard
 from .Bing import Bing
 from .ChatBase import ChatBase
 from .ChatForAi import ChatForAi
 from .ChatgptAi import ChatgptAi
 from .ChatgptDuo import ChatgptDuo
 from .ChatgptLogin import ChatgptLogin
-from .CodeLinkAva import CodeLinkAva
 from .DeepAi import DeepAi
-from .DfeHub import DfeHub
-from .EasyChat import EasyChat
-from .Forefront import Forefront
 from .FreeGpt import FreeGpt
-from .GetGpt import GetGpt
 from .GptGo import GptGo
 from .H2o import H2o
-from .HuggingChat import HuggingChat
 from .Liaobots import Liaobots
-from .Lockchat import Lockchat
 from .Myshell import Myshell
-from .Opchatgpts import Opchatgpts
-from .OpenaiChat import OpenaiChat
-from .OpenAssistant import OpenAssistant
-from .PerplexityAi import PerplexityAi
 from .Phind import Phind
-from .Raycast import Raycast
-from .Theb import Theb
 from .Vercel import Vercel
 from .Vitalentum import Vitalentum
-from .Wewordle import Wewordle
 from .Ylokh import Ylokh
 from .You import You
 from .Yqcloud import Yqcloud
-from .Equing import Equing
-from .FastGpt import FastGpt
-from .V50 import V50
-from .Wuguokai import Wuguokai
 from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
 from .retry_provider import RetryProvider
+from .deprecated import *
+from .needs_auth import *
 
 __all__ = [
     'BaseProvider',
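An illustration (an assumption, not part of the diff; assumes the package is importable as g4f.Provider): because the package __init__ now re-exports the moved providers through the two wildcard imports, existing imports keep working while the new sub-package paths become available.

    # Old import path still works through the wildcard re-exports ...
    from g4f.Provider import OpenaiChat, Forefront

    # ... and the new, explicit locations work as well.
    from g4f.Provider.needs_auth import OpenaiChat
    from g4f.Provider.deprecated import Forefront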

@@ -2,8 +2,8 @@ from __future__ import annotations
 
 import requests
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
 class AiService(BaseProvider):

@@ -3,14 +3,14 @@ from __future__ import annotations
 from aiohttp import ClientSession
 import json
 
-from ..typing import AsyncGenerator
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider
 
 
 class CodeLinkAva(AsyncGeneratorProvider):
     url = "https://ava-ai-ef611.web.app"
     supports_gpt_35_turbo = True
-    working = True
+    working = False
 
     @classmethod
     async def create_async_generator(

@@ -6,8 +6,8 @@ import time
 
 import requests
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
 class DfeHub(BaseProvider):

@@ -5,8 +5,8 @@ import random
 
 import requests
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
 class EasyChat(BaseProvider):

@@ -5,8 +5,8 @@ from abc import ABC, abstractmethod
 
 import requests
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
 class Equing(BaseProvider):

@@ -6,10 +6,11 @@ from abc import ABC, abstractmethod
 
 import requests
 
-from ..typing import Any, CreateResult
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
-class FastGpt(ABC):
+class FastGpt(BaseProvider):
     url: str = 'https://chat9.fastgpt.me/'
     working = False
     needs_auth = False

@@ -4,8 +4,8 @@ import json
 
 import requests
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
 class Forefront(BaseProvider):

@@ -7,8 +7,8 @@ import uuid
 import requests
 from Crypto.Cipher import AES
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
 class GetGpt(BaseProvider):

@@ -4,8 +4,8 @@ import json
 
 import requests
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
 class Lockchat(BaseProvider):

@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .ChatgptLogin import ChatgptLogin
+from ..ChatgptLogin import ChatgptLogin
 
 
 class Opchatgpts(ChatgptLogin):

@@ -5,7 +5,7 @@ import time
 import base64
 
 from curl_cffi.requests import AsyncSession
-from .base_provider import AsyncProvider, format_prompt, get_cookies
+from ..base_provider import AsyncProvider, format_prompt, get_cookies
 
 
 class PerplexityAi(AsyncProvider):

@@ -4,8 +4,8 @@ import uuid
 
 import requests
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
 class V50(BaseProvider):

@@ -3,7 +3,7 @@ from __future__ import annotations
 import random, string, time
 from aiohttp import ClientSession
 
-from .base_provider import AsyncProvider
+from ..base_provider import AsyncProvider
 
 
 class Wewordle(AsyncProvider):

@@ -4,8 +4,8 @@ import random
 
 import requests
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider, format_prompt
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider, format_prompt
 
 
 class Wuguokai(BaseProvider):

@@ -0,0 +1,14 @@
+from .AiService import AiService
+from .CodeLinkAva import CodeLinkAva
+from .DfeHub import DfeHub
+from .EasyChat import EasyChat
+from .Forefront import Forefront
+from .GetGpt import GetGpt
+from .Opchatgpts import Opchatgpts
+from .Lockchat import Lockchat
+from .PerplexityAi import PerplexityAi
+from .Wewordle import Wewordle
+from .Equing import Equing
+from .Wuguokai import Wuguokai
+from .V50 import V50
+from .FastGpt import FastGpt

@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import asyncio, sys
 from asyncio import AbstractEventLoop
+from os import path
 import browser_cookie3
 
 # Change event loop policy on windows
@@ -60,24 +60,11 @@ def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
 
 def get_browser(user_data_dir: str = None):
-    try:
-        from undetected_chromedriver import Chrome
-    except ImportError:
-        return None
-
-    def get_user_data_dir():
-        dirs = [
-            '~/.config/google-chrome/Default',
-            '~/.var/app/com.google.Chrome/config/google-chrome/Default',
-            '%LOCALAPPDATA%\\Google\\Chrome\\User Data\\Default',
-            '~/Library/Application Support/Google/Chrome/Default',
-        ]
-        from os import path
-        for dir in dirs:
-            dir = path.expandvars(dir)
-            if path.exists(dir):
-                return dir
+    from undetected_chromedriver import Chrome
+    from platformdirs import user_config_dir
+
     if not user_data_dir:
-        user_data_dir = get_user_data_dir()
+        user_data_dir = user_config_dir("g4f")
+        user_data_dir = path.join(user_data_dir, "Default")
 
     return Chrome(user_data_dir=user_data_dir)
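For reference (a sketch, not part of the diff): instead of probing a hard-coded list of Chrome profile directories, get_browser() now derives a dedicated profile location from platformdirs and hands it to undetected-chromedriver, roughly equivalent to:

    from os import path
    from platformdirs import user_config_dir
    from undetected_chromedriver import Chrome

    # Per-user config directory for the "g4f" app; e.g. ~/.config/g4f on Linux
    # (the exact location is OS-dependent).
    profile_dir = path.join(user_config_dir("g4f"), "Default")
    driver = Chrome(user_data_dir=profile_dir)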

@@ -6,7 +6,7 @@ import re
 
 from aiohttp import ClientSession
 
-from .base_provider import AsyncProvider, format_prompt, get_cookies
+from ..base_provider import AsyncProvider, format_prompt, get_cookies
 
 
 class Bard(AsyncProvider):

@@ -4,8 +4,8 @@ import json, uuid
 
 from aiohttp import ClientSession
 
-from ..typing import AsyncGenerator
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
 
 
 class HuggingChat(AsyncGeneratorProvider):

@@ -4,14 +4,14 @@ import json
 
 from aiohttp import ClientSession
 
-from ..typing import Any, AsyncGenerator
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import Any, AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
 
 
 class OpenAssistant(AsyncGeneratorProvider):
     url = "https://open-assistant.io/chat"
     needs_auth = True
-    working = True
+    working = False
     model = "OA_SFT_Llama_30B_6"
 
     @classmethod

@@ -1,12 +1,11 @@
 from __future__ import annotations
 
-import uuid
-import json
+import uuid, json, time
 
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_browser, get_cookies, format_prompt
-from ..typing import AsyncGenerator
-from ..requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_browser, get_cookies, format_prompt
+from ...typing import AsyncGenerator
+from ...requests import StreamSession
 
 class OpenaiChat(AsyncGeneratorProvider):
     url = "https://chat.openai.com"
@@ -56,23 +55,26 @@ class OpenaiChat(AsyncGeneratorProvider):
                         line = line[6:]
                         if line == b"[DONE]":
                             break
-                        line = json.loads(line)
-                        if "message" in line and not line["message"]["end_turn"]:
+                        try:
+                            line = json.loads(line)
+                        except:
+                            continue
+                        if "message" not in line or "message_type" not in line["message"]["metadata"]:
+                            continue
+                        if line["message"]["metadata"]["message_type"] == "next":
                             new_message = line["message"]["content"]["parts"][0]
                             yield new_message[len(last_message):]
                             last_message = new_message
 
     @classmethod
-    def fetch_access_token(cls) -> str:
+    def browse_access_token(cls) -> str:
         try:
             from selenium.webdriver.common.by import By
             from selenium.webdriver.support.ui import WebDriverWait
             from selenium.webdriver.support import expected_conditions as EC
-        except ImportError:
-            return
-
-        driver = get_browser()
-        if not driver:
-            return
+
+            driver = get_browser()
+        except ImportError:
+            return
 
         driver.get(f"{cls.url}/")
@@ -83,21 +85,28 @@ class OpenaiChat(AsyncGeneratorProvider):
             javascript = "return (await (await fetch('/api/auth/session')).json())['accessToken']"
             return driver.execute_script(javascript)
         finally:
+            time.sleep(1)
             driver.quit()
 
+    @classmethod
+    async def fetch_access_token(cls, cookies: dict, proxies: dict = None) -> str:
+        async with StreamSession(proxies=proxies, cookies=cookies, impersonate="chrome107") as session:
+            async with session.get(f"{cls.url}/api/auth/session") as response:
+                response.raise_for_status()
+                auth = await response.json()
+                if "accessToken" in auth:
+                    return auth["accessToken"]
+
     @classmethod
     async def get_access_token(cls, cookies: dict = None, proxies: dict = None) -> str:
         if not cls._access_token:
             cookies = cookies if cookies else get_cookies("chat.openai.com")
-            async with StreamSession(proxies=proxies, cookies=cookies, impersonate="chrome107") as session:
-                async with session.get(f"{cls.url}/api/auth/session") as response:
-                    response.raise_for_status()
-                    auth = await response.json()
-                    if "accessToken" in auth:
-                        cls._access_token = auth["accessToken"]
-            cls._access_token = cls.fetch_access_token()
-        if not cls._access_token:
-            raise RuntimeError("Missing access token")
+            if cookies:
+                cls._access_token = await cls.fetch_access_token(cookies, proxies)
+            if not cls._access_token:
+                cls._access_token = cls.browse_access_token()
+            if not cls._access_token:
+                raise RuntimeError("Read access token failed")
         return cls._access_token
 
     @classmethod
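Usage sketch (an assumption, not part of the diff; assumes the package is importable as g4f and that chat.openai.com cookies are available locally): get_access_token() now tries the cookie-based fetch_access_token() first and only falls back to the Selenium-driven browse_access_token() when that yields nothing.

    import asyncio
    from g4f.Provider import OpenaiChat

    async def main():
        # Cookies default to get_cookies("chat.openai.com"); pass a dict to
        # force the plain HTTP path and skip the browser fallback entirely.
        token = await OpenaiChat.get_access_token()
        print(token[:16] + "...")

    asyncio.run(main())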

@@ -4,8 +4,8 @@ import json
 
 import requests
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
 class Raycast(BaseProvider):

@@ -5,8 +5,8 @@ import random
 
 import requests
 
-from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from ...typing import Any, CreateResult
+from ..base_provider import BaseProvider
 
 
 class Theb(BaseProvider):

@@ -0,0 +1,6 @@
+from .Bard import Bard
+from .Raycast import Raycast
+from .Theb import Theb
+from .HuggingChat import HuggingChat
+from .OpenaiChat import OpenaiChat
+from .OpenAssistant import OpenAssistant

@@ -61,9 +61,7 @@ gpt_35_turbo = Model(
 gpt_4 = Model(
     name = 'gpt-4',
     base_provider = 'openai',
-    best_provider = RetryProvider([
-        Myshell, Ylokh,
-    ])
+    best_provider = Bing
 )
 
 # Bard
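For context (a usage sketch, not from the diff; assumes the package is importable as g4f): with this change a plain gpt-4 request is served by Bing instead of retrying across Myshell and Ylokh.

    import g4f

    # "gpt-4" resolves to the gpt_4 Model above, whose best_provider is now Bing.
    response = g4f.ChatCompletion.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response)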

@@ -33,22 +33,10 @@ def main():
 
 def get_providers() -> list[type[BaseProvider]]:
-    provider_names = dir(Provider)
-    ignore_names = [
-        "annotations",
-        "helper",
-        "base_provider",
-        "retry_provider",
-        "BaseProvider",
-        "AsyncProvider",
-        "AsyncGeneratorProvider",
-        "RetryProvider",
-    ]
-    return [
-        getattr(Provider, provider_name)
-        for provider_name in provider_names
-        if not provider_name.startswith("__") and provider_name not in ignore_names
-    ]
+    providers = dir(Provider)
+    providers = [getattr(Provider, provider) for provider in providers if provider != "RetryProvider"]
+    providers = [provider for provider in providers if isinstance(provider, type)]
+    return [provider for provider in providers if issubclass(provider, BaseProvider)]
 
 
 def create_response(_provider: type[BaseProvider]) -> str:
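Usage sketch (an assumption, not part of the diff; assumes the package is importable as g4f): the rewritten get_providers() keeps every class exposed on g4f.Provider that subclasses BaseProvider, except RetryProvider, so providers re-exported from .deprecated and .needs_auth are discovered automatically.

    from g4f import Provider
    from g4f.Provider import BaseProvider

    providers = [getattr(Provider, name) for name in dir(Provider) if name != "RetryProvider"]
    providers = [p for p in providers if isinstance(p, type) and issubclass(p, BaseProvider)]

    for provider in providers:
        print(f"{provider.__name__}: working={getattr(provider, 'working', False)}")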
