Improve providers

pull/1264/head
Heiner Lohaus 8 months ago
parent b268771baa
commit e4caf1d0ca

@ -11,6 +11,7 @@ from .. import debug
class AItianhuSpace(BaseProvider): class AItianhuSpace(BaseProvider):
url = "https://chat3.aiyunos.top/" url = "https://chat3.aiyunos.top/"
working = True working = True
supports_stream = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
_domains = ["aitianhu.com", "aitianhu1.top"] _domains = ["aitianhu.com", "aitianhu1.top"]

@ -22,24 +22,24 @@ class PerplexityAi(BaseProvider):
timeout: int = 120, timeout: int = 120,
browser: WebDriver = None, browser: WebDriver = None,
copilot: bool = False, copilot: bool = False,
headless: bool = True,
**kwargs **kwargs
) -> CreateResult: ) -> CreateResult:
driver = browser if browser else get_browser("", headless, proxy) driver = browser if browser else get_browser("", False, proxy)
from selenium.webdriver.common.by import By from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
prompt = format_prompt(messages) prompt = format_prompt(messages)
driver.get(f"{cls.url}/") driver.get(f"{cls.url}/")
wait = WebDriverWait(driver, timeout) wait = WebDriverWait(driver, timeout)
# Page loaded? # Is page loaded?
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']"))) wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']")))
# Add WebSocket hook # Register WebSocket hook
script = """ script = """
window._message = window._last_message = ""; window._message = window._last_message = "";
window._message_finished = false; window._message_finished = false;
@ -57,8 +57,9 @@ WebSocket.prototype.send = function(...args) {
content = JSON.parse(content); content = JSON.parse(content);
} }
window._message = content["answer"]; window._message = content["answer"];
window._message_finished = data[0] == "query_answered"; if (!window._message_finished) {
window._web_results = content["web_results"]; window._message_finished = data[0] == "query_answered";
}
} }
} }
}); });
@ -70,20 +71,19 @@ WebSocket.prototype.send = function(...args) {
if copilot: if copilot:
try: try:
# Check account # Check for account
driver.find_element(By.CSS_SELECTOR, "img[alt='User avatar']") driver.find_element(By.CSS_SELECTOR, "img[alt='User avatar']")
# Enable copilot # Enable copilot
driver.find_element(By.CSS_SELECTOR, "button[data-testid='copilot-toggle']").click() driver.find_element(By.CSS_SELECTOR, "button[data-testid='copilot-toggle']").click()
except: except:
raise RuntimeError("For copilot you needs a account") raise RuntimeError("You need a account for copilot")
# Enter question # Submit prompt
driver.find_element(By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']").send_keys(prompt) driver.find_element(By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']").send_keys(prompt)
# Submit question driver.find_element(By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']").send_keys(Keys.ENTER)
driver.find_element(By.CSS_SELECTOR, "button.bg-super svg[data-icon='arrow-right']").click()
try: try:
# Yield response # Stream response
script = """ script = """
if(window._message && window._message != window._last_message) { if(window._message && window._message != window._last_message) {
try { try {

@ -32,7 +32,7 @@ class Bard(BaseProvider):
try: try:
driver.get(f"{cls.url}/chat") driver.get(f"{cls.url}/chat")
wait = WebDriverWait(driver, 10) wait = WebDriverWait(driver, 10 if headless else 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))) wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
except: except:
# Reopen browser for login # Reopen browser for login
@ -61,14 +61,13 @@ XMLHttpRequest.prototype.open = function(method, url) {
""" """
driver.execute_script(script) driver.execute_script(script)
# Input and submit prompt # Submit prompt
driver.find_element(By.CSS_SELECTOR, "div.ql-editor.ql-blank.textarea").send_keys(prompt) driver.find_element(By.CSS_SELECTOR, "div.ql-editor.ql-blank.textarea").send_keys(prompt)
driver.find_element(By.CSS_SELECTOR, "button.send-button").click() driver.find_element(By.CSS_SELECTOR, "button.send-button").click()
# Yield response # Yield response
script = "return window._message;"
while True: while True:
chunk = driver.execute_script(script) chunk = driver.execute_script("return window._message;")
if chunk: if chunk:
yield chunk yield chunk
return return

@ -1,101 +1,158 @@
from __future__ import annotations from __future__ import annotations
import json import time
import random
import requests
from ...typing import Any, CreateResult, Messages from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider from ..base_provider import BaseProvider
from ..helper import format_prompt from ..helper import WebDriver, format_prompt, get_browser
models = {
"theb-ai": "TheB.AI",
"theb-ai-free": "TheB.AI Free",
"gpt-3.5-turbo": "GPT-3.5 Turbo (New)",
"gpt-3.5-turbo-16k": "GPT-3.5-16K",
"gpt-4-turbo": "GPT-4 Turbo",
"gpt-4": "GPT-4",
"gpt-4-32k": "GPT-4 32K",
"claude-2": "Claude 2",
"claude-instant-1": "Claude Instant 1.2",
"palm-2": "PaLM 2",
"palm-2-32k": "PaLM 2 32K",
"palm-2-codey": "Codey",
"palm-2-codey-32k": "Codey 32K",
"vicuna-13b-v1.5": "Vicuna v1.5 13B",
"llama-2-7b-chat": "Llama 2 7B",
"llama-2-13b-chat": "Llama 2 13B",
"llama-2-70b-chat": "Llama 2 70B",
"code-llama-7b": "Code Llama 7B",
"code-llama-13b": "Code Llama 13B",
"code-llama-34b": "Code Llama 34B",
"qwen-7b-chat": "Qwen 7B"
}
class Theb(BaseProvider): class Theb(BaseProvider):
url = "https://theb.ai" url = "https://beta.theb.ai"
working = True working = True
supports_stream = True supports_gpt_35_turbo = True
supports_gpt_35_turbo = True supports_gpt_4 = True
needs_auth = True supports_stream = True
@staticmethod @classmethod
def create_completion( def create_completion(
cls,
model: str, model: str,
messages: Messages, messages: Messages,
stream: bool, stream: bool,
proxy: str = None, proxy: str = None,
browser: WebDriver = None,
headless: bool = True,
**kwargs **kwargs
) -> CreateResult: ) -> CreateResult:
auth = kwargs.get("auth", { if model in models:
"bearer_token":"free", model = models[model]
"org_id":"theb", prompt = format_prompt(messages)
}) driver = browser if browser else get_browser(None, headless, proxy)
bearer_token = auth["bearer_token"]
org_id = auth["org_id"]
headers = { from selenium.webdriver.common.by import By
'authority': 'beta.theb.ai', from selenium.webdriver.support.ui import WebDriverWait
'accept': 'text/event-stream', from selenium.webdriver.support import expected_conditions as EC
'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', from selenium.webdriver.common.keys import Keys
'authorization': f'Bearer {bearer_token}',
'content-type': 'application/json',
'origin': 'https://beta.theb.ai',
'referer': 'https://beta.theb.ai/home',
'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
}
req_rand = random.randint(100000000, 9999999999)
try:
driver.get(f"{cls.url}/home")
wait = WebDriverWait(driver, 10 if headless else 240)
wait.until(EC.visibility_of_element_located((By.TAG_NAME, "body")))
time.sleep(0.1)
try:
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
except:
pass
if model:
# Load model panel
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#SelectModel svg")))
time.sleep(0.1)
driver.find_element(By.CSS_SELECTOR, "#SelectModel svg").click()
try:
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
except:
pass
# Select model
selector = f"div.flex-col div.items-center span[title='{model}']"
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
span = driver.find_element(By.CSS_SELECTOR, selector)
container = span.find_element(By.XPATH, "//div/../..")
button = container.find_element(By.CSS_SELECTOR, "button.btn-blue.btn-small.border")
button.click()
json_data: dict[str, Any] = { # Register fetch hook
"text" : format_prompt(messages), script = """
"category" : "04f58f64a4aa4191a957b47290fee864", window._fetch = window.fetch;
"model" : "ee8d4f29cb7047f78cbe84313ed6ace8", window.fetch = (url, options) => {
"model_params": { // Call parent fetch method
"system_prompt" : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}", const result = window._fetch(url, options);
"temperature" : kwargs.get("temperature", 1), if (!url.startsWith("/api/conversation")) {
"top_p" : kwargs.get("top_p", 1), return result;
"frequency_penalty" : kwargs.get("frequency_penalty", 0), }
"presence_penalty" : kwargs.get("presence_penalty", 0), // Load response reader
"long_term_memory" : "auto" result.then((response) => {
} if (!response.body.locked) {
window._reader = response.body.getReader();
} }
});
// Return dummy response
return new Promise((resolve, reject) => {
resolve(new Response(new ReadableStream()))
});
}
window._last_message = "";
"""
driver.execute_script(script)
response = requests.post( # Submit prompt
f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}", wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
headers=headers, driver.find_element(By.ID, "textareaAutosize").send_keys(prompt)
json=json_data, driver.find_element(By.ID, "textareaAutosize").send_keys(Keys.ENTER)
stream=True,
proxies={"https": proxy}
)
response.raise_for_status()
content = ""
next_content = ""
for chunk in response.iter_lines():
if b"content" in chunk:
next_content = content
data = json.loads(chunk.decode().split("data: ")[1])
content = data["content"]
yield content.replace(next_content, "")
@classmethod # Read response with reader
@property script = """
def params(cls): if(window._reader) {
params = [ chunk = await window._reader.read();
("model", "str"), if (chunk['done']) {
("messages", "list[dict[str, str]]"), return null;
("auth", "list[dict[str, str]]"), }
("stream", "bool"), text = (new TextDecoder()).decode(chunk['value']);
("temperature", "float"), message = '';
("presence_penalty", "int"), text.split('\\r\\n').forEach((line, index) => {
("frequency_penalty", "int"), if (line.startsWith('data: ')) {
("top_p", "int") try {
] line = JSON.parse(line.substring('data: '.length));
param = ", ".join([": ".join(p) for p in params]) message = line["args"]["content"];
return f"g4f.provider.{cls.__name__} supports: ({param})" } catch(e) { }
}
});
if (message) {
try {
return message.substring(window._last_message.length);
} finally {
window._last_message = message;
}
}
}
return '';
"""
while True:
chunk = driver.execute_script(script)
if chunk:
yield chunk
elif chunk != "":
break
else:
time.sleep(0.1)
finally:
if not browser:
driver.close()
time.sleep(0.1)
driver.quit()

@ -0,0 +1,77 @@
from __future__ import annotations
import requests
from ...typing import Any, CreateResult, Messages
from ..base_provider import BaseProvider
# Mapping from g4f model identifiers (keys) to the display names the
# TheB.AI API expects (values). Used by ThebApi to validate the requested
# model before sending a request.
models = {
"theb-ai": "TheB.AI",
"gpt-3.5-turbo": "GPT-3.5",
"gpt-3.5-turbo-16k": "GPT-3.5-16K",
"gpt-4-turbo": "GPT-4 Turbo",
"gpt-4": "GPT-4",
"gpt-4-32k": "GPT-4 32K",
"claude-2": "Claude 2",
"claude-1": "Claude",
"claude-1-100k": "Claude 100K",
"claude-instant-1": "Claude Instant",
"claude-instant-1-100k": "Claude Instant 100K",
"palm-2": "PaLM 2",
"palm-2-codey": "Codey",
"vicuna-13b-v1.5": "Vicuna v1.5 13B",
"llama-2-7b-chat": "Llama 2 7B",
"llama-2-13b-chat": "Llama 2 13B",
"llama-2-70b-chat": "Llama 2 70B",
"code-llama-7b": "Code Llama 7B",
"code-llama-13b": "Code Llama 13B",
"code-llama-34b": "Code Llama 34B",
"qwen-7b-chat": "Qwen 7B"
}
class ThebApi(BaseProvider):
    """Provider backed by the TheB.AI HTTP API (https://theb.ai).

    Requires a bearer token supplied through the ``auth`` argument.
    """
    url = "https://theb.ai"
    working = True
    needs_auth = True  # a bearer token is mandatory for every request

    @staticmethod
    def create_completion(
        model: str,
        messages: Messages,
        stream: bool,
        auth: str,
        proxy: str = None,
        **kwargs
    ) -> CreateResult:
        """Send the conversation to the TheB.AI chat-completions endpoint.

        Args:
            model: Key from the module-level ``models`` mapping; an empty
                value falls back to "gpt-3.5-turbo".
            messages: Conversation history to submit.
            stream: Ignored; the request is always made non-streaming.
            auth: Bearer token placed in the Authorization header.
            proxy: Optional HTTPS proxy URL.
            **kwargs: Extra model parameters (e.g. ``temperature``,
                ``top_p``, ``system_message``) merged into ``model_params``.

        Yields:
            The assistant's reply as a single string chunk.

        Raises:
            ValueError: If ``model`` is not a supported model key.
            RuntimeError: If the API responds with an error status; the
                server's response body is included in the message.
        """
        if model and model not in models:
            raise ValueError(f"Model is not supported: {model}")
        headers = {
            'accept': 'application/json',
            'authorization': f'Bearer {auth}',
            'content-type': 'application/json',
        }
        # To list the models available to the account:
        # response = requests.get("https://api.baizhi.ai/v1/models", headers=headers).json()["data"]
        # models = dict([(m["id"], m["name"]) for m in response])
        # print(json.dumps(models, indent=4))

        # "system_message" is consumed for the system prompt below; keep it
        # out of the raw model_params payload so it is not sent twice.
        extra_params = {k: v for k, v in kwargs.items() if k != "system_message"}
        data: dict[str, Any] = {
            "model": model if model else "gpt-3.5-turbo",
            "messages": messages,
            "stream": False,
            "model_params": {
                "system_prompt": kwargs.get(
                    "system_message",
                    "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."
                ),
                "temperature": 1,
                "top_p": 1,
                **extra_params
            }
        }
        response = requests.post(
            "https://api.theb.ai/v1/chat/completions",
            headers=headers,
            json=data,
            proxies={"https": proxy}
        )
        try:
            response.raise_for_status()
        except requests.HTTPError as error:
            # Surface the server's error body instead of silently swallowing
            # it behind a bare except.
            raise RuntimeError(f"Response: {next(response.iter_lines()).decode()}") from error
        # Parsed outside the try so a malformed success body raises its real
        # error rather than being misreported as an HTTP failure.
        yield response.json()["choices"][0]["message"]["content"]

@ -1,6 +1,7 @@
from .Bard import Bard from .Bard import Bard
from .Raycast import Raycast from .Raycast import Raycast
from .Theb import Theb from .Theb import Theb
from .ThebApi import ThebApi
from .HuggingChat import HuggingChat from .HuggingChat import HuggingChat
from .OpenaiChat import OpenaiChat from .OpenaiChat import OpenaiChat
from .OpenAssistant import OpenAssistant from .OpenAssistant import OpenAssistant

Loading…
Cancel
Save