Merge pull request #1281 from hlohaus/go

Fix Phind and GptGo Provider
pull/1286/head
Tekky 7 months ago committed by GitHub
commit 705ebe73f2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -12,7 +12,7 @@ from .helper import format_prompt
class Berlin(AsyncGeneratorProvider):
url = "https://ai.berlin4h.top"
working = True
working = False
supports_gpt_35_turbo = True
_token = None

@ -1,6 +1,6 @@
from __future__ import annotations
from aiohttp import ClientSession
from aiohttp import ClientSession, ClientTimeout
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
@ -10,7 +10,7 @@ class ChatAnywhere(AsyncGeneratorProvider):
url = "https://chatanywhere.cn"
supports_gpt_35_turbo = True
supports_message_history = True
working = True
working = False
@classmethod
async def create_async_generator(
@ -18,6 +18,7 @@ class ChatAnywhere(AsyncGeneratorProvider):
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
temperature: float = 0.5,
**kwargs
) -> AsyncResult:
@ -36,7 +37,7 @@ class ChatAnywhere(AsyncGeneratorProvider):
"Connection": "keep-alive",
"TE": "trailers"
}
async with ClientSession(headers=headers) as session:
async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
data = {
"list": messages,
"id": "s1_qYuOLXjI3rEpc7WHfQ",

@ -65,7 +65,8 @@ class ChatgptAi(AsyncGeneratorProvider):
async with session.post(
f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
proxy=proxy,
json=data
json=data,
headers={"X-Wp-Nonce": cls._system["restNonce"]}
) as response:
response.raise_for_status()
async for line in response.content:

@ -9,7 +9,7 @@ from .helper import get_random_string
class ChatgptDemoAi(AsyncGeneratorProvider):
url = "https://chat.chatgptdemo.ai"
working = True
working = False
supports_gpt_35_turbo = True
supports_message_history = True

@ -2,6 +2,7 @@ from __future__ import annotations
from aiohttp import ClientSession
import json
import base64
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@ -23,9 +24,12 @@ class GptGo(AsyncGeneratorProvider):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"Accept-language": "en-US",
"Origin": cls.url,
"Referer": f"{cls.url}/",
"sec-ch-ua": '"Google Chrome";v="116", "Chromium";v="116", "Not?A_Brand";v="24"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
@ -33,50 +37,26 @@ class GptGo(AsyncGeneratorProvider):
async with ClientSession(
headers=headers
) as session:
async with session.get(
"https://gptgo.ai/action_get_token.php",
params={
"q": format_prompt(messages),
"hlgpt": "default",
"hl": "en"
},
async with session.post(
"https://gptgo.ai/get_token.php",
data={"ask": format_prompt(messages)},
proxy=proxy
) as response:
response.raise_for_status()
token = (await response.json(content_type=None))["token"]
token = await response.text();
token = base64.b64decode(token[10:-20]).decode()
async with session.get(
"https://gptgo.ai/action_ai_gpt.php",
params={
"token": token,
},
proxy=proxy
) as response:
"https://api.gptgo.ai/web.php",
params={"array_chat": token},
proxy=proxy
) as response:
response.raise_for_status()
start = "data: "
async for line in response.content:
line = line.decode()
if line.startswith("data: "):
if line.startswith("data: [DONE]"):
break
line = json.loads(line[len(start):-1])
if line["choices"][0]["finish_reason"] == "stop":
break
if line.startswith(b"data: [DONE]"):
break
if line.startswith(b"data: "):
line = json.loads(line[6:])
content = line["choices"][0]["delta"].get("content")
if content:
if content and content != "\n#GPTGO ":
yield content
@classmethod
@property
def params(cls):
params = [
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"

@ -0,0 +1,49 @@
from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
class GptTalkRu(AsyncGeneratorProvider):
    """Async streaming provider backed by the gpttalk.ru chat endpoint."""
    url = "https://gpttalk.ru"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield the model's reply as decoded text chunks.

        Falls back to ``gpt-3.5-turbo`` when ``model`` is empty; ``proxy``
        is forwarded to aiohttp for the POST request.
        """
        # Default model when the caller leaves it unspecified.
        model = model or "gpt-3.5-turbo"
        # Browser-like request headers expected by the site.
        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US",
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            "Origin": "https://gpttalk.ru",
            "Referer": "https://gpttalk.ru/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
            "sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
        }
        # Request body: the full message history is sent as the prompt and
        # a streamed response is requested.
        payload = {
            "model": model,
            "modelType": 1,
            "prompt": messages,
            "responseType": "stream",
        }
        async with ClientSession(headers=headers) as session:
            async with session.post(f"{cls.url}/gpt2", json=payload, proxy=proxy) as response:
                response.raise_for_status()
                # Relay raw stream chunks to the caller as they arrive.
                async for chunk in response.content.iter_any():
                    yield chunk.decode()

@ -1,103 +1,74 @@
from __future__ import annotations
import time
from urllib.parse import quote
from datetime import datetime
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
from .helper import format_prompt
from .webdriver import WebDriver, WebDriverSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from ..requests import StreamSession
class Phind(BaseProvider):
class Phind(AsyncGeneratorProvider):
url = "https://www.phind.com"
working = True
supports_gpt_4 = True
supports_stream = True
supports_message_history = True
@classmethod
def create_completion(
async def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
timeout: int = 120,
webdriver: WebDriver = None,
creative_mode: bool = None,
creative_mode: bool = False,
**kwargs
) -> CreateResult:
with WebDriverSession(webdriver, "", proxy=proxy) as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
prompt = quote(format_prompt(messages))
driver.get(f"{cls.url}/search?q={prompt}&source=searchbox")
# Register fetch hook
source = """
window._fetch = window.fetch;
window.fetch = async (url, options) => {
const response = await window._fetch(url, options);
if (url != "/api/infer/answer") {
return response;
}
copy = response.clone();
window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
return copy;
}
"""
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
"source": source
})
# Need to change settings
wait = WebDriverWait(driver, timeout)
def open_dropdown():
# Open settings dropdown
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "button.text-dark.dropdown-toggle")))
driver.find_element(By.CSS_SELECTOR, "button.text-dark.dropdown-toggle").click()
# Wait for dropdown toggle
wait.until(EC.visibility_of_element_located((By.XPATH, "//button[text()='GPT-4']")))
if model.startswith("gpt-4") or creative_mode:
# Enable GPT-4
if model.startswith("gpt-4"):
open_dropdown()
driver.find_element(By.XPATH, "//button[text()='GPT-4']").click()
# Enable creative mode
if creative_mode or creative_mode == None:
open_dropdown()
driver.find_element(By.ID, "Creative Mode").click()
# Submit changes
driver.find_element(By.CSS_SELECTOR, ".search-bar-input-group button[type='submit']").click()
# Wait for page reload
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".search-container")))
while True:
chunk = driver.execute_script("""
if(window._reader) {
chunk = await window._reader.read();
if (chunk['done']) {
return null;
}
content = '';
chunk['value'].split('\\r\\n').forEach((line, index) => {
if (line.startsWith('data: ')) {
line = line.substring('data: '.length);
if (!line.startsWith('<PHIND_METADATA>')) {
if (line) content += line;
else content += '\\n';
}
) -> AsyncResult:
headers = {
"Accept": "*/*",
"Origin": cls.url,
"Referer": f"{cls.url}/search",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
});
return content.replace('\\n\\n', '\\n');
} else {
return ''
}
""")
if chunk:
yield chunk
elif chunk != "":
break
else:
time.sleep(0.1)
async with StreamSession(
impersonate="chrome110",
proxies={"https": proxy},
timeout=timeout
) as session:
prompt = messages[-1]["content"]
data = {
"question": prompt,
"questionHistory": [
message["content"] for message in messages[:-1] if message["role"] == "user"
],
"answerHistory": [
message["content"] for message in messages if message["role"] == "assistant"
],
"webResults": [],
"options": {
"date": datetime.now().strftime("%d.%m.%Y"),
"language": "en-US",
"detailed": True,
"anonUserId": "",
"answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind Model",
"creativeMode": creative_mode,
"customLinks": []
},
"context": "",
"rewrittenQuestion": prompt
}
async with session.post(f"{cls.url}/api/infer/followup/answer", headers=headers, json=data) as response:
new_line = False
async for line in response.iter_lines():
if line.startswith(b"data: "):
chunk = line[6:]
if chunk.startswith(b"<PHIND_METADATA>"):
pass
elif chunk:
yield chunk.decode()
elif new_line:
yield "\n"
new_line = False
else:
new_line = True

@ -1,5 +1,12 @@
from __future__ import annotations
from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
from .retry_provider import RetryProvider
from .deprecated import *
from .needs_auth import *
from .unfinished import *
from .selenium import *
from .AiAsk import AiAsk
from .Aichat import Aichat
from .AiChatOnline import AiChatOnline
@ -26,6 +33,7 @@ from .GptChatly import GptChatly
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
from .GptTalkRu import GptTalkRu
from .Hashnode import Hashnode
from .Koala import Koala
from .Liaobots import Liaobots
@ -43,12 +51,6 @@ from .You import You
from .Yqcloud import Yqcloud
from .GeekGpt import GeekGpt
from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
from .retry_provider import RetryProvider
from .deprecated import *
from .needs_auth import *
from .unfinished import *
import sys
__modules__: list = [

@ -0,0 +1,104 @@
from __future__ import annotations
import time
from urllib.parse import quote
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
from ..helper import format_prompt
from ..webdriver import WebDriver, WebDriverSession
class Phind(BaseProvider):
    """Deprecated Selenium-based provider for phind.com.

    Drives a real browser session, hooks ``window.fetch`` to capture the
    streamed ``/api/infer/answer`` response, and yields its text chunks.
    """
    url = "https://www.phind.com"
    working = True
    supports_gpt_4 = True
    supports_stream = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        timeout: int = 120,
        webdriver: WebDriver = None,
        creative_mode: bool = None,
        **kwargs
    ) -> CreateResult:
        """Yield answer chunks scraped from a live phind.com search page.

        Args:
            model: A name starting with ``gpt-4`` enables the GPT-4 toggle;
                anything else uses the default Phind model.
            messages: Chat history, flattened into one prompt.
            stream: Accepted for interface compatibility; output is always
                streamed.
            proxy: Optional proxy URL for the browser session.
            timeout: Seconds to wait for page elements.
            webdriver: Existing driver to reuse inside the session wrapper.
            creative_mode: Toggle Phind's creative mode; ``None`` (the
                default) is treated as enabled below.
        """
        # BUG FIX: the original body began with a bare `driver.start_session`
        # statement BEFORE `driver` was bound by the `with` below, raising
        # NameError on every call. WebDriverSession manages the session, so
        # that stray line is removed.
        with WebDriverSession(webdriver, "", proxy=proxy) as driver:
            # Imported lazily so selenium is only required when this
            # deprecated provider is actually used.
            from selenium.webdriver.common.by import By
            from selenium.webdriver.support.ui import WebDriverWait
            from selenium.webdriver.support import expected_conditions as EC

            # Register fetch hook: clone the answer response and expose a
            # text-decoding reader so chunks can be polled from Python.
            source = """
window._fetch = window.fetch;
window.fetch = async (url, options) => {
    const response = await window._fetch(url, options);
    if (url != "/api/infer/answer") {
        return response;
    }
    copy = response.clone();
    window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
    return copy;
}
"""
            driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
                "source": source
            })
            prompt = quote(format_prompt(messages))
            driver.get(f"{cls.url}/search?q={prompt}&source=searchbox")

            # Need to change settings
            wait = WebDriverWait(driver, timeout)

            def open_dropdown():
                # Open settings dropdown
                wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "button.text-dark.dropdown-toggle")))
                driver.find_element(By.CSS_SELECTOR, "button.text-dark.dropdown-toggle").click()
                # Wait for dropdown toggle
                wait.until(EC.visibility_of_element_located((By.XPATH, "//button[text()='GPT-4']")))

            if model.startswith("gpt-4") or creative_mode:
                # Enable GPT-4
                if model.startswith("gpt-4"):
                    open_dropdown()
                    driver.find_element(By.XPATH, "//button[text()='GPT-4']").click()
                # Enable creative mode (None means "not set" -> default on)
                if creative_mode or creative_mode is None:
                    open_dropdown()
                    driver.find_element(By.ID, "Creative Mode").click()
                # Submit changes
                driver.find_element(By.CSS_SELECTOR, ".search-bar-input-group button[type='submit']").click()
                # Wait for page reload
                wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".search-container")))

            while True:
                # Poll the hooked reader; returns null when the stream is
                # done, '' while the reader is not yet installed.
                chunk = driver.execute_script("""
if(window._reader) {
    chunk = await window._reader.read();
    if (chunk['done']) {
        return null;
    }
    content = '';
    chunk['value'].split('\\r\\n').forEach((line, index) => {
        if (line.startsWith('data: ')) {
            line = line.substring('data: '.length);
            if (!line.startsWith('<PHIND_METADATA>')) {
                if (line) content += line;
                else content += '\\n';
            }
        }
    });
    return content.replace('\\n\\n', '\\n');
} else {
    return ''
}
""")
                if chunk:
                    yield chunk
                elif chunk != "":
                    # null -> server-sent stream finished
                    break
                else:
                    # Reader not installed yet; poll again shortly.
                    time.sleep(0.1)

@ -0,0 +1 @@
from .Phind import Phind
Loading…
Cancel
Save