~ | Merge pull request #1033 from Commenter123321/main

add testing for all gpt models, enhance the gui a bit
pull/1043/head
Tekky 1 year ago committed by GitHub
commit 26cd71c7f4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -0,0 +1,67 @@
import asyncio
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent.parent))
import g4f
async def test(model: g4f.Model) -> bool:
    """Stream a short completion from *model* and report whether it works.

    Tries the synchronous streaming API first; if that raises, falls back
    to the async API. Returns True on success, False on failure (with the
    error printed for debugging).
    """
    try:
        try:
            # Preferred path: synchronous streaming client.
            for response in g4f.ChatCompletion.create(
                model=model,
                messages=[{"role": "user", "content": "write a poem about a tree"}],
                temperature=0.1,
                stream=True
            ):
                print(response, end="")
            print()
        except Exception:
            # Fallback to the async client. Narrowed from a bare `except:`
            # so KeyboardInterrupt/SystemExit are not swallowed here.
            for response in await g4f.ChatCompletion.create_async(
                model=model,
                messages=[{"role": "user", "content": "write a poem about a tree"}],
                temperature=0.1,
                stream=True
            ):
                print(response, end="")
            print()
        return True
    except Exception as e:
        # Report the failing model and the first interesting traceback frame.
        print(model.name, "not working:", e)
        print(e.__traceback__.tb_next)
        return False
async def start_test():
    """Probe every listed GPT model with test() and print the working ones."""
    # Candidate models, grouped by family and context-window size.
    candidates = [
        # GPT-3.5 4K Context
        g4f.models.gpt_35_turbo,
        g4f.models.gpt_35_turbo_0613,
        # GPT-3.5 16K Context
        g4f.models.gpt_35_turbo_16k,
        g4f.models.gpt_35_turbo_16k_0613,
        # GPT-4 8K Context
        g4f.models.gpt_4,
        g4f.models.gpt_4_0613,
        # GPT-4 32K Context
        g4f.models.gpt_4_32k,
        g4f.models.gpt_4_32k_0613,
    ]
    # Models are probed sequentially, in declaration order (PEP 530 allows
    # `await` in a comprehension inside an async function).
    working = [candidate.name for candidate in candidates if await test(candidate)]
    print("working models:", working)

asyncio.run(start_test())

@ -7,10 +7,10 @@ import g4f, asyncio
print("create:", end=" ", flush=True) print("create:", end=" ", flush=True)
for response in g4f.ChatCompletion.create( for response in g4f.ChatCompletion.create(
model=g4f.models.default, model=g4f.models.gpt_4_32k_0613,
provider=g4f.Provider.GptForLove, provider=g4f.Provider.Aivvm,
messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}], messages=[{"role": "user", "content": "write a poem about a tree"}],
temperature=0.0, temperature=0.1,
stream=True stream=True
): ):
print(response, end="", flush=True) print(response, end="", flush=True)

@ -0,0 +1,6 @@
import sys
from pathlib import Path

# Make the repository root importable so `g4f` resolves without installation.
repo_root = Path(__file__).parent.parent.parent
sys.path.append(str(repo_root))

from g4f.gui import run_gui

# Launch the web GUI (blocks until the server exits).
run_gui()

@ -1,8 +1,9 @@
from __future__ import annotations from __future__ import annotations
import requests
from ..requests import StreamSession from .base_provider import BaseProvider
from .base_provider import AsyncGeneratorProvider from ..typing import CreateResult
from ..typing import AsyncResult, Messages from json import dumps
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
models = { models = {
@ -16,22 +17,20 @@ models = {
'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'}, 'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
} }
class Aivvm(AsyncGeneratorProvider): class Aivvm(BaseProvider):
url = 'https://chat.aivvm.com' url = 'https://chat.aivvm.com'
supports_stream = True
working = True
supports_gpt_35_turbo = True supports_gpt_35_turbo = True
supports_gpt_4 = True supports_gpt_4 = True
working = True
@classmethod @classmethod
async def create_async_generator( def create_completion(cls,
cls,
model: str, model: str,
messages: Messages, messages: list[dict[str, str]],
stream: bool, stream: bool,
proxy: str = None,
timeout: int = 120,
**kwargs **kwargs
) -> AsyncResult: ) -> CreateResult:
if not model: if not model:
model = "gpt-3.5-turbo" model = "gpt-3.5-turbo"
elif model not in models: elif model not in models:
@ -44,24 +43,33 @@ class Aivvm(AsyncGeneratorProvider):
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."), "prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
"temperature" : kwargs.get("temperature", 0.7) "temperature" : kwargs.get("temperature", 0.7)
} }
data = dumps(json_data)
headers = { headers = {
"Accept": "*/*", "accept" : "text/event-stream",
"Origin": cls.url, "accept-language" : "en-US,en;q=0.9",
"Referer": f"{cls.url}/", "content-type" : "application/json",
"content-length" : str(len(data)),
"sec-ch-ua" : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
"sec-ch-ua-mobile" : "?0",
"sec-ch-ua-platform": "\"Windows\"",
"sec-fetch-dest" : "empty",
"sec-fetch-mode" : "cors",
"sec-fetch-site" : "same-origin",
"sec-gpc" : "1",
"referrer" : "https://chat.aivvm.com/",
"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
} }
async with StreamSession(
impersonate="chrome107", response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, data=data, stream=True)
headers=headers, response.raise_for_status()
proxies={"https": proxy},
timeout=timeout for chunk in response.iter_content():
) as session: try:
async with session.post(f"{cls.url}/api/chat", json=json_data) as response: yield chunk.decode("utf-8")
response.raise_for_status() except UnicodeDecodeError:
async for chunk in response.iter_content(): yield chunk.decode("unicode-escape")
if b'Access denied | chat.aivvm.com used Cloudflare' in chunk:
raise ValueError("Rate Limit | use another provider")
yield chunk.decode()
@classmethod @classmethod
@property @property
@ -73,4 +81,4 @@ class Aivvm(AsyncGeneratorProvider):
('temperature', 'float'), ('temperature', 'float'),
] ]
param = ', '.join([': '.join(p) for p in params]) param = ', '.join([': '.join(p) for p in params])
return f'g4f.provider.{cls.__name__} supports: ({param})' return f'g4f.provider.{cls.__name__} supports: ({param})'

@ -1,7 +1,6 @@
from __future__ import annotations from __future__ import annotations
import random import random
import uuid
import json import json
import os import os
import uuid import uuid

@ -22,29 +22,29 @@ class DeepAi(AsyncGeneratorProvider):
proxy: str = None, proxy: str = None,
**kwargs **kwargs
) -> AsyncResult: ) -> AsyncResult:
agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"
token_js = """ token_js = """
var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36' var agent = '""" + agent + """'
var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y; var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
h = Math.round(1E11 * Math.random()) + ""; h = Math.round(1E11 * Math.random()) + "";
f = function () { f = function() {
for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI); for (var p = [], r = 0; 64 > r;) p[r] = 0 | 4294967296 * Math.sin(++r % Math.PI);
return function(z) {
return function (t) { var B, G, H, ca = [B = 1732584193, G = 4023233417, ~B, ~G],
var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y], X = [],
Z = [], x = unescape(encodeURI(z)) + "\u0080",
A = unescape(encodeURI(t)) + "\u0080", v = x.length;
z = A.length; z = --v / 4 + 2 | 15;
t = --z / 4 + 2 | 15; for (X[--z] = 8 * v; ~v;) X[v >> 2] |= x.charCodeAt(v) << 8 * v--;
for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--; for (r = x = 0; r < z; r += 16) {
for (q = A = 0; q < t; q += 16) { for (v = ca; 64 > x; v = [H = v[3], B + ((H = v[0] + [B & G | ~B & H, H & B | ~H & G, B ^ G ^ H, G ^ (B | ~H)][v = x >> 4] + p[x] + ~~X[r | [x, 5 * x + 1, 3 * x + 5, 7 * x][v] & 15]) << (v = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * v + x++ % 4]) | H >>> -v), B, G]) B = v[1] | 0, G = v[2];
for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2]; for (x = 4; x;) ca[--x] += v[x]
for (A = 4; A;) ea[--A] += z[A] }
} for (z = ""; 32 > x;) z += (ca[x >> 3] >> 4 * (1 ^ x++) & 15).toString(16);
for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16); return z.split("").reverse().join("")
return t.split("").reverse().join("") }
} }();
}();
"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x"))); "tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
""" """
@ -53,7 +53,7 @@ f = function () {
api_key = js2py.eval_js(token_js) api_key = js2py.eval_js(token_js)
headers = { headers = {
"api-key": api_key, "api-key": api_key,
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36", "User-Agent": agent,
**kwargs.get("headers", {}) **kwargs.get("headers", {})
} }
async with ClientSession( async with ClientSession(
@ -65,7 +65,10 @@ f = function () {
response.raise_for_status() response.raise_for_status()
async for stream in response.content.iter_any(): async for stream in response.content.iter_any():
if stream: if stream:
yield stream.decode() try:
yield stream.decode("utf-8")
except UnicodeDecodeError:
yield stream.decode("unicode-escape")
def get_api_key(user_agent: str): def get_api_key(user_agent: str):

@ -79,7 +79,7 @@
<span>Clear Conversations</span> <span>Clear Conversations</span>
</button> </button>
<div class="info"> <div class="info">
<i class="fa-brands fa-discord"></i> <i class="fa-brands fa-telegram"></i>
<span class="convo-title">telegram: <a href="https://t.me/g4f_official">@g4f_official</a><br> <span class="convo-title">telegram: <a href="https://t.me/g4f_official">@g4f_official</a><br>
</span> </span>
</div> </div>
@ -118,9 +118,13 @@
<div class="field"> <div class="field">
<select name="model" id="model"> <select name="model" id="model">
<option value="gpt-3.5-turbo" selected>gpt-3.5</option> <option value="gpt-3.5-turbo" selected>gpt-3.5</option>
<option value="gpt-4">gpt-4</option>
<option value="gpt-3.5-turbo-0613">gpt-3.5 fast</option> <option value="gpt-3.5-turbo-0613">gpt-3.5 fast</option>
<option value="gpt-3.5-turbo-16k">gpt-3.5 16k</option> <option value="gpt-3.5-turbo-16k">gpt-3.5 16k</option>
<option value="gpt-3.5-turbo-16k-0613">gpt-3.5 16k fast</option>
<option value="gpt-4">gpt-4</option>
<option value="gpt-4-0613">gpt-4 fast</option>
<option value="gpt-4-32k">gpt-4 32k</option>
<option value="gpt-4-32k-0613">gpt-4 32k fast</option>
</select> </select>
</div> </div>
<div class="field"> <div class="field">

@ -144,7 +144,7 @@ const ask_gpt = async (message) => {
chunk = new TextDecoder().decode(value); chunk = new TextDecoder().decode(value);
if (chunk.includes(`<form id="challenge-form" action="/backend-api/v2/conversation?`)) { if (chunk.includes('<form id="challenge-form" action="/backend-api/v2/conversation?"')) {
chunk = `cloudflare token expired, please refresh the page.`; chunk = `cloudflare token expired, please refresh the page.`;
} }
@ -161,7 +161,7 @@ const ask_gpt = async (message) => {
} }
// if text contains : // if text contains :
if (text.includes(`instead. Maintaining this website and API costs a lot of money`)) { if (text.includes("instead. Maintaining this website and API costs a lot of money")) {
document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please reload / refresh cache and try again or use a differnet browser"; document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please reload / refresh cache and try again or use a differnet browser";
} }
@ -547,7 +547,7 @@ colorThemes.forEach((themeOption) => {
setTimeout(() => { setTimeout(() => {
ads_div = document.querySelector('.ads') ads_div = document.querySelector('.ads')
if (ads_div.getElementsByTagName("iframe").length == 0) { if (ads_div != null && ads_div.getElementsByTagName("iframe").length == 0) {
ads_div.removeChild(ads_div.querySelector('.sorry')) ads_div.removeChild(ads_div.querySelector('.sorry'))
ads_div.innerHTML += ` ads_div.innerHTML += `

@ -33,17 +33,17 @@ class Backend_Api:
conversation = request.json['meta']['content']['conversation'] conversation = request.json['meta']['content']['conversation']
prompt = request.json['meta']['content']['parts'][0] prompt = request.json['meta']['content']['parts'][0]
model = request.json['model'] model = request.json['model']
provider = get_provider(request.json.get('provider')) provider = request.json.get('provider').split("g4f.Provider.")[1]
messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt] messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
def stream(): def stream():
if provider: if provider:
answer = g4f.ChatCompletion.create(model=model, answer = g4f.ChatCompletion.create(model=model,
provider=provider, messages=messages, stream=True) provider=get_provider(provider), messages=messages, stream=True)
else: else:
answer = g4f.ChatCompletion.create(model=model, answer = g4f.ChatCompletion.create(model=model,
messages=messages, stream=True) messages=messages, stream=True)
for token in answer: for token in answer:
yield token yield token

@ -1,28 +1,56 @@
from requests import get
from datetime import datetime from datetime import datetime
from duckduckgo_search import DDGS
ddgs = DDGS(timeout=20)
def search(internet_access, prompt): def search(internet_access, prompt):
print(prompt) print(prompt)
try: try:
if internet_access == False: if not internet_access:
return [] return []
search = get('https://ddg-api.herokuapp.com/search', params={ results = duckduckgo_search(q=prompt)
'query': prompt['content'],
'limit': 3 if not search:
}) return []
blob = '' blob = ''
for index, result in enumerate(search.json()): for index, result in enumerate(results):
blob += f'[{index}] "{result["snippet"]}"\nURL:{result["link"]}\n\n' blob += f'[{index}] "{result["body"]}"\nURL:{result["href"]}\n\n'
date = datetime.now().strftime('%d/%m/%y') date = datetime.now().strftime('%d/%m/%y')
blob += f'current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.' blob += f'Current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.'
return [{'role': 'user', 'content': blob}] return [{'role': 'user', 'content': blob}]
except Exception as e: except Exception as e:
return [] print("Couldn't search DuckDuckGo:", e)
print(e.__traceback__.tb_next)
return []
def duckduckgo_search(q: str, max_results: int = 3, safesearch: str = "moderate", region: str = "us-en") -> list | None:
if region is None:
region = "us-en"
if safesearch is None:
safesearch = "moderate"
if q is None:
return None
results = []
try:
for r in ddgs.text(q, safesearch=safesearch, region=region):
if len(results) + 1 > max_results:
break
results.append(r)
except Exception as e:
print(e)
return results

@ -1,17 +1,14 @@
import g4f import g4f
from g4f import BaseProvider
def get_provider(provider: str) -> g4f.Provider.BaseProvider:
def get_provider(provider: str) -> BaseProvider | None:
if isinstance(provider, str): if isinstance(provider, str):
print(provider) print(provider)
if provider == 'g4f.Provider.Auto': if provider == 'g4f.Provider.Auto':
return None return None
if provider in g4f.Provider.ProviderUtils.convert: return g4f.Provider.ProviderUtils.convert.get(provider)
return g4f.Provider.ProviderUtils.convert[provider]
else:
return None
else: else:
return None return None

@ -65,14 +65,16 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo', name = 'gpt-3.5-turbo',
base_provider = 'openai', base_provider = 'openai',
best_provider = RetryProvider([ best_provider = RetryProvider([
DeepAi, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, FreeGpt, Ylokh DeepAi, Aivvm, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, FreeGpt, Ylokh
]) ])
) )
gpt_4 = Model( gpt_4 = Model(
name = 'gpt-4', name = 'gpt-4',
base_provider = 'openai', base_provider = 'openai',
best_provider = Bing best_provider = RetryProvider([
Aivvm, Bing
])
) )
# Bard # Bard
@ -165,27 +167,32 @@ gpt_35_turbo_16k = Model(
gpt_35_turbo_16k_0613 = Model( gpt_35_turbo_16k_0613 = Model(
name = 'gpt-3.5-turbo-16k-0613', name = 'gpt-3.5-turbo-16k-0613',
base_provider = 'openai') base_provider = 'openai',
best_provider = Aivvm
)
gpt_35_turbo_0613 = Model( gpt_35_turbo_0613 = Model(
name = 'gpt-3.5-turbo-0613', name = 'gpt-3.5-turbo-0613',
base_provider = 'openai', base_provider = 'openai',
best_provider=Aivvm best_provider = Aivvm
) )
gpt_4_0613 = Model( gpt_4_0613 = Model(
name = 'gpt-4-0613', name = 'gpt-4-0613',
base_provider = 'openai' base_provider = 'openai',
best_provider = Aivvm
) )
gpt_4_32k = Model( gpt_4_32k = Model(
name = 'gpt-4-32k', name = 'gpt-4-32k',
base_provider = 'openai' base_provider = 'openai',
best_provider = Aivvm
) )
gpt_4_32k_0613 = Model( gpt_4_32k_0613 = Model(
name = 'gpt-4-32k-0613', name = 'gpt-4-32k-0613',
base_provider = 'openai' base_provider = 'openai',
best_provider = Aivvm
) )
text_ada_001 = Model( text_ada_001 = Model(

@ -10,5 +10,4 @@ flask
flask-cors flask-cors
typing-extensions typing-extensions
PyExecJS PyExecJS
flask duckduckgo-search
flask-cors
Loading…
Cancel
Save