another merge

pull/816/head
Bagus Indrayana 1 year ago
commit 6f999d5151

@ -141,6 +141,11 @@ response = g4f.ChatCompletion.create(..., provider=ProviderName)
### interference openai-proxy api (use with openai python package)
get requirements:
```sh
pip install -r interference/requirements.txt
```
run server:
```sh
python3 -m interference.app
@ -284,6 +289,13 @@ if __name__ == "__main__":
<td><a href="https://github.com/MIDORIBIN/langchain-gpt4free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/MIDORIBIN/langchain-gpt4free?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/MIDORIBIN/langchain-gpt4free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/MIDORIBIN/langchain-gpt4free?style=flat-square&labelColor=343b41"/></a></td>
</tr>
<tr>
<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free"><b>ChatGpt Telegram Bot</b></a></td>
<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/HexyeDEV/Telegram-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free/network/members"><img alt="Forks" src="https://img.shields.io/github/forks/HexyeDEV/Telegram-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/HexyeDEV/Telegram-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/HexyeDEV/Telegram-Chatbot-Gpt4Free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/HexyeDEV/Telegram-Chatbot-Gpt4Free?style=flat-square&labelColor=343b41"/></a></td>
</tr>
</tbody>
</table>
@ -372,4 +384,4 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
<a href="https://github.com/xtekky/gpt4free/stargazers">
<img width="500" alt="Star History Chart" src="https://api.star-history.com/svg?repos=xtekky/gpt4free&type=Date">
</a>

@ -0,0 +1,74 @@
import requests, json
from abc import ABC, abstractmethod
from ..typing import Any, CreateResult
class Equing(ABC):
    """Provider for next.eqing.tech, an OpenAI-compatible chat proxy.

    Exposes the project's standard provider attributes plus a
    ``create_completion`` generator that yields response text tokens.
    """
    url: str = 'https://next.eqing.tech/'
    working = True
    needs_auth = False
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    @staticmethod
    @abstractmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any) -> CreateResult:
        """Send a chat completion request and yield content tokens.

        :param model: model name forwarded to the upstream API.
        :param messages: OpenAI-style message dicts (role/content).
        :param stream: if True, parse the SSE stream token by token;
            if False, yield the full message content once.
        :param kwargs: optional sampling params (temperature,
            presence_penalty, frequency_penalty, top_p).
        """
        headers = {
            'authority': 'next.eqing.tech',
            'accept': 'text/event-stream',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'origin': 'https://next.eqing.tech',
            'plugins': '0',
            'pragma': 'no-cache',
            'referer': 'https://next.eqing.tech/',
            'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch': 'false',
            'x-requested-with': 'XMLHttpRequest',
        }

        json_data = {
            'messages': messages,
            'stream': stream,
            'model': model,
            'temperature': kwargs.get('temperature', 0.5),
            'presence_penalty': kwargs.get('presence_penalty', 0),
            'frequency_penalty': kwargs.get('frequency_penalty', 0),
            'top_p': kwargs.get('top_p', 1),
        }

        response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
            headers=headers, json=json_data, stream=stream)

        if not stream:
            # Non-stream responses are a single JSON document, not SSE;
            # the previous 'data: ' splitting raised IndexError here.
            yield response.json()['choices'][0]['message']['content']
            return

        # iter_lines() (not iter_content with a fixed chunk size) guarantees
        # each SSE event arrives whole, so the 'data: ' split cannot land on
        # a chunk boundary. Lines without 'content' (e.g. '[DONE]') are skipped.
        for line in response.iter_lines():
            if line and b'content' in line:
                try:
                    line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                except (json.JSONDecodeError, IndexError):
                    continue  # skip malformed/partial events defensively
                token = line_json['choices'][0]['delta'].get('content')
                if token:
                    yield token

    @classmethod
    @property
    def params(cls):
        # Human-readable summary of the supported signature (project convention).
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

@ -0,0 +1,83 @@
import requests, json, random
from abc import ABC, abstractmethod
from ..typing import Any, CreateResult
class FastGpt(ABC):
    """Provider for fastgpt.me, an OpenAI-compatible chat proxy.

    Requests are sent to a randomly chosen subdomain; responses are parsed
    either as an SSE token stream or as a single JSON document.
    """
    url: str = 'https://chat9.fastgpt.me/'
    working = True
    needs_auth = False
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    @staticmethod
    @abstractmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any) -> CreateResult:
        """Send a chat completion request and yield content tokens.

        :param model: model name forwarded to the upstream API.
        :param messages: OpenAI-style message dicts (role/content).
        :param stream: if True, parse the SSE stream token by token;
            if False, yield the full message content once.
        :param kwargs: optional sampling params (temperature,
            presence_penalty, frequency_penalty, top_p).
        """
        headers = {
            'authority': 'chat9.fastgpt.me',
            'accept': 'text/event-stream',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            # 'cookie': 'cf_clearance=idIAwtoSCn0uCzcWLGuD.KtiAJv9a1GsPduEOqIkyHU-1692278595-0-1-cb11fd7a.ab1546d4.ccf35fd7-0.2.1692278595; Hm_lvt_563fb31e93813a8a7094966df6671d3f=1691966491,1692278597; Hm_lpvt_563fb31e93813a8a7094966df6671d3f=1692278597',
            'origin': 'https://chat9.fastgpt.me',
            'plugins': '0',
            'pragma': 'no-cache',
            'referer': 'https://chat9.fastgpt.me/',
            'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch': 'false',
            'x-requested-with': 'XMLHttpRequest',
        }

        json_data = {
            'messages': messages,
            'stream': stream,
            'model': model,
            'temperature': kwargs.get('temperature', 0.5),
            'presence_penalty': kwargs.get('presence_penalty', 0),
            'frequency_penalty': kwargs.get('frequency_penalty', 0),
            'top_p': kwargs.get('top_p', 1),
        }

        # Crude load/ban spreading across known mirror subdomains.
        subdomain = random.choice([
            'jdaen979ew',
            'chat9'
        ])

        response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
            headers=headers, json=json_data, stream=stream)

        if not stream:
            # Non-stream responses are one JSON document, not SSE; previously
            # the parse failure here was silently eaten by a bare except.
            yield response.json()['choices'][0]['message']['content']
            return

        for line in response.iter_lines():
            if line and b'content' in line:
                # Narrowed from a bare `except:` — only skip events that are
                # genuinely malformed, instead of hiding every error.
                try:
                    line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                    token = line_json['choices'][0]['delta'].get('content')
                except (json.JSONDecodeError, IndexError, KeyError):
                    continue
                if token:
                    yield token

    @classmethod
    @property
    def params(cls):
        # Human-readable summary of the supported signature (project convention).
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"

@ -23,6 +23,8 @@ from .Vercel import Vercel
from .Wewordle import Wewordle
from .You import You
from .Yqcloud import Yqcloud
from .Equing import Equing
from .FastGpt import FastGpt
from .V50 import V50
__all__ = [
@ -51,5 +53,7 @@ __all__ = [
"Wewordle",
"You",
"Yqcloud",
"V50",
"Equing",
"FastGpt",
"V50"
]

@ -30,4 +30,4 @@ class BaseProvider(ABC):
("stream", "bool"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
return f"g4f.provider.{cls.__name__} supports: ({param})"

@ -1,6 +1,6 @@
from dataclasses import dataclass
from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel, Equing
@dataclass
@ -131,7 +131,7 @@ gpt_35_turbo_16k = Model(
gpt_35_turbo_16k_0613 = Model(
name="openai:gpt-3.5-turbo-16k-0613",
base_provider="openai",
best_provider=Equing,
)
gpt_4_0613 = Model(

@ -12,7 +12,6 @@ from g4f import ChatCompletion
app = Flask(__name__)
CORS(app)
@app.route("/chat/completions", methods=["POST"])
def chat_completions():
model = request.get_json().get("model", "gpt-3.5-turbo")

@ -0,0 +1,2 @@
flask_cors
watchdog~=3.0.0

@ -11,7 +11,7 @@ with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
with open("requirements.txt") as f:
required = f.read().splitlines()
VERSION = '0.0.2.1'
DESCRIPTION = (
"The official gpt4free repository | various collection of powerful language models"
)

Loading…
Cancel
Save