Mirror of https://github.com/xtekky/gpt4free.git (synced 2024-11-17 09:25:50 +00:00)

Unify g4f tools into one CLI

parent 4829f3bfec
commit 77697be333
README.md (49 lines changed)

@@ -7,18 +7,39 @@ By using this repository or any code related to it, you agree to the [legal noti
pip install -U g4f
```

or if you just want to use the gui or interference api, install with [pipx](https://pypa.github.io/pipx/)

```sh
pipx install g4f
```

## New features

- Telegram Channel: https://t.me/g4f_channel
- g4f GUI is back !!:

Install g4f with pip and then run:

```py
```sh
g4f gui
```

or

```sh
python -m g4f.gui.run
```

preview:

<img width="1470" alt="image" src="https://github.com/xtekky/gpt4free/assets/98614666/57ad818a-a0dd-4eae-83e1-3fff848ae040">

- run interference from pypi package:
- run interference api from pypi package:

```sh
g4f api
```

or

```py
python -m g4f.interference.run
```
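Besides the `g4f gui` command, the GUI can also be started from Python; a minimal sketch, assuming the `run_gui(host, port, debug)` signature shown in the `g4f/gui` changes further down in this commit:

```py
from g4f.gui import run_gui

# Roughly equivalent to `g4f gui -port 8080 -debug`.
run_gui(host="0.0.0.0", port=8080, debug=True)
```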
@@ -33,7 +54,7 @@ python -m g4f.interference.run

- [Usage](#usage)
- [The `g4f` Package](#the-g4f-package)
- [interference openai-proxy api (use with openai python package)](#interference-openai-proxy-api-use-with-openai-python-package)
- [Providers](#models)
- [Models](#models)
- [gpt-3.5 / gpt-4](#gpt-35--gpt-4)
- [Other Models](#other-models)
- [Related gpt4free projects](#related-gpt4free-projects)
@@ -319,26 +340,26 @@ print(f"Result:", response)

### interference openai-proxy api (use with openai python package)

#### run interference from pypi package:
#### run interference api from pypi package:

```py
from g4f.interference import run_interference
from g4f.api import run_api

run_interference()
run_api()
```

#### run interference from repo:
#### run interference api from repo:

If you want to use the embedding function, you need to get a huggingface token. You can get one at https://huggingface.co/settings/tokens make sure your role is set to write. If you have your token, just use it instead of the OpenAI api-key.

get requirements:

```sh
pip install -r etc/interference/requirements.txt
```

run server:

```sh
python3 -m etc/interference.app
g4f api
```

or

```sh
python -m g4f.api
```
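The README snippets above only cover starting the server; for the client side, a minimal sketch of pointing the pre-1.0 `openai` Python package at the local interference API (the base URL and throwaway key are assumptions, not part of this commit):

```py
import openai

# The proxy does not check the key, but the openai package requires one to be set.
openai.api_key = "not-needed"
# Route requests to the local interference API instead of api.openai.com.
openai.api_base = "http://localhost:1337"

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response["choices"][0]["message"]["content"])
```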
etc/interference/app.py (deleted)

@@ -1,163 +0,0 @@
import json
import time
import random
import string
import requests

from typing import Any
from flask import Flask, request
from flask_cors import CORS
from transformers import AutoTokenizer
from g4f import ChatCompletion

app = Flask(__name__)
CORS(app)

@app.route('/chat/completions', methods=['POST'])
def chat_completions():
    model = request.get_json().get('model', 'gpt-3.5-turbo')
    stream = request.get_json().get('stream', False)
    messages = request.get_json().get('messages')

    response = ChatCompletion.create(model = model,
                                     stream = stream, messages = messages)

    completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
    completion_timestamp = int(time.time())

    if not stream:
        return {
            'id': f'chatcmpl-{completion_id}',
            'object': 'chat.completion',
            'created': completion_timestamp,
            'model': model,
            'choices': [
                {
                    'index': 0,
                    'message': {
                        'role': 'assistant',
                        'content': response,
                    },
                    'finish_reason': 'stop',
                }
            ],
            'usage': {
                'prompt_tokens': None,
                'completion_tokens': None,
                'total_tokens': None,
            },
        }

    def streaming():
        for chunk in response:
            completion_data = {
                'id': f'chatcmpl-{completion_id}',
                'object': 'chat.completion.chunk',
                'created': completion_timestamp,
                'model': model,
                'choices': [
                    {
                        'index': 0,
                        'delta': {
                            'content': chunk,
                        },
                        'finish_reason': None,
                    }
                ],
            }

            content = json.dumps(completion_data, separators=(',', ':'))
            yield f'data: {content}\n\n'
            time.sleep(0.1)

        end_completion_data: dict[str, Any] = {
            'id': f'chatcmpl-{completion_id}',
            'object': 'chat.completion.chunk',
            'created': completion_timestamp,
            'model': model,
            'choices': [
                {
                    'index': 0,
                    'delta': {},
                    'finish_reason': 'stop',
                }
            ],
        }
        content = json.dumps(end_completion_data, separators=(',', ':'))
        yield f'data: {content}\n\n'

    return app.response_class(streaming(), mimetype='text/event-stream')


# Get the embedding from huggingface
def get_embedding(input_text, token):
    huggingface_token = token
    embedding_model = 'sentence-transformers/all-mpnet-base-v2'
    max_token_length = 500

    # Load the tokenizer for the 'all-mpnet-base-v2' model
    tokenizer = AutoTokenizer.from_pretrained(embedding_model)
    # Tokenize the text and split the tokens into chunks of 500 tokens each
    tokens = tokenizer.tokenize(input_text)
    token_chunks = [tokens[i:i + max_token_length]
                    for i in range(0, len(tokens), max_token_length)]

    # Initialize an empty list
    embeddings = []

    # Create embeddings for each chunk
    for chunk in token_chunks:
        # Convert the chunk tokens back to text
        chunk_text = tokenizer.convert_tokens_to_string(chunk)

        # Use the Hugging Face API to get embeddings for the chunk
        api_url = f'https://api-inference.huggingface.co/pipeline/feature-extraction/{embedding_model}'
        headers = {'Authorization': f'Bearer {huggingface_token}'}
        chunk_text = chunk_text.replace('\n', ' ')

        # Make a POST request to get the chunk's embedding
        response = requests.post(api_url, headers=headers, json={
            'inputs': chunk_text, 'options': {'wait_for_model': True}})

        # Parse the response and extract the embedding
        chunk_embedding = response.json()
        # Append the embedding to the list
        embeddings.append(chunk_embedding)

    # averaging all the embeddings
    # this isn't very effective
    # someone a better idea?
    num_embeddings = len(embeddings)
    average_embedding = [sum(x) / num_embeddings for x in zip(*embeddings)]
    embedding = average_embedding
    return embedding


@app.route('/embeddings', methods=['POST'])
def embeddings():
    input_text_list = request.get_json().get('input')
    input_text = ' '.join(map(str, input_text_list))
    token = request.headers.get('Authorization').replace('Bearer ', '')
    embedding = get_embedding(input_text, token)

    return {
        'data': [
            {
                'embedding': embedding,
                'index': 0,
                'object': 'embedding'
            }
        ],
        'model': 'text-embedding-ada-002',
        'object': 'list',
        'usage': {
            'prompt_tokens': None,
            'total_tokens': None
        }
    }

def main():
    app.run(host='0.0.0.0', port=1337, debug=True)

if __name__ == '__main__':
    main()
@ -1,5 +0,0 @@
|
||||
flask_cors
|
||||
watchdog~=3.0.0
|
||||
transformers
|
||||
tensorflow
|
||||
torch
|
g4f/api/__init__.py (new file, 162 lines)

@@ -0,0 +1,162 @@
import json
import random
import string
import time

import requests
from flask import Flask, request
from flask_cors import CORS
from transformers import AutoTokenizer

from g4f import ChatCompletion

app = Flask(__name__)
CORS(app)


@app.route("/")
def index():
    return "interference api, url: http://127.0.0.1:1337"


@app.route("/chat/completions", methods=["POST"])
def chat_completions():
    model = request.get_json().get("model", "gpt-3.5-turbo")
    stream = request.get_json().get("stream", False)
    messages = request.get_json().get("messages")

    response = ChatCompletion.create(model=model, stream=stream, messages=messages)

    completion_id = "".join(random.choices(string.ascii_letters + string.digits, k=28))
    completion_timestamp = int(time.time())

    if not stream:
        return {
            "id": f"chatcmpl-{completion_id}",
            "object": "chat.completion",
            "created": completion_timestamp,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": response,
                    },
                    "finish_reason": "stop",
                }
            ],
            "usage": {
                "prompt_tokens": None,
                "completion_tokens": None,
                "total_tokens": None,
            },
        }

    def streaming():
        for chunk in response:
            completion_data = {
                "id": f"chatcmpl-{completion_id}",
                "object": "chat.completion.chunk",
                "created": completion_timestamp,
                "model": model,
                "choices": [
                    {
                        "index": 0,
                        "delta": {
                            "content": chunk,
                        },
                        "finish_reason": None,
                    }
                ],
            }

            content = json.dumps(completion_data, separators=(",", ":"))
            yield f"data: {content}\n\n"
            time.sleep(0.1)

        end_completion_data = {
            "id": f"chatcmpl-{completion_id}",
            "object": "chat.completion.chunk",
            "created": completion_timestamp,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                }
            ],
        }
        content = json.dumps(end_completion_data, separators=(",", ":"))
        yield f"data: {content}\n\n"

    return app.response_class(streaming(), mimetype="text/event-stream")


# Get the embedding from huggingface
def get_embedding(input_text, token):
    huggingface_token = token
    embedding_model = "sentence-transformers/all-mpnet-base-v2"
    max_token_length = 500

    # Load the tokenizer for the 'all-mpnet-base-v2' model
    tokenizer = AutoTokenizer.from_pretrained(embedding_model)
    # Tokenize the text and split the tokens into chunks of 500 tokens each
    tokens = tokenizer.tokenize(input_text)
    token_chunks = [
        tokens[i : i + max_token_length]
        for i in range(0, len(tokens), max_token_length)
    ]

    # Initialize an empty list
    embeddings = []

    # Create embeddings for each chunk
    for chunk in token_chunks:
        # Convert the chunk tokens back to text
        chunk_text = tokenizer.convert_tokens_to_string(chunk)

        # Use the Hugging Face API to get embeddings for the chunk
        api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{embedding_model}"
        headers = {"Authorization": f"Bearer {huggingface_token}"}
        chunk_text = chunk_text.replace("\n", " ")

        # Make a POST request to get the chunk's embedding
        response = requests.post(
            api_url,
            headers=headers,
            json={"inputs": chunk_text, "options": {"wait_for_model": True}},
        )

        # Parse the response and extract the embedding
        chunk_embedding = response.json()
        # Append the embedding to the list
        embeddings.append(chunk_embedding)

    # averaging all the embeddings
    # this isn't very effective
    # someone a better idea?
    num_embeddings = len(embeddings)
    average_embedding = [sum(x) / num_embeddings for x in zip(*embeddings)]
    embedding = average_embedding
    return embedding


@app.route("/embeddings", methods=["POST"])
def embeddings():
    input_text_list = request.get_json().get("input")
    input_text = " ".join(map(str, input_text_list))
    token = request.headers.get("Authorization").replace("Bearer ", "")
    embedding = get_embedding(input_text, token)

    return {
        "data": [{"embedding": embedding, "index": 0, "object": "embedding"}],
        "model": "text-embedding-ada-002",
        "object": "list",
        "usage": {"prompt_tokens": None, "total_tokens": None},
    }


def run_api():
    app.run(host="0.0.0.0", port=1337)
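With `run_api()` listening on port 1337, the two routes above can be exercised directly; a hedged smoke-test sketch using `requests` (the Hugging Face token is a placeholder you must supply yourself):

```py
import requests

BASE = "http://127.0.0.1:1337"

# Non-streaming chat completion against the /chat/completions route above.
chat = requests.post(
    f"{BASE}/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "stream": False,
        "messages": [{"role": "user", "content": "Hello"}],
    },
)
print(chat.json()["choices"][0]["message"]["content"])

# Streaming: the route yields Server-Sent Events ("data: {...}\n\n" chunks).
with requests.post(
    f"{BASE}/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "stream": True,
        "messages": [{"role": "user", "content": "Hello"}],
    },
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if line:
            print(line.decode())

# Embeddings: the handler forwards the bearer token to the Hugging Face API.
emb = requests.post(
    f"{BASE}/embeddings",
    headers={"Authorization": "Bearer hf_xxx"},  # placeholder token
    json={"input": ["some text to embed"]},
)
print(len(emb.json()["data"][0]["embedding"]))
```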
g4f/api/run.py (new file, 4 lines)

@@ -0,0 +1,4 @@
from g4f.api import run_api

if __name__ == "__main__":
    run_api()
g4f/cli.py (new file, 28 lines)

@@ -0,0 +1,28 @@
import argparse

from g4f.api import run_api
from g4f.gui.run import gui_parser, run_gui_args


def run_gui(args):
    print("Running GUI...")


def main():
    parser = argparse.ArgumentParser(description="Run gpt4free")
    subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.")
    subparsers.add_parser("api")
    subparsers.add_parser("gui", parents=[gui_parser()], add_help=False)

    args = parser.parse_args()
    if args.mode == "api":
        run_api()
    elif args.mode == "gui":
        run_gui_args(args)
    else:
        parser.print_help()
        exit(1)


if __name__ == "__main__":
    main()
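Because the `gui` subcommand mounts the GUI's own parser via `parents=[gui_parser()]`, its flags behave exactly like the standalone `python -m g4f.gui.run` invocation; a small sketch (assuming the package is importable) of what the shared parser produces:

```py
from g4f.gui.run import gui_parser

# `g4f gui -port 8080 -debug` yields the same namespace as:
args = gui_parser().parse_args(["-port", "8080", "-debug"])
print(args.host, args.port, args.debug)  # 0.0.0.0 8080 True
```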
g4f/gui/__init__.py

@@ -27,4 +27,4 @@ def run_gui(host: str = '0.0.0.0', port: int = 80, debug: bool = False) -> None:

    print(f"Running on port {config['port']}")
    app.run(**config)
    print(f"Closing port {config['port']}")
    print(f"Closing port {config['port']}")
g4f/gui/run.py

@@ -1,18 +1,24 @@
from g4f.gui import run_gui
from argparse import ArgumentParser

from g4f.gui import run_gui


if __name__ == '__main__':

    parser = ArgumentParser(description='Run the GUI')

    parser.add_argument('-host', type=str, default='0.0.0.0', help='hostname')
    parser.add_argument('-port', type=int, default=80, help='port')
    parser.add_argument('-debug', action='store_true', help='debug mode')

    args = parser.parse_args()
    port = args.port
def gui_parser():
    parser = ArgumentParser(description="Run the GUI")
    parser.add_argument("-host", type=str, default="0.0.0.0", help="hostname")
    parser.add_argument("-port", type=int, default=80, help="port")
    parser.add_argument("-debug", action="store_true", help="debug mode")
    return parser


def run_gui_args(args):
    host = args.host
    port = args.port
    debug = args.debug

    run_gui(host, port, debug)
    run_gui(host, port, debug)


if __name__ == "__main__":
    parser = gui_parser()
    args = parser.parse_args()
    run_gui_args(args)
g4f/interference/__init__.py (deleted)

@@ -1,94 +0,0 @@
import json
import time
import random
import string

from typing import Any
from flask import Flask, request
from flask_cors import CORS
from g4f import ChatCompletion

app = Flask(__name__)
CORS(app)

@app.route('/')
def index():
    return 'interference api, url: http://127.0.0.1:1337'

@app.route('/chat/completions', methods=['POST'])
def chat_completions():
    model = request.get_json().get('model', 'gpt-3.5-turbo')
    stream = request.get_json().get('stream', False)
    messages = request.get_json().get('messages')

    response = ChatCompletion.create(model = model,
                                     stream = stream, messages = messages)

    completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
    completion_timestamp = int(time.time())

    if not stream:
        return {
            'id': f'chatcmpl-{completion_id}',
            'object': 'chat.completion',
            'created': completion_timestamp,
            'model': model,
            'choices': [
                {
                    'index': 0,
                    'message': {
                        'role': 'assistant',
                        'content': response,
                    },
                    'finish_reason': 'stop',
                }
            ],
            'usage': {
                'prompt_tokens': None,
                'completion_tokens': None,
                'total_tokens': None,
            },
        }

    def streaming():
        for chunk in response:
            completion_data = {
                'id': f'chatcmpl-{completion_id}',
                'object': 'chat.completion.chunk',
                'created': completion_timestamp,
                'model': model,
                'choices': [
                    {
                        'index': 0,
                        'delta': {
                            'content': chunk,
                        },
                        'finish_reason': None,
                    }
                ],
            }

            content = json.dumps(completion_data, separators=(',', ':'))
            yield f'data: {content}\n\n'
            time.sleep(0.1)

        end_completion_data: dict[str, Any] = {
            'id': f'chatcmpl-{completion_id}',
            'object': 'chat.completion.chunk',
            'created': completion_timestamp,
            'model': model,
            'choices': [
                {
                    'index': 0,
                    'delta': {},
                    'finish_reason': 'stop',
                }
            ],
        }
        content = json.dumps(end_completion_data, separators=(',', ':'))
        yield f'data: {content}\n\n'

    return app.response_class(streaming(), mimetype='text/event-stream')

def run_interference():
    app.run(host='0.0.0.0', port=1337, debug=True)
g4f/interference/run.py (deleted)

@@ -1,4 +0,0 @@
from g4f.interference import run_interference

if __name__ == '__main__':
    run_interference()
requirements.txt

@@ -10,4 +10,6 @@ flask
flask-cors
typing-extensions
PyExecJS
duckduckgo-search
duckduckgo-search
transformers
tensorflow
setup.py (15 lines changed)

@@ -11,10 +11,7 @@ with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
with open("requirements.txt") as f:
    required = f.read().splitlines()

with open("etc/interference/requirements.txt") as f:
    api_required = f.read().splitlines()

VERSION = '0.1.6.1'
VERSION = "0.1.6.1"
DESCRIPTION = (
    "The official gpt4free repository | various collection of powerful language models"
)

@@ -29,13 +26,13 @@ setup(
    long_description_content_type="text/markdown",
    long_description=long_description,
    packages=find_packages(),
    package_data={"g4f": ["g4f/gui/client/*", "g4f/gui/server/*"]},
    package_data={
        "g4f": ["g4f/interference/*", "g4f/gui/client/*", "g4f/gui/server/*"]
    },
    include_package_data=True,
    data_files=["etc/interference/app.py"],
    install_requires=required,
    extras_require={"api": api_required},
    entry_points={
        "console_scripts": ["g4f=interference.app:main"],
        "console_scripts": ["g4f=g4f.cli:main"],
    },
    url="https://github.com/xtekky/gpt4free",  # Link to your GitHub repository
    project_urls={

@@ -75,4 +72,4 @@ setup(
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
    ],
)
)