import logging
import json
from typing import List, Union

import uvicorn
import nest_asyncio
from fastapi import FastAPI, Response, Request
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
from pydantic import BaseModel

import g4f
import g4f.debug
from g4f.client import Client
from g4f.typing import Messages

class ChatCompletionsConfig(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible subset)."""
    messages: Messages                       # conversation history to complete
    model: str                               # g4f model identifier
    provider: Union[str, None] = None        # optional explicit provider name
    stream: bool = False                     # True -> server-sent events response
    temperature: Union[float, None] = None
    max_tokens: Union[int, None] = None
    # `List[str]` (not `list[str]`) for consistency with the typing imports
    # above and Python 3.8 compatibility.
    stop: Union[List[str], str, None] = None
    api_key: Union[str, None] = None         # may also come from Authorization header
class Api:
    """Expose g4f through an OpenAI-compatible FastAPI application."""

    def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
                 list_ignored_providers: List[str] = None) -> None:
        """
        Args:
            engine: The g4f module itself (kept for backward compatibility).
            debug: When True, enables g4f debug logging.
            sentry: Stored but unused here; kept for interface compatibility.
            list_ignored_providers: Provider names to exclude from completion.
        """
        self.engine = engine
        self.debug = debug
        self.sentry = sentry
        self.list_ignored_providers = list_ignored_providers
        if debug:
            g4f.debug.logging = True
        self.client = Client()

        # Allow re-entrant event loops: uvicorn runs its own loop while some
        # g4f providers call into asyncio themselves.
        nest_asyncio.apply()

        self.app = FastAPI()
        self.routes()

    def routes(self):
        """Register all HTTP routes on the FastAPI application."""

        @self.app.get("/")
        async def read_root():
            return RedirectResponse("/v1", 302)

        @self.app.get("/v1")
        async def read_root_v1():
            return HTMLResponse('g4f API: Go to '
                                '<a href="/v1/chat/completions">chat/completions</a> '
                                'or <a href="/v1/models">models</a>.')

        @self.app.get("/v1/models")
        async def models():
            # List every known model in OpenAI's /v1/models response shape.
            model_list = dict(
                (model, g4f.ModelUtils.convert[model])
                for model in g4f.Model.__all__()
            )
            model_list = [{
                'id': model_id,
                'object': 'model',
                'created': 0,
                'owned_by': model.base_provider
            } for model_id, model in model_list.items()]
            return JSONResponse(model_list)

        @self.app.get("/v1/models/{model_name}")
        async def model_info(model_name: str):
            try:
                model_info = g4f.ModelUtils.convert[model_name]
                return JSONResponse({
                    'id': model_name,
                    'object': 'model',
                    'created': 0,
                    'owned_by': model_info.base_provider
                })
            # Narrowed from a bare `except:` — the guarded lookup can only
            # raise KeyError for an unknown model name; a bare except would
            # also swallow KeyboardInterrupt/SystemExit.
            except KeyError:
                return JSONResponse({"error": "The model does not exist."})

        @self.app.post("/v1/chat/completions")
        async def chat_completions(config: ChatCompletionsConfig = None, request: Request = None, provider: str = None):
            try:
                config.provider = provider if config.provider is None else config.provider
                # Fall back to a Bearer token in the Authorization header
                # when the body did not carry an api_key.
                if config.api_key is None and request is not None:
                    auth_header = request.headers.get("Authorization")
                    if auth_header is not None:
                        auth_header = auth_header.split(None, 1)[-1]
                        if auth_header and auth_header != "Bearer":
                            config.api_key = auth_header
                response = self.client.chat.completions.create(
                    **config.dict(exclude_none=True),
                    ignored=self.list_ignored_providers
                )
            except Exception as e:
                logging.exception(e)
                return Response(content=format_exception(e, config), status_code=500, media_type="application/json")

            if not config.stream:
                return JSONResponse(response.to_json())

            def streaming():
                # Server-sent events: one `data:` line per completion chunk.
                try:
                    for chunk in response:
                        yield f"data: {json.dumps(chunk.to_json())}\n\n"
                except GeneratorExit:
                    pass  # client disconnected mid-stream; stop silently
                except Exception as e:
                    logging.exception(e)
                    yield f'data: {format_exception(e, config)}'
            return StreamingResponse(streaming(), media_type="text/event-stream")

        @self.app.post("/v1/completions")
        async def completions():
            return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")

    def run(self, ip):
        """Start the server; `ip` is a single "host:port" string."""
        split_ip = ip.split(":")
        uvicorn.run(app=self.app, host=split_ip[0], port=int(split_ip[1]), use_colors=False)
def format_exception(e: Exception, config: ChatCompletionsConfig) -> str:
    """Serialize an exception, plus model/provider context, as a JSON error body.

    Prefers the provider/model actually used (from g4f's last-provider record)
    and falls back to the values requested in `config`.
    """
    last_provider = g4f.get_last_provider(True)
    return json.dumps({
        "error": {"message": f"{e.__class__.__name__}: {e}"},
        # Robustness fix: the original emitted None when the last-provider
        # record existed but lacked the key; now fall back to the request.
        "model": (last_provider.get("model") if last_provider else None) or config.model,
        "provider": (last_provider.get("name") if last_provider else None) or config.provider
    })
def run_api(host: str = '0.0.0.0', port: int = 1337, debug: bool = False, use_colors=True) -> None:
    """Construct the Api wrapper and serve it with uvicorn.

    Bug fix: uvicorn must be handed the ASGI application (`api.app`), not the
    `Api` wrapper instance, which is not ASGI-callable — `Api.run` already
    passes `self.app`. Local also renamed from `app` to `api` to avoid
    confusing the wrapper with the FastAPI object.
    """
    print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]')
    api = Api(engine=g4f, debug=debug)
    uvicorn.run(app=api.app, host=host, port=port, use_colors=use_colors)