feature: api server (alpha)

`aimg run-api-server`

Proof of concept for now
pull/335/head
Bryce 1 year ago committed by Bryce Drennan
parent 38ac0b7f54
commit 14739bc90b

@ -486,6 +486,7 @@ A: The AI models are cached in `~/.cache/` (or `HUGGINGFACE_HUB_CACHE`). To dele
**13.0.0**
- 🎉 feature: multi-controlnet support. pass in multiple `--control-mode`, `--control-image`, and `--control-image-raw` arguments.
- 🎉 feature: "better" memory management. If GPU is full, least-recently-used model is moved to RAM.
- alpha feature: `aimg run-api-server` command. Runs an HTTP web server (not finished). After running, visit http://127.0.0.1:8000/docs for the API docs.
- feature: add colorization controlnet. improve `aimg colorize` command
- feature: [disabled] inpainting controlnet can be used instead of finetuned inpainting model
- The inpainting controlnet doesn't work as well as the finetuned model

@ -8,6 +8,7 @@ from imaginairy.cli.describe import describe_cmd
from imaginairy.cli.edit import edit_cmd
from imaginairy.cli.edit_demo import edit_demo_cmd
from imaginairy.cli.imagine import imagine_cmd
from imaginairy.cli.run_api import run_api_server_cmd
from imaginairy.cli.train import prep_images_cmd, prune_ckpt_cmd, train_concept_cmd
from imaginairy.cli.upscale import upscale_cmd
@ -48,6 +49,7 @@ aimg.add_command(prep_images_cmd, name="prep-images")
aimg.add_command(prune_ckpt_cmd, name="prune-ckpt")
aimg.add_command(train_concept_cmd, name="train-concept")
aimg.add_command(upscale_cmd, name="upscale")
aimg.add_command(run_api_server_cmd, name="run-api-server")
@aimg.command()

@ -0,0 +1,14 @@
import click
@click.command("run-api-server")
def run_api_server_cmd():
    """Run an HTTP API server.

    Starts a uvicorn server hosting the FastAPI app from
    ``imaginairy.http.app`` on port 8000.
    """
    # Imports are deferred so the CLI stays fast for commands that don't need them.
    import uvicorn
    from imaginairy.http.app import app
    from imaginairy.log_utils import configure_logging

    configure_logging()
    # NOTE(review): host="0.0.0.0" binds on all network interfaces — confirm
    # exposing the server beyond localhost is intended for this alpha feature.
    uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")

@ -0,0 +1,81 @@
from asyncio import Lock
from io import BytesIO
from typing import Optional
from fastapi import FastAPI, Query
from fastapi.concurrency import run_in_threadpool
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from imaginairy import ImaginePrompt, imagine
from imaginairy.log_utils import configure_logging
# Single FastAPI application instance; served by uvicorn (see run-api-server CLI command).
app = FastAPI()
# Global asyncio lock used by the endpoints below to serialize image
# generation to one request at a time — presumably because the model /
# GPU memory cannot handle concurrent jobs; TODO confirm.
lock = Lock()
class ImagineWebPrompt(BaseModel):
    """Request body for the ``/api/imagine`` endpoint.

    Mirrors a subset of ``ImaginePrompt``'s options. Commented-out fields
    are not yet exposed over HTTP.
    """

    class Config:
        arbitrary_types_allowed = True

    # Optional fields carry an explicit `= None` default: pydantic v1 infers
    # it implicitly, but being explicit is clearer and is required by pydantic v2.
    prompt: Optional[str] = None
    negative_prompt: Optional[str] = None
    prompt_strength: float = 7.5
    # init_image: Optional[Union[LazyLoadingImage, str]]
    init_image_strength: Optional[float] = None
    # control_inputs: Optional[List[ControlInput]] = None
    mask_prompt: Optional[str] = None
    # mask_image: Optional[Union[LazyLoadingImage, str]] = None
    mask_mode: str = "replace"
    mask_modify_original: bool = True
    outpaint: Optional[str] = None
    seed: Optional[int] = None
    steps: Optional[int] = None
    height: Optional[int] = None
    width: Optional[int] = None
    upscale: bool = False
    fix_faces: bool = False
    fix_faces_fidelity: float = 0.2
    # sampler_type: str = Field(..., alias='config.DEFAULT_SAMPLER')  # update the alias based on actual config field name
    conditioning: Optional[str] = None
    tile_mode: str = ""
    allow_compose_phase: bool = True
    # model: str = Field(..., alias='config.DEFAULT_MODEL')  # update the alias based on actual config field name
    model_config_path: Optional[str] = None
    is_intermediate: bool = False
    collect_progress_latents: bool = False
    caption_text: str = ""
def generate_image(prompt: ImagineWebPrompt):
    """Generate one image from *prompt* and return the generated PIL image.

    NOTE(review): only the text prompt is forwarded to ``ImaginePrompt``;
    every other ``ImagineWebPrompt`` field (seed, steps, size, ...) is
    currently ignored — TODO: map them through once the API stabilizes.
    """
    # Use a distinct name for the engine prompt instead of rebinding the
    # parameter, so the web prompt stays accessible if more fields get mapped.
    engine_prompt = ImaginePrompt(prompt.prompt)
    result = next(imagine([engine_prompt]))
    return result.images["generated"]
@app.post("/api/imagine")
async def imagine_endpoint(prompt: ImagineWebPrompt):
    """Generate an image from a JSON prompt body and stream it back as JPEG."""
    # Hold the global lock so only one generation runs at a time.
    async with lock:
        img = await run_in_threadpool(generate_image, prompt)
        img_io = BytesIO()
        img.save(img_io, "JPEG")
        img_io.seek(0)
        # "image/jpeg" is the registered MIME type; "image/jpg" is not.
        return StreamingResponse(img_io, media_type="image/jpeg")
@app.get("/api/imagine")
async def imagine_get_endpoint(text: str = Query(...)):
    """Generate an image from a ``text`` query parameter and stream it back as JPEG."""
    # Hold the global lock so only one generation runs at a time.
    async with lock:
        img = await run_in_threadpool(generate_image, ImagineWebPrompt(prompt=text))
        img_io = BytesIO()
        img.save(img_io, "JPEG")
        img_io.seek(0)
        # "image/jpeg" is the registered MIME type; "image/jpg" is not.
        return StreamingResponse(img_io, media_type="image/jpeg")
if __name__ == "__main__":
    # Allow running this module directly (outside the `aimg run-api-server`
    # CLI command) for development.
    import uvicorn

    configure_logging()
    # NOTE(review): 0.0.0.0 binds on all interfaces — confirm intended exposure.
    uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")

@ -10,7 +10,9 @@ aiosignal==1.3.1
# via aiohttp
antlr4-python3-runtime==4.9.3
# via omegaconf
astroid==2.15.4
anyio==3.6.2
# via starlette
astroid==2.15.5
# via pylint
async-timeout==4.0.2
# via aiohttp
@ -18,7 +20,7 @@ attrs==23.1.0
# via aiohttp
black==23.3.0
# via -r requirements-dev.in
certifi==2022.12.7
certifi==2023.5.7
# via requests
charset-normalizer==3.1.0
# via
@ -31,6 +33,7 @@ click==8.1.3
# click-shell
# imaginAIry (setup.py)
# typer
# uvicorn
click-help-colors==0.9.1
# via imaginAIry (setup.py)
click-shell==2.1
@ -53,6 +56,8 @@ facexlib==0.3.0
# via imaginAIry (setup.py)
fairscale==0.4.13
# via imaginAIry (setup.py)
fastapi==0.95.2
# via imaginAIry (setup.py)
filelock==3.12.0
# via
# diffusers
@ -60,13 +65,13 @@ filelock==3.12.0
# transformers
filterpy==1.4.5
# via facexlib
fonttools==4.39.3
fonttools==4.39.4
# via matplotlib
frozenlist==1.3.3
# via
# aiohttp
# aiosignal
fsspec[http]==2023.4.0
fsspec[http]==2023.5.0
# via
# huggingface-hub
# pytorch-lightning
@ -74,6 +79,8 @@ ftfy==6.1.1
# via
# imaginAIry (setup.py)
# open-clip-torch
h11==0.14.0
# via uvicorn
huggingface-hub==0.14.1
# via
# diffusers
@ -82,6 +89,7 @@ huggingface-hub==0.14.1
# transformers
idna==3.4
# via
# anyio
# requests
# yarl
imageio==2.28.1
@ -141,7 +149,7 @@ numpy==1.24.3
# transformers
omegaconf==2.3.0
# via imaginAIry (setup.py)
open-clip-torch==2.19.0
open-clip-torch==2.20.0
# via imaginAIry (setup.py)
opencv-python==4.7.0.72
# via
@ -171,7 +179,7 @@ pillow==9.5.0
# imaginAIry (setup.py)
# matplotlib
# torchvision
platformdirs==3.5.0
platformdirs==3.5.1
# via
# black
# pylint
@ -187,6 +195,10 @@ pycln==2.1.3
# via -r requirements-dev.in
pycodestyle==2.10.0
# via pylama
pydantic==1.10.7
# via
# fastapi
# imaginAIry (setup.py)
pydocstyle==6.3.0
# via pylama
pyflakes==3.0.1
@ -236,10 +248,12 @@ requests==2.30.0
# transformers
responses==0.23.1
# via -r requirements-dev.in
ruff==0.0.265
ruff==0.0.269
# via -r requirements-dev.in
safetensors==0.3.1
# via imaginAIry (setup.py)
# via
# imaginAIry (setup.py)
# timm
scipy==1.10.1
# via
# facexlib
@ -249,11 +263,15 @@ sentencepiece==0.1.99
# via open-clip-torch
six==1.16.0
# via python-dateutil
sniffio==1.3.0
# via anyio
snowballstemmer==2.2.0
# via pydocstyle
starlette==0.27.0
# via fastapi
termcolor==2.3.0
# via pytest-sugar
timm==0.6.13
timm==0.9.2
# via
# imaginAIry (setup.py)
# open-clip-torch
@ -300,7 +318,7 @@ tqdm==4.65.0
# open-clip-torch
# pytorch-lightning
# transformers
transformers==4.28.1
transformers==4.29.2
# via imaginAIry (setup.py)
typer==0.7.0
# via pycln
@ -312,6 +330,7 @@ typing-extensions==4.5.0
# huggingface-hub
# libcst
# lightning-utilities
# pydantic
# pytorch-lightning
# torch
# torchvision
@ -322,6 +341,8 @@ urllib3==2.0.2
# via
# requests
# responses
uvicorn==0.22.0
# via imaginAIry (setup.py)
wcwidth==0.2.6
# via ftfy
wheel==0.40.0

@ -68,6 +68,7 @@ setup(
"protobuf != 3.20.2, != 3.19.5",
"facexlib",
"fairscale>=0.4.4", # for vendored blip
"fastapi",
"ftfy", # for vendored clip
# 2.0.0 produced garbage images on MacOS
"torch>=1.13.1,<2.0.0",
@ -82,6 +83,7 @@ setup(
"omegaconf>=2.1.1",
"open-clip-torch",
"opencv-python",
"pydantic",
"requests",
"einops>=0.3.0",
"safetensors",
@ -91,6 +93,7 @@ setup(
"torchmetrics>=0.6.0",
"torchvision>=0.13.1",
"kornia>=0.6",
"uvicorn",
"xformers>=0.0.16; sys_platform!='darwin'",
],
)

Loading…
Cancel
Save