feature: script for running imaginairy in the modal.com cloud

Bryce 2024-03-05 17:38:52 -08:00 committed by Bryce Drennan
parent 76b6fa8b65
commit 9c48b749d8
4 changed files with 427 additions and 68 deletions
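Once merged, the new script is launched from the repository root with the Modal CLI. A minimal invocation sketch, not part of the commit (it assumes the modal package is installed and authenticated; modal run exposes the local entrypoint's parameters as command-line options):

    modal run scripts/modal_entrypoint.py --prompt "a scenic landscape"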

imaginairy/api/generate.py

@@ -2,6 +2,7 @@
 import logging
 import os
+from pathlib import Path
 from typing import TYPE_CHECKING, Callable
 
 if TYPE_CHECKING:
@@ -59,8 +60,6 @@ def imagine_image_files(
     from imaginairy.api.video_sample import generate_video
     from imaginairy.utils import get_next_filenumber, prompt_normalized
-    from imaginairy.utils.animations import make_bounce_animation
-    from imaginairy.utils.img_utils import pillow_fit_image_within
 
     generated_imgs_path = os.path.join(outdir, "generated")
     os.makedirs(generated_imgs_path, exist_ok=True)
@@ -93,10 +92,51 @@ def imagine_image_files(
         debug_img_callback=_record_step if record_step_images else None,
         add_caption=print_caption,
     ):
+        primary_filename = save_image_result(
+            result,
+            base_count,
+            outdir=outdir,
+            output_file_extension=output_file_extension,
+            primary_filename_type=return_filename_type,
+            make_gif=make_gif,
+            make_compare_gif=make_compare_gif,
+        )
+        if not primary_filename:
+            continue
+        result_filenames.append(primary_filename)
+        if primary_filename and videogen:
+            try:
+                generate_video(
+                    input_path=primary_filename,
+                )
+            except FileNotFoundError as e:
+                logger.error(str(e))
+                exit(1)
+        base_count += 1
+        del result
+
+    return result_filenames
+
+
+def save_image_result(
+    result,
+    base_count: int,
+    outdir: str | Path,
+    output_file_extension: str,
+    primary_filename_type,
+    make_gif=False,
+    make_compare_gif=False,
+):
+    from imaginairy.utils import prompt_normalized
+    from imaginairy.utils.animations import make_bounce_animation
+    from imaginairy.utils.img_utils import pillow_fit_image_within
+
     prompt = result.prompt
     if prompt.is_intermediate:
         # we don't save intermediate images
-        continue
+        return
     img_str = ""
     if prompt.init_image:
         img_str = f"_img2img-{prompt.init_image_strength}"
@@ -105,6 +145,7 @@ def imagine_image_files(
         f"{base_count:06}_{prompt.seed}_{prompt.solver_type.replace('_', '')}{prompt.steps}_"
         f"PS{prompt.prompt_strength}{img_str}_{prompt_normalized(prompt.prompt_text)}"
     )
+    primary_filename = None
     for image_type in result.images:
         subpath = os.path.join(outdir, image_type)
         os.makedirs(subpath, exist_ok=True)
@@ -113,16 +154,9 @@ def imagine_image_files(
         )
         result.save(filepath, image_type=image_type)
         logger.info(f" {image_type:<22} {filepath}")
-        if image_type == return_filename_type:
-            result_filenames.append(filepath)
-            if videogen:
-                try:
-                    generate_video(
-                        input_path=filepath,
-                    )
-                except FileNotFoundError as e:
-                    logger.error(str(e))
-                    exit(1)
+
+        if image_type == primary_filename_type:
+            primary_filename = filepath
 
     if make_gif and result.progress_latents:
         subpath = os.path.join(outdir, "gif")
@@ -161,10 +195,7 @@ def imagine_image_files(
         image_type = "gif"
         logger.info(f" {image_type:<22} {filepath}")
-        base_count += 1
-        del result
-
-    return result_filenames
+
+    return primary_filename
 
 
 def imagine(

imaginairy/utils/gitignore.py (new file)

@@ -0,0 +1,117 @@
import os
from pathlib import Path

import pathspec


def find_project_root(start_path):
    """
    Traverse up from a starting path to find the project root.

    The project root is identified by the presence of a '.git' or '.hg' directory,
    a 'pyproject.toml' file, or a 'setup.py' file.
    """
    current_path = Path(start_path)
    # stop once we reach the filesystem root (its parent is itself)
    while current_path != current_path.parent:
        if (current_path / ".git").is_dir():
            return str(current_path)
        if (current_path / ".hg").is_dir():
            return current_path
        if (current_path / "pyproject.toml").is_file():
            return current_path
        if (current_path / "setup.py").is_file():
            return current_path
        current_path = current_path.parent
    return None


ALWAYS_IGNORE = """
.git
__pycache__
.direnv
.eggs
.hg
.mypy_cache
.nox
.tox
.venv
venv
.svn
.ipynb_checkpoints
_build
buck-out
build
dist
__pypackages__
"""


def load_gitignore_spec_at_path(path):
    gitignore_path = os.path.join(path, ".gitignore")
    if os.path.exists(gitignore_path):
        with open(gitignore_path, encoding="utf-8") as f:
            patterns = f.read().split("\n")
        patterns.extend(ALWAYS_IGNORE.split("\n"))
        ignore_spec = pathspec.PathSpec.from_lines("gitwildmatch", patterns)
    else:
        ignore_spec = pathspec.PathSpec.from_lines("gitwildmatch", [])
    return ignore_spec


def get_nonignored_file_paths(directory, gitignore_dict=None, extensions=()):
    return_relative = False
    if gitignore_dict is None:
        gitignore_dict = {}
        return_relative = True
    gitignore_dict = {
        **gitignore_dict,
        directory: load_gitignore_spec_at_path(directory),
    }
    file_paths = []
    for entry in os.scandir(directory):
        if path_is_ignored(Path(entry.path), gitignore_dict):
            continue
        if entry.is_file():
            if any(entry.path.endswith(ext) for ext in extensions):
                continue
            file_paths.append(entry.path)
        elif entry.is_dir():
            subdir_file_paths = get_nonignored_file_paths(
                entry.path, gitignore_dict=gitignore_dict
            )
            file_paths.extend(subdir_file_paths)
    if return_relative:
        file_paths = [os.path.relpath(f, directory) for f in file_paths]
        file_paths.sort(key=lambda p: ("/" in p, p))
    return file_paths


def path_is_ignored(path: Path, gitignore_dict) -> bool:
    for gitignore_path, pattern in gitignore_dict.items():
        try:
            abspath = path if path.is_absolute() else Path.cwd() / path
            normalized_path = abspath.resolve()
            try:
                relative_path = normalized_path.relative_to(gitignore_path).as_posix()
            except ValueError:
                return False
        except OSError:
            return False
        if pattern.match_file(relative_path):
            return True
    return False
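A brief usage sketch of these helpers, not part of the commit (it assumes the module is importable as imaginairy.utils.gitignore, as the Modal entrypoint below imports it):

from pathlib import Path

from imaginairy.utils.gitignore import find_project_root, get_nonignored_file_paths

# Walk up from the current directory to the repo root, then list every file
# that is not excluded by .gitignore rules or the ALWAYS_IGNORE patterns.
root = find_project_root(Path.cwd())
if root is not None:
    for relative_path in get_nonignored_file_paths(str(root)):
        print(relative_path)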

scripts/modal_entrypoint.py (new file, 211 lines)

@@ -0,0 +1,211 @@
import os.path
from functools import lru_cache
from pathlib import Path

from modal import (
    Image,
    Mount,
    Stub,
    Volume,
    gpu,
)

os.environ["MODAL_AUTOMOUNT"] = "0"

# find project root that is two levels up from __file__
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
requirements_path = os.path.join(project_root, "requirements-dev.txt")


def file_filter(path: str):
    # print(f"Checking {path}")
    if "/tests/" in path:
        return False
    include = path in files_for_inclusion()
    return include


@lru_cache
def files_for_inclusion():
    from imaginairy.utils.gitignore import get_nonignored_file_paths

    filepaths = get_nonignored_file_paths(project_root)
    for f in filepaths:
        print(f)
    filepaths = [f"{project_root}/{f}" for f in filepaths if os.path.isfile(f)]
    return set(filepaths)


local_mount = Mount.from_local_dir(
    project_root, remote_path="/root/workspace/imaginairy", condition=file_filter
)

imaginairy_image = (
    Image.debian_slim(python_version="3.10")
    .apt_install(
        "libglib2.0-0", "libsm6", "libxrender1", "libxext6", "ffmpeg", "libgl1"
    )
    .pip_install_from_requirements(requirements_path)
    .workdir("/root/workspace")
    .env({"PYTHONPATH": "/root/workspace/imaginairy"})
)

weights_cache_volume = Volume.from_name("ai-wights-cache", create_if_missing=True)
weights_cache_path = "/root/.cache/"

stub = Stub(
    "imaginairy",
    mounts=[local_mount],
    volumes={weights_cache_path: weights_cache_volume},
)


@stub.function(gpu=gpu.A100(), container_idle_timeout=2, image=imaginairy_image)
def generate_image(imagine_prompt):
    from imaginairy.api import imagine
    from imaginairy.utils.log_utils import configure_logging

    configure_logging()
    results = list(imagine(imagine_prompt))
    result = results[-1]
    weights_cache_volume.commit()
    return result


standard_prompts = [
"a flower",
"portrait photo of a woman with a few freckles. red hair",
"photo of a bowl of fruit",
"gold coins",
"a scenic landscape",
"old growth redwood forest with a stream meandering through it. award-winning photo, diffuse lighting, beautiful, high-resolution, 4k",
"abstract desktop background",
"highway system seen from above",
"the galaxy",
"a photo of the rainforest",
"the starry night painting",
"photo of flowers",
"girl with a pearl earring",
"a painting of 1920s luncheon on a boat",
"napolean crossing the alps",
"american gothic painting",
"the tower of babel",
"god creating the {universe|earth}",
"a fishing boat in a storm on the sea of galilee. oil painting",
"the american flag",
"the tree of life. oil painting",
"the last supper. oil painting",
"the statue of liberty",
"a maze",
"HD desktop wallpaper",
"a beautiful garden",
"the garden of eden",
"the circus. {photo|oil painting}",
"a futuristic city",
"a retro spaceship",
"yosemite national park. {photo|oil painting}",
"a seacliff with a lighthouse. giant ocean waves, {photo|oil painting}",
"blueberries",
"strawberries",
"a single giant diamond",
"disneyland thomas kinkade painting",
"ancient books in an ancient library. cinematic",
"mormon missionaries",
"salt lake city",
"oil painting of heaven",
"oil painting of hell",
"an x-wing. digital art",
"star trek uss enterprise",
"a giant pile of treasure",
"the white house",
"a grizzly bear. nature photography",
"a unicorn. nature photography",
"elon musk. normal rockwell painting",
"a cybertruck",
"elon musk with a halo dressed in greek robes. oil painting",
"a crowd of people",
"a stadium full of people",
"macro photography of a drop of water",
"macro photography of a leaf",
"macro photography of a spider",
"flames",
"a robot",
"the stars at night",
"a lovely sunset",
"an oil painting",
]


@stub.local_entrypoint()
def main(
    prompt: str,
    size: str = "fhd",
    upscale: bool = False,
    model_weights: str = "opendalle",
    n_images: int = 1,
    n_steps: int = 50,
    seed=None,
):
    from imaginairy.enhancers.prompt_expansion import expand_prompts
    from imaginairy.schema import ImaginePrompt
    from imaginairy.utils import get_next_filenumber
    from imaginairy.utils.log_utils import configure_logging

    configure_logging()
    prompt_texts = expand_prompts(
        n=n_images,
        prompt_text=prompt,
    )
    # model_weights = ModelWeightsConfig(
    #     name="ProteusV0.4",
    #     aliases=["proteusv4"],
    #     architecture="sdxl",
    #     defaults={
    #         "negative_prompt": DEFAULT_NEGATIVE_PROMPT,
    #         "composition_strength": 0.6,
    #     },
    #     weights_location="https://huggingface.co/dataautogpt3/ProteusV0.4/tree/0dfa4101db540e7a4b2b6ba6f87d8d7219e84513",
    # )
    imagine_prompts = [
        ImaginePrompt(
            prompt_text,
            steps=n_steps,
            size=size,
            upscale=upscale,
            model_weights=model_weights,
            seed=seed,
        )
        for prompt_text in prompt_texts
    ]
    # imagine_prompts = []
    # for sp in standard_prompts:
    #     imagine_prompts.append(
    #         ImaginePrompt(
    #             sp,
    #             steps=n_steps,
    #             size=size,
    #             upscale=upscale,
    #             model_weights=model_weights,
    #             seed=seed
    #         )
    #     )
    # imagine_prompts = imagine_prompts[:n_images]

    outdir = Path("./outputs/modal-inference")
    outdir.mkdir(exist_ok=True, parents=True)
    file_num = get_next_filenumber(f"{outdir}/generated")

    for result in generate_image.map(imagine_prompts):
        from imaginairy.api.generate import save_image_result

        save_image_result(
            result=result,
            base_count=file_num,
            outdir=outdir,
            output_file_extension="jpg",
            primary_filename_type="generated",
        )
        file_num += 1

pytest configuration

@@ -1,6 +1,6 @@
 [pytest]
 addopts = --doctest-modules -s --tb=native -v --durations=10
-norecursedirs = build dist downloads other prolly_delete imaginairy/vendored
+norecursedirs = build dist downloads other prolly_delete imaginairy/vendored scripts
 filterwarnings =
     ignore::DeprecationWarning
     ignore::UserWarning