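"""Command-line interface for imaginAIry: the `imagine` command and the `aimg` tool suite."""
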
import logging
import math
import os.path

import click
from click_shell import shell

from imaginairy import LazyLoadingImage, __version__, config, generate_caption
from imaginairy.api import imagine_image_files
from imaginairy.enhancers.prompt_expansion import expand_prompts
from imaginairy.log_utils import configure_logging
from imaginairy.samplers import SAMPLER_TYPE_OPTIONS
from imaginairy.schema import ImaginePrompt
from imaginairy.train import train_diffusion_model
from imaginairy.training_tools.image_prep import (
    create_class_images,
    get_image_filenames,
    prep_images,
)
from imaginairy.training_tools.prune_model import prune_diffusion_ckpt

logger = logging.getLogger(__name__)


@click.command()
@click.argument("prompt_texts", nargs=-1)
@click.option(
    "--negative-prompt",
    default=config.DEFAULT_NEGATIVE_PROMPT,
    show_default=True,
    help="Negative prompt. Things to try and exclude from images. Same negative prompt will be used for all images.",
)
@click.option(
    "--prompt-strength",
    default=7.5,
    show_default=True,
    help="How closely to follow the prompt. Image looks unnatural at higher values.",
)
@click.option(
    "--init-image",
    metavar="PATH|URL",
    help="Starting image.",
)
@click.option(
    "--init-image-strength",
    default=0.6,
    show_default=True,
    help="Starting image strength. Between 0 and 1.",
)
@click.option(
    "--outdir",
    default="./outputs",
    show_default=True,
    type=click.Path(),
    help="Where to write results to.",
)
@click.option(
    "-r",
    "--repeats",
    default=1,
    show_default=True,
    type=int,
    help="How many times to repeat the renders. If you provide two prompts and --repeat=3 then six images will be generated.",
)
@click.option(
    "-h",
    "--height",
    default=None,
    show_default=True,
    type=int,
    help="Image height. Should be multiple of 64.",
)
@click.option(
    "-w",
    "--width",
    default=None,
    show_default=True,
    type=int,
    help="Image width. Should be multiple of 64.",
)
@click.option(
    "--steps",
    default=None,
    type=int,
    show_default=True,
    help="How many diffusion steps to run. More steps, more detail, but with diminishing returns.",
)
@click.option(
    "--seed",
    default=None,
    type=int,
    help="What seed to use for randomness. Allows reproducible image renders.",
)
@click.option("--upscale", is_flag=True)
@click.option("--fix-faces", is_flag=True)
@click.option(
    "--fix-faces-fidelity",
    default=None,
    type=float,
    help="How faithful to the original should face enhancement be. 1 = best fidelity, 0 = best looking face.",
)
@click.option(
    "--sampler-type",
    "--sampler",
    default=config.DEFAULT_SAMPLER,
    show_default=True,
    type=click.Choice(SAMPLER_TYPE_OPTIONS),
    help="What sampling strategy to use.",
)
@click.option(
    "--log-level",
    default="INFO",
    show_default=True,
    type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR"]),
    help="What level of logs to show.",
)
@click.option(
    "--quiet",
    "-q",
    is_flag=True,
    help="Suppress logs. Alias of `--log-level ERROR`.",
)
@click.option(
    "--show-work",
    default=False,
    is_flag=True,
    help="Output debug images to the `steps` folder.",
)
@click.option(
    "--tile",
    is_flag=True,
    help="Any images rendered will be tileable in both X and Y directions.",
)
@click.option(
    "--tile-x",
    is_flag=True,
    help="Any images rendered will be tileable in the X direction.",
)
@click.option(
    "--tile-y",
    is_flag=True,
    help="Any images rendered will be tileable in the Y direction.",
)
@click.option(
    "--mask-image",
    metavar="PATH|URL",
    help="A mask to use for inpainting. White gets painted; black is left alone.",
)
@click.option(
    "--mask-prompt",
    help=(
        "Describe what you want masked and the AI will mask it for you. "
        "You can describe complex masks with AND, OR, NOT keywords and parentheses. "
        "The strength of each mask can be modified with {*1.5} notation. \n\n"
        "Examples: \n"
        "car AND (wheels{*1.1} OR trunk OR engine OR windows OR headlights) AND NOT (truck OR headlights){*10}\n"
        "fruit|fruit stem"
    ),
)
@click.option(
    "--mask-mode",
    default="replace",
    show_default=True,
    type=click.Choice(["keep", "replace"]),
    help="Should we replace the masked area or keep it?",
)
@click.option(
    "--mask-modify-original",
    default=True,
    is_flag=True,
    help="After the inpainting is done, apply the changes to a copy of the original image.",
)
@click.option(
    "--outpaint",
    help=(
        "Specify in what directions to expand the image. Values will be snapped such that output image size is multiples of 64. Examples:\n"
        " `--outpaint up10,down300,left50,right50`\n"
        " `--outpaint u10,d300,l50,r50`\n"
        " `--outpaint all200`\n"
        " `--outpaint a200`\n"
    ),
    default="",
)
@click.option(
    "--caption",
    default=False,
    is_flag=True,
    help="Generate a text description of the generated image.",
)
@click.option(
    "--precision",
    help="Evaluate at this precision.",
    type=click.Choice(["full", "autocast"]),
    default="autocast",
    show_default=True,
)
@click.option(
    "--model-weights-path",
    "--model",
    help=f"Model to use. Should be one of {', '.join(config.MODEL_SHORT_NAMES)}, or a path to custom weights.",
    show_default=True,
    default=config.DEFAULT_MODEL,
)
@click.option(
    "--model-config-path",
    help="Model config file to use. If a model name is specified, the appropriate config will be used.",
    show_default=True,
    default=None,
)
@click.option(
    "--prompt-library-path",
    help="Path to folder containing phrase lists in txt files. Use txt filename in prompt: {_filename_}.",
    type=click.Path(exists=True),
    default=None,
    multiple=True,
)
@click.option(
    "--version",
    default=False,
    is_flag=True,
    help="Print the version and exit.",
)
@click.pass_context
def imagine_cmd(
    ctx,
    prompt_texts,
    negative_prompt,
    prompt_strength,
    init_image,
    init_image_strength,
    outdir,
    repeats,
    height,
    width,
    steps,
    seed,
    upscale,
    fix_faces,
    fix_faces_fidelity,
    sampler_type,
    log_level,
    quiet,
    show_work,
    tile,
    tile_x,
    tile_y,
    mask_image,
    mask_prompt,
    mask_mode,
    mask_modify_original,
    outpaint,
    caption,
    precision,
    model_weights_path,
    model_config_path,
    prompt_library_path,
    version,  # noqa
):
    """Have the AI generate images. alias:imagine."""
    return _imagine_cmd(
        ctx,
        prompt_texts,
        negative_prompt,
        prompt_strength,
        init_image,
        init_image_strength,
        outdir,
        repeats,
        height,
        width,
        steps,
        seed,
        upscale,
        fix_faces,
        fix_faces_fidelity,
        sampler_type,
        log_level,
        quiet,
        show_work,
        tile,
        tile_x,
        tile_y,
        mask_image,
        mask_prompt,
        mask_mode,
        mask_modify_original,
        outpaint,
        caption,
        precision,
        model_weights_path,
        model_config_path,
        prompt_library_path,
        version,  # noqa
    )


@click.command()
@click.argument("init_image", metavar="PATH|URL", required=True, nargs=1)
@click.argument("prompt_texts", nargs=-1)
@click.option(
    "--negative-prompt",
    default="",
    show_default=True,
    help="Negative prompt. Things to try and exclude from images. Same negative prompt will be used for all images.",
)
@click.option(
    "--prompt-strength",
    default=7.5,
    show_default=True,
    help="How closely to follow the prompt. Image looks unnatural at higher values.",
)
@click.option(
    "--init-image",
    metavar="PATH|URL",
    help="Starting image.",
)
@click.option(
    "--outdir",
    default="./outputs",
    show_default=True,
    type=click.Path(),
    help="Where to write results to.",
)
@click.option(
    "-r",
    "--repeats",
    default=1,
    show_default=True,
    type=int,
    help="How many times to repeat the renders. If you provide two prompts and --repeat=3 then six images will be generated.",
)
@click.option(
    "-h",
    "--height",
    default=None,
    show_default=True,
    type=int,
    help="Image height. Should be multiple of 64.",
)
@click.option(
    "-w",
    "--width",
    default=None,
    show_default=True,
    type=int,
    help="Image width. Should be multiple of 64.",
)
@click.option(
    "--steps",
    default=None,
    type=int,
    show_default=True,
    help="How many diffusion steps to run. More steps, more detail, but with diminishing returns.",
)
@click.option(
    "--seed",
    default=None,
    type=int,
    help="What seed to use for randomness. Allows reproducible image renders.",
)
@click.option("--upscale", is_flag=True)
@click.option("--fix-faces", is_flag=True)
@click.option(
    "--fix-faces-fidelity",
    default=1,
    type=float,
    help="How faithful to the original should face enhancement be. 1 = best fidelity, 0 = best looking face.",
)
@click.option(
    "--sampler-type",
    "--sampler",
    default=config.DEFAULT_SAMPLER,
    show_default=True,
    type=click.Choice(SAMPLER_TYPE_OPTIONS),
    help="What sampling strategy to use.",
)
@click.option(
    "--log-level",
    default="INFO",
    show_default=True,
    type=click.Choice(["DEBUG", "INFO", "WARNING", "ERROR"]),
    help="What level of logs to show.",
)
@click.option(
    "--quiet",
    "-q",
    is_flag=True,
    help="Suppress logs. Alias of `--log-level ERROR`.",
)
@click.option(
    "--show-work",
    default=False,
    is_flag=True,
    help="Output debug images to the `steps` folder.",
)
@click.option(
    "--tile",
    is_flag=True,
    help="Any images rendered will be tileable in both X and Y directions.",
)
@click.option(
    "--tile-x",
    is_flag=True,
    help="Any images rendered will be tileable in the X direction.",
)
@click.option(
    "--tile-y",
    is_flag=True,
    help="Any images rendered will be tileable in the Y direction.",
)
@click.option(
    "--mask-image",
    metavar="PATH|URL",
    help="A mask to use for inpainting. White gets painted; black is left alone.",
)
@click.option(
    "--mask-prompt",
    help=(
        "Describe what you want masked and the AI will mask it for you. "
        "You can describe complex masks with AND, OR, NOT keywords and parentheses. "
        "The strength of each mask can be modified with {*1.5} notation. \n\n"
        "Examples: \n"
        "car AND (wheels{*1.1} OR trunk OR engine OR windows OR headlights) AND NOT (truck OR headlights){*10}\n"
        "fruit|fruit stem"
    ),
)
@click.option(
    "--mask-mode",
    default="replace",
    show_default=True,
    type=click.Choice(["keep", "replace"]),
    help="Should we replace the masked area or keep it?",
)
@click.option(
    "--mask-modify-original",
    default=True,
    is_flag=True,
    help="After the inpainting is done, apply the changes to a copy of the original image.",
)
@click.option(
    "--outpaint",
    help=(
        "Specify in what directions to expand the image. Values will be snapped such that output image size is multiples of 64. Examples:\n"
        " `--outpaint up10,down300,left50,right50`\n"
        " `--outpaint u10,d300,l50,r50`\n"
        " `--outpaint all200`\n"
        " `--outpaint a200`\n"
    ),
    default="",
)
@click.option(
    "--caption",
    default=False,
    is_flag=True,
    help="Generate a text description of the generated image.",
)
@click.option(
    "--precision",
    help="Evaluate at this precision.",
    type=click.Choice(["full", "autocast"]),
    default="autocast",
    show_default=True,
)
@click.option(
    "--model-weights-path",
    "--model",
    help=f"Model to use. Should be one of {', '.join(config.MODEL_SHORT_NAMES)}, or a path to custom weights.",
    show_default=True,
    default="edit",
)
@click.option(
    "--model-config-path",
    help="Model config file to use. If a model name is specified, the appropriate config will be used.",
    show_default=True,
    default=None,
)
@click.option(
    "--prompt-library-path",
    help="Path to folder containing phrase lists in txt files. Use txt filename in prompt: {_filename_}.",
    type=click.Path(exists=True),
    default=None,
    multiple=True,
)
@click.option(
    "--version",
    default=False,
    is_flag=True,
    help="Print the version and exit.",
)
@click.pass_context
def edit_image(
    ctx,
    init_image,
    prompt_texts,
    negative_prompt,
    prompt_strength,
    outdir,
    repeats,
    height,
    width,
    steps,
    seed,
    upscale,
    fix_faces,
    fix_faces_fidelity,
    sampler_type,
    log_level,
    quiet,
    show_work,
    tile,
    tile_x,
    tile_y,
    mask_image,
    mask_prompt,
    mask_mode,
    mask_modify_original,
    outpaint,
    caption,
    precision,
    model_weights_path,
    model_config_path,
    prompt_library_path,
    version,  # noqa
):
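    # The edit command always feeds the init image in at full strength; the default
    # "edit" weights presumably condition on the original image directly rather than
    # blending noise into it.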
    init_image_strength = 1
    return _imagine_cmd(
        ctx,
        prompt_texts,
        negative_prompt,
        prompt_strength,
        init_image,
        init_image_strength,
        outdir,
        repeats,
        height,
        width,
        steps,
        seed,
        upscale,
        fix_faces,
        fix_faces_fidelity,
        sampler_type,
        log_level,
        quiet,
        show_work,
        tile,
        tile_x,
        tile_y,
        mask_image,
        mask_prompt,
        mask_mode,
        mask_modify_original,
        outpaint,
        caption,
        precision,
        model_weights_path,
        model_config_path,
        prompt_library_path,
        version,  # noqa
    )


def _imagine_cmd(
    ctx,
    prompt_texts,
    negative_prompt,
    prompt_strength,
    init_image,
    init_image_strength,
    outdir,
    repeats,
    height,
    width,
    steps,
    seed,
    upscale,
    fix_faces,
    fix_faces_fidelity,
    sampler_type,
    log_level,
    quiet,
    show_work,
    tile,
    tile_x,
    tile_y,
    mask_image,
    mask_prompt,
    mask_mode,
    mask_modify_original,
    outpaint,
    caption,
    precision,
    model_weights_path,
    model_config_path,
    prompt_library_path,
    version,  # noqa
):
    """Have the AI generate images. alias:imagine."""
    if ctx.invoked_subcommand is not None:
        return
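
    # --version short-circuits before any heavier setup (logging config, model loading, generation).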
    if version:
        print(__version__)
        return

    if quiet:
        log_level = "ERROR"
    configure_logging(log_level)

    total_image_count = len(prompt_texts) * repeats
    logger.info(
        f"🤖🧠 imaginAIry received {len(prompt_texts)} prompt(s) and will repeat them {repeats} times to create {total_image_count} images."
    )

    if init_image and init_image.startswith("http"):
        init_image = LazyLoadingImage(url=init_image)

    if mask_image and mask_image.startswith("http"):
        mask_image = LazyLoadingImage(url=mask_image)

    prompts = []
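    # Prompt expansion: values in `{}` separated by `|` are drawn randomly (without
    # repeating), and `_name_` pulls from the built-in or user-supplied phrase list of
    # that name (see --prompt-library-path). For example,
    #   imagine "a {red|black} dog" -r 2 --seed 0
    # generates both "a red dog" and "a black dog", while
    #   imagine "a {_color_} dog" -r 4 --seed 0
    # generates four differently colored dogs, with colors pulled from an included
    # phrase list of colors.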
    prompt_expanding_iterators = {}
    for _ in range(repeats):
        for prompt_text in prompt_texts:
            if prompt_text not in prompt_expanding_iterators:
                prompt_expanding_iterators[prompt_text] = expand_prompts(
                    n=math.inf,
                    prompt_text=prompt_text,
                    prompt_library_paths=prompt_library_path,
                )
            prompt_iterator = prompt_expanding_iterators[prompt_text]
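            # Collapse the three tile flags into the single tile_mode string that
            # ImaginePrompt expects ("xy", "x", "y", or "").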
            if tile:
                _tile_mode = "xy"
            elif tile_x:
                _tile_mode = "x"
            elif tile_y:
                _tile_mode = "y"
            else:
                _tile_mode = ""

            prompt = ImaginePrompt(
                next(prompt_iterator),
                negative_prompt=negative_prompt,
                prompt_strength=prompt_strength,
                init_image=init_image,
                init_image_strength=init_image_strength,
                seed=seed,
                sampler_type=sampler_type,
                steps=steps,
                height=height,
                width=width,
                mask_image=mask_image,
                mask_prompt=mask_prompt,
                mask_mode=mask_mode,
                mask_modify_original=mask_modify_original,
                outpaint=outpaint,
                upscale=upscale,
                fix_faces=fix_faces,
                fix_faces_fidelity=fix_faces_fidelity,
                tile_mode=_tile_mode,
                model=model_weights_path,
                model_config_path=model_config_path,
            )
            prompts.append(prompt)
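
    # Hand the expanded batch of prompts to the generation loop; results are written
    # to `outdir` as jpg files.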
    imagine_image_files(
        prompts,
        outdir=outdir,
        record_step_images=show_work,
        output_file_extension="jpg",
        print_caption=caption,
        precision=precision,
    )
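

# `aimg` is both a command group and an interactive shell: running `aimg` with no
# subcommand drops into a REPL (via click-shell) where the subcommands below can be
# entered directly.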
@shell(prompt="imaginAIry> ", intro="Starting imaginAIry...")
def aimg():
    pass


@aimg.command()
def version():
    """Print the version."""
    print(__version__)


@click.argument("image_filepaths", nargs=-1)
@aimg.command()
def describe(image_filepaths):
    """Generate text descriptions of images."""
    imgs = []
    for p in image_filepaths:
        if p.startswith("http"):
            img = LazyLoadingImage(url=p)
        else:
            img = LazyLoadingImage(filepath=p)
        imgs.append(img)
    for img in imgs:
        print(generate_caption(img.copy()))


@click.option(
    "--concept-label",
    help=(
        'The concept you are training on. Usually "a photo of [person or thing] [classname]" is what you should use.'
    ),
    required=True,
)
@click.option(
    "--concept-images-dir",
    type=click.Path(),
    required=True,
    help="Where to find the pre-processed concept images to train on.",
)
@click.option(
    "--class-label",
    help=(
        'What class of things does the concept belong to. For example, if you are training on "a painting of George Washington", '
        'you might use "a painting of a man" as the class label. We use this to prevent the model from overfitting.'
    ),
    default="a photo of *",
)
@click.option(
    "--class-images-dir",
    type=click.Path(),
    required=True,
    help="Where to find the pre-processed class images to train on.",
)
@click.option(
    "--n-class-images",
    type=int,
    default=300,
    help="Number of class images to generate.",
)
@click.option(
    "--model-weights-path",
    "--model",
    "model",
    help=f"Model to use. Should be one of {', '.join(config.MODEL_SHORT_NAMES)}, or a path to custom weights.",
    show_default=True,
    default=config.DEFAULT_MODEL,
)
@click.option(
    "--person",
    "is_person",
    is_flag=True,
    help="Set if images are of a person. Will use face detection and enhancement.",
)
@click.option(
    "-y",
    "preconfirmed",
    is_flag=True,
    default=False,
    help="Bypass input confirmations.",
)
@click.option(
    "--skip-prep",
    is_flag=True,
    default=False,
    help="Skip the image preparation step.",
)
@click.option(
    "--skip-class-img-gen",
    is_flag=True,
    default=False,
    help="Skip the class image generation step.",
)
@aimg.command()
def train_concept(
    concept_label,
    concept_images_dir,
    class_label,
    class_images_dir,
    n_class_images,
    model,
    is_person,
    preconfirmed,
    skip_prep,
    skip_class_img_gen,
):
    """
    Teach the model a new concept (a person, thing, style, etc).

    Provided a directory of concept images, a concept token, and a class token, this command will train the model
    to generate images of that concept.

    \b
    This happens in a 3-step process:
    1. Cropping and resizing your training images. If --person is set we crop to include the face.
    2. Generating a set of class images to train on. This helps prevent overfitting.
    3. Training the model on the concept and class images.

    The output of this command is a new model weights file that you can use with the --model option.

    \b
    ## Instructions
    1. Gather a set of images of the concept you want to train on. They should show the subject from a variety of angles
    and in a variety of situations.
    2. Train the model.
        - Concept label: For a person, firstnamelastname should be fine.
            - If all the training images are photos you should add "a photo of" to the beginning of the concept label.
        - Class label: This is the category of the things being trained on. For people this is typically "person", "man"
        or "woman".
            - If all the training images are photos you should add "a photo of" to the beginning of the class label.
            - Class images will be generated for you if you do not provide them.
    3. Stop training before it overfits. I haven't figured this out yet.

    For example, if you were training on photos of a man named bill hamilton you could run the following:

    \b
    aimg train-concept \\
        --person \\
        --concept-label "photo of billhamilton man" \\
        --concept-images-dir ./images/billhamilton \\
        --class-label "photo of a man" \\
        --class-images-dir ./images/man

    When you use the model you should prompt with `firstnamelastname classname` (e.g. `billhamilton man`).

    You can find a lot of relevant instructions here: https://github.com/JoePenna/Dreambooth-Stable-Diffusion
    """
    configure_logging()
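    # 512x512 matches the resolution that Stable Diffusion 1.x checkpoints were trained at.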
    target_size = 512
    # Step 1. Crop and enhance the training images
    prepped_images_path = os.path.join(concept_images_dir, "prepped-images")
    image_filenames = get_image_filenames(concept_images_dir)
    click.secho(
        f'\n🤖🧠 Training "{concept_label}" based on {len(image_filenames)} images.\n'
    )

    if not skip_prep:
        msg = (
            f"Creating cropped copies of the {len(image_filenames)} concept images\n"
            f"    Is Person: {is_person}\n"
            f"    Source: {concept_images_dir}\n"
            f"    Dest: {prepped_images_path}\n"
        )
        logger.info(msg)
        if not is_person:
            click.secho("⚠️ the --person flag was not set. ", fg="yellow")

        if not preconfirmed and not click.confirm("Continue?"):
            return

        prep_images(
            images_dir=concept_images_dir, is_person=is_person, target_size=target_size
        )
        concept_images_dir = prepped_images_path

    if not skip_class_img_gen:
        # Step 2. Generate class images
        class_image_filenames = get_image_filenames(class_images_dir)
        images_needed = max(n_class_images - len(class_image_filenames), 0)
        logger.info(f"Generating {n_class_images} class images in {class_images_dir}")
        logger.info(
            f"{len(class_image_filenames)} existing class images found so only generating {images_needed}."
        )
        if not preconfirmed and not click.confirm("Continue?"):
            return
        create_class_images(
            class_description=class_label,
            output_folder=class_images_dir,
            num_images=n_class_images,
        )

    logger.info("Training the model...")
    if not preconfirmed and not click.confirm("Continue?"):
        return

    # Step 3. Train the model
    train_diffusion_model(
        concept_label=concept_label,
        concept_images_dir=concept_images_dir,
        class_label=class_label,
        class_images_dir=class_images_dir,
        weights_location=model,
        logdir="logs",
        learning_rate=1e-6,
        accumulate_grad_batches=32,
    )


@click.argument(
    "images_dir",
    required=True,
)
@click.option(
    "--person",
    "is_person",
    is_flag=True,
    help="Set if images are of a person. Will use face detection and enhancement.",
)
@click.option(
    "--target-size",
    default=512,
    type=int,
    show_default=True,
)
@aimg.command("prep-images")
def prepare_images(images_dir, is_person, target_size):
    """
    Prepare a folder of images for training.

    Prepped images will be written to the `prepped-images` subfolder.

    All images will be cropped and resized to (default) 512x512.
    Upscaling and face enhancement will be applied as needed to smaller images.

    Examples:
        aimg prep-images --person ./images/selfies
        aimg prep-images ./images/toy-train
    """
    configure_logging()
    prep_images(images_dir=images_dir, is_person=is_person, target_size=target_size)


@click.argument("ckpt_paths", nargs=-1)
@aimg.command("prune-ckpt")
def prune_ckpt(ckpt_paths):
    """
    Prune a checkpoint file.

    This will remove the optimizer state from the checkpoint file.
    This is useful if you want to use the checkpoint file for inference and save a lot of disk space.

    Example:
        aimg prune-ckpt ./path/to/checkpoint.ckpt
    """
    click.secho("Pruning checkpoint files...")
    configure_logging()
    for p in ckpt_paths:
        prune_diffusion_ckpt(p)


aimg.add_command(imagine_cmd, name="imagine")
aimg.add_command(edit_image, name="edit")
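
# Example invocations (illustrative paths/prompts; assumes the `imagine` and `aimg`
# console-script entry points are installed):
#   imagine "a bowl of fruit" --steps 20
#   aimg describe ./my-photo.jpg
#   aimg edit ./my-photo.jpg "make it a watercolor painting"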

if __name__ == "__main__":
    imagine_cmd()  # noqa
    # from cProfile import Profile
    # from pyprof2calltree import convert, visualize
    # profiler = Profile()
    # profiler.runctx("imagine_cmd.main(standalone_mode=False)", locals(), globals())
    # convert(profiler.getstats(), 'imagine.kgrind')
    # visualize(profiler.getstats())