style: lint fixes. remove unused code

pull/9/head
Bryce 2 years ago
parent 793df6e9dc
commit 19d0b563ac

@@ -171,9 +171,10 @@ docker run -it --gpus all -v $HOME/.cache/huggingface:/root/.cache/huggingface -
 - outpainting
 - inpainting
 - https://github.com/andreas128/RePaint
-- img2img but keeps img stable
+- img2img but keeps img stable
 - https://www.reddit.com/r/StableDiffusion/comments/xboy90/a_better_way_of_doing_img2img_by_finding_the/
 - https://gist.github.com/trygvebw/c71334dd127d537a15e9d59790f7f5e1
+- https://github.com/pesser/stable-diffusion/commit/bbb52981460707963e2a62160890d7ecbce00e79
 - CPU support
 - img2img for plms?
 - images as actual prompts instead of just init images

@@ -165,7 +165,6 @@ def imagine(
     prompts = [ImaginePrompt(prompts)] if isinstance(prompts, str) else prompts
     prompts = [prompts] if isinstance(prompts, ImaginePrompt) else prompts
     _img_callback = None
-    step_count = 0
     precision_scope = (
         autocast
@@ -185,11 +184,8 @@ def imagine(
             uc = model.get_learned_conditioning(1 * [""])
             total_weight = sum(wp.weight for wp in prompt.prompts)
             c = sum(
-                [
-                    model.get_learned_conditioning(wp.text)
-                    * (wp.weight / total_weight)
-                    for wp in prompt.prompts
-                ]
+                model.get_learned_conditioning(wp.text) * (wp.weight / total_weight)
+                for wp in prompt.prompts
             )
             shape = [
@@ -205,7 +201,7 @@ def imagine(
                 ddim_steps = int(prompt.steps / generation_strength)
                 sampler.make_schedule(ddim_num_steps=ddim_steps, ddim_eta=ddim_eta)
-                init_image, w, h = pillow_img_to_torch_image(
+                init_image, w, h = pillow_img_to_torch_image(  # noqa
                     prompt.init_image,
                     max_height=prompt.height,
                     max_width=prompt.width,

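The conditioning change above replaces a list comprehension inside `sum()` with a generator expression and collapses the weighted term onto one line. A minimal runnable sketch of the same weighted-average pattern, with plain tensors standing in for `model.get_learned_conditioning()` outputs (the helper name and shapes here are illustrative, not imaginAIry's API):

```python
import torch

def blend_conditionings(conditionings, weights):
    """Weighted average of conditioning tensors; weights are normalized to sum to 1."""
    total_weight = sum(weights)
    # Generator expression instead of a list comprehension: same result,
    # no intermediate list -- the style fix applied in this commit.
    return sum(c * (w / total_weight) for c, w in zip(conditionings, weights))

# Stand-ins for two prompts' conditioning tensors, weighted 3:1.
cond_a = torch.ones(1, 77, 768)
cond_b = torch.zeros(1, 77, 768)
blended = blend_conditionings([cond_a, cond_b], [3, 1])
assert torch.allclose(blended, torch.full((1, 77, 768), 0.75))
```
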
@@ -3,8 +3,9 @@ import logging.config

 import click

 from imaginairy import LazyLoadingImage
-from imaginairy.api import load_model
+from imaginairy.api import imagine_image_files, load_model
 from imaginairy.samplers.base import SAMPLER_TYPE_OPTIONS
+from imaginairy.schema import ImaginePrompt
 from imaginairy.suppress_logs import suppress_annoying_logs_and_warnings

 logger = logging.getLogger(__name__)
@@ -96,11 +97,7 @@ def configure_logging(level="INFO"):
     help="What seed to use for randomness. Allows reproducible image renders",
 )
 @click.option("--upscale", is_flag=True)
-@click.option(
-    "--upscale-method", default="realesrgan", type=click.Choice(["realesrgan"])
-)
 @click.option("--fix-faces", is_flag=True)
-@click.option("--fix-faces-method", default="gfpgan", type=click.Choice(["gfpgan"]))
 @click.option(
     "--sampler-type",
     default="plms",
@@ -138,9 +135,7 @@ def imagine_cmd(
     steps,
     seed,
     upscale,
-    upscale_method,
     fix_faces,
-    fix_faces_method,
     sampler_type,
     ddim_eta,
     log_level,
@@ -151,9 +146,6 @@ def imagine_cmd(
     suppress_annoying_logs_and_warnings()
     configure_logging(log_level)
-    from imaginairy.api import imagine_image_files
-    from imaginairy.schema import ImaginePrompt
-
     total_image_count = len(prompt_texts) * repeats
     logger.info(
         f"🤖🧠 imaginAIry received {len(prompt_texts)} prompt(s) and will repeat them {repeats} times to create {total_image_count} images."

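The three CLI hunks above have to move in lockstep: click passes each declared option to the decorated function as a keyword argument, so deleting the `--upscale-method` and `--fix-faces-method` options also means deleting the matching parameters (and the deferred imports move to the top of the module). A toy sketch of that coupling, not the real imaginAIry command:

```python
import click

@click.command()
@click.option("--upscale", is_flag=True)
@click.option("--fix-faces", is_flag=True)
def imagine_cmd(upscale, fix_faces):
    # click maps "--fix-faces" to the fix_faces parameter; removing an
    # option without removing its parameter raises a TypeError at call time.
    click.echo(f"upscale={upscale} fix_faces={fix_faces}")

if __name__ == "__main__":
    imagine_cmd()
```
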
@@ -60,7 +60,7 @@ def enhance_faces(img, fidelity=0):
     face_helper.align_warp_face()

     # face restoration for each cropped face
-    for idx, cropped_face in enumerate(face_helper.cropped_faces):
+    for cropped_face in face_helper.cropped_faces:
         # prepare data
         cropped_face_t = img2tensor(cropped_face / 255.0, bgr2rgb=True, float32=True)
         normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)

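The loop change above is a standard unused-variable cleanup: `enumerate()` produced an index `idx` that the body never read, which the linter flags. The general rule, in miniature:

```python
faces = ["face_a", "face_b"]

# Preferred when the index is unused -- iterate directly.
for face in faces:
    print(face)

# Keep enumerate() only when the index is actually consumed.
for idx, face in enumerate(faces):
    print(idx, face)
```
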
@@ -1,50 +0,0 @@
-from functools import lru_cache
-
-import numpy as np
-import torch
-from PIL import Image
-
-from imaginairy.utils import get_cached_url_path, get_device
-
-
-@lru_cache()
-def face_enhance_model(model_type="codeformer"):
-    from gfpgan import GFPGANer
-
-    if model_type == "gfpgan":
-        arch = "clean"
-        url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
-    elif model_type == "codeformer":
-        arch = "CodeFormer"
-        url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/CodeFormer.pth"
-    else:
-        raise ValueError("model_type must be one of gfpgan, codeformer")
-
-    model_path = get_cached_url_path(url)
-
-    if get_device() == "cuda":
-        device = "cuda"
-    else:
-        device = "cpu"
-
-    return GFPGANer(
-        model_path=model_path,
-        upscale=1,
-        arch=arch,
-        channel_multiplier=2,
-        bg_upsampler=None,
-        device=device,
-    )
-
-
-def fix_faces_gfpgan(image, model_type):
-    image = image.convert("RGB")
-    np_img = np.array(image, dtype=np.uint8)
-    cropped_faces, restored_faces, restored_img = face_enhance_model(
-        model_type
-    ).enhance(
-        np_img, has_aligned=False, only_center_face=False, paste_back=True, weight=0
-    )
-    res = Image.fromarray(restored_img)
-    return res

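The deleted module wrapped its model constructor in `@lru_cache()` so the GFPGAN/CodeFormer weights loaded once per process and were reused across calls. A self-contained sketch of that caching pattern (the dict is a stand-in for a loaded `GFPGANer` instance; `load_count` is only there to demonstrate the memoization):

```python
from functools import lru_cache

load_count = 0  # tracks how often the "expensive" constructor really runs

@lru_cache()
def face_enhance_model(model_type="codeformer"):
    global load_count
    if model_type not in ("gfpgan", "codeformer"):
        raise ValueError("model_type must be one of gfpgan, codeformer")
    load_count += 1
    return {"model_type": model_type}  # stand-in for GFPGANer(...)

face_enhance_model()
face_enhance_model()          # cache hit: constructor not re-run
face_enhance_model("gfpgan")  # different args -> one more real load
assert load_count == 2
```
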
@@ -31,12 +31,12 @@ class LatentLoggingContext:
         self.img_callback = img_callback

     def __enter__(self):
-        global _CURRENT_LOGGING_CONTEXT
+        global _CURRENT_LOGGING_CONTEXT  # noqa
         _CURRENT_LOGGING_CONTEXT = self
         return self

     def __exit__(self, exc_type, exc_val, exc_tb):
-        global _CURRENT_LOGGING_CONTEXT
+        global _CURRENT_LOGGING_CONTEXT  # noqa
         _CURRENT_LOGGING_CONTEXT = None

     def log_latents(self, latents, description):

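`LatentLoggingContext` registers itself in a module-level global on `__enter__` and clears it on `__exit__`, so sampling code buried several calls deep can log latents without a logger being threaded through every signature; the `# noqa` just silences the global-statement lint warning. A stripped-down sketch of the pattern:

```python
_CURRENT_LOGGING_CONTEXT = None

class LoggingContext:
    def __enter__(self):
        global _CURRENT_LOGGING_CONTEXT
        _CURRENT_LOGGING_CONTEXT = self
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        global _CURRENT_LOGGING_CONTEXT
        _CURRENT_LOGGING_CONTEXT = None

def log_latents(description):
    # Callable from anywhere; a silent no-op outside the context.
    if _CURRENT_LOGGING_CONTEXT is not None:
        print(f"logged: {description}")

with LoggingContext():
    log_latents("after step 1")  # prints
log_latents("after step 2")      # skipped
```
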
@@ -36,7 +36,7 @@ class LazyLoadingImage:
         try:
             parsed_url = parse_url(url)
         except LocationParseError:
-            raise InvalidUrlError(f"Invalid url: {url}")
+            raise InvalidUrlError(f"Invalid url: {url}")  # noqa
         if parsed_url.scheme not in {"http", "https"} or not parsed_url.host:
             raise InvalidUrlError(f"Invalid url: {url}")
@@ -55,7 +55,7 @@ class LazyLoadingImage:
                 f"Loaded input 🖼 of size {self._img.size} from {self._lazy_filepath}"
             )
         elif self._lazy_url:
-            self._img = Image.open(requests.get(self._lazy_url, stream=True).raw)
+            self._img = Image.open(requests.get(self._lazy_url, stream=True, timeout=60).raw)
             logger.info(
                 f"Loaded input 🖼 of size {self._img.size} from {self._lazy_url}"
            )

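The `timeout=60` addition matters because `requests.get()` has no default timeout, so a stalled host would hang image loading indefinitely; `stream=True` lets PIL read straight off the response body. A standalone version of the same fetch (the helper name is mine, not the library's):

```python
import requests
from PIL import Image

def load_image_from_url(url, timeout=60):
    # Without an explicit timeout, requests will wait forever on a dead host.
    resp = requests.get(url, stream=True, timeout=timeout)
    resp.raise_for_status()          # surface HTTP errors instead of decode errors
    resp.raw.decode_content = True   # transparently handle gzip/deflate encoding
    return Image.open(resp.raw)
```
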
@@ -11,6 +11,7 @@ import requests
 import torch
 from PIL import Image
 from torch import Tensor
+from torch.nn import functional
 from torch.overrides import handle_torch_function, has_torch_function_variadic
 from transformers import cached_path
@@ -21,10 +22,11 @@ logger = logging.getLogger(__name__)
 def get_device():
     if torch.cuda.is_available():
         return "cuda"
-    elif torch.backends.mps.is_available():
+    if torch.backends.mps.is_available():
         return "mps"
-    else:
-        return "cpu"
+    return "cpu"


 @lru_cache()
@@ -40,13 +42,13 @@ def log_params(model):
 def instantiate_from_config(config):
-    if not "target" in config:
+    if "target" not in config:
         if config == "__is_first_stage__":
             return None
-        elif config == "__is_unconditional__":
+        if config == "__is_unconditional__":
             return None
         raise KeyError("Expected key `target` to instantiate.")
-    return get_obj_from_str(config["target"])(**config.get("params", dict()))
+    return get_obj_from_str(config["target"])(**config.get("params", {}))


 def get_obj_from_str(string, reload=False):
@@ -58,13 +60,15 @@ def get_obj_from_str(string, reload=False):
 def _fixed_layer_norm(
-    input: Tensor,
+    input: Tensor,  # noqa
     normalized_shape: List[int],
     weight: Optional[Tensor] = None,
     bias: Optional[Tensor] = None,
     eps: float = 1e-5,
 ) -> Tensor:
-    r"""Applies Layer Normalization for last certain number of dimensions.
+    """
+    Applies Layer Normalization for last certain number of dimensions.
+
     See :class:`~torch.nn.LayerNorm` for details.
     """
     if has_torch_function_variadic(input, weight, bias):
@@ -90,8 +94,6 @@ def _fixed_layer_norm(
 @contextmanager
 def fix_torch_nn_layer_norm():
     """https://github.com/CompVis/stable-diffusion/issues/25#issuecomment-1221416526"""
-    from torch.nn import functional
-
     orig_function = functional.layer_norm
     functional.layer_norm = _fixed_layer_norm
     try:
@@ -143,7 +145,7 @@ def get_cached_url_path(url):
     dest_path = os.path.join(dest, filename)
     if os.path.exists(dest_path):
         return dest_path
-    r = requests.get(url)
+    r = requests.get(url)  # noqa
     with open(dest_path, "wb") as f:
         f.write(r.content)
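
`fix_torch_nn_layer_norm()` above is a patch-and-restore context manager: it saves `functional.layer_norm`, swaps in the fixed version, and restores the original in a `finally` block so the patch cannot leak past the `with` statement. The same shape in miniature (`_patched_layer_norm` here is a pass-through stand-in, not the actual fix):

```python
from contextlib import contextmanager

from torch.nn import functional

_orig_layer_norm = functional.layer_norm

def _patched_layer_norm(*args, **kwargs):
    # A real patch would change behavior here; this one just delegates.
    return _orig_layer_norm(*args, **kwargs)

@contextmanager
def patched_layer_norm():
    functional.layer_norm = _patched_layer_norm
    try:
        yield
    finally:
        # Restored even if the wrapped code raises.
        functional.layer_norm = _orig_layer_norm

with patched_layer_norm():
    pass  # torch code run here sees the patched layer_norm
```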
