refactor: move model_manager to utils

Authored by Bryce on 2023-12-15 13:42:45 -08:00; committed by Bryce Drennan
Parent: d478771cc0
Commit: ad561e8833
22 changed files with 31 additions and 31 deletions
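
The diff is a pure module move: every import of imaginairy.model_manager is updated to imaginairy.utils.model_manager, and the hunks below are all import-path updates. A minimal sketch of the pattern a caller follows after this commit, using get_cached_url_path (one of the names touched below); the URL in the example is a placeholder, not taken from the repo:

    # Before this commit:
    #   from imaginairy.model_manager import get_cached_url_path
    # After this commit, the same name is imported from the utils subpackage:
    from imaginairy.utils.model_manager import get_cached_url_path

    # Call sites are unchanged; only the import path moves.
    # (Placeholder URL; the keyword argument mirrors existing usage in this diff.)
    weights_path = get_cached_url_path("https://example.org/weights.ckpt", category="weights")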

@@ -237,10 +237,6 @@ def _generate_single_image_compvis(
from imaginairy.enhancers.describe_image_blip import generate_caption
from imaginairy.enhancers.face_restoration_codeformer import enhance_faces
from imaginairy.enhancers.upscale_realesrgan import upscale_image
from imaginairy.model_manager import (
get_diffusion_model,
get_model_default_image_size,
)
from imaginairy.modules.midas.api import torch_image_to_depth_map
from imaginairy.safety import create_safety_score
from imaginairy.samplers import SOLVER_LOOKUP
@@ -260,6 +256,10 @@ def _generate_single_image_compvis(
log_img,
log_latent,
)
from imaginairy.utils.model_manager import (
get_diffusion_model,
get_model_default_image_size,
)
from imaginairy.utils.outpaint import (
outpaint_arg_str_parse,
prepare_image_for_outpaint,

@@ -35,10 +35,6 @@ def _generate_single_image(
from imaginairy.enhancers.describe_image_blip import generate_caption
from imaginairy.enhancers.face_restoration_codeformer import enhance_faces
from imaginairy.enhancers.upscale_realesrgan import upscale_image
from imaginairy.model_manager import (
get_diffusion_model_refiners,
get_model_default_image_size,
)
from imaginairy.safety import create_safety_score
from imaginairy.samplers import SolverName
from imaginairy.schema import ImagineResult
@@ -54,6 +50,10 @@ def _generate_single_image(
log_img,
log_latent,
)
from imaginairy.utils.model_manager import (
get_diffusion_model_refiners,
get_model_default_image_size,
)
from imaginairy.utils.outpaint import (
outpaint_arg_str_parse,
prepare_image_for_outpaint,

@@ -8,8 +8,8 @@ import torch
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils import get_device
from imaginairy.utils.model_manager import get_cached_url_path
from imaginairy.vendored.blip.blip import BLIP_Decoder, load_checkpoint
device = get_device()

@@ -9,7 +9,7 @@ from facexlib.utils.face_restoration_helper import FaceRestoreHelper
from PIL import Image
from torchvision.transforms.functional import normalize
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils.model_manager import get_cached_url_path
from imaginairy.vendored.basicsr.img_util import img2tensor, tensor2img
from imaginairy.vendored.codeformer.codeformer_arch import CodeFormer

@@ -4,9 +4,9 @@ import numpy as np
import torch
from PIL import Image
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils import get_device
from imaginairy.utils.model_cache import memory_managed_model
from imaginairy.utils.model_manager import get_cached_url_path
from imaginairy.vendored.basicsr.rrdbnet_arch import RRDBNet
from imaginairy.vendored.realesrgan import RealESRGANer

@@ -7,9 +7,9 @@ import torch
import torch.nn.functional as F
from torch import nn
from imaginairy.model_manager import hf_hub_download
from imaginairy.utils import get_device, platform_appropriate_autocast
from imaginairy.utils.log_utils import log_latent
from imaginairy.utils.model_manager import hf_hub_download
from imaginairy.vendored import k_diffusion as K
from imaginairy.vendored.k_diffusion import layers
from imaginairy.vendored.k_diffusion.models.image_v1 import ImageDenoiserModelV1

@@ -6,8 +6,8 @@ import cv2
import numpy as np
import torch
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils import get_device
from imaginairy.utils.model_manager import get_cached_url_path
class Network(torch.nn.Module):

@@ -11,9 +11,9 @@ import torch
from scipy.ndimage.filters import gaussian_filter
from torch import nn
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils import get_device
from imaginairy.utils.img_utils import torch_image_to_openvcv_img
from imaginairy.utils.model_manager import get_cached_url_path
def pad_right_down_corner(img, stride, padValue):

@@ -3,7 +3,7 @@
import torch
from imaginairy import config
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils.model_manager import get_cached_url_path
class BaseModel(torch.nn.Module):

@@ -308,7 +308,7 @@ monkeypatch_sd1controlnetadapter()
@lru_cache(maxsize=4)
def get_controlnet(name, weights_location, device, dtype):
from imaginairy.model_manager import load_state_dict
from imaginairy.utils.model_manager import load_state_dict
controlnet_state_dict = load_state_dict(weights_location, half_mode=False)
controlnet_state_dict = cast_weights(

@@ -27,7 +27,7 @@ from typing_extensions import Self
from imaginairy import config
if TYPE_CHECKING:
from pathlib import Path
from pathlib import Path # noqa
from PIL import Image
@@ -504,7 +504,7 @@ class ImaginePrompt(BaseModel, protected_namespaces=()):
model_weights = data.get("model_weights")
if model_weights is None:
model_weights = config.DEFAULT_MODEL_WEIGHTS
from imaginairy.model_manager import resolve_model_weights_config
from imaginairy.utils.model_manager import resolve_model_weights_config
should_use_inpainting = bool(
data.get("mask_image") or data.get("mask_prompt") or data.get("outpaint")
@@ -578,7 +578,7 @@ class ImaginePrompt(BaseModel, protected_namespaces=()):
@field_validator("size", mode="before")
def validate_image_size(cls, v, info: core_schema.FieldValidationInfo):
from imaginairy.model_manager import get_model_default_image_size
from imaginairy.utils.model_manager import get_model_default_image_size
from imaginairy.utils.named_resolutions import normalize_image_size
if v is None:

@@ -30,9 +30,9 @@ from pytorch_lightning.utilities.distributed import rank_zero_only
from torch.utils.data import DataLoader, Dataset
from imaginairy import config
from imaginairy.model_manager import get_diffusion_model
from imaginairy.training_tools.single_concept import SingleConceptDataset
from imaginairy.utils import get_device, instantiate_from_config
from imaginairy.utils.model_manager import get_diffusion_model
mod_logger = logging.getLogger(__name__)

@@ -122,7 +122,7 @@ def torch_img_to_pillow_img(img_t: torch.Tensor) -> PIL.Image.Image:
def model_latent_to_pillow_img(latent: torch.Tensor) -> PIL.Image.Image:
from imaginairy.model_manager import get_current_diffusion_model
from imaginairy.utils.model_manager import get_current_diffusion_model
if len(latent.shape) == 3:
latent = latent.unsqueeze(0)

@@ -531,7 +531,7 @@ def extract_huggingface_repo_commit_file_from_url(url):
def download_diffusers_weights(repo, sub, filename):
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils.model_manager import get_cached_url_path
url = f"https://huggingface.co/{repo}/resolve/main/{sub}/{filename}"
return get_cached_url_path(url, category="weights")
@@ -613,8 +613,8 @@ def open_weights(filepath, device=None):
def load_stable_diffusion_compvis_weights(weights_url):
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils import get_device
from imaginairy.utils.model_manager import get_cached_url_path
from imaginairy.weight_management.conversion import cast_weights
from imaginairy.weight_management.utils import (
COMPONENT_NAMES,

@@ -8,7 +8,7 @@ import numpy as np
import torch
from torch.nn import functional as F
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils.model_manager import get_cached_url_path
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

@@ -19,7 +19,6 @@ from PIL import Image
from torchvision.transforms import ToTensor
from imaginairy import config
from imaginairy.model_manager import get_cached_url_path
from imaginairy.schema import LazyLoadingImage
from imaginairy.utils import (
default,
@@ -27,6 +26,7 @@ from imaginairy.utils import (
instantiate_from_config,
platform_appropriate_autocast,
)
from imaginairy.utils.model_manager import get_cached_url_path
from imaginairy.utils.paths import PKG_ROOT
logger = logging.getLogger(__name__)

@@ -3,8 +3,8 @@
import torch
from transformers import CLIPTextModelWithProjection
from imaginairy.model_manager import get_diffusion_model
from imaginairy.utils import get_device
from imaginairy.utils.model_manager import get_diffusion_model
from imaginairy.weight_management import utils

@@ -2,7 +2,7 @@
import safetensors
from imaginairy.model_manager import (
from imaginairy.utils.model_manager import (
get_cached_url_path,
open_weights,
resolve_model_weights_config,

@@ -3,7 +3,7 @@ import os
import torch
from safetensors.torch import load_file, save_file
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils.model_manager import get_cached_url_path
from imaginairy.utils.paths import PKG_ROOT
sd15_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/889b629140e71758e1e0006e355c331a5744b4bf/v1-5-pruned-emaonly.ckpt"

@@ -1,4 +1,4 @@
from imaginairy.model_manager import load_tensors
from imaginairy.utils.model_manager import load_tensors
def dotstrings_to_nested_dictionaries(list_of_dotstrings):

@@ -4,7 +4,6 @@ from PIL import Image
from torch.nn.functional import interpolate
from imaginairy.enhancers.upscale_riverwing import upscale_latent
from imaginairy.model_manager import get_diffusion_model
from imaginairy.schema import LazyLoadingImage
from imaginairy.utils import get_device
from imaginairy.utils.img_utils import (
@@ -12,6 +11,7 @@ from imaginairy.utils.img_utils import (
pillow_img_to_torch_image,
torch_img_to_pillow_img,
)
from imaginairy.utils.model_manager import get_diffusion_model
from tests import TESTS_FOLDER
strat_combos = [

@@ -1,5 +1,5 @@
from imaginairy import config
from imaginairy.model_manager import resolve_model_weights_config
from imaginairy.utils.model_manager import resolve_model_weights_config
def test_resolved_paths():