diff --git a/imaginairy/api.py b/imaginairy/api.py index a9e65ee..65eca17 100755 --- a/imaginairy/api.py +++ b/imaginairy/api.py @@ -237,10 +237,6 @@ def _generate_single_image_compvis( from imaginairy.enhancers.describe_image_blip import generate_caption from imaginairy.enhancers.face_restoration_codeformer import enhance_faces from imaginairy.enhancers.upscale_realesrgan import upscale_image - from imaginairy.model_manager import ( - get_diffusion_model, - get_model_default_image_size, - ) from imaginairy.modules.midas.api import torch_image_to_depth_map from imaginairy.safety import create_safety_score from imaginairy.samplers import SOLVER_LOOKUP @@ -260,6 +256,10 @@ def _generate_single_image_compvis( log_img, log_latent, ) + from imaginairy.utils.model_manager import ( + get_diffusion_model, + get_model_default_image_size, + ) from imaginairy.utils.outpaint import ( outpaint_arg_str_parse, prepare_image_for_outpaint, diff --git a/imaginairy/api_refiners.py b/imaginairy/api_refiners.py index 82fa889..a264e88 100644 --- a/imaginairy/api_refiners.py +++ b/imaginairy/api_refiners.py @@ -35,10 +35,6 @@ def _generate_single_image( from imaginairy.enhancers.describe_image_blip import generate_caption from imaginairy.enhancers.face_restoration_codeformer import enhance_faces from imaginairy.enhancers.upscale_realesrgan import upscale_image - from imaginairy.model_manager import ( - get_diffusion_model_refiners, - get_model_default_image_size, - ) from imaginairy.safety import create_safety_score from imaginairy.samplers import SolverName from imaginairy.schema import ImagineResult @@ -54,6 +50,10 @@ def _generate_single_image( log_img, log_latent, ) + from imaginairy.utils.model_manager import ( + get_diffusion_model_refiners, + get_model_default_image_size, + ) from imaginairy.utils.outpaint import ( outpaint_arg_str_parse, prepare_image_for_outpaint, diff --git a/imaginairy/enhancers/describe_image_blip.py b/imaginairy/enhancers/describe_image_blip.py index 
411e98f..dfff634 100644 --- a/imaginairy/enhancers/describe_image_blip.py +++ b/imaginairy/enhancers/describe_image_blip.py @@ -8,8 +8,8 @@ import torch from torchvision import transforms from torchvision.transforms.functional import InterpolationMode -from imaginairy.model_manager import get_cached_url_path from imaginairy.utils import get_device +from imaginairy.utils.model_manager import get_cached_url_path from imaginairy.vendored.blip.blip import BLIP_Decoder, load_checkpoint device = get_device() diff --git a/imaginairy/enhancers/face_restoration_codeformer.py b/imaginairy/enhancers/face_restoration_codeformer.py index 1f48847..82f0c2b 100644 --- a/imaginairy/enhancers/face_restoration_codeformer.py +++ b/imaginairy/enhancers/face_restoration_codeformer.py @@ -9,7 +9,7 @@ from facexlib.utils.face_restoration_helper import FaceRestoreHelper from PIL import Image from torchvision.transforms.functional import normalize -from imaginairy.model_manager import get_cached_url_path +from imaginairy.utils.model_manager import get_cached_url_path from imaginairy.vendored.basicsr.img_util import img2tensor, tensor2img from imaginairy.vendored.codeformer.codeformer_arch import CodeFormer diff --git a/imaginairy/enhancers/upscale_realesrgan.py b/imaginairy/enhancers/upscale_realesrgan.py index 5050788..4fb17c8 100644 --- a/imaginairy/enhancers/upscale_realesrgan.py +++ b/imaginairy/enhancers/upscale_realesrgan.py @@ -4,9 +4,9 @@ import numpy as np import torch from PIL import Image -from imaginairy.model_manager import get_cached_url_path from imaginairy.utils import get_device from imaginairy.utils.model_cache import memory_managed_model +from imaginairy.utils.model_manager import get_cached_url_path from imaginairy.vendored.basicsr.rrdbnet_arch import RRDBNet from imaginairy.vendored.realesrgan import RealESRGANer diff --git a/imaginairy/enhancers/upscale_riverwing.py b/imaginairy/enhancers/upscale_riverwing.py index b6d6821..4f808a5 100644 --- 
a/imaginairy/enhancers/upscale_riverwing.py +++ b/imaginairy/enhancers/upscale_riverwing.py @@ -7,9 +7,9 @@ import torch import torch.nn.functional as F from torch import nn -from imaginairy.model_manager import hf_hub_download from imaginairy.utils import get_device, platform_appropriate_autocast from imaginairy.utils.log_utils import log_latent +from imaginairy.utils.model_manager import hf_hub_download from imaginairy.vendored import k_diffusion as K from imaginairy.vendored.k_diffusion import layers from imaginairy.vendored.k_diffusion.models.image_v1 import ImageDenoiserModelV1 diff --git a/imaginairy/img_processors/hed_boundary.py b/imaginairy/img_processors/hed_boundary.py index 6da7c0e..190b871 100644 --- a/imaginairy/img_processors/hed_boundary.py +++ b/imaginairy/img_processors/hed_boundary.py @@ -6,8 +6,8 @@ import cv2 import numpy as np import torch -from imaginairy.model_manager import get_cached_url_path from imaginairy.utils import get_device +from imaginairy.utils.model_manager import get_cached_url_path class Network(torch.nn.Module): diff --git a/imaginairy/img_processors/openpose.py b/imaginairy/img_processors/openpose.py index 13edcf7..897611f 100644 --- a/imaginairy/img_processors/openpose.py +++ b/imaginairy/img_processors/openpose.py @@ -11,9 +11,9 @@ import torch from scipy.ndimage.filters import gaussian_filter from torch import nn -from imaginairy.model_manager import get_cached_url_path from imaginairy.utils import get_device from imaginairy.utils.img_utils import torch_image_to_openvcv_img +from imaginairy.utils.model_manager import get_cached_url_path def pad_right_down_corner(img, stride, padValue): diff --git a/imaginairy/modules/midas/midas/base_model.py b/imaginairy/modules/midas/midas/base_model.py index d90399f..fd8ba62 100644 --- a/imaginairy/modules/midas/midas/base_model.py +++ b/imaginairy/modules/midas/midas/base_model.py @@ -3,7 +3,7 @@ import torch from imaginairy import config -from imaginairy.model_manager import 
get_cached_url_path +from imaginairy.utils.model_manager import get_cached_url_path class BaseModel(torch.nn.Module): diff --git a/imaginairy/modules/refiners_sd.py b/imaginairy/modules/refiners_sd.py index 5e7f17c..2b2f00f 100644 --- a/imaginairy/modules/refiners_sd.py +++ b/imaginairy/modules/refiners_sd.py @@ -308,7 +308,7 @@ monkeypatch_sd1controlnetadapter() @lru_cache(maxsize=4) def get_controlnet(name, weights_location, device, dtype): - from imaginairy.model_manager import load_state_dict + from imaginairy.utils.model_manager import load_state_dict controlnet_state_dict = load_state_dict(weights_location, half_mode=False) controlnet_state_dict = cast_weights( diff --git a/imaginairy/schema.py b/imaginairy/schema.py index 49bb6b2..66446b0 100644 --- a/imaginairy/schema.py +++ b/imaginairy/schema.py @@ -27,7 +27,7 @@ from typing_extensions import Self from imaginairy import config if TYPE_CHECKING: - from pathlib import Path + from pathlib import Path # noqa from PIL import Image @@ -504,7 +504,7 @@ class ImaginePrompt(BaseModel, protected_namespaces=()): model_weights = data.get("model_weights") if model_weights is None: model_weights = config.DEFAULT_MODEL_WEIGHTS - from imaginairy.model_manager import resolve_model_weights_config + from imaginairy.utils.model_manager import resolve_model_weights_config should_use_inpainting = bool( data.get("mask_image") or data.get("mask_prompt") or data.get("outpaint") @@ -578,7 +578,7 @@ class ImaginePrompt(BaseModel, protected_namespaces=()): @field_validator("size", mode="before") def validate_image_size(cls, v, info: core_schema.FieldValidationInfo): - from imaginairy.model_manager import get_model_default_image_size + from imaginairy.utils.model_manager import get_model_default_image_size from imaginairy.utils.named_resolutions import normalize_image_size if v is None: diff --git a/imaginairy/training_tools/train.py b/imaginairy/training_tools/train.py index 4f8f4ac..abb0a26 100644 --- 
a/imaginairy/training_tools/train.py +++ b/imaginairy/training_tools/train.py @@ -30,9 +30,9 @@ from pytorch_lightning.utilities.distributed import rank_zero_only from torch.utils.data import DataLoader, Dataset from imaginairy import config -from imaginairy.model_manager import get_diffusion_model from imaginairy.training_tools.single_concept import SingleConceptDataset from imaginairy.utils import get_device, instantiate_from_config +from imaginairy.utils.model_manager import get_diffusion_model mod_logger = logging.getLogger(__name__) diff --git a/imaginairy/utils/img_utils.py b/imaginairy/utils/img_utils.py index 2318e2c..53ac53a 100644 --- a/imaginairy/utils/img_utils.py +++ b/imaginairy/utils/img_utils.py @@ -122,7 +122,7 @@ def torch_img_to_pillow_img(img_t: torch.Tensor) -> PIL.Image.Image: def model_latent_to_pillow_img(latent: torch.Tensor) -> PIL.Image.Image: - from imaginairy.model_manager import get_current_diffusion_model + from imaginairy.utils.model_manager import get_current_diffusion_model if len(latent.shape) == 3: latent = latent.unsqueeze(0) diff --git a/imaginairy/model_manager.py b/imaginairy/utils/model_manager.py similarity index 99% rename from imaginairy/model_manager.py rename to imaginairy/utils/model_manager.py index 81e3d85..0938f31 100644 --- a/imaginairy/model_manager.py +++ b/imaginairy/utils/model_manager.py @@ -531,7 +531,7 @@ def extract_huggingface_repo_commit_file_from_url(url): def download_diffusers_weights(repo, sub, filename): - from imaginairy.model_manager import get_cached_url_path + from imaginairy.utils.model_manager import get_cached_url_path url = f"https://huggingface.co/{repo}/resolve/main/{sub}/{filename}" return get_cached_url_path(url, category="weights") @@ -613,8 +613,8 @@ def open_weights(filepath, device=None): def load_stable_diffusion_compvis_weights(weights_url): - from imaginairy.model_manager import get_cached_url_path from imaginairy.utils import get_device + from imaginairy.utils.model_manager import 
get_cached_url_path from imaginairy.weight_management.conversion import cast_weights from imaginairy.weight_management.utils import ( COMPONENT_NAMES, diff --git a/imaginairy/vendored/realesrgan.py b/imaginairy/vendored/realesrgan.py index 936f8ba..f7c92d0 100644 --- a/imaginairy/vendored/realesrgan.py +++ b/imaginairy/vendored/realesrgan.py @@ -8,7 +8,7 @@ import numpy as np import torch from torch.nn import functional as F -from imaginairy.model_manager import get_cached_url_path +from imaginairy.utils.model_manager import get_cached_url_path ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) diff --git a/imaginairy/video_sample.py b/imaginairy/video_sample.py index f50036f..f500660 100644 --- a/imaginairy/video_sample.py +++ b/imaginairy/video_sample.py @@ -19,7 +19,6 @@ from PIL import Image from torchvision.transforms import ToTensor from imaginairy import config -from imaginairy.model_manager import get_cached_url_path from imaginairy.schema import LazyLoadingImage from imaginairy.utils import ( default, @@ -27,6 +26,7 @@ from imaginairy.utils import ( instantiate_from_config, platform_appropriate_autocast, ) +from imaginairy.utils.model_manager import get_cached_url_path from imaginairy.utils.paths import PKG_ROOT logger = logging.getLogger(__name__) diff --git a/imaginairy/weight_management/execution_trace.py b/imaginairy/weight_management/execution_trace.py index fc825b1..95589dc 100644 --- a/imaginairy/weight_management/execution_trace.py +++ b/imaginairy/weight_management/execution_trace.py @@ -3,8 +3,8 @@ import torch from transformers import CLIPTextModelWithProjection -from imaginairy.model_manager import get_diffusion_model from imaginairy.utils import get_device +from imaginairy.utils.model_manager import get_diffusion_model from imaginairy.weight_management import utils diff --git a/imaginairy/weight_management/generate_weight_info.py b/imaginairy/weight_management/generate_weight_info.py index 748ef5a..927fdf8 100644 --- 
a/imaginairy/weight_management/generate_weight_info.py +++ b/imaginairy/weight_management/generate_weight_info.py @@ -2,7 +2,7 @@ import safetensors -from imaginairy.model_manager import ( +from imaginairy.utils.model_manager import ( get_cached_url_path, open_weights, resolve_model_weights_config, diff --git a/scripts/controlnet_convert.py b/scripts/controlnet_convert.py index 33defd9..d3d7a14 100644 --- a/scripts/controlnet_convert.py +++ b/scripts/controlnet_convert.py @@ -3,7 +3,7 @@ import os import torch from safetensors.torch import load_file, save_file -from imaginairy.model_manager import get_cached_url_path +from imaginairy.utils.model_manager import get_cached_url_path from imaginairy.utils.paths import PKG_ROOT sd15_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/889b629140e71758e1e0006e355c331a5744b4bf/v1-5-pruned-emaonly.ckpt" diff --git a/scripts/weights_debug.py b/scripts/weights_debug.py index 042bb18..1b45c25 100644 --- a/scripts/weights_debug.py +++ b/scripts/weights_debug.py @@ -1,4 +1,4 @@ -from imaginairy.model_manager import load_tensors +from imaginairy.utils.model_manager import load_tensors def dotstrings_to_nested_dictionaries(list_of_dotstrings): diff --git a/tests/modules/test_autoencoders.py b/tests/modules/test_autoencoders.py index f40fc8d..f049a5f 100644 --- a/tests/modules/test_autoencoders.py +++ b/tests/modules/test_autoencoders.py @@ -4,7 +4,6 @@ from PIL import Image from torch.nn.functional import interpolate from imaginairy.enhancers.upscale_riverwing import upscale_latent -from imaginairy.model_manager import get_diffusion_model from imaginairy.schema import LazyLoadingImage from imaginairy.utils import get_device from imaginairy.utils.img_utils import ( @@ -12,6 +11,7 @@ from imaginairy.utils.img_utils import ( pillow_img_to_torch_image, torch_img_to_pillow_img, ) +from imaginairy.utils.model_manager import get_diffusion_model from tests import TESTS_FOLDER strat_combos = [ diff --git 
a/tests/test_model_manager.py b/tests/test_model_manager.py index 16bcb0d..36b9f48 100644 --- a/tests/test_model_manager.py +++ b/tests/test_model_manager.py @@ -1,5 +1,5 @@ from imaginairy import config -from imaginairy.model_manager import resolve_model_weights_config +from imaginairy.utils.model_manager import resolve_model_weights_config def test_resolved_paths():