feature: use refiners library for generation

BREAKING CHANGE

  - stable diffusion 1.5 + inpainting working
  - self-attention guidance working; improves image generation quality
  - tile-mode working
  - inpainting self-attention guidance working

disabled/broken features:
  - sd 1.4, 2.0, 2.1
  - most of the samplers
  - pix2pix edit
  - most of the controlnets
  - memory management
  - python 3.8 support

wip
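
A minimal usage sketch of the refiners-backed path (assumes the public imagine()/ImaginePrompt API is otherwise unchanged by this commit; the exact keyword names below are assumptions, not taken from this diff):

from imaginairy import ImaginePrompt, imagine

# Hedged sketch: k_dpmpp_2m and ddim are the only samplers left enabled,
# and tile_mode="xy" exercises the new seamless-tiling support.
prompt = ImaginePrompt(
    "a photo of a corgi wearing a top hat",
    sampler_type="k_dpmpp_2m",
    tile_mode="xy",
    seed=42,
)
result = next(imagine([prompt]))
result.img.save("corgi.png")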
Bryce 2023-11-15 19:46:56 -08:00 committed by Bryce Drennan
parent 6cd519cdb2
commit f97f6a3b4b
80 changed files with 7217 additions and 175 deletions


@ -54,10 +54,11 @@ jobs:
run: |
black --diff --fast .
test:
runs-on: ubuntu-latest
runs-on: macos-13-xlarge
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.10"]
python-version: ["3.10"]
subset: ["1/10", "2/10", "3/10", "4/10", "5/10", "6/10", "7/10", "8/10", "9/10", "10/10"]
steps:
- uses: actions/checkout@v3
@ -69,7 +70,6 @@ jobs:
cache-dependency-path: requirements-dev.txt
- name: Install dependencies
run: |
python -m pip install torch==1.13.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
python -m pip install -r requirements-dev.txt .
- name: Get current date
id: date


@ -16,9 +16,9 @@ init: require_pyenv ## Setup a dev environment for local development.
@echo -e "\033[0;32m ✔️ 🐍 $(venv_name) virtualenv activated \033[0m"
pip install --upgrade pip pip-tools
pip-sync requirements-dev.txt
pip install -e . --no-deps
pip install -e .
# the compiled requirements don't include OS-specific subdependencies so we trigger those this way
pip install `pip freeze | grep "^torch=="`
#pip install `pip freeze | grep "^torch=="`
@echo -e "\nEnvironment setup! ✨ 🍰 ✨ 🐍 \n\nCopy this path to tell PyCharm where your virtualenv is. You may have to click the refresh button in the pycharm file explorer.\n"
@echo -e "\033[0;32m"
@pyenv which python


@ -2,6 +2,8 @@ import os
# tells pytorch to allow MPS usage (for Mac M1 compatibility)
os.putenv("PYTORCH_ENABLE_MPS_FALLBACK", "1")
# use more memory than we should
os.putenv("PYTORCH_MPS_HIGH_WATERMARK_RATIO", "0.0")
import sys # noqa


@ -141,6 +141,7 @@ def imagine(
):
import torch.nn
from imaginairy.api_refiners import _generate_single_image
from imaginairy.schema import ImaginePrompt
from imaginairy.utils import (
check_torch_version,
@ -190,7 +191,7 @@ def imagine(
yield result
def _generate_single_image(
def _generate_single_image_compvis(
prompt,
debug_img_callback=None,
progress_img_callback=None,
@ -674,8 +675,10 @@ def _scale_latent(
def _generate_composition_image(prompt, target_height, target_width, cutoff=512):
from PIL import Image
from imaginairy.api_refiners import _generate_single_image
if prompt.width <= cutoff and prompt.height <= cutoff:
return None
return None, None
shrink_scale = calc_scale_to_fit_within(
height=prompt.height,
@ -713,7 +716,7 @@ def _generate_composition_image(prompt, target_height, target_width, cutoff=512)
resample=Image.Resampling.LANCZOS,
)
return img
return img, result.images["generated"]
def prompt_normalized(prompt, length=130):

imaginairy/api_refiners.py (new file, +433 lines)

@ -0,0 +1,433 @@
import logging
from typing import List, Optional
from imaginairy import WeightedPrompt
from imaginairy.config import CONTROLNET_CONFIG_SHORTCUTS
from imaginairy.model_manager import load_controlnet_adapter
logger = logging.getLogger(__name__)
def _generate_single_image(
prompt,
debug_img_callback=None,
progress_img_callback=None,
progress_img_interval_steps=3,
progress_img_interval_min_s=0.1,
half_mode=None,
add_caption=False,
# controlnet, finetune, naive, auto
inpaint_method="finetune",
return_latent=False,
):
import gc
import torch.nn
from PIL import ImageOps
from pytorch_lightning import seed_everything
from refiners.foundationals.latent_diffusion.schedulers import DDIM, DPMSolver
from tqdm import tqdm
from imaginairy.api import (
IMAGINAIRY_SAFETY_MODE,
_generate_composition_image,
combine_image,
)
from imaginairy.enhancers.clip_masking import get_img_mask
from imaginairy.enhancers.describe_image_blip import generate_caption
from imaginairy.enhancers.face_restoration_codeformer import enhance_faces
from imaginairy.enhancers.upscale_realesrgan import upscale_image
from imaginairy.img_utils import (
add_caption_to_image,
pillow_fit_image_within,
pillow_img_to_torch_image,
pillow_mask_to_latent_mask,
)
from imaginairy.log_utils import (
ImageLoggingContext,
log_img,
log_latent,
)
from imaginairy.model_manager import (
get_diffusion_model_refiners,
get_model_default_image_size,
)
from imaginairy.outpaint import outpaint_arg_str_parse, prepare_image_for_outpaint
from imaginairy.safety import create_safety_score
from imaginairy.samplers import SamplerName
from imaginairy.schema import ImaginePrompt, ImagineResult
from imaginairy.utils import get_device, randn_seeded
get_device()
gc.collect()
torch.cuda.empty_cache()
prompt = prompt.make_concrete_copy()
control_modes = []
control_inputs = prompt.control_inputs or []
control_inputs = control_inputs.copy()
for_inpainting = bool(prompt.mask_image or prompt.mask_prompt or prompt.outpaint)
if control_inputs:
control_modes = [c.mode for c in prompt.control_inputs]
sd = get_diffusion_model_refiners(
weights_location=prompt.model,
config_path=prompt.model_config_path,
control_weights_locations=tuple(control_modes),
half_mode=half_mode,
for_inpainting=for_inpainting and inpaint_method == "finetune",
)
seed_everything(prompt.seed)
downsampling_factor = 8
latent_channels = 4
batch_size = 1
mask_image = None
mask_image_orig = None
prompt = prompt.make_concrete_copy()
def latent_logger(latents):
progress_latents.append(latents)
with ImageLoggingContext(
prompt=prompt,
model=sd,
debug_img_callback=debug_img_callback,
progress_img_callback=progress_img_callback,
progress_img_interval_steps=progress_img_interval_steps,
progress_img_interval_min_s=progress_img_interval_min_s,
progress_latent_callback=latent_logger
if prompt.collect_progress_latents
else None,
) as lc:
sd.set_tile_mode(prompt.tile_mode)
clip_text_embedding = _calc_conditioning(
positive_prompts=prompt.prompts,
negative_prompts=prompt.negative_prompt,
positive_conditioning=prompt.conditioning,
text_encoder=sd.clip_text_encoder,
)
result_images = {}
progress_latents = []
first_step = 0
mask_grayscale = None
shape = [
batch_size,
latent_channels,
prompt.height // downsampling_factor,
prompt.width // downsampling_factor,
]
init_latent = None
if prompt.init_image:
starting_image = prompt.init_image
first_step = int((prompt.steps - 1) * prompt.init_image_strength)
if prompt.mask_prompt:
mask_image, mask_grayscale = get_img_mask(
starting_image, prompt.mask_prompt, threshold=0.1
)
elif prompt.mask_image:
mask_image = prompt.mask_image.convert("L")
if prompt.outpaint:
outpaint_kwargs = outpaint_arg_str_parse(prompt.outpaint)
starting_image, mask_image = prepare_image_for_outpaint(
starting_image, mask_image, **outpaint_kwargs
)
init_image = pillow_fit_image_within(
starting_image,
max_height=prompt.height,
max_width=prompt.width,
)
init_image_t = pillow_img_to_torch_image(init_image)
init_image_t = init_image_t.to(device=sd.device, dtype=sd.dtype)
init_latent = sd.lda.encode(init_image_t)
shape = init_latent.shape
log_latent(init_latent, "init_latent")
if mask_image is not None:
mask_image = pillow_fit_image_within(
mask_image,
max_height=prompt.height,
max_width=prompt.width,
convert="L",
)
log_img(mask_image, "init mask")
if prompt.mask_mode == ImaginePrompt.MaskMode.REPLACE:
mask_image = ImageOps.invert(mask_image)
mask_image_orig = mask_image
log_img(mask_image, "latent_mask")
pillow_mask_to_latent_mask(
mask_image, downsampling_factor=downsampling_factor
).to(get_device())
# if inpaint_method == "controlnet":
# result_images["control-inpaint"] = mask_image
# control_inputs.append(
# ControlNetInput(mode="inpaint", image=mask_image)
# )
seed_everything(prompt.seed)
noise = randn_seeded(seed=prompt.seed, size=shape).to(
get_device(), dtype=sd.dtype
)
noised_latent = noise
controlnets = []
if control_modes:
control_strengths = []
from imaginairy.img_processors.control_modes import CONTROL_MODES
for control_input in control_inputs:
if control_input.image_raw is not None:
control_image = control_input.image_raw
elif control_input.image is not None:
control_image = control_input.image
control_image = control_image.convert("RGB")
log_img(control_image, "control_image_input")
control_image_input = pillow_fit_image_within(
control_image,
max_height=prompt.height,
max_width=prompt.width,
)
control_image_input_t = pillow_img_to_torch_image(control_image_input)
control_image_input_t = control_image_input_t.to(get_device())
if control_input.image_raw is None:
control_prep_function = CONTROL_MODES[control_input.mode]
if control_input.mode == "inpaint":
control_image_t = control_prep_function(
control_image_input_t, init_image_t
)
else:
control_image_t = control_prep_function(control_image_input_t)
else:
control_image_t = (control_image_input_t + 1) / 2
control_image_disp = control_image_t * 2 - 1
result_images[f"control-{control_input.mode}"] = control_image_disp
log_img(control_image_disp, "control_image")
if len(control_image_t.shape) == 3:
raise RuntimeError("Control image must be 4D")
if control_image_t.shape[1] != 3:
raise RuntimeError("Control image must have 3 channels")
if (
control_input.mode != "inpaint"
and control_image_t.min() < 0
or control_image_t.max() > 1
):
msg = f"Control image must be in [0, 1] but we received {control_image_t.min()} and {control_image_t.max()}"
raise RuntimeError(msg)
if control_image_t.max() == control_image_t.min():
msg = f"No control signal found in control image {control_input.mode}."
raise RuntimeError(msg)
control_strengths.append(control_input.strength)
control_weights_path = CONTROLNET_CONFIG_SHORTCUTS.get(
control_input.mode, None
).weights_url
controlnet = load_controlnet_adapter(
name=control_input.mode,
control_weights_location=control_weights_path,
target_unet=sd.unet,
scale=control_input.strength,
)
controlnets.append((controlnet, control_image_t))
noise_step = None
if prompt.allow_compose_phase:
if prompt.init_image:
comp_image, comp_img_orig = _generate_composition_image(
prompt=prompt,
target_height=init_image.height,
target_width=init_image.width,
cutoff=get_model_default_image_size(prompt.model),
)
else:
comp_image, comp_img_orig = _generate_composition_image(
prompt=prompt,
target_height=prompt.height,
target_width=prompt.width,
cutoff=get_model_default_image_size(prompt.model),
)
if comp_image is not None:
result_images["composition"] = comp_img_orig
result_images["composition-upscaled"] = comp_image
# noise = noise[:, :, : comp_image.height, : comp_image.shape[3]]
comp_cutoff = 0.60
first_step = int((prompt.steps - 1) * comp_cutoff)
# noise_step = int(prompt.steps * max(comp_cutoff - 0.05, 0))
# noise_step = max(noise_step, 0)
# noise_step = min(noise_step, prompt.steps - 1)
log_img(comp_image, "comp_image")
comp_image_t = pillow_img_to_torch_image(comp_image)
comp_image_t = comp_image_t.to(sd.device, dtype=sd.dtype)
init_latent = sd.lda.encode(comp_image_t)
for controlnet, control_image_t in controlnets:
controlnet.set_controlnet_condition(
control_image_t.to(device=sd.device, dtype=sd.dtype)
)
controlnet.inject()
if prompt.sampler_type.lower() == SamplerName.K_DPMPP_2M:
sd.scheduler = DPMSolver(num_inference_steps=prompt.steps)
elif prompt.sampler_type.lower() == SamplerName.DDIM:
sd.scheduler = DDIM(num_inference_steps=prompt.steps)
else:
msg = f"Unknown sampler type: {prompt.sampler_type}"
raise ValueError(msg)
sd.scheduler.to(device=sd.device, dtype=sd.dtype)
sd.set_num_inference_steps(prompt.steps)
if hasattr(sd, "mask_latents"):
sd.set_inpainting_conditions(
target_image=init_image,
mask=ImageOps.invert(mask_image),
latents_size=shape[-2:],
)
if init_latent is not None:
print(
f"noise step: {noise_step} first step: {first_step} len steps: {len(sd.steps)}"
)
noise_step = noise_step if noise_step is not None else first_step
noised_latent = sd.scheduler.add_noise(
x=init_latent, noise=noise, step=sd.steps[noise_step]
)
x = noised_latent
x = x.to(device=sd.device, dtype=sd.dtype)
for step in tqdm(sd.steps[first_step:]):
log_latent(x, "noisy_latent")
x = sd(
x,
step=step,
clip_text_embedding=clip_text_embedding,
condition_scale=prompt.prompt_strength,
)
logger.debug("Decoding image")
gen_img = sd.lda.decode_latents(x)
if mask_image_orig and init_image:
result_images["pre-reconstitution"] = gen_img
mask_final = mask_image_orig.copy()
# mask_final = ImageOps.invert(mask_final)
log_img(mask_final, "reconstituting mask")
# gen_img = Image.composite(gen_img, init_image, mask_final)
gen_img = combine_image(
original_img=init_image,
generated_img=gen_img,
mask_img=mask_final,
)
log_img(gen_img, "reconstituted image")
upscaled_img = None
rebuilt_orig_img = None
if add_caption:
caption = generate_caption(gen_img)
logger.info(f"Generated caption: {caption}")
with lc.timing("safety-filter"):
safety_score = create_safety_score(
gen_img,
safety_mode=IMAGINAIRY_SAFETY_MODE,
)
if safety_score.is_filtered:
progress_latents.clear()
if not safety_score.is_filtered:
if prompt.fix_faces:
logger.info("Fixing 😊 's in 🖼 using CodeFormer...")
with lc.timing("face enhancement"):
gen_img = enhance_faces(gen_img, fidelity=prompt.fix_faces_fidelity)
if prompt.upscale:
logger.info("Upscaling 🖼 using real-ESRGAN...")
with lc.timing("upscaling"):
upscaled_img = upscale_image(gen_img)
# put the newly generated patch back into the original, full-size image
if prompt.mask_modify_original and mask_image_orig and starting_image:
logger.info("Combining inpainting with original image...")
img_to_add_back_to_original = upscaled_img if upscaled_img else gen_img
rebuilt_orig_img = combine_image(
original_img=starting_image,
generated_img=img_to_add_back_to_original,
mask_img=mask_image_orig,
)
if prompt.caption_text:
caption_text = prompt.caption_text.format(prompt=prompt.prompt_text)
add_caption_to_image(gen_img, caption_text)
result = ImagineResult(
img=gen_img,
prompt=prompt,
upscaled_img=upscaled_img,
is_nsfw=safety_score.is_nsfw,
safety_score=safety_score,
modified_original=rebuilt_orig_img,
mask_binary=mask_image_orig,
mask_grayscale=mask_grayscale,
result_images=result_images,
timings={},
progress_latents=[],
)
_most_recent_result = result
logger.info(f"Image Generated. Timings: {result.timings_str()}")
for controlnet, _ in controlnets:
controlnet.eject()
gc.collect()
torch.cuda.empty_cache()
return result
def _prompts_to_embeddings(prompts, text_encoder):
total_weight = sum(wp.weight for wp in prompts)
conditioning = sum(
text_encoder(wp.text) * (wp.weight / total_weight) for wp in prompts
)
return conditioning
def _calc_conditioning(
positive_prompts: Optional[List[WeightedPrompt]],
negative_prompts: Optional[List[WeightedPrompt]],
positive_conditioning,
text_encoder,
):
import torch
from imaginairy.log_utils import log_conditioning
# need to expand if doing batches
neutral_conditioning = _prompts_to_embeddings(negative_prompts, text_encoder)
log_conditioning(neutral_conditioning, "neutral conditioning")
if positive_conditioning is None:
positive_conditioning = _prompts_to_embeddings(positive_prompts, text_encoder)
log_conditioning(positive_conditioning, "positive conditioning")
clip_text_embedding = torch.cat(
tensors=(neutral_conditioning, positive_conditioning), dim=0
)
return clip_text_embedding
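
To make the weighting in _prompts_to_embeddings concrete, a standalone sketch (the stand-in encoder and prompt class are hypothetical; only the weighted-average formula mirrors the code above):

import torch
from dataclasses import dataclass

@dataclass
class WP:  # stand-in for imaginairy.WeightedPrompt (text + weight)
    text: str
    weight: float

def fake_encoder(text):  # stand-in for sd.clip_text_encoder
    torch.manual_seed(len(text))
    return torch.randn(1, 77, 768)

prompts = [WP("a cat", 1.0), WP("oil painting", 0.5)]
total_weight = sum(wp.weight for wp in prompts)  # 1.5
# same formula as _prompts_to_embeddings: weighted average of per-prompt embeddings
conditioning = sum(fake_encoder(wp.text) * (wp.weight / total_weight) for wp in prompts)
print(conditioning.shape)  # torch.Size([1, 77, 768])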


@ -31,14 +31,14 @@ class ModelConfig:
midas_url = "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_hybrid-midas-501f0c75.pt"
MODEL_CONFIGS = [
ModelConfig(
description="Stable Diffusion 1.4",
short_name="SD-1.4",
config_path="configs/stable-diffusion-v1.yaml",
weights_url="https://huggingface.co/bstddev/sd-v1-4/resolve/77221977fa8de8ab8f36fac0374c120bd5b53287/sd-v1-4.ckpt",
default_image_size=512,
alias="sd14",
),
# ModelConfig(
# description="Stable Diffusion 1.4",
# short_name="SD-1.4",
# config_path="configs/stable-diffusion-v1.yaml",
# weights_url="https://huggingface.co/bstddev/sd-v1-4/resolve/77221977fa8de8ab8f36fac0374c120bd5b53287/sd-v1-4.ckpt",
# default_image_size=512,
# alias="sd14",
# ),
ModelConfig(
description="Stable Diffusion 1.5",
short_name="SD-1.5",
@ -56,72 +56,72 @@ MODEL_CONFIGS = [
default_image_size=512,
alias="sd15in",
),
ModelConfig(
description="Stable Diffusion 2.0 - bad at making people",
short_name="SD-2.0",
config_path="configs/stable-diffusion-v2-inference.yaml",
weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-base/resolve/main/512-base-ema.ckpt",
default_image_size=512,
alias="sd20",
),
ModelConfig(
description="Stable Diffusion 2.0 - Inpainting",
short_name="SD-2.0-inpaint",
config_path="configs/stable-diffusion-v2-inpainting-inference.yaml",
weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-inpainting/resolve/main/512-inpainting-ema.ckpt",
default_image_size=512,
alias="sd20in",
),
ModelConfig(
description="Stable Diffusion 2.0 v - 768x768 - bad at making people",
short_name="SD-2.0-v",
config_path="configs/stable-diffusion-v2-inference-v.yaml",
weights_url="https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt",
default_image_size=768,
alias="sd20v",
),
ModelConfig(
description="Stable Diffusion 2.0 - Depth",
short_name="SD-2.0-depth",
config_path="configs/stable-diffusion-v2-midas-inference.yaml",
weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-depth/resolve/main/512-depth-ema.ckpt",
default_image_size=512,
alias="sd20dep",
),
ModelConfig(
description="Stable Diffusion 2.1",
short_name="SD-2.1",
config_path="configs/stable-diffusion-v2-inference.yaml",
weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.ckpt",
default_image_size=512,
alias="sd21",
),
ModelConfig(
description="Stable Diffusion 2.1 - Inpainting",
short_name="SD-2.1-inpaint",
config_path="configs/stable-diffusion-v2-inpainting-inference.yaml",
weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-inpainting/resolve/main/512-inpainting-ema.ckpt",
default_image_size=512,
alias="sd21in",
),
ModelConfig(
description="Stable Diffusion 2.1 v - 768x768",
short_name="SD-2.1-v",
config_path="configs/stable-diffusion-v2-inference-v.yaml",
weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt",
default_image_size=768,
forced_attn_precision="fp32",
alias="sd21v",
),
ModelConfig(
description="Instruct Pix2Pix - Photo Editing",
short_name="instruct-pix2pix",
config_path="configs/instruct-pix2pix.yaml",
weights_url="https://huggingface.co/imaginairy/instruct-pix2pix/resolve/ea0009b3d0d4888f410a40bd06d69516d0b5a577/instruct-pix2pix-00-22000-pruned.ckpt",
default_image_size=512,
default_negative_prompt="",
alias="edit",
),
# ModelConfig(
# description="Stable Diffusion 2.0 - bad at making people",
# short_name="SD-2.0",
# config_path="configs/stable-diffusion-v2-inference.yaml",
# weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-base/resolve/main/512-base-ema.ckpt",
# default_image_size=512,
# alias="sd20",
# ),
# ModelConfig(
# description="Stable Diffusion 2.0 - Inpainting",
# short_name="SD-2.0-inpaint",
# config_path="configs/stable-diffusion-v2-inpainting-inference.yaml",
# weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-inpainting/resolve/main/512-inpainting-ema.ckpt",
# default_image_size=512,
# alias="sd20in",
# ),
# ModelConfig(
# description="Stable Diffusion 2.0 v - 768x768 - bad at making people",
# short_name="SD-2.0-v",
# config_path="configs/stable-diffusion-v2-inference-v.yaml",
# weights_url="https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt",
# default_image_size=768,
# alias="sd20v",
# ),
# ModelConfig(
# description="Stable Diffusion 2.0 - Depth",
# short_name="SD-2.0-depth",
# config_path="configs/stable-diffusion-v2-midas-inference.yaml",
# weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-depth/resolve/main/512-depth-ema.ckpt",
# default_image_size=512,
# alias="sd20dep",
# ),
# ModelConfig(
# description="Stable Diffusion 2.1",
# short_name="SD-2.1",
# config_path="configs/stable-diffusion-v2-inference.yaml",
# weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.ckpt",
# default_image_size=512,
# alias="sd21",
# ),
# ModelConfig(
# description="Stable Diffusion 2.1 - Inpainting",
# short_name="SD-2.1-inpaint",
# config_path="configs/stable-diffusion-v2-inpainting-inference.yaml",
# weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-inpainting/resolve/main/512-inpainting-ema.ckpt",
# default_image_size=512,
# alias="sd21in",
# ),
# ModelConfig(
# description="Stable Diffusion 2.1 v - 768x768",
# short_name="SD-2.1-v",
# config_path="configs/stable-diffusion-v2-inference-v.yaml",
# weights_url="https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt",
# default_image_size=768,
# forced_attn_precision="fp32",
# alias="sd21v",
# ),
# ModelConfig(
# description="Instruct Pix2Pix - Photo Editing",
# short_name="instruct-pix2pix",
# config_path="configs/instruct-pix2pix.yaml",
# weights_url="https://huggingface.co/imaginairy/instruct-pix2pix/resolve/ea0009b3d0d4888f410a40bd06d69516d0b5a577/instruct-pix2pix-00-22000-pruned.ckpt",
# default_image_size=512,
# default_negative_prompt="",
# alias="edit",
# ),
ModelConfig(
description="OpenJourney V1",
short_name="openjourney-v1",
@ -176,14 +176,14 @@ CONTROLNET_CONFIGS = [
short_name="canny15",
control_type="canny",
config_path="configs/control-net-v15.yaml",
weights_url="https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/69fc48b9cbd98661f6d0288dc59b59a5ccb32a6b/control_v11p_sd15_canny.pth",
weights_url="https://huggingface.co/lllyasviel/control_v11p_sd15_canny/resolve/115a470d547982438f70198e353a921996e2e819/diffusion_pytorch_model.fp16.safetensors",
alias="canny",
),
ControlNetConfig(
short_name="depth15",
control_type="depth",
config_path="configs/control-net-v15.yaml",
weights_url="https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/69fc48b9cbd98661f6d0288dc59b59a5ccb32a6b/control_v11f1p_sd15_depth.pth",
weights_url="https://huggingface.co/lllyasviel/control_v11f1p_sd15_depth/resolve/539f99181d33db39cf1af2e517cd8056785f0a87/diffusion_pytorch_model.fp16.safetensors",
alias="depth",
),
ControlNetConfig(
@ -233,7 +233,7 @@ CONTROLNET_CONFIGS = [
short_name="details15",
control_type="details",
config_path="configs/control-net-v15.yaml",
weights_url="https://huggingface.co/lllyasviel/ControlNet-v1-1/resolve/69fc48b9cbd98661f6d0288dc59b59a5ccb32a6b/control_v11f1e_sd15_tile.pth",
weights_url="https://huggingface.co/lllyasviel/control_v11f1e_sd15_tile/resolve/3f877705c37010b7221c3d10743307d6b5b6efac/diffusion_pytorch_model.bin",
alias="details",
),
ControlNetConfig(
@ -254,16 +254,17 @@ for m in CONTROLNET_CONFIGS:
CONTROLNET_CONFIG_SHORTCUTS[m.short_name] = m
SAMPLER_TYPE_OPTIONS = [
"plms",
# "plms",
"ddim",
"k_dpm_fast",
"k_dpm_adaptive",
"k_lms",
"k_dpm_2",
"k_dpm_2_a",
"k_dpmpp_2m",
"k_dpmpp_2s_a",
"k_euler",
"k_euler_a",
"k_heun",
"k_dpmpp_2m"
# "k_dpm_fast",
# "k_dpm_adaptive",
# "k_lms",
# "k_dpm_2",
# "k_dpm_2_a",
# "k_dpmpp_2m",
# "k_dpmpp_2s_a",
# "k_euler",
# "k_euler_a",
# "k_heun",
]


@ -121,7 +121,7 @@ def model_latent_to_pillow_img(latent: torch.Tensor) -> PIL.Image.Image:
if latent.shape[0] != 1:
raise ValueError("Only batch size 1 supported")
model = get_current_diffusion_model()
img_t = model.decode_first_stage(latent)
img_t = model.lda.decode(latent)
return torch_img_to_pillow_img(img_t)


@ -3,7 +3,7 @@ import os
import re
import sys
import urllib.parse
from functools import wraps
from functools import lru_cache, wraps
import requests
import torch
@ -13,6 +13,7 @@ from huggingface_hub import (
try_to_load_from_cache,
)
from omegaconf import OmegaConf
from refiners.foundationals.latent_diffusion import SD1ControlnetAdapter, SD1UNet
from safetensors.torch import load_file
from imaginairy import config as iconfig
@ -21,6 +22,7 @@ from imaginairy.modules import attention
from imaginairy.paths import PKG_ROOT
from imaginairy.utils import get_device, instantiate_from_config
from imaginairy.utils.model_cache import memory_managed_model
from imaginairy.weight_management.conversion import cast_weights
logger = logging.getLogger(__name__)
@ -232,6 +234,150 @@ def _get_diffusion_model(
return diffusion_model
def get_diffusion_model_refiners(
weights_location=iconfig.DEFAULT_MODEL,
config_path="configs/stable-diffusion-v1.yaml",
control_weights_locations=None,
half_mode=None,
for_inpainting=False,
for_training=False,
):
"""
Load a diffusion model.
Weights location may also be a shortcut name, e.g. "SD-1.5"
"""
try:
return _get_diffusion_model_refiners(
weights_location,
config_path,
half_mode,
for_inpainting,
control_weights_locations=control_weights_locations,
for_training=for_training,
)
except HuggingFaceAuthorizationError as e:
if for_inpainting:
logger.warning(
f"Failed to load inpainting model. Attempting to fall-back to standard model. {e!s}"
)
return _get_diffusion_model_refiners(
iconfig.DEFAULT_MODEL,
config_path,
half_mode,
for_inpainting=False,
for_training=for_training,
control_weights_locations=control_weights_locations,
)
raise
def _get_diffusion_model_refiners(
weights_location=iconfig.DEFAULT_MODEL,
config_path="configs/stable-diffusion-v1.yaml",
half_mode=None,
for_inpainting=False,
for_training=False,
control_weights_locations=None,
device=None,
dtype=torch.float16,
):
"""
Load a diffusion model.
Weights location may also be a shortcut name, e.g. "SD-1.5"
"""
sd = _get_diffusion_model_refiners_only(
weights_location=weights_location,
config_path=config_path,
for_inpainting=for_inpainting,
for_training=for_training,
device=device,
dtype=dtype,
)
return sd
@lru_cache(maxsize=1)
def _get_diffusion_model_refiners_only(
weights_location=iconfig.DEFAULT_MODEL,
config_path="configs/stable-diffusion-v1.yaml",
for_inpainting=False,
for_training=False,
control_weights_locations=None,
device=None,
dtype=torch.float16,
):
"""
Load a diffusion model.
Weights location may also be a shortcut name, e.g. "SD-1.5"
"""
from imaginairy.modules.refiners_sd import (
SD1AutoencoderSliced,
StableDiffusion_1,
StableDiffusion_1_Inpainting,
)
global MOST_RECENTLY_LOADED_MODEL
device = device or get_device()
(
model_config,
weights_location,
config_path,
control_weights_locations,
) = resolve_model_paths(
weights_path=weights_location,
config_path=config_path,
control_weights_paths=control_weights_locations,
for_inpainting=for_inpainting,
for_training=for_training,
)
# some models need the attention calculated in float32
if model_config is not None:
attention.ATTENTION_PRECISION_OVERRIDE = model_config.forced_attn_precision
else:
attention.ATTENTION_PRECISION_OVERRIDE = "default"
(
vae_weights,
unet_weights,
text_encoder_weights,
) = load_stable_diffusion_compvis_weights(weights_location)
if for_inpainting:
unet = SD1UNet(in_channels=9)
StableDiffusionCls = StableDiffusion_1_Inpainting
else:
unet = SD1UNet(in_channels=4)
StableDiffusionCls = StableDiffusion_1
logger.debug(f"Using class {StableDiffusionCls.__name__}")
sd = StableDiffusionCls(
device=device, dtype=dtype, lda=SD1AutoencoderSliced(), unet=unet
)
logger.debug("Loading VAE")
sd.lda.load_state_dict(vae_weights)
logger.debug("Loading text encoder")
sd.clip_text_encoder.load_state_dict(text_encoder_weights)
logger.debug("Loading UNet")
sd.unet.load_state_dict(unet_weights, strict=False)
logger.debug(f"'{weights_location}' Loaded")
MOST_RECENTLY_LOADED_MODEL = sd
sd.set_self_attention_guidance(enable=True)
return sd
@memory_managed_model("stable-diffusion", memory_usage_mb=1951)
def _load_diffusion_model(config_path, weights_location, half_mode, for_training):
model_config = OmegaConf.load(f"{PKG_ROOT}/{config_path}")
@ -250,6 +396,35 @@ def _load_diffusion_model(config_path, weights_location, half_mode, for_training
return model
def load_controlnet_adapter(
name,
control_weights_location,
target_unet,
scale=1.0,
half_mode=False,
):
controlnet_state_dict = load_state_dict(
control_weights_location, half_mode=half_mode
)
controlnet_state_dict = cast_weights(
source_weights=controlnet_state_dict,
source_model_name="controlnet-1-1",
source_component_name="all",
source_format="diffusers",
dest_format="refiners",
)
for key in controlnet_state_dict:
controlnet_state_dict[key] = controlnet_state_dict[key].to(
device=target_unet.device, dtype=target_unet.dtype
)
adapter = SD1ControlnetAdapter(
target=target_unet, name=name, scale=scale, weights=controlnet_state_dict
)
return adapter
@memory_managed_model("controlnet")
def load_controlnet(control_weights_location, half_mode):
controlnet_state_dict = load_state_dict(
@ -447,3 +622,164 @@ def extract_huggingface_repo_commit_file_from_url(url):
filepath = "/".join(path_components[4:])
return repo, commit_hash, filepath
def download_diffusers_weights(repo, sub, filename):
from imaginairy.model_manager import get_cached_url_path
url = f"https://huggingface.co/{repo}/resolve/main/{sub}/{filename}"
return get_cached_url_path(url, category="weights")
@lru_cache
def load_stable_diffusion_diffusers_weights(diffusers_repo, device=None):
from imaginairy.utils import get_device
from imaginairy.weight_management.conversion import cast_weights
from imaginairy.weight_management.utils import (
COMPONENT_NAMES,
FORMAT_NAMES,
MODEL_NAMES,
)
if device is None:
device = get_device()
vae_weights_path = download_diffusers_weights(
repo=diffusers_repo, sub="vae", filename="diffusion_pytorch_model.safetensors"
)
vae_weights = open_weights(vae_weights_path, device=device)
vae_weights = cast_weights(
source_weights=vae_weights,
source_model_name=MODEL_NAMES.SD15,
source_component_name=COMPONENT_NAMES.VAE,
source_format=FORMAT_NAMES.DIFFUSERS,
dest_format=FORMAT_NAMES.REFINERS,
)
unet_weights_path = download_diffusers_weights(
repo=diffusers_repo, sub="unet", filename="diffusion_pytorch_model.safetensors"
)
unet_weights = open_weights(unet_weights_path, device=device)
unet_weights = cast_weights(
source_weights=unet_weights,
source_model_name=MODEL_NAMES.SD15,
source_component_name=COMPONENT_NAMES.UNET,
source_format=FORMAT_NAMES.DIFFUSERS,
dest_format=FORMAT_NAMES.REFINERS,
)
text_encoder_weights_path = download_diffusers_weights(
repo=diffusers_repo, sub="text_encoder", filename="model.safetensors"
)
text_encoder_weights = open_weights(text_encoder_weights_path, device=device)
text_encoder_weights = cast_weights(
source_weights=text_encoder_weights,
source_model_name=MODEL_NAMES.SD15,
source_component_name=COMPONENT_NAMES.TEXT_ENCODER,
source_format=FORMAT_NAMES.DIFFUSERS,
dest_format=FORMAT_NAMES.REFINERS,
)
return vae_weights, unet_weights, text_encoder_weights
def open_weights(filepath, device=None):
from imaginairy.utils import get_device
if device is None:
device = get_device()
if "safetensor" in filepath.lower():
from refiners.fluxion.utils import safe_open
with safe_open(path=filepath, framework="pytorch", device=device) as tensors:
state_dict = {key: tensors.get_tensor(key) for key in tensors}
else:
import torch
state_dict = torch.load(filepath, map_location=device)
while "state_dict" in state_dict:
state_dict = state_dict["state_dict"]
return state_dict
@lru_cache
def load_stable_diffusion_compvis_weights(weights_url):
from imaginairy.model_manager import get_cached_url_path
from imaginairy.utils import get_device
from imaginairy.weight_management.conversion import cast_weights
from imaginairy.weight_management.utils import (
COMPONENT_NAMES,
FORMAT_NAMES,
MODEL_NAMES,
)
weights_path = get_cached_url_path(weights_url, category="weights")
logger.info(f"Loading weights from {weights_path}")
state_dict = open_weights(weights_path, device=get_device())
text_encoder_prefix = "cond_stage_model."
cut_start = len(text_encoder_prefix)
text_encoder_state_dict = {
k[cut_start:]: v
for k, v in state_dict.items()
if k.startswith(text_encoder_prefix)
}
text_encoder_state_dict = cast_weights(
source_weights=text_encoder_state_dict,
source_model_name=MODEL_NAMES.SD15,
source_component_name=COMPONENT_NAMES.TEXT_ENCODER,
source_format=FORMAT_NAMES.COMPVIS,
dest_format=FORMAT_NAMES.DIFFUSERS,
)
text_encoder_state_dict = cast_weights(
source_weights=text_encoder_state_dict,
source_model_name=MODEL_NAMES.SD15,
source_component_name=COMPONENT_NAMES.TEXT_ENCODER,
source_format=FORMAT_NAMES.DIFFUSERS,
dest_format=FORMAT_NAMES.REFINERS,
)
vae_prefix = "first_stage_model."
cut_start = len(vae_prefix)
vae_state_dict = {
k[cut_start:]: v for k, v in state_dict.items() if k.startswith(vae_prefix)
}
vae_state_dict = cast_weights(
source_weights=vae_state_dict,
source_model_name=MODEL_NAMES.SD15,
source_component_name=COMPONENT_NAMES.VAE,
source_format=FORMAT_NAMES.COMPVIS,
dest_format=FORMAT_NAMES.DIFFUSERS,
)
vae_state_dict = cast_weights(
source_weights=vae_state_dict,
source_model_name=MODEL_NAMES.SD15,
source_component_name=COMPONENT_NAMES.VAE,
source_format=FORMAT_NAMES.DIFFUSERS,
dest_format=FORMAT_NAMES.REFINERS,
)
unet_prefix = "model."
cut_start = len(unet_prefix)
unet_state_dict = {
k[cut_start:]: v for k, v in state_dict.items() if k.startswith(unet_prefix)
}
unet_state_dict = cast_weights(
source_weights=unet_state_dict,
source_model_name=MODEL_NAMES.SD15,
source_component_name=COMPONENT_NAMES.UNET,
source_format=FORMAT_NAMES.COMPVIS,
dest_format=FORMAT_NAMES.DIFFUSERS,
)
unet_state_dict = cast_weights(
source_weights=unet_state_dict,
source_model_name=MODEL_NAMES.SD15,
source_component_name=COMPONENT_NAMES.UNET,
source_format=FORMAT_NAMES.DIFFUSERS,
dest_format=FORMAT_NAMES.REFINERS,
)
return vae_state_dict, unet_state_dict, text_encoder_state_dict
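
For context, a sketch of loading a model through the new refiners entry point (keyword names match the signatures above; a repeated call returns the same object because _get_diffusion_model_refiners_only is wrapped in lru_cache):

from imaginairy.model_manager import get_diffusion_model_refiners

# "SD-1.5" is resolved to weight URLs by resolve_model_paths; for_inpainting
# selects the 9-channel UNet and the StableDiffusion_1_Inpainting class.
sd = get_diffusion_model_refiners("SD-1.5", for_inpainting=True)
sd.set_tile_mode("x")  # horizontal seamless tiling via TileModeMixin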


@ -0,0 +1,184 @@
import math
from typing import Literal
import torch
from refiners.fluxion.layers.chain import ChainError
from refiners.foundationals.latent_diffusion import (
StableDiffusion_1 as RefinerStableDiffusion_1,
StableDiffusion_1_Inpainting as RefinerStableDiffusion_1_Inpainting,
)
from refiners.foundationals.latent_diffusion.stable_diffusion_1.model import (
SD1Autoencoder,
)
from torch import Tensor, nn
from torch.nn import functional as F
from torch.nn.modules.utils import _pair
from imaginairy.feather_tile import rebuild_image, tile_image
from imaginairy.modules.autoencoder import logger
TileModeType = Literal["", "x", "y", "xy"]
def _tile_mode_conv2d_conv_forward(
self, input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor # noqa
):
if self.padding_modeX == self.padding_modeY:
self.padding_mode = self.padding_modeX
return self._orig_conv_forward(input, weight, bias)
w1 = F.pad(input, self.paddingX, mode=self.padding_modeX)
del input
w2 = F.pad(w1, self.paddingY, mode=self.padding_modeY)
del w1
return F.conv2d(w2, weight, bias, self.stride, _pair(0), self.dilation, self.groups)
class TileModeMixin(nn.Module):
def set_tile_mode(self, tile_mode: TileModeType = ""):
"""
For creating seamless tile images.
Args:
tile_mode: One of "", "x", "y", "xy". If "x", the image will be tiled horizontally. If "y", the image will be
tiled vertically. If "xy", the image will be tiled both horizontally and vertically.
"""
tile_x = "x" in tile_mode
tile_y = "y" in tile_mode
for m in self.modules():
if isinstance(m, nn.Conv2d):
if not hasattr(m, "_orig_conv_forward"):
# patch with a function that can handle tiling in a single direction
m._initial_padding_mode = m.padding_mode
m._orig_conv_forward = m._conv_forward
m._conv_forward = _tile_mode_conv2d_conv_forward.__get__(
m, nn.Conv2d
)
m.padding_modeX = "circular" if tile_x else "constant"
m.padding_modeY = "circular" if tile_y else "constant"
if m.padding_modeY == m.padding_modeX:
m.padding_mode = m.padding_modeX
m.paddingX = (
m._reversed_padding_repeated_twice[0],
m._reversed_padding_repeated_twice[1],
0,
0,
)
m.paddingY = (
0,
0,
m._reversed_padding_repeated_twice[2],
m._reversed_padding_repeated_twice[3],
)
class StableDiffusion_1(TileModeMixin, RefinerStableDiffusion_1):
pass
class StableDiffusion_1_Inpainting(TileModeMixin, RefinerStableDiffusion_1_Inpainting):
def compute_self_attention_guidance(
self,
x: Tensor,
noise: Tensor,
step: int,
*,
clip_text_embedding: Tensor,
**kwargs: Tensor,
) -> Tensor:
sag = self._find_sag_adapter()
assert sag is not None
assert self.mask_latents is not None
assert self.target_image_latents is not None
degraded_latents = sag.compute_degraded_latents(
scheduler=self.scheduler,
latents=x,
noise=noise,
step=step,
classifier_free_guidance=True,
)
negative_embedding, _ = clip_text_embedding.chunk(2)
timestep = self.scheduler.timesteps[step].unsqueeze(dim=0)
self.set_unet_context(
timestep=timestep, clip_text_embedding=negative_embedding, **kwargs
)
x = torch.cat(
tensors=(degraded_latents, self.mask_latents, self.target_image_latents),
dim=1,
)
degraded_noise = self.unet(x)
return sag.scale * (noise - degraded_noise)
class SD1AutoencoderSliced(SD1Autoencoder):
max_chunk_size = 2048
min_chunk_size = 64
def decode(self, x):
while self.__class__.max_chunk_size > self.__class__.min_chunk_size:
if self.max_chunk_size**2 > x.shape[2] * x.shape[3]:
try:
return self.decode_all_at_once(x)
except ChainError as e:
if "OutOfMemoryError" not in str(e):
raise
self.__class__.max_chunk_size = (
int(math.sqrt(x.shape[2] * x.shape[3])) // 2
)
logger.info(
f"Ran out of memory. Trying tiled decode with chunk size {self.__class__.max_chunk_size}"
)
else:
try:
return self.decode_sliced(x, chunk_size=self.max_chunk_size)
except ChainError as e:
if "OutOfMemoryError" not in str(e):
raise
self.__class__.max_chunk_size = self.max_chunk_size // 2
self.__class__.max_chunk_size = max(
self.__class__.max_chunk_size, self.__class__.min_chunk_size
)
logger.info(
f"Ran out of memory. Trying tiled decode with chunk size {self.__class__.max_chunk_size}"
)
raise RuntimeError("Could not decode image")
def decode_all_at_once(self, x: Tensor) -> Tensor:
decoder = self[1]
x = decoder(x / self.encoder_scale)
return x
def decode_sliced(self, x, chunk_size=128):
"""
decodes the tensor in slices.
This results in image portions that don't exactly match, so we overlap, feather, and merge to reduce
(but not completely eliminate) the impact.
"""
b, c, h, w = x.size()
final_tensor = torch.zeros([1, 3, h * 8, w * 8], device=x.device)
for x_latent in x.split(1):
decoded_chunks = []
overlap_pct = 0.5
chunks = tile_image(
x_latent, tile_size=chunk_size, overlap_percent=overlap_pct
)
for latent_chunk in chunks:
# latent_chunk = self.post_quant_conv(latent_chunk)
dec = self.decode_all_at_once(latent_chunk)
decoded_chunks.append(dec)
final_tensor = rebuild_image(
decoded_chunks,
base_img=final_tensor,
tile_size=chunk_size * 8,
overlap_percent=overlap_pct,
)
return final_tensor
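
As a standalone illustration of the tile-mode trick above (not part of the diff): swapping a convolution's padding to "circular" makes activations wrap around the image border, which is what lets the decoded image tile seamlessly along that axis.

import torch
import torch.nn.functional as F

x = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
# zero padding: the borders see zeros, so opposite edges won't match up
zero_padded = F.pad(x, (1, 1, 0, 0), mode="constant")
# circular padding in x only (what padding_modeX="circular" produces above):
# the left border is filled from the rightmost column and vice versa
wrap_padded = F.pad(x, (1, 1, 0, 0), mode="circular")
print(wrap_padded[0, 0, 0])  # tensor([3., 0., 1., 2., 3., 0.])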


@ -64,7 +64,7 @@ class KDiffusionSampler(ImageSampler, ABC):
super().__init__(model)
denoiseer_cls = (
StandardCompVisVDenoiser
if model.parameterization == "v"
if getattr(model, "parameterization", "") == "v"
else StandardCompVisDenoiser
)
self.cv_denoiser = denoiseer_cls(model)


@ -21,7 +21,7 @@ def get_device() -> str:
return "cuda"
if torch.backends.mps.is_available():
return "mps:0"
return "mps"
return "cpu"
@ -250,5 +250,5 @@ def check_torch_version():
"""
from packaging import version
if version.parse(torch.__version__) >= version.parse("2.0.0"):
raise RuntimeError("ImaginAIry is not compatible with torch>=2.0.0")
if version.parse(torch.__version__) < version.parse("2.0.0"):
raise RuntimeError("ImaginAIry is not compatible with torch<2.0.0")


@ -0,0 +1,113 @@
import os.path
from dataclasses import dataclass
from functools import lru_cache
from typing import TYPE_CHECKING
from imaginairy.weight_management import utils
if TYPE_CHECKING:
from torch import Tensor
@dataclass
class WeightMap:
model_name: str
component_name: str
source_format: str
dest_format: str
def __post_init__(self):
self.model_name = self.model_name.replace("_", "-")
self.component_name = self.component_name.replace("_", "-")
self.source_format = self.source_format.replace("_", "-")
self.dest_format = self.dest_format.replace("_", "-")
self._loaded_mapping_info = None
@property
def filename(self):
return f"{self.model_name}_{self.component_name}_{self.source_format}_TO_{self.dest_format}.json"
@property
def filepath(self):
return os.path.join(utils.WEIGHT_MAPS_PATH, self.filename)
@property
def _mapping_info(self):
if self._loaded_mapping_info is None:
import json
with open(self.filepath) as f:
self._loaded_mapping_info = json.load(f)
return self._loaded_mapping_info
@property
def mapping(self):
return self._mapping_info["mapping"]
@property
def source_aliases(self):
return self._mapping_info.get("source_aliases", {})
@property
def ignorable_prefixes(self):
return self._mapping_info.get("ignorable_prefixes", [])
@property
def reshapes(self):
return self._mapping_info.get("reshapes", {})
@property
def all_valid_prefixes(self):
return (
set(self.mapping.keys())
| set(self.source_aliases.keys())
| set(self.ignorable_prefixes)
)
def could_convert(self, source_weights):
source_keys = set(source_weights.keys())
return source_keys.issubset(self.all_valid_prefixes)
def cast_weights(self, source_weights):
converted_state_dict: dict[str, Tensor] = {}
for source_key in source_weights:
source_prefix, suffix = source_key.rsplit(sep=".", maxsplit=1)
# handle aliases
source_prefix = self.source_aliases.get(source_prefix, source_prefix)
try:
target_prefix = self.mapping[source_prefix]
except KeyError:
continue
target_key = ".".join([target_prefix, suffix])
converted_state_dict[target_key] = source_weights[source_key]
for key, new_shape in self.reshapes.items():
converted_state_dict[key] = converted_state_dict[key].reshape(new_shape)
return converted_state_dict
@lru_cache(maxsize=None)
def load_state_dict_conversion_maps():
import json
conversion_maps = {}
from importlib.resources import files
for file in files("imaginairy").joinpath("weight_conversion/maps").iterdir():
if file.is_file() and file.suffix == ".json":
conversion_maps[file.name] = json.loads(file.read_text())
return conversion_maps
def cast_weights(
source_weights, source_model_name, source_component_name, source_format, dest_format
):
weight_map = WeightMap(
model_name=source_model_name,
component_name=source_component_name,
source_format=source_format,
dest_format=dest_format,
)
return weight_map.cast_weights(source_weights)
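
A toy sketch of what WeightMap.cast_weights does with its mapping dict (the key below is taken from the controlnet map later in this commit; the tensor shape is made up):

import torch

mapping = {"time_embedding.linear_1": "TimestepEncoder.RangeEncoder.Linear_1"}
source = {"time_embedding.linear_1.weight": torch.zeros(1280, 320)}

converted = {}
for source_key, tensor in source.items():
    prefix, suffix = source_key.rsplit(".", maxsplit=1)  # split off "weight"/"bias"
    converted[f"{mapping[prefix]}.{suffix}"] = tensor

print(list(converted))  # ['TimestepEncoder.RangeEncoder.Linear_1.weight']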


@ -0,0 +1,283 @@
import torch
from transformers import CLIPTextModelWithProjection
from imaginairy.model_manager import get_diffusion_model
from imaginairy.utils import get_device
from imaginairy.weight_management import utils
def trace_execution_order(module, args, func_name=None):
"""
Trace the execution order of a torch module and store full hierarchical state_dict paths.
:param module: The module to trace.
:param args: The arguments to pass to the module.
:return: A list of full hierarchical state_dict paths in the order they were used.
"""
execution_order = []
hooks = []
def add_hooks(module, prefix=""):
for name, submodule in module.named_children():
# Construct the hierarchical name
module_full_name = f"{prefix}.{name}" if prefix else name
def log(mod, inp, out, module_full_name=module_full_name):
hook(mod, module_full_name)
hooks.append(submodule.register_forward_hook(log))
# Recursively add hooks to all child modules
add_hooks(submodule, module_full_name)
def hook(module, module_full_name):
# Retrieve state_dict and iterate over its items to get full paths
for name, param in module.named_parameters(recurse=False):
full_path = f"{module_full_name}.{name}"
execution_order.append(full_path)
for name, buffer in module.named_buffers(recurse=False):
print(name)
full_path = f"{module_full_name}.{name}"
execution_order.append(full_path)
# Initialize hooks
add_hooks(module)
# Execute the module
with torch.no_grad():
if func_name is not None:
getattr(module, func_name)(*args)
else:
module(*args)
# Remove hooks
for hook in hooks:
hook.remove()
return execution_order
def trace_compvis_execution_order(device=None):
model = get_diffusion_model()._mmmw_load_model()
# vae
image_size = 256
img_in = torch.randn(1, 3, image_size, image_size).to(get_device())
vae_execution_order = trace_execution_order(
model.first_stage_model, (img_in,), func_name="encode_all_at_once"
)
latent_in = torch.randn(1, 4, 32, 32).to(get_device())
vae_execution_order.extend(
trace_execution_order(model.first_stage_model, (latent_in,), func_name="decode")
)
# text encoder model
text = "hello"
text_execution_order = trace_execution_order(model.cond_stage_model, (text,))
# unet
latent_in = torch.randn(1, 4, 32, 32).to(get_device())
text_embedding = [torch.randn(1, 77, 768).to(get_device())]
timestep = torch.tensor(data=[0]).to(get_device())
unet_execution_order = trace_execution_order(
model.model, (latent_in, timestep, text_embedding, text_embedding)
)
return vae_execution_order, text_execution_order, unet_execution_order
def trace_sd15_diffusers_execution_order(device=None):
from diffusers import AutoencoderKL, UNet2DConditionModel
if device is None:
device = get_device()
# vae
image_size = 256
img_in = torch.randn(1, 3, image_size, image_size).to(device)
vae = AutoencoderKL.from_pretrained(
pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5", subfolder="vae"
).to(device)
vae_execution_order = trace_execution_order(vae, (img_in,))
# text encoder model
text_encoder = CLIPTextModelWithProjection.from_pretrained(
pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5",
subfolder="text_encoder",
).to(device)
tokens = torch.Tensor(
[
[
49406,
3306,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
49407,
]
]
)
tokens = tokens.to(device).to(torch.int64)
text_execution_order = trace_execution_order(text_encoder, (tokens,))
# unet
latent_in = torch.randn(1, 4, 32, 32).to(device)
text_embedding = torch.randn(1, 77, 768).to(device)
timestep = torch.tensor(data=[0]).to(device)
unet = UNet2DConditionModel.from_pretrained(
pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5", subfolder="unet"
).to(device)
unet_execution_order = trace_execution_order(
unet, (latent_in, timestep, text_embedding)
)
return vae_execution_order, text_execution_order, unet_execution_order
def calc_and_save_compvis_traces():
model_name = "stable-diffusion-1-5"
format_name = "compvis"
(
vae_execution_order,
text_execution_order,
unet_execution_order,
) = trace_compvis_execution_order()
process_execution_order(
model_name=model_name,
format_name=format_name,
component_name="vae",
execution_order=vae_execution_order,
)
process_execution_order(
model_name=model_name,
format_name=format_name,
component_name="text",
execution_order=text_execution_order,
)
process_execution_order(
model_name=model_name,
format_name=format_name,
component_name="unet",
execution_order=unet_execution_order,
)
def calc_and_save_sd15_diffusers_traces():
model_name = "stable-diffusion-1-5"
format_name = "diffusers"
(
vae_execution_order,
text_execution_order,
unet_execution_order,
) = trace_sd15_diffusers_execution_order()
process_execution_order(
model_name=model_name,
format_name=format_name,
component_name="vae",
execution_order=vae_execution_order,
)
process_execution_order(
model_name=model_name,
format_name=format_name,
component_name="text",
execution_order=text_execution_order,
)
process_execution_order(
model_name=model_name,
format_name=format_name,
component_name="unet",
execution_order=unet_execution_order,
)
def process_execution_order(model_name, component_name, format_name, execution_order):
prefixes = utils.prefixes_only(execution_order)
utils.save_model_info(
model_name,
component_name,
format_name,
"prefix-execution-order",
prefixes,
)
if __name__ == "__main__":
calc_and_save_sd15_diffusers_traces()
calc_and_save_compvis_traces()
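
A quick sanity check of trace_execution_order on a toy module (standalone apart from the function defined above): hooks fire in forward order, so parameter paths come out in execution order rather than state_dict order.

import torch
from torch import nn

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
order = trace_execution_order(net, (torch.randn(1, 4),))
print(order)  # ['0.weight', '0.bias', '2.weight', '2.bias']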


@ -0,0 +1,57 @@
import itertools
import json
import os
from collections import defaultdict
from imaginairy.weight_management.utils import WEIGHT_INFO_PATH, WEIGHT_MAPS_PATH
def generate_conversion_maps():
execution_orders_map = defaultdict(dict)
for filename in os.listdir(WEIGHT_INFO_PATH):
if not filename.endswith("prefix-execution-order.json"):
continue
base_name = filename.split(".", 1)[0]
model_name, component_name, format_name = base_name.split("_")
execution_orders_map[(model_name, component_name)][format_name] = filename
for (model_name, component_name), format_lookup in execution_orders_map.items():
if len(format_lookup) <= 1:
continue
formats = list(format_lookup.keys())
for format_a, format_b in itertools.permutations(formats, 2):
filename_a = format_lookup[format_a]
filename_b = format_lookup[format_b]
with open(os.path.join(WEIGHT_INFO_PATH, filename_a)) as f:
execution_order_a = json.load(f)
with open(os.path.join(WEIGHT_INFO_PATH, filename_b)) as f:
execution_order_b = json.load(f)
mapping_filename = (
f"{model_name}_{component_name}_{format_a}_TO_{format_b}.json"
)
mapping_filepath = os.path.join(WEIGHT_MAPS_PATH, mapping_filename)
print(f"Creating {mapping_filename}...")
if os.path.exists(mapping_filepath):
continue
if len(execution_order_a) != len(execution_order_b):
print(
f"Could not create {mapping_filename} - Execution orders for {format_a} and {format_b} have different lengths"
)
continue
mapping = dict(zip(execution_order_a, execution_order_b))
mapping_info = {
"mapping": mapping,
"source_aliases": {},
"ignorable_prefixes": [],
}
with open(mapping_filepath, "w") as f:
json.dump(mapping_info, f, indent=2)
if __name__ == "__main__":
generate_conversion_maps()


@ -0,0 +1,137 @@
import safetensors
from imaginairy.model_manager import (
get_cached_url_path,
open_weights,
resolve_model_paths,
)
from imaginairy.weight_management import utils
from imaginairy.weight_management.pattern_collapse import find_state_dict_key_patterns
from imaginairy.weight_management.utils import save_model_info
def save_compvis_patterns():
(
model_metadata,
weights_url,
config_path,
control_weights_paths,
) = resolve_model_paths(
weights_path="openjourney-v1",
)
weights_path = get_cached_url_path(weights_url, category="weights")
with safetensors.safe_open(weights_path, "pytorch") as f:
weights_keys = f.keys()
text_encoder_prefix = "cond_stage_model.transformer.text_model"
text_encoder_keys = [k for k in weights_keys if k.startswith(text_encoder_prefix)]
save_weight_info(
model_name=utils.MODEL_NAMES.SD15,
component_name=utils.COMPONENT_NAMES.TEXT_ENCODER,
format_name=utils.FORMAT_NAMES.COMPVIS,
weights_keys=text_encoder_keys,
)
vae_prefix = "first_stage_model"
vae_keys = [k for k in weights_keys if k.startswith(vae_prefix)]
save_weight_info(
model_name=utils.MODEL_NAMES.SD15,
component_name=utils.COMPONENT_NAMES.VAE,
format_name=utils.FORMAT_NAMES.COMPVIS,
weights_keys=vae_keys,
)
unet_prefix = "model.diffusion_model"
unet_keys = [k for k in weights_keys if k.startswith(unet_prefix)]
save_weight_info(
model_name=utils.MODEL_NAMES.SD15,
component_name=utils.COMPONENT_NAMES.UNET,
format_name=utils.FORMAT_NAMES.COMPVIS,
weights_keys=unet_keys,
)
def save_diffusers_patterns():
save_weight_info(
model_name=utils.MODEL_NAMES.SD15,
component_name=utils.COMPONENT_NAMES.VAE,
format_name=utils.FORMAT_NAMES.DIFFUSERS,
weights_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/vae/diffusion_pytorch_model.fp16.safetensors",
)
save_weight_info(
model_name=utils.MODEL_NAMES.SD15,
component_name=utils.COMPONENT_NAMES.UNET,
format_name=utils.FORMAT_NAMES.DIFFUSERS,
weights_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/unet/diffusion_pytorch_model.fp16.safetensors",
)
save_weight_info(
model_name=utils.MODEL_NAMES.SD15,
component_name=utils.COMPONENT_NAMES.TEXT_ENCODER,
format_name=utils.FORMAT_NAMES.DIFFUSERS,
weights_url="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/text_encoder/model.fp16.safetensors",
)
def save_lora_patterns():
filepath = "/Users/bryce/projects/sandbox-img-gen/refiners/weights/pytorch_lora_weights-refiners.safetensors"
state_dict = open_weights(filepath, device="cpu")
save_weight_info(
model_name=utils.MODEL_NAMES.SD15,
component_name=utils.COMPONENT_NAMES.LORA,
format_name=utils.FORMAT_NAMES.REFINERS,
weights_keys=list(state_dict.keys()),
)
save_weight_info(
model_name=utils.MODEL_NAMES.SD15,
component_name=utils.COMPONENT_NAMES.LORA,
format_name=utils.FORMAT_NAMES.DIFFUSERS,
weights_url="https://huggingface.co/pcuenq/pokemon-lora/resolve/main/pytorch_lora_weights.bin",
)
def save_weight_info(
model_name, component_name, format_name, weights_url=None, weights_keys=None
):
if weights_keys is None and weights_url is None:
msg = "Either weights_keys or weights_url must be provided"
raise ValueError(msg)
if weights_keys is None:
weights_path = get_cached_url_path(weights_url, category="weights")
state_dict = open_weights(weights_path, device="cpu")
weights_keys = list(state_dict.keys())
# prefixes = utils.prefixes_only(weights_keys)
save_model_info(
model_name=model_name,
component_name=component_name,
format_name=format_name,
info_type="weights_keys",
data=weights_keys,
)
patterns = find_state_dict_key_patterns(weights_keys)
save_model_info(
model_name=model_name,
component_name=component_name,
format_name=format_name,
info_type="patterns",
data=patterns,
)
def save_patterns():
save_lora_patterns()
# save_compvis_patterns()
# save_diffusers_patterns()
if __name__ == "__main__":
save_patterns()


@ -0,0 +1,195 @@
{
"mapping": {
"time_embedding.linear_1": "TimestepEncoder.RangeEncoder.Linear_1",
"time_embedding.linear_2": "TimestepEncoder.RangeEncoder.Linear_2",
"down_blocks.2.resnets.0.time_emb_proj": "DownBlocks.Chain_8.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.2.resnets.1.time_emb_proj": "DownBlocks.Chain_9.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.3.resnets.0.time_emb_proj": "DownBlocks.Chain_11.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.3.resnets.1.time_emb_proj": "DownBlocks.Chain_12.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"mid_block.resnets.0.time_emb_proj": "MiddleBlock.ResidualBlock_1.Chain.RangeAdapter2d.Chain.Linear",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"mid_block.resnets.1.time_emb_proj": "MiddleBlock.ResidualBlock_2.Chain.RangeAdapter2d.Chain.Linear",
"conv_in": "DownBlocks.Chain_1.Conv2d",
"controlnet_cond_embedding.conv_in": "DownBlocks.Chain_1.Residual.ConditionEncoder.Chain_1.Conv2d",
"controlnet_cond_embedding.blocks.0": "DownBlocks.Chain_1.Residual.ConditionEncoder.Chain_2.Conv2d_1",
"controlnet_cond_embedding.blocks.1": "DownBlocks.Chain_1.Residual.ConditionEncoder.Chain_2.Conv2d_2",
"controlnet_cond_embedding.blocks.2": "DownBlocks.Chain_1.Residual.ConditionEncoder.Chain_3.Conv2d_1",
"controlnet_cond_embedding.blocks.3": "DownBlocks.Chain_1.Residual.ConditionEncoder.Chain_3.Conv2d_2",
"controlnet_cond_embedding.blocks.4": "DownBlocks.Chain_1.Residual.ConditionEncoder.Chain_4.Conv2d_1",
"controlnet_cond_embedding.blocks.5": "DownBlocks.Chain_1.Residual.ConditionEncoder.Chain_4.Conv2d_2",
"controlnet_cond_embedding.conv_out": "DownBlocks.Chain_1.Residual.ConditionEncoder.Conv2d",
"down_blocks.0.resnets.0.norm1": "DownBlocks.Chain_2.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.0.resnets.0.norm2": "DownBlocks.Chain_2.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.0.attentions.0.norm": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.0.resnets.1.norm1": "DownBlocks.Chain_3.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.0.resnets.1.norm2": "DownBlocks.Chain_3.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.0.attentions.1.norm": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.1.resnets.0.norm1": "DownBlocks.Chain_5.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.0.resnets.0.conv1": "DownBlocks.Chain_2.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.0.resnets.0.conv2": "DownBlocks.Chain_2.ResidualBlock.Chain.Conv2d",
"down_blocks.0.resnets.1.conv1": "DownBlocks.Chain_3.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.0.resnets.1.conv2": "DownBlocks.Chain_3.ResidualBlock.Chain.Conv2d",
"down_blocks.0.downsamplers.0.conv": "DownBlocks.Chain_4.Downsample.Conv2d",
"down_blocks.0.resnets.0.time_emb_proj": "DownBlocks.Chain_2.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"down_blocks.0.resnets.1.time_emb_proj": "DownBlocks.Chain_3.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"controlnet_down_blocks.0": "DownBlocks.Chain_1.Passthrough.Conv2d",
"down_blocks.0.attentions.0.proj_in": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.0.attentions.0.proj_out": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_3.Conv2d",
"controlnet_down_blocks.1": "DownBlocks.Chain_2.Passthrough.Conv2d",
"down_blocks.0.attentions.1.proj_in": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.0.attentions.1.proj_out": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_3.Conv2d",
"controlnet_down_blocks.2": "DownBlocks.Chain_3.Passthrough.Conv2d",
"controlnet_down_blocks.3": "DownBlocks.Chain_4.Passthrough.Conv2d",
"down_blocks.0.attentions.0.transformer_blocks.0.norm1": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.0.attentions.0.transformer_blocks.0.norm2": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.0.attentions.0.transformer_blocks.0.norm3": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.0.attentions.1.transformer_blocks.0.norm1": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.0.attentions.1.transformer_blocks.0.norm2": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.0.attentions.1.transformer_blocks.0.norm3": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.1.resnets.0.conv1": "DownBlocks.Chain_5.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.1.resnets.0.time_emb_proj": "DownBlocks.Chain_5.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.1.resnets.1.time_emb_proj": "DownBlocks.Chain_6.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.1.resnets.0.norm2": "DownBlocks.Chain_5.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.1.attentions.0.norm": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.1.resnets.1.norm1": "DownBlocks.Chain_6.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.1.resnets.1.norm2": "DownBlocks.Chain_6.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.1.attentions.1.norm": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.2.resnets.0.norm1": "DownBlocks.Chain_8.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.1.resnets.0.conv2": "DownBlocks.Chain_5.ResidualBlock.Chain.Conv2d",
"down_blocks.1.resnets.1.conv1": "DownBlocks.Chain_6.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.1.resnets.1.conv2": "DownBlocks.Chain_6.ResidualBlock.Chain.Conv2d",
"down_blocks.1.downsamplers.0.conv": "DownBlocks.Chain_7.Downsample.Conv2d",
"down_blocks.1.resnets.0.conv_shortcut": "DownBlocks.Chain_5.ResidualBlock.Conv2d",
"down_blocks.1.attentions.0.proj_in": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.1.attentions.0.proj_out": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_3.Conv2d",
"controlnet_down_blocks.4": "DownBlocks.Chain_5.Passthrough.Conv2d",
"down_blocks.1.attentions.1.proj_in": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.1.attentions.1.proj_out": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_3.Conv2d",
"controlnet_down_blocks.5": "DownBlocks.Chain_6.Passthrough.Conv2d",
"controlnet_down_blocks.6": "DownBlocks.Chain_7.Passthrough.Conv2d",
"down_blocks.1.attentions.0.transformer_blocks.0.norm1": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.1.attentions.0.transformer_blocks.0.norm2": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.1.attentions.0.transformer_blocks.0.norm3": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.1.attentions.1.transformer_blocks.0.norm1": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.1.attentions.1.transformer_blocks.0.norm2": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.1.attentions.1.transformer_blocks.0.norm3": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"down_blocks.2.resnets.0.conv1": "DownBlocks.Chain_8.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.2.resnets.0.norm2": "DownBlocks.Chain_8.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.2.attentions.0.norm": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.2.resnets.1.norm1": "DownBlocks.Chain_9.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.2.resnets.1.norm2": "DownBlocks.Chain_9.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.2.attentions.1.norm": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.3.resnets.0.norm1": "DownBlocks.Chain_11.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.3.resnets.0.norm2": "DownBlocks.Chain_11.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.3.resnets.1.norm1": "DownBlocks.Chain_12.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.3.resnets.1.norm2": "DownBlocks.Chain_12.ResidualBlock.Chain.GroupNorm_2",
"mid_block.resnets.0.norm1": "MiddleBlock.ResidualBlock_1.Chain.GroupNorm_1",
"mid_block.resnets.0.norm2": "MiddleBlock.ResidualBlock_1.Chain.GroupNorm_2",
"mid_block.attentions.0.norm": "MiddleBlock.CLIPLCrossAttention.Chain_1.GroupNorm",
"mid_block.resnets.1.norm1": "MiddleBlock.ResidualBlock_2.Chain.GroupNorm_1",
"mid_block.resnets.1.norm2": "MiddleBlock.ResidualBlock_2.Chain.GroupNorm_2",
"down_blocks.2.resnets.0.conv2": "DownBlocks.Chain_8.ResidualBlock.Chain.Conv2d",
"down_blocks.2.resnets.1.conv1": "DownBlocks.Chain_9.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.2.resnets.1.conv2": "DownBlocks.Chain_9.ResidualBlock.Chain.Conv2d",
"down_blocks.2.downsamplers.0.conv": "DownBlocks.Chain_10.Downsample.Conv2d",
"down_blocks.3.resnets.0.conv1": "DownBlocks.Chain_11.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.3.resnets.0.conv2": "DownBlocks.Chain_11.ResidualBlock.Chain.Conv2d",
"down_blocks.3.resnets.1.conv1": "DownBlocks.Chain_12.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.3.resnets.1.conv2": "DownBlocks.Chain_12.ResidualBlock.Chain.Conv2d",
"mid_block.resnets.0.conv1": "MiddleBlock.ResidualBlock_1.Chain.RangeAdapter2d.Conv2d",
"mid_block.resnets.0.conv2": "MiddleBlock.ResidualBlock_1.Chain.Conv2d",
"mid_block.resnets.1.conv1": "MiddleBlock.ResidualBlock_2.Chain.RangeAdapter2d.Conv2d",
"mid_block.resnets.1.conv2": "MiddleBlock.ResidualBlock_2.Chain.Conv2d",
"down_blocks.2.resnets.0.conv_shortcut": "DownBlocks.Chain_8.ResidualBlock.Conv2d",
"down_blocks.2.attentions.0.proj_in": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.2.attentions.0.proj_out": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_3.Conv2d",
"controlnet_down_blocks.7": "DownBlocks.Chain_8.Passthrough.Conv2d",
"down_blocks.2.attentions.1.proj_in": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.2.attentions.1.proj_out": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_3.Conv2d",
"controlnet_down_blocks.8": "DownBlocks.Chain_9.Passthrough.Conv2d",
"controlnet_down_blocks.9": "DownBlocks.Chain_10.Passthrough.Conv2d",
"controlnet_down_blocks.10": "DownBlocks.Chain_11.Passthrough.Conv2d",
"controlnet_down_blocks.11": "DownBlocks.Chain_12.Passthrough.Conv2d",
"mid_block.attentions.0.proj_in": "MiddleBlock.CLIPLCrossAttention.Chain_1.Conv2d",
"mid_block.attentions.0.proj_out": "MiddleBlock.CLIPLCrossAttention.Chain_3.Conv2d",
"controlnet_mid_block": "MiddleBlock.Passthrough.Conv2d",
"down_blocks.2.attentions.0.transformer_blocks.0.norm1": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.2.attentions.0.transformer_blocks.0.norm2": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.2.attentions.0.transformer_blocks.0.norm3": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.2.attentions.1.transformer_blocks.0.norm1": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.2.attentions.1.transformer_blocks.0.norm2": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.2.attentions.1.transformer_blocks.0.norm3": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"mid_block.attentions.0.transformer_blocks.0.norm1": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"mid_block.attentions.0.transformer_blocks.0.norm2": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"mid_block.attentions.0.transformer_blocks.0.norm3": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_q": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_k": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_v": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_q": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_k": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_v": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"mid_block.attentions.0.transformer_blocks.0.ff.net.2": "MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2"
}
}
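A minimal sketch, not the converter shipped in this commit, of how a table like the one above could be applied: each source entry names a module path, so a state-dict key such as "conv_in.weight" is split into module path and parameter name, the module path is looked up in "mapping", and the parameter suffix is re-attached.

def convert_state_dict(source_sd, conversion):
    # Rename "<module>.weight" / "<module>.bias" keys using conversion["mapping"];
    # keys whose module path has no entry are passed through unchanged.
    mapping = conversion["mapping"]
    converted = {}
    for key, tensor in source_sd.items():
        module_path, _, param_name = key.rpartition(".")
        target_module = mapping.get(module_path, module_path)
        converted[f"{target_module}.{param_name}"] = tensor
    return converted


# Toy usage with one entry from the table above (values would normally be tensors):
conversion = {"mapping": {"conv_in": "DownBlocks.Chain_1.Conv2d"}}
print(convert_state_dict({"conv_in.weight": 0.0}, conversion))
# -> {"DownBlocks.Chain_1.Conv2d.weight": 0.0}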

View File

@ -0,0 +1,106 @@
{
"mapping": {
"transformer.text_model.embeddings.token_embedding": "text_model.embeddings.token_embedding",
"transformer.text_model.embeddings.position_embedding": "text_model.embeddings.position_embedding",
"transformer.text_model.embeddings": "text_model.embeddings",
"transformer.text_model.encoder.layers.0.layer_norm1": "text_model.encoder.layers.0.layer_norm1",
"transformer.text_model.encoder.layers.0.self_attn.q_proj": "text_model.encoder.layers.0.self_attn.q_proj",
"transformer.text_model.encoder.layers.0.self_attn.k_proj": "text_model.encoder.layers.0.self_attn.k_proj",
"transformer.text_model.encoder.layers.0.self_attn.v_proj": "text_model.encoder.layers.0.self_attn.v_proj",
"transformer.text_model.encoder.layers.0.self_attn.out_proj": "text_model.encoder.layers.0.self_attn.out_proj",
"transformer.text_model.encoder.layers.0.layer_norm2": "text_model.encoder.layers.0.layer_norm2",
"transformer.text_model.encoder.layers.0.mlp.fc1": "text_model.encoder.layers.0.mlp.fc1",
"transformer.text_model.encoder.layers.0.mlp.fc2": "text_model.encoder.layers.0.mlp.fc2",
"transformer.text_model.encoder.layers.1.layer_norm1": "text_model.encoder.layers.1.layer_norm1",
"transformer.text_model.encoder.layers.1.self_attn.q_proj": "text_model.encoder.layers.1.self_attn.q_proj",
"transformer.text_model.encoder.layers.1.self_attn.k_proj": "text_model.encoder.layers.1.self_attn.k_proj",
"transformer.text_model.encoder.layers.1.self_attn.v_proj": "text_model.encoder.layers.1.self_attn.v_proj",
"transformer.text_model.encoder.layers.1.self_attn.out_proj": "text_model.encoder.layers.1.self_attn.out_proj",
"transformer.text_model.encoder.layers.1.layer_norm2": "text_model.encoder.layers.1.layer_norm2",
"transformer.text_model.encoder.layers.1.mlp.fc1": "text_model.encoder.layers.1.mlp.fc1",
"transformer.text_model.encoder.layers.1.mlp.fc2": "text_model.encoder.layers.1.mlp.fc2",
"transformer.text_model.encoder.layers.2.layer_norm1": "text_model.encoder.layers.2.layer_norm1",
"transformer.text_model.encoder.layers.2.self_attn.q_proj": "text_model.encoder.layers.2.self_attn.q_proj",
"transformer.text_model.encoder.layers.2.self_attn.k_proj": "text_model.encoder.layers.2.self_attn.k_proj",
"transformer.text_model.encoder.layers.2.self_attn.v_proj": "text_model.encoder.layers.2.self_attn.v_proj",
"transformer.text_model.encoder.layers.2.self_attn.out_proj": "text_model.encoder.layers.2.self_attn.out_proj",
"transformer.text_model.encoder.layers.2.layer_norm2": "text_model.encoder.layers.2.layer_norm2",
"transformer.text_model.encoder.layers.2.mlp.fc1": "text_model.encoder.layers.2.mlp.fc1",
"transformer.text_model.encoder.layers.2.mlp.fc2": "text_model.encoder.layers.2.mlp.fc2",
"transformer.text_model.encoder.layers.3.layer_norm1": "text_model.encoder.layers.3.layer_norm1",
"transformer.text_model.encoder.layers.3.self_attn.q_proj": "text_model.encoder.layers.3.self_attn.q_proj",
"transformer.text_model.encoder.layers.3.self_attn.k_proj": "text_model.encoder.layers.3.self_attn.k_proj",
"transformer.text_model.encoder.layers.3.self_attn.v_proj": "text_model.encoder.layers.3.self_attn.v_proj",
"transformer.text_model.encoder.layers.3.self_attn.out_proj": "text_model.encoder.layers.3.self_attn.out_proj",
"transformer.text_model.encoder.layers.3.layer_norm2": "text_model.encoder.layers.3.layer_norm2",
"transformer.text_model.encoder.layers.3.mlp.fc1": "text_model.encoder.layers.3.mlp.fc1",
"transformer.text_model.encoder.layers.3.mlp.fc2": "text_model.encoder.layers.3.mlp.fc2",
"transformer.text_model.encoder.layers.4.layer_norm1": "text_model.encoder.layers.4.layer_norm1",
"transformer.text_model.encoder.layers.4.self_attn.q_proj": "text_model.encoder.layers.4.self_attn.q_proj",
"transformer.text_model.encoder.layers.4.self_attn.k_proj": "text_model.encoder.layers.4.self_attn.k_proj",
"transformer.text_model.encoder.layers.4.self_attn.v_proj": "text_model.encoder.layers.4.self_attn.v_proj",
"transformer.text_model.encoder.layers.4.self_attn.out_proj": "text_model.encoder.layers.4.self_attn.out_proj",
"transformer.text_model.encoder.layers.4.layer_norm2": "text_model.encoder.layers.4.layer_norm2",
"transformer.text_model.encoder.layers.4.mlp.fc1": "text_model.encoder.layers.4.mlp.fc1",
"transformer.text_model.encoder.layers.4.mlp.fc2": "text_model.encoder.layers.4.mlp.fc2",
"transformer.text_model.encoder.layers.5.layer_norm1": "text_model.encoder.layers.5.layer_norm1",
"transformer.text_model.encoder.layers.5.self_attn.q_proj": "text_model.encoder.layers.5.self_attn.q_proj",
"transformer.text_model.encoder.layers.5.self_attn.k_proj": "text_model.encoder.layers.5.self_attn.k_proj",
"transformer.text_model.encoder.layers.5.self_attn.v_proj": "text_model.encoder.layers.5.self_attn.v_proj",
"transformer.text_model.encoder.layers.5.self_attn.out_proj": "text_model.encoder.layers.5.self_attn.out_proj",
"transformer.text_model.encoder.layers.5.layer_norm2": "text_model.encoder.layers.5.layer_norm2",
"transformer.text_model.encoder.layers.5.mlp.fc1": "text_model.encoder.layers.5.mlp.fc1",
"transformer.text_model.encoder.layers.5.mlp.fc2": "text_model.encoder.layers.5.mlp.fc2",
"transformer.text_model.encoder.layers.6.layer_norm1": "text_model.encoder.layers.6.layer_norm1",
"transformer.text_model.encoder.layers.6.self_attn.q_proj": "text_model.encoder.layers.6.self_attn.q_proj",
"transformer.text_model.encoder.layers.6.self_attn.k_proj": "text_model.encoder.layers.6.self_attn.k_proj",
"transformer.text_model.encoder.layers.6.self_attn.v_proj": "text_model.encoder.layers.6.self_attn.v_proj",
"transformer.text_model.encoder.layers.6.self_attn.out_proj": "text_model.encoder.layers.6.self_attn.out_proj",
"transformer.text_model.encoder.layers.6.layer_norm2": "text_model.encoder.layers.6.layer_norm2",
"transformer.text_model.encoder.layers.6.mlp.fc1": "text_model.encoder.layers.6.mlp.fc1",
"transformer.text_model.encoder.layers.6.mlp.fc2": "text_model.encoder.layers.6.mlp.fc2",
"transformer.text_model.encoder.layers.7.layer_norm1": "text_model.encoder.layers.7.layer_norm1",
"transformer.text_model.encoder.layers.7.self_attn.q_proj": "text_model.encoder.layers.7.self_attn.q_proj",
"transformer.text_model.encoder.layers.7.self_attn.k_proj": "text_model.encoder.layers.7.self_attn.k_proj",
"transformer.text_model.encoder.layers.7.self_attn.v_proj": "text_model.encoder.layers.7.self_attn.v_proj",
"transformer.text_model.encoder.layers.7.self_attn.out_proj": "text_model.encoder.layers.7.self_attn.out_proj",
"transformer.text_model.encoder.layers.7.layer_norm2": "text_model.encoder.layers.7.layer_norm2",
"transformer.text_model.encoder.layers.7.mlp.fc1": "text_model.encoder.layers.7.mlp.fc1",
"transformer.text_model.encoder.layers.7.mlp.fc2": "text_model.encoder.layers.7.mlp.fc2",
"transformer.text_model.encoder.layers.8.layer_norm1": "text_model.encoder.layers.8.layer_norm1",
"transformer.text_model.encoder.layers.8.self_attn.q_proj": "text_model.encoder.layers.8.self_attn.q_proj",
"transformer.text_model.encoder.layers.8.self_attn.k_proj": "text_model.encoder.layers.8.self_attn.k_proj",
"transformer.text_model.encoder.layers.8.self_attn.v_proj": "text_model.encoder.layers.8.self_attn.v_proj",
"transformer.text_model.encoder.layers.8.self_attn.out_proj": "text_model.encoder.layers.8.self_attn.out_proj",
"transformer.text_model.encoder.layers.8.layer_norm2": "text_model.encoder.layers.8.layer_norm2",
"transformer.text_model.encoder.layers.8.mlp.fc1": "text_model.encoder.layers.8.mlp.fc1",
"transformer.text_model.encoder.layers.8.mlp.fc2": "text_model.encoder.layers.8.mlp.fc2",
"transformer.text_model.encoder.layers.9.layer_norm1": "text_model.encoder.layers.9.layer_norm1",
"transformer.text_model.encoder.layers.9.self_attn.q_proj": "text_model.encoder.layers.9.self_attn.q_proj",
"transformer.text_model.encoder.layers.9.self_attn.k_proj": "text_model.encoder.layers.9.self_attn.k_proj",
"transformer.text_model.encoder.layers.9.self_attn.v_proj": "text_model.encoder.layers.9.self_attn.v_proj",
"transformer.text_model.encoder.layers.9.self_attn.out_proj": "text_model.encoder.layers.9.self_attn.out_proj",
"transformer.text_model.encoder.layers.9.layer_norm2": "text_model.encoder.layers.9.layer_norm2",
"transformer.text_model.encoder.layers.9.mlp.fc1": "text_model.encoder.layers.9.mlp.fc1",
"transformer.text_model.encoder.layers.9.mlp.fc2": "text_model.encoder.layers.9.mlp.fc2",
"transformer.text_model.encoder.layers.10.layer_norm1": "text_model.encoder.layers.10.layer_norm1",
"transformer.text_model.encoder.layers.10.self_attn.q_proj": "text_model.encoder.layers.10.self_attn.q_proj",
"transformer.text_model.encoder.layers.10.self_attn.k_proj": "text_model.encoder.layers.10.self_attn.k_proj",
"transformer.text_model.encoder.layers.10.self_attn.v_proj": "text_model.encoder.layers.10.self_attn.v_proj",
"transformer.text_model.encoder.layers.10.self_attn.out_proj": "text_model.encoder.layers.10.self_attn.out_proj",
"transformer.text_model.encoder.layers.10.layer_norm2": "text_model.encoder.layers.10.layer_norm2",
"transformer.text_model.encoder.layers.10.mlp.fc1": "text_model.encoder.layers.10.mlp.fc1",
"transformer.text_model.encoder.layers.10.mlp.fc2": "text_model.encoder.layers.10.mlp.fc2",
"transformer.text_model.encoder.layers.11.layer_norm1": "text_model.encoder.layers.11.layer_norm1",
"transformer.text_model.encoder.layers.11.self_attn.q_proj": "text_model.encoder.layers.11.self_attn.q_proj",
"transformer.text_model.encoder.layers.11.self_attn.k_proj": "text_model.encoder.layers.11.self_attn.k_proj",
"transformer.text_model.encoder.layers.11.self_attn.v_proj": "text_model.encoder.layers.11.self_attn.v_proj",
"transformer.text_model.encoder.layers.11.self_attn.out_proj": "text_model.encoder.layers.11.self_attn.out_proj",
"transformer.text_model.encoder.layers.11.layer_norm2": "text_model.encoder.layers.11.layer_norm2",
"transformer.text_model.encoder.layers.11.mlp.fc1": "text_model.encoder.layers.11.mlp.fc1",
"transformer.text_model.encoder.layers.11.mlp.fc2": "text_model.encoder.layers.11.mlp.fc2",
"transformer.text_model.final_layer_norm": "text_model.final_layer_norm"
},
"source_aliases": {},
"ignorable_prefixes": []
}

View File

@ -0,0 +1,106 @@
{
"mapping": {
"text_model.embeddings.token_embedding": "transformer.text_model.embeddings.token_embedding",
"text_model.embeddings.position_embedding": "transformer.text_model.embeddings.position_embedding",
"text_model.embeddings": "transformer.text_model.embeddings",
"text_model.encoder.layers.0.layer_norm1": "transformer.text_model.encoder.layers.0.layer_norm1",
"text_model.encoder.layers.0.self_attn.q_proj": "transformer.text_model.encoder.layers.0.self_attn.q_proj",
"text_model.encoder.layers.0.self_attn.k_proj": "transformer.text_model.encoder.layers.0.self_attn.k_proj",
"text_model.encoder.layers.0.self_attn.v_proj": "transformer.text_model.encoder.layers.0.self_attn.v_proj",
"text_model.encoder.layers.0.self_attn.out_proj": "transformer.text_model.encoder.layers.0.self_attn.out_proj",
"text_model.encoder.layers.0.layer_norm2": "transformer.text_model.encoder.layers.0.layer_norm2",
"text_model.encoder.layers.0.mlp.fc1": "transformer.text_model.encoder.layers.0.mlp.fc1",
"text_model.encoder.layers.0.mlp.fc2": "transformer.text_model.encoder.layers.0.mlp.fc2",
"text_model.encoder.layers.1.layer_norm1": "transformer.text_model.encoder.layers.1.layer_norm1",
"text_model.encoder.layers.1.self_attn.q_proj": "transformer.text_model.encoder.layers.1.self_attn.q_proj",
"text_model.encoder.layers.1.self_attn.k_proj": "transformer.text_model.encoder.layers.1.self_attn.k_proj",
"text_model.encoder.layers.1.self_attn.v_proj": "transformer.text_model.encoder.layers.1.self_attn.v_proj",
"text_model.encoder.layers.1.self_attn.out_proj": "transformer.text_model.encoder.layers.1.self_attn.out_proj",
"text_model.encoder.layers.1.layer_norm2": "transformer.text_model.encoder.layers.1.layer_norm2",
"text_model.encoder.layers.1.mlp.fc1": "transformer.text_model.encoder.layers.1.mlp.fc1",
"text_model.encoder.layers.1.mlp.fc2": "transformer.text_model.encoder.layers.1.mlp.fc2",
"text_model.encoder.layers.2.layer_norm1": "transformer.text_model.encoder.layers.2.layer_norm1",
"text_model.encoder.layers.2.self_attn.q_proj": "transformer.text_model.encoder.layers.2.self_attn.q_proj",
"text_model.encoder.layers.2.self_attn.k_proj": "transformer.text_model.encoder.layers.2.self_attn.k_proj",
"text_model.encoder.layers.2.self_attn.v_proj": "transformer.text_model.encoder.layers.2.self_attn.v_proj",
"text_model.encoder.layers.2.self_attn.out_proj": "transformer.text_model.encoder.layers.2.self_attn.out_proj",
"text_model.encoder.layers.2.layer_norm2": "transformer.text_model.encoder.layers.2.layer_norm2",
"text_model.encoder.layers.2.mlp.fc1": "transformer.text_model.encoder.layers.2.mlp.fc1",
"text_model.encoder.layers.2.mlp.fc2": "transformer.text_model.encoder.layers.2.mlp.fc2",
"text_model.encoder.layers.3.layer_norm1": "transformer.text_model.encoder.layers.3.layer_norm1",
"text_model.encoder.layers.3.self_attn.q_proj": "transformer.text_model.encoder.layers.3.self_attn.q_proj",
"text_model.encoder.layers.3.self_attn.k_proj": "transformer.text_model.encoder.layers.3.self_attn.k_proj",
"text_model.encoder.layers.3.self_attn.v_proj": "transformer.text_model.encoder.layers.3.self_attn.v_proj",
"text_model.encoder.layers.3.self_attn.out_proj": "transformer.text_model.encoder.layers.3.self_attn.out_proj",
"text_model.encoder.layers.3.layer_norm2": "transformer.text_model.encoder.layers.3.layer_norm2",
"text_model.encoder.layers.3.mlp.fc1": "transformer.text_model.encoder.layers.3.mlp.fc1",
"text_model.encoder.layers.3.mlp.fc2": "transformer.text_model.encoder.layers.3.mlp.fc2",
"text_model.encoder.layers.4.layer_norm1": "transformer.text_model.encoder.layers.4.layer_norm1",
"text_model.encoder.layers.4.self_attn.q_proj": "transformer.text_model.encoder.layers.4.self_attn.q_proj",
"text_model.encoder.layers.4.self_attn.k_proj": "transformer.text_model.encoder.layers.4.self_attn.k_proj",
"text_model.encoder.layers.4.self_attn.v_proj": "transformer.text_model.encoder.layers.4.self_attn.v_proj",
"text_model.encoder.layers.4.self_attn.out_proj": "transformer.text_model.encoder.layers.4.self_attn.out_proj",
"text_model.encoder.layers.4.layer_norm2": "transformer.text_model.encoder.layers.4.layer_norm2",
"text_model.encoder.layers.4.mlp.fc1": "transformer.text_model.encoder.layers.4.mlp.fc1",
"text_model.encoder.layers.4.mlp.fc2": "transformer.text_model.encoder.layers.4.mlp.fc2",
"text_model.encoder.layers.5.layer_norm1": "transformer.text_model.encoder.layers.5.layer_norm1",
"text_model.encoder.layers.5.self_attn.q_proj": "transformer.text_model.encoder.layers.5.self_attn.q_proj",
"text_model.encoder.layers.5.self_attn.k_proj": "transformer.text_model.encoder.layers.5.self_attn.k_proj",
"text_model.encoder.layers.5.self_attn.v_proj": "transformer.text_model.encoder.layers.5.self_attn.v_proj",
"text_model.encoder.layers.5.self_attn.out_proj": "transformer.text_model.encoder.layers.5.self_attn.out_proj",
"text_model.encoder.layers.5.layer_norm2": "transformer.text_model.encoder.layers.5.layer_norm2",
"text_model.encoder.layers.5.mlp.fc1": "transformer.text_model.encoder.layers.5.mlp.fc1",
"text_model.encoder.layers.5.mlp.fc2": "transformer.text_model.encoder.layers.5.mlp.fc2",
"text_model.encoder.layers.6.layer_norm1": "transformer.text_model.encoder.layers.6.layer_norm1",
"text_model.encoder.layers.6.self_attn.q_proj": "transformer.text_model.encoder.layers.6.self_attn.q_proj",
"text_model.encoder.layers.6.self_attn.k_proj": "transformer.text_model.encoder.layers.6.self_attn.k_proj",
"text_model.encoder.layers.6.self_attn.v_proj": "transformer.text_model.encoder.layers.6.self_attn.v_proj",
"text_model.encoder.layers.6.self_attn.out_proj": "transformer.text_model.encoder.layers.6.self_attn.out_proj",
"text_model.encoder.layers.6.layer_norm2": "transformer.text_model.encoder.layers.6.layer_norm2",
"text_model.encoder.layers.6.mlp.fc1": "transformer.text_model.encoder.layers.6.mlp.fc1",
"text_model.encoder.layers.6.mlp.fc2": "transformer.text_model.encoder.layers.6.mlp.fc2",
"text_model.encoder.layers.7.layer_norm1": "transformer.text_model.encoder.layers.7.layer_norm1",
"text_model.encoder.layers.7.self_attn.q_proj": "transformer.text_model.encoder.layers.7.self_attn.q_proj",
"text_model.encoder.layers.7.self_attn.k_proj": "transformer.text_model.encoder.layers.7.self_attn.k_proj",
"text_model.encoder.layers.7.self_attn.v_proj": "transformer.text_model.encoder.layers.7.self_attn.v_proj",
"text_model.encoder.layers.7.self_attn.out_proj": "transformer.text_model.encoder.layers.7.self_attn.out_proj",
"text_model.encoder.layers.7.layer_norm2": "transformer.text_model.encoder.layers.7.layer_norm2",
"text_model.encoder.layers.7.mlp.fc1": "transformer.text_model.encoder.layers.7.mlp.fc1",
"text_model.encoder.layers.7.mlp.fc2": "transformer.text_model.encoder.layers.7.mlp.fc2",
"text_model.encoder.layers.8.layer_norm1": "transformer.text_model.encoder.layers.8.layer_norm1",
"text_model.encoder.layers.8.self_attn.q_proj": "transformer.text_model.encoder.layers.8.self_attn.q_proj",
"text_model.encoder.layers.8.self_attn.k_proj": "transformer.text_model.encoder.layers.8.self_attn.k_proj",
"text_model.encoder.layers.8.self_attn.v_proj": "transformer.text_model.encoder.layers.8.self_attn.v_proj",
"text_model.encoder.layers.8.self_attn.out_proj": "transformer.text_model.encoder.layers.8.self_attn.out_proj",
"text_model.encoder.layers.8.layer_norm2": "transformer.text_model.encoder.layers.8.layer_norm2",
"text_model.encoder.layers.8.mlp.fc1": "transformer.text_model.encoder.layers.8.mlp.fc1",
"text_model.encoder.layers.8.mlp.fc2": "transformer.text_model.encoder.layers.8.mlp.fc2",
"text_model.encoder.layers.9.layer_norm1": "transformer.text_model.encoder.layers.9.layer_norm1",
"text_model.encoder.layers.9.self_attn.q_proj": "transformer.text_model.encoder.layers.9.self_attn.q_proj",
"text_model.encoder.layers.9.self_attn.k_proj": "transformer.text_model.encoder.layers.9.self_attn.k_proj",
"text_model.encoder.layers.9.self_attn.v_proj": "transformer.text_model.encoder.layers.9.self_attn.v_proj",
"text_model.encoder.layers.9.self_attn.out_proj": "transformer.text_model.encoder.layers.9.self_attn.out_proj",
"text_model.encoder.layers.9.layer_norm2": "transformer.text_model.encoder.layers.9.layer_norm2",
"text_model.encoder.layers.9.mlp.fc1": "transformer.text_model.encoder.layers.9.mlp.fc1",
"text_model.encoder.layers.9.mlp.fc2": "transformer.text_model.encoder.layers.9.mlp.fc2",
"text_model.encoder.layers.10.layer_norm1": "transformer.text_model.encoder.layers.10.layer_norm1",
"text_model.encoder.layers.10.self_attn.q_proj": "transformer.text_model.encoder.layers.10.self_attn.q_proj",
"text_model.encoder.layers.10.self_attn.k_proj": "transformer.text_model.encoder.layers.10.self_attn.k_proj",
"text_model.encoder.layers.10.self_attn.v_proj": "transformer.text_model.encoder.layers.10.self_attn.v_proj",
"text_model.encoder.layers.10.self_attn.out_proj": "transformer.text_model.encoder.layers.10.self_attn.out_proj",
"text_model.encoder.layers.10.layer_norm2": "transformer.text_model.encoder.layers.10.layer_norm2",
"text_model.encoder.layers.10.mlp.fc1": "transformer.text_model.encoder.layers.10.mlp.fc1",
"text_model.encoder.layers.10.mlp.fc2": "transformer.text_model.encoder.layers.10.mlp.fc2",
"text_model.encoder.layers.11.layer_norm1": "transformer.text_model.encoder.layers.11.layer_norm1",
"text_model.encoder.layers.11.self_attn.q_proj": "transformer.text_model.encoder.layers.11.self_attn.q_proj",
"text_model.encoder.layers.11.self_attn.k_proj": "transformer.text_model.encoder.layers.11.self_attn.k_proj",
"text_model.encoder.layers.11.self_attn.v_proj": "transformer.text_model.encoder.layers.11.self_attn.v_proj",
"text_model.encoder.layers.11.self_attn.out_proj": "transformer.text_model.encoder.layers.11.self_attn.out_proj",
"text_model.encoder.layers.11.layer_norm2": "transformer.text_model.encoder.layers.11.layer_norm2",
"text_model.encoder.layers.11.mlp.fc1": "transformer.text_model.encoder.layers.11.mlp.fc1",
"text_model.encoder.layers.11.mlp.fc2": "transformer.text_model.encoder.layers.11.mlp.fc2",
"text_model.final_layer_norm": "transformer.text_model.final_layer_norm"
},
"source_aliases": {},
"ignorable_prefixes": []
}
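The two tables above appear to be inverses of each other: one strips the leading "transformer." prefix from the text-encoder module paths and the other re-adds it. A quick sanity check of that assumption, with placeholder file names since the diff view does not show the actual paths:

import json

# File names below are placeholders, not paths from this commit.
with open("text_encoder_a_to_b.json") as f_ab, open("text_encoder_b_to_a.json") as f_ba:
    ab = json.load(f_ab)["mapping"]
    ba = json.load(f_ba)["mapping"]

# Every forward entry should map back to itself through the reverse table.
assert all(ba.get(value) == key for key, value in ab.items())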

View File

@ -0,0 +1,107 @@
{
"ignorable_prefixes": [
"text_model.embeddings"
],
"mapping": {
"text_model.embeddings.token_embedding": "Sum.TokenEncoder",
"text_model.embeddings.position_embedding": "Sum.PositionalEncoder.Embedding",
"text_model.encoder.layers.0.layer_norm1": "TransformerLayer_1.Residual_1.LayerNorm",
"text_model.encoder.layers.0.layer_norm2": "TransformerLayer_1.Residual_2.LayerNorm",
"text_model.encoder.layers.1.layer_norm1": "TransformerLayer_2.Residual_1.LayerNorm",
"text_model.encoder.layers.1.layer_norm2": "TransformerLayer_2.Residual_2.LayerNorm",
"text_model.encoder.layers.2.layer_norm1": "TransformerLayer_3.Residual_1.LayerNorm",
"text_model.encoder.layers.2.layer_norm2": "TransformerLayer_3.Residual_2.LayerNorm",
"text_model.encoder.layers.3.layer_norm1": "TransformerLayer_4.Residual_1.LayerNorm",
"text_model.encoder.layers.3.layer_norm2": "TransformerLayer_4.Residual_2.LayerNorm",
"text_model.encoder.layers.4.layer_norm1": "TransformerLayer_5.Residual_1.LayerNorm",
"text_model.encoder.layers.4.layer_norm2": "TransformerLayer_5.Residual_2.LayerNorm",
"text_model.encoder.layers.5.layer_norm1": "TransformerLayer_6.Residual_1.LayerNorm",
"text_model.encoder.layers.5.layer_norm2": "TransformerLayer_6.Residual_2.LayerNorm",
"text_model.encoder.layers.6.layer_norm1": "TransformerLayer_7.Residual_1.LayerNorm",
"text_model.encoder.layers.6.layer_norm2": "TransformerLayer_7.Residual_2.LayerNorm",
"text_model.encoder.layers.7.layer_norm1": "TransformerLayer_8.Residual_1.LayerNorm",
"text_model.encoder.layers.7.layer_norm2": "TransformerLayer_8.Residual_2.LayerNorm",
"text_model.encoder.layers.8.layer_norm1": "TransformerLayer_9.Residual_1.LayerNorm",
"text_model.encoder.layers.8.layer_norm2": "TransformerLayer_9.Residual_2.LayerNorm",
"text_model.encoder.layers.9.layer_norm1": "TransformerLayer_10.Residual_1.LayerNorm",
"text_model.encoder.layers.9.layer_norm2": "TransformerLayer_10.Residual_2.LayerNorm",
"text_model.encoder.layers.10.layer_norm1": "TransformerLayer_11.Residual_1.LayerNorm",
"text_model.encoder.layers.10.layer_norm2": "TransformerLayer_11.Residual_2.LayerNorm",
"text_model.encoder.layers.11.layer_norm1": "TransformerLayer_12.Residual_1.LayerNorm",
"text_model.encoder.layers.11.layer_norm2": "TransformerLayer_12.Residual_2.LayerNorm",
"text_model.final_layer_norm": "LayerNorm",
"text_model.encoder.layers.0.self_attn.q_proj": "TransformerLayer_1.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.0.self_attn.k_proj": "TransformerLayer_1.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.0.self_attn.v_proj": "TransformerLayer_1.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.0.self_attn.out_proj": "TransformerLayer_1.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.1.self_attn.q_proj": "TransformerLayer_2.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.1.self_attn.k_proj": "TransformerLayer_2.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.1.self_attn.v_proj": "TransformerLayer_2.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.1.self_attn.out_proj": "TransformerLayer_2.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.2.self_attn.q_proj": "TransformerLayer_3.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.2.self_attn.k_proj": "TransformerLayer_3.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.2.self_attn.v_proj": "TransformerLayer_3.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.2.self_attn.out_proj": "TransformerLayer_3.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.3.self_attn.q_proj": "TransformerLayer_4.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.3.self_attn.k_proj": "TransformerLayer_4.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.3.self_attn.v_proj": "TransformerLayer_4.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.3.self_attn.out_proj": "TransformerLayer_4.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.4.self_attn.q_proj": "TransformerLayer_5.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.4.self_attn.k_proj": "TransformerLayer_5.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.4.self_attn.v_proj": "TransformerLayer_5.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.4.self_attn.out_proj": "TransformerLayer_5.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.5.self_attn.q_proj": "TransformerLayer_6.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.5.self_attn.k_proj": "TransformerLayer_6.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.5.self_attn.v_proj": "TransformerLayer_6.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.5.self_attn.out_proj": "TransformerLayer_6.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.6.self_attn.q_proj": "TransformerLayer_7.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.6.self_attn.k_proj": "TransformerLayer_7.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.6.self_attn.v_proj": "TransformerLayer_7.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.6.self_attn.out_proj": "TransformerLayer_7.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.7.self_attn.q_proj": "TransformerLayer_8.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.7.self_attn.k_proj": "TransformerLayer_8.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.7.self_attn.v_proj": "TransformerLayer_8.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.7.self_attn.out_proj": "TransformerLayer_8.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.8.self_attn.q_proj": "TransformerLayer_9.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.8.self_attn.k_proj": "TransformerLayer_9.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.8.self_attn.v_proj": "TransformerLayer_9.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.8.self_attn.out_proj": "TransformerLayer_9.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.9.self_attn.q_proj": "TransformerLayer_10.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.9.self_attn.k_proj": "TransformerLayer_10.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.9.self_attn.v_proj": "TransformerLayer_10.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.9.self_attn.out_proj": "TransformerLayer_10.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.10.self_attn.q_proj": "TransformerLayer_11.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.10.self_attn.k_proj": "TransformerLayer_11.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.10.self_attn.v_proj": "TransformerLayer_11.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.10.self_attn.out_proj": "TransformerLayer_11.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.11.self_attn.q_proj": "TransformerLayer_12.Residual_1.SelfAttention.Distribute.Linear_1",
"text_model.encoder.layers.11.self_attn.k_proj": "TransformerLayer_12.Residual_1.SelfAttention.Distribute.Linear_2",
"text_model.encoder.layers.11.self_attn.v_proj": "TransformerLayer_12.Residual_1.SelfAttention.Distribute.Linear_3",
"text_model.encoder.layers.11.self_attn.out_proj": "TransformerLayer_12.Residual_1.SelfAttention.Linear",
"text_model.encoder.layers.0.mlp.fc1": "TransformerLayer_1.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.1.mlp.fc1": "TransformerLayer_2.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.2.mlp.fc1": "TransformerLayer_3.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.3.mlp.fc1": "TransformerLayer_4.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.4.mlp.fc1": "TransformerLayer_5.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.5.mlp.fc1": "TransformerLayer_6.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.6.mlp.fc1": "TransformerLayer_7.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.7.mlp.fc1": "TransformerLayer_8.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.8.mlp.fc1": "TransformerLayer_9.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.9.mlp.fc1": "TransformerLayer_10.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.10.mlp.fc1": "TransformerLayer_11.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.11.mlp.fc1": "TransformerLayer_12.Residual_2.FeedForward.Linear_1",
"text_model.encoder.layers.0.mlp.fc2": "TransformerLayer_1.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.1.mlp.fc2": "TransformerLayer_2.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.2.mlp.fc2": "TransformerLayer_3.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.3.mlp.fc2": "TransformerLayer_4.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.4.mlp.fc2": "TransformerLayer_5.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.5.mlp.fc2": "TransformerLayer_6.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.6.mlp.fc2": "TransformerLayer_7.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.7.mlp.fc2": "TransformerLayer_8.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.8.mlp.fc2": "TransformerLayer_9.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.9.mlp.fc2": "TransformerLayer_10.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.10.mlp.fc2": "TransformerLayer_11.Residual_2.FeedForward.Linear_2",
"text_model.encoder.layers.11.mlp.fc2": "TransformerLayer_12.Residual_2.FeedForward.Linear_2"
},
"source_aliases": {}
}
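A minimal sketch of how the extra fields in this table might be consumed, under two assumptions the commit itself would have to confirm: that "source_aliases" rewrites a source module path before lookup, and that "ignorable_prefixes" marks source keys (here the embeddings buffers) with no standalone target parameter, which can therefore be skipped.

def translate_key(source_key, conversion):
    # Apply alias rewrites first (empty in the file above), then look up the module path.
    module_path, _, param_name = source_key.rpartition(".")
    for alias, canonical in conversion.get("source_aliases", {}).items():
        if module_path.startswith(alias):
            module_path = canonical + module_path[len(alias):]
    mapping = conversion["mapping"]
    if module_path in mapping:
        return f"{mapping[module_path]}.{param_name}"
    if any(module_path.startswith(prefix) for prefix in conversion.get("ignorable_prefixes", [])):
        return None  # e.g. text_model.embeddings.position_ids has no target parameter
    raise KeyError(f"no mapping for {source_key}")


# Toy usage with entries from the table above:
conversion = {
    "mapping": {"text_model.final_layer_norm": "LayerNorm"},
    "source_aliases": {},
    "ignorable_prefixes": ["text_model.embeddings"],
}
assert translate_key("text_model.final_layer_norm.weight", conversion) == "LayerNorm.weight"
assert translate_key("text_model.embeddings.position_ids", conversion) is None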

View File

@ -0,0 +1,397 @@
{
"mapping": {
"diffusion_model.time_embed.0": "time_embedding.linear_1",
"diffusion_model.time_embed.2": "time_embedding.linear_2",
"diffusion_model.input_blocks.0.0": "conv_in",
"diffusion_model.input_blocks.1.0.in_layers.0": "down_blocks.0.resnets.0.norm1",
"diffusion_model.input_blocks.1.0.in_layers.2": "down_blocks.0.resnets.0.conv1",
"diffusion_model.input_blocks.1.0.emb_layers.1": "down_blocks.0.resnets.0.time_emb_proj",
"diffusion_model.input_blocks.1.0.out_layers.0": "down_blocks.0.resnets.0.norm2",
"diffusion_model.input_blocks.1.0.out_layers.3": "down_blocks.0.resnets.0.conv2",
"diffusion_model.input_blocks.1.1.norm": "down_blocks.0.attentions.0.norm",
"diffusion_model.input_blocks.1.1.proj_in": "down_blocks.0.attentions.0.proj_in",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.norm1": "down_blocks.0.attentions.0.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_q": "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_k": "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_v": "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0": "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.norm2": "down_blocks.0.attentions.0.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_q": "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k": "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_v": "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_out.0": "down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.norm3": "down_blocks.0.attentions.0.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj": "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.2": "down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.1.1.proj_out": "down_blocks.0.attentions.0.proj_out",
"diffusion_model.input_blocks.2.0.in_layers.0": "down_blocks.0.resnets.1.norm1",
"diffusion_model.input_blocks.2.0.in_layers.2": "down_blocks.0.resnets.1.conv1",
"diffusion_model.input_blocks.2.0.emb_layers.1": "down_blocks.0.resnets.1.time_emb_proj",
"diffusion_model.input_blocks.2.0.out_layers.0": "down_blocks.0.resnets.1.norm2",
"diffusion_model.input_blocks.2.0.out_layers.3": "down_blocks.0.resnets.1.conv2",
"diffusion_model.input_blocks.2.1.norm": "down_blocks.0.attentions.1.norm",
"diffusion_model.input_blocks.2.1.proj_in": "down_blocks.0.attentions.1.proj_in",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.norm1": "down_blocks.0.attentions.1.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_q": "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_k": "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_v": "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_out.0": "down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.norm2": "down_blocks.0.attentions.1.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_q": "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k": "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_v": "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_out.0": "down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.norm3": "down_blocks.0.attentions.1.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.0.proj": "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.2": "down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.2.1.proj_out": "down_blocks.0.attentions.1.proj_out",
"diffusion_model.input_blocks.3.0.op": "down_blocks.0.downsamplers.0.conv",
"diffusion_model.input_blocks.4.0.in_layers.0": "down_blocks.1.resnets.0.norm1",
"diffusion_model.input_blocks.4.0.in_layers.2": "down_blocks.1.resnets.0.conv1",
"diffusion_model.input_blocks.4.0.emb_layers.1": "down_blocks.1.resnets.0.time_emb_proj",
"diffusion_model.input_blocks.4.0.out_layers.0": "down_blocks.1.resnets.0.norm2",
"diffusion_model.input_blocks.4.0.out_layers.3": "down_blocks.1.resnets.0.conv2",
"diffusion_model.input_blocks.4.0.skip_connection": "down_blocks.1.resnets.0.conv_shortcut",
"diffusion_model.input_blocks.4.1.norm": "down_blocks.1.attentions.0.norm",
"diffusion_model.input_blocks.4.1.proj_in": "down_blocks.1.attentions.0.proj_in",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.norm1": "down_blocks.1.attentions.0.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_q": "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_k": "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_v": "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_out.0": "down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.norm2": "down_blocks.1.attentions.0.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_q": "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k": "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_v": "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_out.0": "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.norm3": "down_blocks.1.attentions.0.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.0.proj": "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.2": "down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.4.1.proj_out": "down_blocks.1.attentions.0.proj_out",
"diffusion_model.input_blocks.5.0.in_layers.0": "down_blocks.1.resnets.1.norm1",
"diffusion_model.input_blocks.5.0.in_layers.2": "down_blocks.1.resnets.1.conv1",
"diffusion_model.input_blocks.5.0.emb_layers.1": "down_blocks.1.resnets.1.time_emb_proj",
"diffusion_model.input_blocks.5.0.out_layers.0": "down_blocks.1.resnets.1.norm2",
"diffusion_model.input_blocks.5.0.out_layers.3": "down_blocks.1.resnets.1.conv2",
"diffusion_model.input_blocks.5.1.norm": "down_blocks.1.attentions.1.norm",
"diffusion_model.input_blocks.5.1.proj_in": "down_blocks.1.attentions.1.proj_in",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.norm1": "down_blocks.1.attentions.1.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_q": "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_k": "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_v": "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_out.0": "down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.norm2": "down_blocks.1.attentions.1.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_q": "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_k": "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_v": "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_out.0": "down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.norm3": "down_blocks.1.attentions.1.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.0.proj": "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.2": "down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.5.1.proj_out": "down_blocks.1.attentions.1.proj_out",
"diffusion_model.input_blocks.6.0.op": "down_blocks.1.downsamplers.0.conv",
"diffusion_model.input_blocks.7.0.in_layers.0": "down_blocks.2.resnets.0.norm1",
"diffusion_model.input_blocks.7.0.in_layers.2": "down_blocks.2.resnets.0.conv1",
"diffusion_model.input_blocks.7.0.emb_layers.1": "down_blocks.2.resnets.0.time_emb_proj",
"diffusion_model.input_blocks.7.0.out_layers.0": "down_blocks.2.resnets.0.norm2",
"diffusion_model.input_blocks.7.0.out_layers.3": "down_blocks.2.resnets.0.conv2",
"diffusion_model.input_blocks.7.0.skip_connection": "down_blocks.2.resnets.0.conv_shortcut",
"diffusion_model.input_blocks.7.1.norm": "down_blocks.2.attentions.0.norm",
"diffusion_model.input_blocks.7.1.proj_in": "down_blocks.2.attentions.0.proj_in",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.norm1": "down_blocks.2.attentions.0.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_q": "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_k": "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_v": "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_out.0": "down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.norm2": "down_blocks.2.attentions.0.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_q": "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_k": "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_v": "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_out.0": "down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.norm3": "down_blocks.2.attentions.0.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.0.proj": "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.2": "down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.7.1.proj_out": "down_blocks.2.attentions.0.proj_out",
"diffusion_model.input_blocks.8.0.in_layers.0": "down_blocks.2.resnets.1.norm1",
"diffusion_model.input_blocks.8.0.in_layers.2": "down_blocks.2.resnets.1.conv1",
"diffusion_model.input_blocks.8.0.emb_layers.1": "down_blocks.2.resnets.1.time_emb_proj",
"diffusion_model.input_blocks.8.0.out_layers.0": "down_blocks.2.resnets.1.norm2",
"diffusion_model.input_blocks.8.0.out_layers.3": "down_blocks.2.resnets.1.conv2",
"diffusion_model.input_blocks.8.1.norm": "down_blocks.2.attentions.1.norm",
"diffusion_model.input_blocks.8.1.proj_in": "down_blocks.2.attentions.1.proj_in",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.norm1": "down_blocks.2.attentions.1.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_q": "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_k": "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_v": "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_out.0": "down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.norm2": "down_blocks.2.attentions.1.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_q": "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_k": "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_v": "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_out.0": "down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.norm3": "down_blocks.2.attentions.1.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.0.proj": "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.2": "down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.8.1.proj_out": "down_blocks.2.attentions.1.proj_out",
"diffusion_model.input_blocks.9.0.op": "down_blocks.2.downsamplers.0.conv",
"diffusion_model.input_blocks.10.0.in_layers.0": "down_blocks.3.resnets.0.norm1",
"diffusion_model.input_blocks.10.0.in_layers.2": "down_blocks.3.resnets.0.conv1",
"diffusion_model.input_blocks.10.0.emb_layers.1": "down_blocks.3.resnets.0.time_emb_proj",
"diffusion_model.input_blocks.10.0.out_layers.0": "down_blocks.3.resnets.0.norm2",
"diffusion_model.input_blocks.10.0.out_layers.3": "down_blocks.3.resnets.0.conv2",
"diffusion_model.input_blocks.11.0.in_layers.0": "down_blocks.3.resnets.1.norm1",
"diffusion_model.input_blocks.11.0.in_layers.2": "down_blocks.3.resnets.1.conv1",
"diffusion_model.input_blocks.11.0.emb_layers.1": "down_blocks.3.resnets.1.time_emb_proj",
"diffusion_model.input_blocks.11.0.out_layers.0": "down_blocks.3.resnets.1.norm2",
"diffusion_model.input_blocks.11.0.out_layers.3": "down_blocks.3.resnets.1.conv2",
"diffusion_model.middle_block.0.in_layers.0": "mid_block.resnets.0.norm1",
"diffusion_model.middle_block.0.in_layers.2": "mid_block.resnets.0.conv1",
"diffusion_model.middle_block.0.emb_layers.1": "mid_block.resnets.0.time_emb_proj",
"diffusion_model.middle_block.0.out_layers.0": "mid_block.resnets.0.norm2",
"diffusion_model.middle_block.0.out_layers.3": "mid_block.resnets.0.conv2",
"diffusion_model.middle_block.1.norm": "mid_block.attentions.0.norm",
"diffusion_model.middle_block.1.proj_in": "mid_block.attentions.0.proj_in",
"diffusion_model.middle_block.1.transformer_blocks.0.norm1": "mid_block.attentions.0.transformer_blocks.0.norm1",
"diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q": "mid_block.attentions.0.transformer_blocks.0.attn1.to_q",
"diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_k": "mid_block.attentions.0.transformer_blocks.0.attn1.to_k",
"diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_v": "mid_block.attentions.0.transformer_blocks.0.attn1.to_v",
"diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_out.0": "mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.middle_block.1.transformer_blocks.0.norm2": "mid_block.attentions.0.transformer_blocks.0.norm2",
"diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_q": "mid_block.attentions.0.transformer_blocks.0.attn2.to_q",
"diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_k": "mid_block.attentions.0.transformer_blocks.0.attn2.to_k",
"diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_v": "mid_block.attentions.0.transformer_blocks.0.attn2.to_v",
"diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_out.0": "mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.middle_block.1.transformer_blocks.0.norm3": "mid_block.attentions.0.transformer_blocks.0.norm3",
"diffusion_model.middle_block.1.transformer_blocks.0.ff.net.0.proj": "mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.middle_block.1.transformer_blocks.0.ff.net.2": "mid_block.attentions.0.transformer_blocks.0.ff.net.2",
"diffusion_model.middle_block.1.proj_out": "mid_block.attentions.0.proj_out",
"diffusion_model.middle_block.2.in_layers.0": "mid_block.resnets.1.norm1",
"diffusion_model.middle_block.2.in_layers.2": "mid_block.resnets.1.conv1",
"diffusion_model.middle_block.2.emb_layers.1": "mid_block.resnets.1.time_emb_proj",
"diffusion_model.middle_block.2.out_layers.0": "mid_block.resnets.1.norm2",
"diffusion_model.middle_block.2.out_layers.3": "mid_block.resnets.1.conv2",
"diffusion_model.output_blocks.0.0.in_layers.0": "up_blocks.0.resnets.0.norm1",
"diffusion_model.output_blocks.0.0.in_layers.2": "up_blocks.0.resnets.0.conv1",
"diffusion_model.output_blocks.0.0.emb_layers.1": "up_blocks.0.resnets.0.time_emb_proj",
"diffusion_model.output_blocks.0.0.out_layers.0": "up_blocks.0.resnets.0.norm2",
"diffusion_model.output_blocks.0.0.out_layers.3": "up_blocks.0.resnets.0.conv2",
"diffusion_model.output_blocks.0.0.skip_connection": "up_blocks.0.resnets.0.conv_shortcut",
"diffusion_model.output_blocks.1.0.in_layers.0": "up_blocks.0.resnets.1.norm1",
"diffusion_model.output_blocks.1.0.in_layers.2": "up_blocks.0.resnets.1.conv1",
"diffusion_model.output_blocks.1.0.emb_layers.1": "up_blocks.0.resnets.1.time_emb_proj",
"diffusion_model.output_blocks.1.0.out_layers.0": "up_blocks.0.resnets.1.norm2",
"diffusion_model.output_blocks.1.0.out_layers.3": "up_blocks.0.resnets.1.conv2",
"diffusion_model.output_blocks.1.0.skip_connection": "up_blocks.0.resnets.1.conv_shortcut",
"diffusion_model.output_blocks.2.0.in_layers.0": "up_blocks.0.resnets.2.norm1",
"diffusion_model.output_blocks.2.0.in_layers.2": "up_blocks.0.resnets.2.conv1",
"diffusion_model.output_blocks.2.0.emb_layers.1": "up_blocks.0.resnets.2.time_emb_proj",
"diffusion_model.output_blocks.2.0.out_layers.0": "up_blocks.0.resnets.2.norm2",
"diffusion_model.output_blocks.2.0.out_layers.3": "up_blocks.0.resnets.2.conv2",
"diffusion_model.output_blocks.2.0.skip_connection": "up_blocks.0.resnets.2.conv_shortcut",
"diffusion_model.output_blocks.2.1.conv": "up_blocks.0.upsamplers.0.conv",
"diffusion_model.output_blocks.3.0.in_layers.0": "up_blocks.1.resnets.0.norm1",
"diffusion_model.output_blocks.3.0.in_layers.2": "up_blocks.1.resnets.0.conv1",
"diffusion_model.output_blocks.3.0.emb_layers.1": "up_blocks.1.resnets.0.time_emb_proj",
"diffusion_model.output_blocks.3.0.out_layers.0": "up_blocks.1.resnets.0.norm2",
"diffusion_model.output_blocks.3.0.out_layers.3": "up_blocks.1.resnets.0.conv2",
"diffusion_model.output_blocks.3.0.skip_connection": "up_blocks.1.resnets.0.conv_shortcut",
"diffusion_model.output_blocks.3.1.norm": "up_blocks.1.attentions.0.norm",
"diffusion_model.output_blocks.3.1.proj_in": "up_blocks.1.attentions.0.proj_in",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.norm1": "up_blocks.1.attentions.0.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_q": "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_k": "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_v": "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_out.0": "up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.norm2": "up_blocks.1.attentions.0.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_q": "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_k": "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_v": "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_out.0": "up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.norm3": "up_blocks.1.attentions.0.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.0.proj": "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.2": "up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.3.1.proj_out": "up_blocks.1.attentions.0.proj_out",
"diffusion_model.output_blocks.4.0.in_layers.0": "up_blocks.1.resnets.1.norm1",
"diffusion_model.output_blocks.4.0.in_layers.2": "up_blocks.1.resnets.1.conv1",
"diffusion_model.output_blocks.4.0.emb_layers.1": "up_blocks.1.resnets.1.time_emb_proj",
"diffusion_model.output_blocks.4.0.out_layers.0": "up_blocks.1.resnets.1.norm2",
"diffusion_model.output_blocks.4.0.out_layers.3": "up_blocks.1.resnets.1.conv2",
"diffusion_model.output_blocks.4.0.skip_connection": "up_blocks.1.resnets.1.conv_shortcut",
"diffusion_model.output_blocks.4.1.norm": "up_blocks.1.attentions.1.norm",
"diffusion_model.output_blocks.4.1.proj_in": "up_blocks.1.attentions.1.proj_in",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.norm1": "up_blocks.1.attentions.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_q": "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_k": "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_v": "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_out.0": "up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.norm2": "up_blocks.1.attentions.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_q": "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_k": "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_v": "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_out.0": "up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.norm3": "up_blocks.1.attentions.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.0.proj": "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.2": "up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.4.1.proj_out": "up_blocks.1.attentions.1.proj_out",
"diffusion_model.output_blocks.5.0.in_layers.0": "up_blocks.1.resnets.2.norm1",
"diffusion_model.output_blocks.5.0.in_layers.2": "up_blocks.1.resnets.2.conv1",
"diffusion_model.output_blocks.5.0.emb_layers.1": "up_blocks.1.resnets.2.time_emb_proj",
"diffusion_model.output_blocks.5.0.out_layers.0": "up_blocks.1.resnets.2.norm2",
"diffusion_model.output_blocks.5.0.out_layers.3": "up_blocks.1.resnets.2.conv2",
"diffusion_model.output_blocks.5.0.skip_connection": "up_blocks.1.resnets.2.conv_shortcut",
"diffusion_model.output_blocks.5.1.norm": "up_blocks.1.attentions.2.norm",
"diffusion_model.output_blocks.5.1.proj_in": "up_blocks.1.attentions.2.proj_in",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.norm1": "up_blocks.1.attentions.2.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_q": "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_k": "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_v": "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_out.0": "up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.norm2": "up_blocks.1.attentions.2.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_q": "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_k": "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_v": "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_out.0": "up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.norm3": "up_blocks.1.attentions.2.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.0.proj": "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.2": "up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.5.1.proj_out": "up_blocks.1.attentions.2.proj_out",
"diffusion_model.output_blocks.5.2.conv": "up_blocks.1.upsamplers.0.conv",
"diffusion_model.output_blocks.6.0.in_layers.0": "up_blocks.2.resnets.0.norm1",
"diffusion_model.output_blocks.6.0.in_layers.2": "up_blocks.2.resnets.0.conv1",
"diffusion_model.output_blocks.6.0.emb_layers.1": "up_blocks.2.resnets.0.time_emb_proj",
"diffusion_model.output_blocks.6.0.out_layers.0": "up_blocks.2.resnets.0.norm2",
"diffusion_model.output_blocks.6.0.out_layers.3": "up_blocks.2.resnets.0.conv2",
"diffusion_model.output_blocks.6.0.skip_connection": "up_blocks.2.resnets.0.conv_shortcut",
"diffusion_model.output_blocks.6.1.norm": "up_blocks.2.attentions.0.norm",
"diffusion_model.output_blocks.6.1.proj_in": "up_blocks.2.attentions.0.proj_in",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.norm1": "up_blocks.2.attentions.0.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_q": "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_k": "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_v": "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_out.0": "up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.norm2": "up_blocks.2.attentions.0.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_q": "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_k": "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_v": "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_out.0": "up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.norm3": "up_blocks.2.attentions.0.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.0.proj": "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.2": "up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.6.1.proj_out": "up_blocks.2.attentions.0.proj_out",
"diffusion_model.output_blocks.7.0.in_layers.0": "up_blocks.2.resnets.1.norm1",
"diffusion_model.output_blocks.7.0.in_layers.2": "up_blocks.2.resnets.1.conv1",
"diffusion_model.output_blocks.7.0.emb_layers.1": "up_blocks.2.resnets.1.time_emb_proj",
"diffusion_model.output_blocks.7.0.out_layers.0": "up_blocks.2.resnets.1.norm2",
"diffusion_model.output_blocks.7.0.out_layers.3": "up_blocks.2.resnets.1.conv2",
"diffusion_model.output_blocks.7.0.skip_connection": "up_blocks.2.resnets.1.conv_shortcut",
"diffusion_model.output_blocks.7.1.norm": "up_blocks.2.attentions.1.norm",
"diffusion_model.output_blocks.7.1.proj_in": "up_blocks.2.attentions.1.proj_in",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.norm1": "up_blocks.2.attentions.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_q": "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_k": "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_v": "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_out.0": "up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.norm2": "up_blocks.2.attentions.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_q": "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_k": "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_v": "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_out.0": "up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.norm3": "up_blocks.2.attentions.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.0.proj": "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.2": "up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.7.1.proj_out": "up_blocks.2.attentions.1.proj_out",
"diffusion_model.output_blocks.8.0.in_layers.0": "up_blocks.2.resnets.2.norm1",
"diffusion_model.output_blocks.8.0.in_layers.2": "up_blocks.2.resnets.2.conv1",
"diffusion_model.output_blocks.8.0.emb_layers.1": "up_blocks.2.resnets.2.time_emb_proj",
"diffusion_model.output_blocks.8.0.out_layers.0": "up_blocks.2.resnets.2.norm2",
"diffusion_model.output_blocks.8.0.out_layers.3": "up_blocks.2.resnets.2.conv2",
"diffusion_model.output_blocks.8.0.skip_connection": "up_blocks.2.resnets.2.conv_shortcut",
"diffusion_model.output_blocks.8.1.norm": "up_blocks.2.attentions.2.norm",
"diffusion_model.output_blocks.8.1.proj_in": "up_blocks.2.attentions.2.proj_in",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.norm1": "up_blocks.2.attentions.2.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_q": "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_k": "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_v": "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_out.0": "up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.norm2": "up_blocks.2.attentions.2.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_q": "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_k": "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_v": "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_out.0": "up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.norm3": "up_blocks.2.attentions.2.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.0.proj": "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.2": "up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.8.1.proj_out": "up_blocks.2.attentions.2.proj_out",
"diffusion_model.output_blocks.8.2.conv": "up_blocks.2.upsamplers.0.conv",
"diffusion_model.output_blocks.9.0.in_layers.0": "up_blocks.3.resnets.0.norm1",
"diffusion_model.output_blocks.9.0.in_layers.2": "up_blocks.3.resnets.0.conv1",
"diffusion_model.output_blocks.9.0.emb_layers.1": "up_blocks.3.resnets.0.time_emb_proj",
"diffusion_model.output_blocks.9.0.out_layers.0": "up_blocks.3.resnets.0.norm2",
"diffusion_model.output_blocks.9.0.out_layers.3": "up_blocks.3.resnets.0.conv2",
"diffusion_model.output_blocks.9.0.skip_connection": "up_blocks.3.resnets.0.conv_shortcut",
"diffusion_model.output_blocks.9.1.norm": "up_blocks.3.attentions.0.norm",
"diffusion_model.output_blocks.9.1.proj_in": "up_blocks.3.attentions.0.proj_in",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.norm1": "up_blocks.3.attentions.0.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_q": "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_k": "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_v": "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_out.0": "up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.norm2": "up_blocks.3.attentions.0.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_q": "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_k": "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_v": "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_out.0": "up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.norm3": "up_blocks.3.attentions.0.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.0.proj": "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.2": "up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.9.1.proj_out": "up_blocks.3.attentions.0.proj_out",
"diffusion_model.output_blocks.10.0.in_layers.0": "up_blocks.3.resnets.1.norm1",
"diffusion_model.output_blocks.10.0.in_layers.2": "up_blocks.3.resnets.1.conv1",
"diffusion_model.output_blocks.10.0.emb_layers.1": "up_blocks.3.resnets.1.time_emb_proj",
"diffusion_model.output_blocks.10.0.out_layers.0": "up_blocks.3.resnets.1.norm2",
"diffusion_model.output_blocks.10.0.out_layers.3": "up_blocks.3.resnets.1.conv2",
"diffusion_model.output_blocks.10.0.skip_connection": "up_blocks.3.resnets.1.conv_shortcut",
"diffusion_model.output_blocks.10.1.norm": "up_blocks.3.attentions.1.norm",
"diffusion_model.output_blocks.10.1.proj_in": "up_blocks.3.attentions.1.proj_in",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.norm1": "up_blocks.3.attentions.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_q": "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_k": "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_v": "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_out.0": "up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.norm2": "up_blocks.3.attentions.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_q": "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_k": "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_v": "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_out.0": "up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.norm3": "up_blocks.3.attentions.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.0.proj": "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.2": "up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.10.1.proj_out": "up_blocks.3.attentions.1.proj_out",
"diffusion_model.output_blocks.11.0.in_layers.0": "up_blocks.3.resnets.2.norm1",
"diffusion_model.output_blocks.11.0.in_layers.2": "up_blocks.3.resnets.2.conv1",
"diffusion_model.output_blocks.11.0.emb_layers.1": "up_blocks.3.resnets.2.time_emb_proj",
"diffusion_model.output_blocks.11.0.out_layers.0": "up_blocks.3.resnets.2.norm2",
"diffusion_model.output_blocks.11.0.out_layers.3": "up_blocks.3.resnets.2.conv2",
"diffusion_model.output_blocks.11.0.skip_connection": "up_blocks.3.resnets.2.conv_shortcut",
"diffusion_model.output_blocks.11.1.norm": "up_blocks.3.attentions.2.norm",
"diffusion_model.output_blocks.11.1.proj_in": "up_blocks.3.attentions.2.proj_in",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1": "up_blocks.3.attentions.2.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_q": "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_k": "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_v": "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_out.0": "up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.norm2": "up_blocks.3.attentions.2.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_q": "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_k": "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_v": "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_out.0": "up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.norm3": "up_blocks.3.attentions.2.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.0.proj": "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.2": "up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.11.1.proj_out": "up_blocks.3.attentions.2.proj_out",
"diffusion_model.out.0": "conv_norm_out",
"diffusion_model.out.2": "conv_out"
},
"source_aliases": {},
"ignorable_prefixes": []
}
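The mapping above pairs each compvis-style UNet weight name with its diffusers-style counterpart, while "source_aliases" and "ignorable_prefixes" leave room for alternate or droppable key prefixes in a source checkpoint. The sketch below is a minimal, hypothetical illustration of how a mapping file with this structure could be applied to rename the keys of an already-loaded state dict; the helper name, file name, and loading details are assumptions for illustration, not the project's actual converter.

import json

def remap_state_dict(state_dict, mapping_path):
    # Rename checkpoint keys using a mapping file shaped like the JSON above:
    # {"mapping": {...}, "source_aliases": {...}, "ignorable_prefixes": [...]}
    with open(mapping_path) as f:
        spec = json.load(f)
    mapping = spec["mapping"]
    aliases = spec.get("source_aliases", {})
    prefixes = spec.get("ignorable_prefixes", [])

    remapped = {}
    for key, tensor in state_dict.items():
        # Strip any prefix the mapping file declares ignorable.
        for prefix in prefixes:
            if key.startswith(prefix):
                key = key[len(prefix):]
                break
        # Split the module path from the parameter suffix (".weight"/".bias")
        # so the module path can be looked up in the mapping table.
        module, dot, suffix = key.rpartition(".")
        module = aliases.get(module, module)
        if module in mapping:
            remapped[mapping[module] + dot + suffix] = tensor
        else:
            # Keys the mapping does not cover are kept unchanged.
            remapped[key] = tensor
    return remapped

# Hypothetical usage (names assumed): convert compvis-style UNet keys to diffusers-style.
# sd = torch.load("unet.ckpt", map_location="cpu")
# sd = remap_state_dict(sd, "unet_compvis_to_diffusers.json")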

View File

@ -0,0 +1,397 @@
{
"mapping": {
"time_embedding.linear_1": "diffusion_model.time_embed.0",
"time_embedding.linear_2": "diffusion_model.time_embed.2",
"conv_in": "diffusion_model.input_blocks.0.0",
"down_blocks.0.resnets.0.norm1": "diffusion_model.input_blocks.1.0.in_layers.0",
"down_blocks.0.resnets.0.conv1": "diffusion_model.input_blocks.1.0.in_layers.2",
"down_blocks.0.resnets.0.time_emb_proj": "diffusion_model.input_blocks.1.0.emb_layers.1",
"down_blocks.0.resnets.0.norm2": "diffusion_model.input_blocks.1.0.out_layers.0",
"down_blocks.0.resnets.0.conv2": "diffusion_model.input_blocks.1.0.out_layers.3",
"down_blocks.0.attentions.0.norm": "diffusion_model.input_blocks.1.1.norm",
"down_blocks.0.attentions.0.proj_in": "diffusion_model.input_blocks.1.1.proj_in",
"down_blocks.0.attentions.0.transformer_blocks.0.norm1": "diffusion_model.input_blocks.1.1.transformer_blocks.0.norm1",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": "diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_q",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": "diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_k",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": "diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_v",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": "diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.0.attentions.0.transformer_blocks.0.norm2": "diffusion_model.input_blocks.1.1.transformer_blocks.0.norm2",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": "diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_q",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": "diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": "diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_v",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": "diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.0.attentions.0.transformer_blocks.0.norm3": "diffusion_model.input_blocks.1.1.transformer_blocks.0.norm3",
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": "diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": "diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.2",
"down_blocks.0.attentions.0.proj_out": "diffusion_model.input_blocks.1.1.proj_out",
"down_blocks.0.resnets.1.norm1": "diffusion_model.input_blocks.2.0.in_layers.0",
"down_blocks.0.resnets.1.conv1": "diffusion_model.input_blocks.2.0.in_layers.2",
"down_blocks.0.resnets.1.time_emb_proj": "diffusion_model.input_blocks.2.0.emb_layers.1",
"down_blocks.0.resnets.1.norm2": "diffusion_model.input_blocks.2.0.out_layers.0",
"down_blocks.0.resnets.1.conv2": "diffusion_model.input_blocks.2.0.out_layers.3",
"down_blocks.0.attentions.1.norm": "diffusion_model.input_blocks.2.1.norm",
"down_blocks.0.attentions.1.proj_in": "diffusion_model.input_blocks.2.1.proj_in",
"down_blocks.0.attentions.1.transformer_blocks.0.norm1": "diffusion_model.input_blocks.2.1.transformer_blocks.0.norm1",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": "diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_q",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": "diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_k",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": "diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_v",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": "diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.0.attentions.1.transformer_blocks.0.norm2": "diffusion_model.input_blocks.2.1.transformer_blocks.0.norm2",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": "diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_q",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": "diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": "diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_v",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": "diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.0.attentions.1.transformer_blocks.0.norm3": "diffusion_model.input_blocks.2.1.transformer_blocks.0.norm3",
"down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": "diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": "diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.2",
"down_blocks.0.attentions.1.proj_out": "diffusion_model.input_blocks.2.1.proj_out",
"down_blocks.0.downsamplers.0.conv": "diffusion_model.input_blocks.3.0.op",
"down_blocks.1.resnets.0.norm1": "diffusion_model.input_blocks.4.0.in_layers.0",
"down_blocks.1.resnets.0.conv1": "diffusion_model.input_blocks.4.0.in_layers.2",
"down_blocks.1.resnets.0.time_emb_proj": "diffusion_model.input_blocks.4.0.emb_layers.1",
"down_blocks.1.resnets.0.norm2": "diffusion_model.input_blocks.4.0.out_layers.0",
"down_blocks.1.resnets.0.conv2": "diffusion_model.input_blocks.4.0.out_layers.3",
"down_blocks.1.resnets.0.conv_shortcut": "diffusion_model.input_blocks.4.0.skip_connection",
"down_blocks.1.attentions.0.norm": "diffusion_model.input_blocks.4.1.norm",
"down_blocks.1.attentions.0.proj_in": "diffusion_model.input_blocks.4.1.proj_in",
"down_blocks.1.attentions.0.transformer_blocks.0.norm1": "diffusion_model.input_blocks.4.1.transformer_blocks.0.norm1",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": "diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_q",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": "diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_k",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": "diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_v",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": "diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.1.attentions.0.transformer_blocks.0.norm2": "diffusion_model.input_blocks.4.1.transformer_blocks.0.norm2",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": "diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_q",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": "diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": "diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_v",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": "diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.1.attentions.0.transformer_blocks.0.norm3": "diffusion_model.input_blocks.4.1.transformer_blocks.0.norm3",
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": "diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": "diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.2",
"down_blocks.1.attentions.0.proj_out": "diffusion_model.input_blocks.4.1.proj_out",
"down_blocks.1.resnets.1.norm1": "diffusion_model.input_blocks.5.0.in_layers.0",
"down_blocks.1.resnets.1.conv1": "diffusion_model.input_blocks.5.0.in_layers.2",
"down_blocks.1.resnets.1.time_emb_proj": "diffusion_model.input_blocks.5.0.emb_layers.1",
"down_blocks.1.resnets.1.norm2": "diffusion_model.input_blocks.5.0.out_layers.0",
"down_blocks.1.resnets.1.conv2": "diffusion_model.input_blocks.5.0.out_layers.3",
"down_blocks.1.attentions.1.norm": "diffusion_model.input_blocks.5.1.norm",
"down_blocks.1.attentions.1.proj_in": "diffusion_model.input_blocks.5.1.proj_in",
"down_blocks.1.attentions.1.transformer_blocks.0.norm1": "diffusion_model.input_blocks.5.1.transformer_blocks.0.norm1",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": "diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_q",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": "diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_k",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": "diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_v",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": "diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.1.attentions.1.transformer_blocks.0.norm2": "diffusion_model.input_blocks.5.1.transformer_blocks.0.norm2",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": "diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_q",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": "diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_k",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": "diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_v",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": "diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.1.attentions.1.transformer_blocks.0.norm3": "diffusion_model.input_blocks.5.1.transformer_blocks.0.norm3",
"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": "diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": "diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.2",
"down_blocks.1.attentions.1.proj_out": "diffusion_model.input_blocks.5.1.proj_out",
"down_blocks.1.downsamplers.0.conv": "diffusion_model.input_blocks.6.0.op",
"down_blocks.2.resnets.0.norm1": "diffusion_model.input_blocks.7.0.in_layers.0",
"down_blocks.2.resnets.0.conv1": "diffusion_model.input_blocks.7.0.in_layers.2",
"down_blocks.2.resnets.0.time_emb_proj": "diffusion_model.input_blocks.7.0.emb_layers.1",
"down_blocks.2.resnets.0.norm2": "diffusion_model.input_blocks.7.0.out_layers.0",
"down_blocks.2.resnets.0.conv2": "diffusion_model.input_blocks.7.0.out_layers.3",
"down_blocks.2.resnets.0.conv_shortcut": "diffusion_model.input_blocks.7.0.skip_connection",
"down_blocks.2.attentions.0.norm": "diffusion_model.input_blocks.7.1.norm",
"down_blocks.2.attentions.0.proj_in": "diffusion_model.input_blocks.7.1.proj_in",
"down_blocks.2.attentions.0.transformer_blocks.0.norm1": "diffusion_model.input_blocks.7.1.transformer_blocks.0.norm1",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": "diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_q",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": "diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_k",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": "diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_v",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": "diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.2.attentions.0.transformer_blocks.0.norm2": "diffusion_model.input_blocks.7.1.transformer_blocks.0.norm2",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": "diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_q",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": "diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_k",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": "diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_v",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": "diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.2.attentions.0.transformer_blocks.0.norm3": "diffusion_model.input_blocks.7.1.transformer_blocks.0.norm3",
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": "diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": "diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.2",
"down_blocks.2.attentions.0.proj_out": "diffusion_model.input_blocks.7.1.proj_out",
"down_blocks.2.resnets.1.norm1": "diffusion_model.input_blocks.8.0.in_layers.0",
"down_blocks.2.resnets.1.conv1": "diffusion_model.input_blocks.8.0.in_layers.2",
"down_blocks.2.resnets.1.time_emb_proj": "diffusion_model.input_blocks.8.0.emb_layers.1",
"down_blocks.2.resnets.1.norm2": "diffusion_model.input_blocks.8.0.out_layers.0",
"down_blocks.2.resnets.1.conv2": "diffusion_model.input_blocks.8.0.out_layers.3",
"down_blocks.2.attentions.1.norm": "diffusion_model.input_blocks.8.1.norm",
"down_blocks.2.attentions.1.proj_in": "diffusion_model.input_blocks.8.1.proj_in",
"down_blocks.2.attentions.1.transformer_blocks.0.norm1": "diffusion_model.input_blocks.8.1.transformer_blocks.0.norm1",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": "diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_q",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": "diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_k",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": "diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_v",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": "diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.2.attentions.1.transformer_blocks.0.norm2": "diffusion_model.input_blocks.8.1.transformer_blocks.0.norm2",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": "diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_q",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": "diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_k",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": "diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_v",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": "diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.2.attentions.1.transformer_blocks.0.norm3": "diffusion_model.input_blocks.8.1.transformer_blocks.0.norm3",
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": "diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": "diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.2",
"down_blocks.2.attentions.1.proj_out": "diffusion_model.input_blocks.8.1.proj_out",
"down_blocks.2.downsamplers.0.conv": "diffusion_model.input_blocks.9.0.op",
"down_blocks.3.resnets.0.norm1": "diffusion_model.input_blocks.10.0.in_layers.0",
"down_blocks.3.resnets.0.conv1": "diffusion_model.input_blocks.10.0.in_layers.2",
"down_blocks.3.resnets.0.time_emb_proj": "diffusion_model.input_blocks.10.0.emb_layers.1",
"down_blocks.3.resnets.0.norm2": "diffusion_model.input_blocks.10.0.out_layers.0",
"down_blocks.3.resnets.0.conv2": "diffusion_model.input_blocks.10.0.out_layers.3",
"down_blocks.3.resnets.1.norm1": "diffusion_model.input_blocks.11.0.in_layers.0",
"down_blocks.3.resnets.1.conv1": "diffusion_model.input_blocks.11.0.in_layers.2",
"down_blocks.3.resnets.1.time_emb_proj": "diffusion_model.input_blocks.11.0.emb_layers.1",
"down_blocks.3.resnets.1.norm2": "diffusion_model.input_blocks.11.0.out_layers.0",
"down_blocks.3.resnets.1.conv2": "diffusion_model.input_blocks.11.0.out_layers.3",
"mid_block.resnets.0.norm1": "diffusion_model.middle_block.0.in_layers.0",
"mid_block.resnets.0.conv1": "diffusion_model.middle_block.0.in_layers.2",
"mid_block.resnets.0.time_emb_proj": "diffusion_model.middle_block.0.emb_layers.1",
"mid_block.resnets.0.norm2": "diffusion_model.middle_block.0.out_layers.0",
"mid_block.resnets.0.conv2": "diffusion_model.middle_block.0.out_layers.3",
"mid_block.attentions.0.norm": "diffusion_model.middle_block.1.norm",
"mid_block.attentions.0.proj_in": "diffusion_model.middle_block.1.proj_in",
"mid_block.attentions.0.transformer_blocks.0.norm1": "diffusion_model.middle_block.1.transformer_blocks.0.norm1",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_q": "diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_k": "diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_k",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_v": "diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_v",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": "diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_out.0",
"mid_block.attentions.0.transformer_blocks.0.norm2": "diffusion_model.middle_block.1.transformer_blocks.0.norm2",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_q": "diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_q",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_k": "diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_k",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_v": "diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_v",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": "diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_out.0",
"mid_block.attentions.0.transformer_blocks.0.norm3": "diffusion_model.middle_block.1.transformer_blocks.0.norm3",
"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": "diffusion_model.middle_block.1.transformer_blocks.0.ff.net.0.proj",
"mid_block.attentions.0.transformer_blocks.0.ff.net.2": "diffusion_model.middle_block.1.transformer_blocks.0.ff.net.2",
"mid_block.attentions.0.proj_out": "diffusion_model.middle_block.1.proj_out",
"mid_block.resnets.1.norm1": "diffusion_model.middle_block.2.in_layers.0",
"mid_block.resnets.1.conv1": "diffusion_model.middle_block.2.in_layers.2",
"mid_block.resnets.1.time_emb_proj": "diffusion_model.middle_block.2.emb_layers.1",
"mid_block.resnets.1.norm2": "diffusion_model.middle_block.2.out_layers.0",
"mid_block.resnets.1.conv2": "diffusion_model.middle_block.2.out_layers.3",
"up_blocks.0.resnets.0.norm1": "diffusion_model.output_blocks.0.0.in_layers.0",
"up_blocks.0.resnets.0.conv1": "diffusion_model.output_blocks.0.0.in_layers.2",
"up_blocks.0.resnets.0.time_emb_proj": "diffusion_model.output_blocks.0.0.emb_layers.1",
"up_blocks.0.resnets.0.norm2": "diffusion_model.output_blocks.0.0.out_layers.0",
"up_blocks.0.resnets.0.conv2": "diffusion_model.output_blocks.0.0.out_layers.3",
"up_blocks.0.resnets.0.conv_shortcut": "diffusion_model.output_blocks.0.0.skip_connection",
"up_blocks.0.resnets.1.norm1": "diffusion_model.output_blocks.1.0.in_layers.0",
"up_blocks.0.resnets.1.conv1": "diffusion_model.output_blocks.1.0.in_layers.2",
"up_blocks.0.resnets.1.time_emb_proj": "diffusion_model.output_blocks.1.0.emb_layers.1",
"up_blocks.0.resnets.1.norm2": "diffusion_model.output_blocks.1.0.out_layers.0",
"up_blocks.0.resnets.1.conv2": "diffusion_model.output_blocks.1.0.out_layers.3",
"up_blocks.0.resnets.1.conv_shortcut": "diffusion_model.output_blocks.1.0.skip_connection",
"up_blocks.0.resnets.2.norm1": "diffusion_model.output_blocks.2.0.in_layers.0",
"up_blocks.0.resnets.2.conv1": "diffusion_model.output_blocks.2.0.in_layers.2",
"up_blocks.0.resnets.2.time_emb_proj": "diffusion_model.output_blocks.2.0.emb_layers.1",
"up_blocks.0.resnets.2.norm2": "diffusion_model.output_blocks.2.0.out_layers.0",
"up_blocks.0.resnets.2.conv2": "diffusion_model.output_blocks.2.0.out_layers.3",
"up_blocks.0.resnets.2.conv_shortcut": "diffusion_model.output_blocks.2.0.skip_connection",
"up_blocks.0.upsamplers.0.conv": "diffusion_model.output_blocks.2.1.conv",
"up_blocks.1.resnets.0.norm1": "diffusion_model.output_blocks.3.0.in_layers.0",
"up_blocks.1.resnets.0.conv1": "diffusion_model.output_blocks.3.0.in_layers.2",
"up_blocks.1.resnets.0.time_emb_proj": "diffusion_model.output_blocks.3.0.emb_layers.1",
"up_blocks.1.resnets.0.norm2": "diffusion_model.output_blocks.3.0.out_layers.0",
"up_blocks.1.resnets.0.conv2": "diffusion_model.output_blocks.3.0.out_layers.3",
"up_blocks.1.resnets.0.conv_shortcut": "diffusion_model.output_blocks.3.0.skip_connection",
"up_blocks.1.attentions.0.norm": "diffusion_model.output_blocks.3.1.norm",
"up_blocks.1.attentions.0.proj_in": "diffusion_model.output_blocks.3.1.proj_in",
"up_blocks.1.attentions.0.transformer_blocks.0.norm1": "diffusion_model.output_blocks.3.1.transformer_blocks.0.norm1",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": "diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_q",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": "diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_k",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": "diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_v",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": "diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.1.attentions.0.transformer_blocks.0.norm2": "diffusion_model.output_blocks.3.1.transformer_blocks.0.norm2",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": "diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_q",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": "diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_k",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": "diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_v",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": "diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.1.attentions.0.transformer_blocks.0.norm3": "diffusion_model.output_blocks.3.1.transformer_blocks.0.norm3",
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": "diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": "diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.0.proj_out": "diffusion_model.output_blocks.3.1.proj_out",
"up_blocks.1.resnets.1.norm1": "diffusion_model.output_blocks.4.0.in_layers.0",
"up_blocks.1.resnets.1.conv1": "diffusion_model.output_blocks.4.0.in_layers.2",
"up_blocks.1.resnets.1.time_emb_proj": "diffusion_model.output_blocks.4.0.emb_layers.1",
"up_blocks.1.resnets.1.norm2": "diffusion_model.output_blocks.4.0.out_layers.0",
"up_blocks.1.resnets.1.conv2": "diffusion_model.output_blocks.4.0.out_layers.3",
"up_blocks.1.resnets.1.conv_shortcut": "diffusion_model.output_blocks.4.0.skip_connection",
"up_blocks.1.attentions.1.norm": "diffusion_model.output_blocks.4.1.norm",
"up_blocks.1.attentions.1.proj_in": "diffusion_model.output_blocks.4.1.proj_in",
"up_blocks.1.attentions.1.transformer_blocks.0.norm1": "diffusion_model.output_blocks.4.1.transformer_blocks.0.norm1",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": "diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_q",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": "diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_k",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": "diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_v",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": "diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.1.attentions.1.transformer_blocks.0.norm2": "diffusion_model.output_blocks.4.1.transformer_blocks.0.norm2",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": "diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_q",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": "diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_k",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": "diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_v",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": "diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.1.attentions.1.transformer_blocks.0.norm3": "diffusion_model.output_blocks.4.1.transformer_blocks.0.norm3",
"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": "diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": "diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.1.proj_out": "diffusion_model.output_blocks.4.1.proj_out",
"up_blocks.1.resnets.2.norm1": "diffusion_model.output_blocks.5.0.in_layers.0",
"up_blocks.1.resnets.2.conv1": "diffusion_model.output_blocks.5.0.in_layers.2",
"up_blocks.1.resnets.2.time_emb_proj": "diffusion_model.output_blocks.5.0.emb_layers.1",
"up_blocks.1.resnets.2.norm2": "diffusion_model.output_blocks.5.0.out_layers.0",
"up_blocks.1.resnets.2.conv2": "diffusion_model.output_blocks.5.0.out_layers.3",
"up_blocks.1.resnets.2.conv_shortcut": "diffusion_model.output_blocks.5.0.skip_connection",
"up_blocks.1.attentions.2.norm": "diffusion_model.output_blocks.5.1.norm",
"up_blocks.1.attentions.2.proj_in": "diffusion_model.output_blocks.5.1.proj_in",
"up_blocks.1.attentions.2.transformer_blocks.0.norm1": "diffusion_model.output_blocks.5.1.transformer_blocks.0.norm1",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": "diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_q",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": "diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_k",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": "diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_v",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": "diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.1.attentions.2.transformer_blocks.0.norm2": "diffusion_model.output_blocks.5.1.transformer_blocks.0.norm2",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": "diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_q",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": "diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_k",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": "diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_v",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": "diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.1.attentions.2.transformer_blocks.0.norm3": "diffusion_model.output_blocks.5.1.transformer_blocks.0.norm3",
"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": "diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": "diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.2.proj_out": "diffusion_model.output_blocks.5.1.proj_out",
"up_blocks.1.upsamplers.0.conv": "diffusion_model.output_blocks.5.2.conv",
"up_blocks.2.resnets.0.norm1": "diffusion_model.output_blocks.6.0.in_layers.0",
"up_blocks.2.resnets.0.conv1": "diffusion_model.output_blocks.6.0.in_layers.2",
"up_blocks.2.resnets.0.time_emb_proj": "diffusion_model.output_blocks.6.0.emb_layers.1",
"up_blocks.2.resnets.0.norm2": "diffusion_model.output_blocks.6.0.out_layers.0",
"up_blocks.2.resnets.0.conv2": "diffusion_model.output_blocks.6.0.out_layers.3",
"up_blocks.2.resnets.0.conv_shortcut": "diffusion_model.output_blocks.6.0.skip_connection",
"up_blocks.2.attentions.0.norm": "diffusion_model.output_blocks.6.1.norm",
"up_blocks.2.attentions.0.proj_in": "diffusion_model.output_blocks.6.1.proj_in",
"up_blocks.2.attentions.0.transformer_blocks.0.norm1": "diffusion_model.output_blocks.6.1.transformer_blocks.0.norm1",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": "diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_q",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": "diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_k",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": "diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_v",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": "diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.2.attentions.0.transformer_blocks.0.norm2": "diffusion_model.output_blocks.6.1.transformer_blocks.0.norm2",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": "diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_q",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": "diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_k",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": "diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_v",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": "diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.2.attentions.0.transformer_blocks.0.norm3": "diffusion_model.output_blocks.6.1.transformer_blocks.0.norm3",
"up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": "diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": "diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.2",
"up_blocks.2.attentions.0.proj_out": "diffusion_model.output_blocks.6.1.proj_out",
"up_blocks.2.resnets.1.norm1": "diffusion_model.output_blocks.7.0.in_layers.0",
"up_blocks.2.resnets.1.conv1": "diffusion_model.output_blocks.7.0.in_layers.2",
"up_blocks.2.resnets.1.time_emb_proj": "diffusion_model.output_blocks.7.0.emb_layers.1",
"up_blocks.2.resnets.1.norm2": "diffusion_model.output_blocks.7.0.out_layers.0",
"up_blocks.2.resnets.1.conv2": "diffusion_model.output_blocks.7.0.out_layers.3",
"up_blocks.2.resnets.1.conv_shortcut": "diffusion_model.output_blocks.7.0.skip_connection",
"up_blocks.2.attentions.1.norm": "diffusion_model.output_blocks.7.1.norm",
"up_blocks.2.attentions.1.proj_in": "diffusion_model.output_blocks.7.1.proj_in",
"up_blocks.2.attentions.1.transformer_blocks.0.norm1": "diffusion_model.output_blocks.7.1.transformer_blocks.0.norm1",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": "diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_q",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": "diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_k",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": "diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_v",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": "diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.2.attentions.1.transformer_blocks.0.norm2": "diffusion_model.output_blocks.7.1.transformer_blocks.0.norm2",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": "diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_q",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": "diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_k",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": "diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_v",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": "diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.2.attentions.1.transformer_blocks.0.norm3": "diffusion_model.output_blocks.7.1.transformer_blocks.0.norm3",
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": "diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": "diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.2",
"up_blocks.2.attentions.1.proj_out": "diffusion_model.output_blocks.7.1.proj_out",
"up_blocks.2.resnets.2.norm1": "diffusion_model.output_blocks.8.0.in_layers.0",
"up_blocks.2.resnets.2.conv1": "diffusion_model.output_blocks.8.0.in_layers.2",
"up_blocks.2.resnets.2.time_emb_proj": "diffusion_model.output_blocks.8.0.emb_layers.1",
"up_blocks.2.resnets.2.norm2": "diffusion_model.output_blocks.8.0.out_layers.0",
"up_blocks.2.resnets.2.conv2": "diffusion_model.output_blocks.8.0.out_layers.3",
"up_blocks.2.resnets.2.conv_shortcut": "diffusion_model.output_blocks.8.0.skip_connection",
"up_blocks.2.attentions.2.norm": "diffusion_model.output_blocks.8.1.norm",
"up_blocks.2.attentions.2.proj_in": "diffusion_model.output_blocks.8.1.proj_in",
"up_blocks.2.attentions.2.transformer_blocks.0.norm1": "diffusion_model.output_blocks.8.1.transformer_blocks.0.norm1",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": "diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_q",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": "diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_k",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": "diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_v",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": "diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.2.attentions.2.transformer_blocks.0.norm2": "diffusion_model.output_blocks.8.1.transformer_blocks.0.norm2",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": "diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_q",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": "diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_k",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": "diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_v",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": "diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.2.attentions.2.transformer_blocks.0.norm3": "diffusion_model.output_blocks.8.1.transformer_blocks.0.norm3",
"up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": "diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": "diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.2",
"up_blocks.2.attentions.2.proj_out": "diffusion_model.output_blocks.8.1.proj_out",
"up_blocks.2.upsamplers.0.conv": "diffusion_model.output_blocks.8.2.conv",
"up_blocks.3.resnets.0.norm1": "diffusion_model.output_blocks.9.0.in_layers.0",
"up_blocks.3.resnets.0.conv1": "diffusion_model.output_blocks.9.0.in_layers.2",
"up_blocks.3.resnets.0.time_emb_proj": "diffusion_model.output_blocks.9.0.emb_layers.1",
"up_blocks.3.resnets.0.norm2": "diffusion_model.output_blocks.9.0.out_layers.0",
"up_blocks.3.resnets.0.conv2": "diffusion_model.output_blocks.9.0.out_layers.3",
"up_blocks.3.resnets.0.conv_shortcut": "diffusion_model.output_blocks.9.0.skip_connection",
"up_blocks.3.attentions.0.norm": "diffusion_model.output_blocks.9.1.norm",
"up_blocks.3.attentions.0.proj_in": "diffusion_model.output_blocks.9.1.proj_in",
"up_blocks.3.attentions.0.transformer_blocks.0.norm1": "diffusion_model.output_blocks.9.1.transformer_blocks.0.norm1",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": "diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_q",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": "diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_k",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": "diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_v",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": "diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.3.attentions.0.transformer_blocks.0.norm2": "diffusion_model.output_blocks.9.1.transformer_blocks.0.norm2",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": "diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_q",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": "diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_k",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": "diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_v",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": "diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.3.attentions.0.transformer_blocks.0.norm3": "diffusion_model.output_blocks.9.1.transformer_blocks.0.norm3",
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": "diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": "diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.0.proj_out": "diffusion_model.output_blocks.9.1.proj_out",
"up_blocks.3.resnets.1.norm1": "diffusion_model.output_blocks.10.0.in_layers.0",
"up_blocks.3.resnets.1.conv1": "diffusion_model.output_blocks.10.0.in_layers.2",
"up_blocks.3.resnets.1.time_emb_proj": "diffusion_model.output_blocks.10.0.emb_layers.1",
"up_blocks.3.resnets.1.norm2": "diffusion_model.output_blocks.10.0.out_layers.0",
"up_blocks.3.resnets.1.conv2": "diffusion_model.output_blocks.10.0.out_layers.3",
"up_blocks.3.resnets.1.conv_shortcut": "diffusion_model.output_blocks.10.0.skip_connection",
"up_blocks.3.attentions.1.norm": "diffusion_model.output_blocks.10.1.norm",
"up_blocks.3.attentions.1.proj_in": "diffusion_model.output_blocks.10.1.proj_in",
"up_blocks.3.attentions.1.transformer_blocks.0.norm1": "diffusion_model.output_blocks.10.1.transformer_blocks.0.norm1",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": "diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_q",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": "diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_k",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": "diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_v",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": "diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.3.attentions.1.transformer_blocks.0.norm2": "diffusion_model.output_blocks.10.1.transformer_blocks.0.norm2",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": "diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_q",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": "diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_k",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": "diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_v",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": "diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.3.attentions.1.transformer_blocks.0.norm3": "diffusion_model.output_blocks.10.1.transformer_blocks.0.norm3",
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": "diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": "diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.1.proj_out": "diffusion_model.output_blocks.10.1.proj_out",
"up_blocks.3.resnets.2.norm1": "diffusion_model.output_blocks.11.0.in_layers.0",
"up_blocks.3.resnets.2.conv1": "diffusion_model.output_blocks.11.0.in_layers.2",
"up_blocks.3.resnets.2.time_emb_proj": "diffusion_model.output_blocks.11.0.emb_layers.1",
"up_blocks.3.resnets.2.norm2": "diffusion_model.output_blocks.11.0.out_layers.0",
"up_blocks.3.resnets.2.conv2": "diffusion_model.output_blocks.11.0.out_layers.3",
"up_blocks.3.resnets.2.conv_shortcut": "diffusion_model.output_blocks.11.0.skip_connection",
"up_blocks.3.attentions.2.norm": "diffusion_model.output_blocks.11.1.norm",
"up_blocks.3.attentions.2.proj_in": "diffusion_model.output_blocks.11.1.proj_in",
"up_blocks.3.attentions.2.transformer_blocks.0.norm1": "diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": "diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_q",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": "diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_k",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": "diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_v",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": "diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.3.attentions.2.transformer_blocks.0.norm2": "diffusion_model.output_blocks.11.1.transformer_blocks.0.norm2",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": "diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_q",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": "diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_k",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": "diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_v",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": "diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.3.attentions.2.transformer_blocks.0.norm3": "diffusion_model.output_blocks.11.1.transformer_blocks.0.norm3",
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": "diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": "diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.2.proj_out": "diffusion_model.output_blocks.11.1.proj_out",
"conv_norm_out": "diffusion_model.out.0",
"conv_out": "diffusion_model.out.2"
},
"source_aliases": {},
"ignorable_prefixes": []
}
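
Note: the mapping files above and below pair diffusers-style module names with the target (compvis / refiners) module names; the ".weight"/".bias" suffixes are implied by the module prefix. As a minimal sketch of how such a file could be applied, the snippet below loads one of these JSONs and renames the keys of a source state dict. The helper names (load_weight_map, translate_state_dict) are illustrative assumptions for this example, not imaginairy's actual API.

# Illustrative only: applies a {"mapping": ..., "source_aliases": ..., "ignorable_prefixes": ...}
# file (like the JSON above) to rename state-dict tensors. Names here are hypothetical.
import json


def load_weight_map(path):
    # Load a mapping file of the shape produced above.
    with open(path) as f:
        return json.load(f)


def translate_state_dict(source_state_dict, weight_map):
    # Rename tensors from the source naming scheme to the target scheme.
    # Keys are matched on their module prefix (everything before the final
    # ".weight"/".bias"), so one mapping entry covers both parameters.
    mapping = weight_map["mapping"]
    aliases = weight_map.get("source_aliases", {})
    ignorable = tuple(weight_map.get("ignorable_prefixes", []))

    translated = {}
    for key, tensor in source_state_dict.items():
        if ignorable and key.startswith(ignorable):
            continue
        prefix, _, suffix = key.rpartition(".")
        prefix = aliases.get(prefix, prefix)
        target_prefix = mapping.get(prefix)
        if target_prefix is None:
            # Pass unmapped keys through unchanged so nothing is silently dropped.
            translated[key] = tensor
        else:
            translated[f"{target_prefix}.{suffix}"] = tensor
    return translated

With a file like the one above, translate_state_dict(unet_state_dict, load_weight_map("unet_mapping.json")) would, under these assumptions, emit keys prefixed with "diffusion_model." while leaving unrecognized keys untouched.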

View File

@ -0,0 +1,397 @@
{
"ignorable_prefixes": [],
"mapping": {
"time_embedding.linear_1": "TimestepEncoder.RangeEncoder.Linear_1",
"time_embedding.linear_2": "TimestepEncoder.RangeEncoder.Linear_2",
"down_blocks.2.resnets.0.time_emb_proj": "DownBlocks.Chain_8.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.2.resnets.1.time_emb_proj": "DownBlocks.Chain_9.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.3.resnets.0.time_emb_proj": "DownBlocks.Chain_11.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.3.resnets.1.time_emb_proj": "DownBlocks.Chain_12.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"mid_block.resnets.0.time_emb_proj": "Sum.MiddleBlock.ResidualBlock_1.Chain.RangeAdapter2d.Chain.Linear",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"mid_block.resnets.1.time_emb_proj": "Sum.MiddleBlock.ResidualBlock_2.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.0.resnets.0.time_emb_proj": "UpBlocks.Chain_1.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.0.resnets.1.time_emb_proj": "UpBlocks.Chain_2.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.0.resnets.2.time_emb_proj": "UpBlocks.Chain_3.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.1.resnets.0.time_emb_proj": "UpBlocks.Chain_4.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"up_blocks.1.resnets.1.time_emb_proj": "UpBlocks.Chain_5.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"up_blocks.1.resnets.2.time_emb_proj": "UpBlocks.Chain_6.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"conv_in": "DownBlocks.Chain_1.Conv2d",
"down_blocks.0.resnets.0.norm1": "DownBlocks.Chain_2.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.0.resnets.0.norm2": "DownBlocks.Chain_2.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.0.attentions.0.norm": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.0.resnets.1.norm1": "DownBlocks.Chain_3.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.0.resnets.1.norm2": "DownBlocks.Chain_3.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.0.attentions.1.norm": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.1.resnets.0.norm1": "DownBlocks.Chain_5.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.3.resnets.0.norm2": "UpBlocks.Chain_10.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.3.attentions.0.norm": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_1.GroupNorm",
"up_blocks.3.resnets.1.norm2": "UpBlocks.Chain_11.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.3.attentions.1.norm": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_1.GroupNorm",
"up_blocks.3.resnets.2.norm2": "UpBlocks.Chain_12.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.3.attentions.2.norm": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_1.GroupNorm",
"conv_norm_out": "Chain.GroupNorm",
"down_blocks.0.resnets.0.conv1": "DownBlocks.Chain_2.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.0.resnets.0.conv2": "DownBlocks.Chain_2.ResidualBlock.Chain.Conv2d",
"down_blocks.0.resnets.1.conv1": "DownBlocks.Chain_3.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.0.resnets.1.conv2": "DownBlocks.Chain_3.ResidualBlock.Chain.Conv2d",
"down_blocks.0.downsamplers.0.conv": "DownBlocks.Chain_4.Downsample.Conv2d",
"up_blocks.3.resnets.0.conv2": "UpBlocks.Chain_10.ResidualBlock.Chain.Conv2d",
"up_blocks.3.resnets.1.conv2": "UpBlocks.Chain_11.ResidualBlock.Chain.Conv2d",
"up_blocks.3.resnets.2.conv2": "UpBlocks.Chain_12.ResidualBlock.Chain.Conv2d",
"down_blocks.0.resnets.0.time_emb_proj": "DownBlocks.Chain_2.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"down_blocks.0.resnets.1.time_emb_proj": "DownBlocks.Chain_3.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"up_blocks.3.resnets.0.time_emb_proj": "UpBlocks.Chain_10.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"up_blocks.3.resnets.1.time_emb_proj": "UpBlocks.Chain_11.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"up_blocks.3.resnets.2.time_emb_proj": "UpBlocks.Chain_12.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"down_blocks.0.attentions.0.proj_in": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.0.attentions.0.proj_out": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_3.Conv2d",
"down_blocks.0.attentions.1.proj_in": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.0.attentions.1.proj_out": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_3.Conv2d",
"up_blocks.3.attentions.0.proj_in": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_1.Conv2d",
"up_blocks.3.attentions.0.proj_out": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_3.Conv2d",
"up_blocks.3.attentions.1.proj_in": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_1.Conv2d",
"up_blocks.3.attentions.1.proj_out": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_3.Conv2d",
"up_blocks.3.attentions.2.proj_in": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_1.Conv2d",
"up_blocks.3.attentions.2.proj_out": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_3.Conv2d",
"down_blocks.0.attentions.0.transformer_blocks.0.norm1": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.0.attentions.0.transformer_blocks.0.norm2": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.0.attentions.0.transformer_blocks.0.norm3": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.0.attentions.1.transformer_blocks.0.norm1": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.0.attentions.1.transformer_blocks.0.norm2": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.0.attentions.1.transformer_blocks.0.norm3": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"up_blocks.3.attentions.0.transformer_blocks.0.norm1": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"up_blocks.3.attentions.0.transformer_blocks.0.norm2": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"up_blocks.3.attentions.0.transformer_blocks.0.norm3": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"up_blocks.3.attentions.1.transformer_blocks.0.norm1": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"up_blocks.3.attentions.1.transformer_blocks.0.norm2": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"up_blocks.3.attentions.1.transformer_blocks.0.norm3": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"up_blocks.3.attentions.2.transformer_blocks.0.norm1": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"up_blocks.3.attentions.2.transformer_blocks.0.norm2": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"up_blocks.3.attentions.2.transformer_blocks.0.norm3": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_2.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_3.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj": "UpBlocks.Chain_10.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj": "UpBlocks.Chain_11.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj": "UpBlocks.Chain_12.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.1.resnets.0.conv1": "DownBlocks.Chain_5.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.1.resnets.0.time_emb_proj": "DownBlocks.Chain_5.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.1.resnets.1.time_emb_proj": "DownBlocks.Chain_6.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.2.resnets.0.time_emb_proj": "UpBlocks.Chain_7.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.2.resnets.1.time_emb_proj": "UpBlocks.Chain_8.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"up_blocks.2.resnets.2.time_emb_proj": "UpBlocks.Chain_9.ResidualBlock.Chain.RangeAdapter2d.Chain.Linear",
"down_blocks.1.resnets.0.norm2": "DownBlocks.Chain_5.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.1.attentions.0.norm": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.1.resnets.1.norm1": "DownBlocks.Chain_6.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.1.resnets.1.norm2": "DownBlocks.Chain_6.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.1.attentions.1.norm": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.2.resnets.0.norm1": "DownBlocks.Chain_8.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.2.resnets.0.norm2": "UpBlocks.Chain_7.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.2.attentions.0.norm": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_1.GroupNorm",
"up_blocks.2.resnets.1.norm2": "UpBlocks.Chain_8.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.2.attentions.1.norm": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_1.GroupNorm",
"up_blocks.2.resnets.2.norm2": "UpBlocks.Chain_9.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.2.attentions.2.norm": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_1.GroupNorm",
"up_blocks.3.resnets.1.norm1": "UpBlocks.Chain_11.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.3.resnets.2.norm1": "UpBlocks.Chain_12.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.1.resnets.0.conv2": "DownBlocks.Chain_5.ResidualBlock.Chain.Conv2d",
"down_blocks.1.resnets.1.conv1": "DownBlocks.Chain_6.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.1.resnets.1.conv2": "DownBlocks.Chain_6.ResidualBlock.Chain.Conv2d",
"down_blocks.1.downsamplers.0.conv": "DownBlocks.Chain_7.Downsample.Conv2d",
"up_blocks.2.resnets.0.conv2": "UpBlocks.Chain_7.ResidualBlock.Chain.Conv2d",
"up_blocks.2.resnets.1.conv2": "UpBlocks.Chain_8.ResidualBlock.Chain.Conv2d",
"up_blocks.2.resnets.2.conv2": "UpBlocks.Chain_9.ResidualBlock.Chain.Conv2d",
"up_blocks.2.upsamplers.0.conv": "UpBlocks.Chain_9.Upsample.Conv2d",
"down_blocks.1.resnets.0.conv_shortcut": "DownBlocks.Chain_5.ResidualBlock.Conv2d",
"down_blocks.1.attentions.0.proj_in": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.1.attentions.0.proj_out": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_3.Conv2d",
"down_blocks.1.attentions.1.proj_in": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.1.attentions.1.proj_out": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_3.Conv2d",
"up_blocks.2.attentions.0.proj_in": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_1.Conv2d",
"up_blocks.2.attentions.0.proj_out": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_3.Conv2d",
"up_blocks.2.attentions.1.proj_in": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_1.Conv2d",
"up_blocks.2.attentions.1.proj_out": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_3.Conv2d",
"up_blocks.2.attentions.2.proj_in": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_1.Conv2d",
"up_blocks.2.attentions.2.proj_out": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_3.Conv2d",
"down_blocks.1.attentions.0.transformer_blocks.0.norm1": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.1.attentions.0.transformer_blocks.0.norm2": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.1.attentions.0.transformer_blocks.0.norm3": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.1.attentions.1.transformer_blocks.0.norm1": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.1.attentions.1.transformer_blocks.0.norm2": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.1.attentions.1.transformer_blocks.0.norm3": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"up_blocks.2.attentions.0.transformer_blocks.0.norm1": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"up_blocks.2.attentions.0.transformer_blocks.0.norm2": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"up_blocks.2.attentions.0.transformer_blocks.0.norm3": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"up_blocks.2.attentions.1.transformer_blocks.0.norm1": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"up_blocks.2.attentions.1.transformer_blocks.0.norm2": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"up_blocks.2.attentions.1.transformer_blocks.0.norm3": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"up_blocks.2.attentions.2.transformer_blocks.0.norm1": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"up_blocks.2.attentions.2.transformer_blocks.0.norm2": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"up_blocks.2.attentions.2.transformer_blocks.0.norm3": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Linear",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Linear",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": "UpBlocks.Chain_7.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": "UpBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2": "UpBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"down_blocks.2.resnets.0.conv1": "DownBlocks.Chain_8.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.2.resnets.0.norm2": "DownBlocks.Chain_8.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.2.attentions.0.norm": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.2.resnets.1.norm1": "DownBlocks.Chain_9.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.2.resnets.1.norm2": "DownBlocks.Chain_9.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.2.attentions.1.norm": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_1.GroupNorm",
"down_blocks.3.resnets.0.norm1": "DownBlocks.Chain_11.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.3.resnets.0.norm2": "DownBlocks.Chain_11.ResidualBlock.Chain.GroupNorm_2",
"down_blocks.3.resnets.1.norm1": "DownBlocks.Chain_12.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.3.resnets.1.norm2": "DownBlocks.Chain_12.ResidualBlock.Chain.GroupNorm_2",
"mid_block.resnets.0.norm1": "Sum.MiddleBlock.ResidualBlock_1.Chain.GroupNorm_1",
"mid_block.resnets.0.norm2": "Sum.MiddleBlock.ResidualBlock_1.Chain.GroupNorm_2",
"mid_block.attentions.0.norm": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_1.GroupNorm",
"mid_block.resnets.1.norm1": "Sum.MiddleBlock.ResidualBlock_2.Chain.GroupNorm_1",
"mid_block.resnets.1.norm2": "Sum.MiddleBlock.ResidualBlock_2.Chain.GroupNorm_2",
"up_blocks.0.resnets.0.norm2": "UpBlocks.Chain_1.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.0.resnets.1.norm2": "UpBlocks.Chain_2.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.0.resnets.2.norm2": "UpBlocks.Chain_3.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.1.resnets.0.norm2": "UpBlocks.Chain_4.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.1.attentions.0.norm": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_1.GroupNorm",
"up_blocks.1.resnets.1.norm2": "UpBlocks.Chain_5.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.1.attentions.1.norm": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_1.GroupNorm",
"up_blocks.1.resnets.2.norm2": "UpBlocks.Chain_6.ResidualBlock.Chain.GroupNorm_2",
"up_blocks.1.attentions.2.norm": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_1.GroupNorm",
"up_blocks.2.resnets.1.norm1": "UpBlocks.Chain_8.ResidualBlock.Chain.GroupNorm_1",
"down_blocks.2.resnets.0.conv2": "DownBlocks.Chain_8.ResidualBlock.Chain.Conv2d",
"down_blocks.2.resnets.1.conv1": "DownBlocks.Chain_9.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.2.resnets.1.conv2": "DownBlocks.Chain_9.ResidualBlock.Chain.Conv2d",
"down_blocks.2.downsamplers.0.conv": "DownBlocks.Chain_10.Downsample.Conv2d",
"down_blocks.3.resnets.0.conv1": "DownBlocks.Chain_11.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.3.resnets.0.conv2": "DownBlocks.Chain_11.ResidualBlock.Chain.Conv2d",
"down_blocks.3.resnets.1.conv1": "DownBlocks.Chain_12.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"down_blocks.3.resnets.1.conv2": "DownBlocks.Chain_12.ResidualBlock.Chain.Conv2d",
"mid_block.resnets.0.conv1": "Sum.MiddleBlock.ResidualBlock_1.Chain.RangeAdapter2d.Conv2d",
"mid_block.resnets.0.conv2": "Sum.MiddleBlock.ResidualBlock_1.Chain.Conv2d",
"mid_block.resnets.1.conv1": "Sum.MiddleBlock.ResidualBlock_2.Chain.RangeAdapter2d.Conv2d",
"mid_block.resnets.1.conv2": "Sum.MiddleBlock.ResidualBlock_2.Chain.Conv2d",
"up_blocks.0.resnets.0.conv2": "UpBlocks.Chain_1.ResidualBlock.Chain.Conv2d",
"up_blocks.0.resnets.1.conv2": "UpBlocks.Chain_2.ResidualBlock.Chain.Conv2d",
"up_blocks.0.resnets.2.conv2": "UpBlocks.Chain_3.ResidualBlock.Chain.Conv2d",
"up_blocks.0.upsamplers.0.conv": "UpBlocks.Chain_3.Upsample.Conv2d",
"up_blocks.1.resnets.0.conv2": "UpBlocks.Chain_4.ResidualBlock.Chain.Conv2d",
"up_blocks.1.resnets.1.conv2": "UpBlocks.Chain_5.ResidualBlock.Chain.Conv2d",
"up_blocks.1.resnets.2.conv2": "UpBlocks.Chain_6.ResidualBlock.Chain.Conv2d",
"up_blocks.1.upsamplers.0.conv": "UpBlocks.Chain_6.Upsample.Conv2d",
"down_blocks.2.resnets.0.conv_shortcut": "DownBlocks.Chain_8.ResidualBlock.Conv2d",
"down_blocks.2.attentions.0.proj_in": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.2.attentions.0.proj_out": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_3.Conv2d",
"down_blocks.2.attentions.1.proj_in": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_1.Conv2d",
"down_blocks.2.attentions.1.proj_out": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_3.Conv2d",
"mid_block.attentions.0.proj_in": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_1.Conv2d",
"mid_block.attentions.0.proj_out": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_3.Conv2d",
"up_blocks.1.attentions.0.proj_in": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_1.Conv2d",
"up_blocks.1.attentions.0.proj_out": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_3.Conv2d",
"up_blocks.1.attentions.1.proj_in": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_1.Conv2d",
"up_blocks.1.attentions.1.proj_out": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_3.Conv2d",
"up_blocks.1.attentions.2.proj_in": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_1.Conv2d",
"up_blocks.1.attentions.2.proj_out": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_3.Conv2d",
"down_blocks.2.attentions.0.transformer_blocks.0.norm1": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.2.attentions.0.transformer_blocks.0.norm2": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.2.attentions.0.transformer_blocks.0.norm3": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.2.attentions.1.transformer_blocks.0.norm1": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"down_blocks.2.attentions.1.transformer_blocks.0.norm2": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"down_blocks.2.attentions.1.transformer_blocks.0.norm3": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"mid_block.attentions.0.transformer_blocks.0.norm1": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"mid_block.attentions.0.transformer_blocks.0.norm2": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"mid_block.attentions.0.transformer_blocks.0.norm3": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"up_blocks.1.attentions.0.transformer_blocks.0.norm1": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"up_blocks.1.attentions.0.transformer_blocks.0.norm2": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"up_blocks.1.attentions.0.transformer_blocks.0.norm3": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"up_blocks.1.attentions.1.transformer_blocks.0.norm1": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"up_blocks.1.attentions.1.transformer_blocks.0.norm2": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"up_blocks.1.attentions.1.transformer_blocks.0.norm3": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"up_blocks.1.attentions.2.transformer_blocks.0.norm1": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.LayerNorm",
"up_blocks.1.attentions.2.transformer_blocks.0.norm2": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.LayerNorm",
"up_blocks.1.attentions.2.transformer_blocks.0.norm3": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.LayerNorm",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_q": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_k": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_v": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_q": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_1",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_2",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_1.SelfAttention.Distribute.Linear_3",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_1",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_k": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_v": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_2",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_2.Attention.Distribute.Linear_3",
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_1",
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_8.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2": "DownBlocks.Chain_9.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"mid_block.attentions.0.transformer_blocks.0.ff.net.2": "Sum.MiddleBlock.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2": "UpBlocks.Chain_4.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2": "UpBlocks.Chain_5.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2": "UpBlocks.Chain_6.CLIPLCrossAttention.Chain_2.CrossAttentionBlock.Residual_3.Linear_2",
"up_blocks.0.resnets.0.norm1": "UpBlocks.Chain_1.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.0.resnets.1.norm1": "UpBlocks.Chain_2.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.0.resnets.2.norm1": "UpBlocks.Chain_3.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.1.resnets.0.norm1": "UpBlocks.Chain_4.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.1.resnets.1.norm1": "UpBlocks.Chain_5.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.0.resnets.0.conv1": "UpBlocks.Chain_1.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.0.resnets.1.conv1": "UpBlocks.Chain_2.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.0.resnets.2.conv1": "UpBlocks.Chain_3.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.1.resnets.0.conv1": "UpBlocks.Chain_4.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.1.resnets.1.conv1": "UpBlocks.Chain_5.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.0.resnets.0.conv_shortcut": "UpBlocks.Chain_1.ResidualBlock.Conv2d",
"up_blocks.0.resnets.1.conv_shortcut": "UpBlocks.Chain_2.ResidualBlock.Conv2d",
"up_blocks.0.resnets.2.conv_shortcut": "UpBlocks.Chain_3.ResidualBlock.Conv2d",
"up_blocks.1.resnets.0.conv_shortcut": "UpBlocks.Chain_4.ResidualBlock.Conv2d",
"up_blocks.1.resnets.1.conv_shortcut": "UpBlocks.Chain_5.ResidualBlock.Conv2d",
"up_blocks.1.resnets.2.norm1": "UpBlocks.Chain_6.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.2.resnets.0.norm1": "UpBlocks.Chain_7.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.1.resnets.2.conv1": "UpBlocks.Chain_6.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.1.resnets.2.conv_shortcut": "UpBlocks.Chain_6.ResidualBlock.Conv2d",
"up_blocks.2.resnets.0.conv1": "UpBlocks.Chain_7.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.2.resnets.0.conv_shortcut": "UpBlocks.Chain_7.ResidualBlock.Conv2d",
"up_blocks.2.resnets.1.conv1": "UpBlocks.Chain_8.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.2.resnets.1.conv_shortcut": "UpBlocks.Chain_8.ResidualBlock.Conv2d",
"up_blocks.2.resnets.2.norm1": "UpBlocks.Chain_9.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.3.resnets.0.norm1": "UpBlocks.Chain_10.ResidualBlock.Chain.GroupNorm_1",
"up_blocks.2.resnets.2.conv1": "UpBlocks.Chain_9.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.2.resnets.2.conv_shortcut": "UpBlocks.Chain_9.ResidualBlock.Conv2d",
"up_blocks.3.resnets.0.conv1": "UpBlocks.Chain_10.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.3.resnets.0.conv_shortcut": "UpBlocks.Chain_10.ResidualBlock.Conv2d",
"up_blocks.3.resnets.1.conv1": "UpBlocks.Chain_11.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.3.resnets.2.conv1": "UpBlocks.Chain_12.ResidualBlock.Chain.RangeAdapter2d.Conv2d",
"up_blocks.3.resnets.1.conv_shortcut": "UpBlocks.Chain_11.ResidualBlock.Conv2d",
"up_blocks.3.resnets.2.conv_shortcut": "UpBlocks.Chain_12.ResidualBlock.Conv2d",
"conv_out": "Chain.Conv2d"
},
"source_aliases": {}
}
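
Note: mappings like the one above translate module prefixes from the diffusers naming scheme to refiners chain paths. A minimal sketch of how such a map might be applied to a state dict follows; the helper name apply_weight_map and the prefix-splitting approach are assumptions for illustration, not part of this commit:

def apply_weight_map(state_dict, mapping, source_aliases):
    # Hypothetical sketch: rename "prefix.weight" / "prefix.bias" keys using a map like the one above.
    renamed = {}
    for key, value in state_dict.items():
        prefix, _, suffix = key.rpartition(".")        # split off "weight" / "bias"
        prefix = source_aliases.get(prefix, prefix)    # normalize alternate source names
        renamed[f"{mapping.get(prefix, prefix)}.{suffix}"] = value
    return renamed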

View File

@ -0,0 +1,130 @@
{
"mapping": {
"encoder.conv_in": "encoder.conv_in",
"encoder.down.0.block.0.norm1": "encoder.down_blocks.0.resnets.0.norm1",
"encoder.down.0.block.0.conv1": "encoder.down_blocks.0.resnets.0.conv1",
"encoder.down.0.block.0.norm2": "encoder.down_blocks.0.resnets.0.norm2",
"encoder.down.0.block.0.conv2": "encoder.down_blocks.0.resnets.0.conv2",
"encoder.down.0.block.1.norm1": "encoder.down_blocks.0.resnets.1.norm1",
"encoder.down.0.block.1.conv1": "encoder.down_blocks.0.resnets.1.conv1",
"encoder.down.0.block.1.norm2": "encoder.down_blocks.0.resnets.1.norm2",
"encoder.down.0.block.1.conv2": "encoder.down_blocks.0.resnets.1.conv2",
"encoder.down.0.downsample.conv": "encoder.down_blocks.0.downsamplers.0.conv",
"encoder.down.1.block.0.norm1": "encoder.down_blocks.1.resnets.0.norm1",
"encoder.down.1.block.0.conv1": "encoder.down_blocks.1.resnets.0.conv1",
"encoder.down.1.block.0.norm2": "encoder.down_blocks.1.resnets.0.norm2",
"encoder.down.1.block.0.conv2": "encoder.down_blocks.1.resnets.0.conv2",
"encoder.down.1.block.0.nin_shortcut": "encoder.down_blocks.1.resnets.0.conv_shortcut",
"encoder.down.1.block.1.norm1": "encoder.down_blocks.1.resnets.1.norm1",
"encoder.down.1.block.1.conv1": "encoder.down_blocks.1.resnets.1.conv1",
"encoder.down.1.block.1.norm2": "encoder.down_blocks.1.resnets.1.norm2",
"encoder.down.1.block.1.conv2": "encoder.down_blocks.1.resnets.1.conv2",
"encoder.down.1.downsample.conv": "encoder.down_blocks.1.downsamplers.0.conv",
"encoder.down.2.block.0.norm1": "encoder.down_blocks.2.resnets.0.norm1",
"encoder.down.2.block.0.conv1": "encoder.down_blocks.2.resnets.0.conv1",
"encoder.down.2.block.0.norm2": "encoder.down_blocks.2.resnets.0.norm2",
"encoder.down.2.block.0.conv2": "encoder.down_blocks.2.resnets.0.conv2",
"encoder.down.2.block.0.nin_shortcut": "encoder.down_blocks.2.resnets.0.conv_shortcut",
"encoder.down.2.block.1.norm1": "encoder.down_blocks.2.resnets.1.norm1",
"encoder.down.2.block.1.conv1": "encoder.down_blocks.2.resnets.1.conv1",
"encoder.down.2.block.1.norm2": "encoder.down_blocks.2.resnets.1.norm2",
"encoder.down.2.block.1.conv2": "encoder.down_blocks.2.resnets.1.conv2",
"encoder.down.2.downsample.conv": "encoder.down_blocks.2.downsamplers.0.conv",
"encoder.down.3.block.0.norm1": "encoder.down_blocks.3.resnets.0.norm1",
"encoder.down.3.block.0.conv1": "encoder.down_blocks.3.resnets.0.conv1",
"encoder.down.3.block.0.norm2": "encoder.down_blocks.3.resnets.0.norm2",
"encoder.down.3.block.0.conv2": "encoder.down_blocks.3.resnets.0.conv2",
"encoder.down.3.block.1.norm1": "encoder.down_blocks.3.resnets.1.norm1",
"encoder.down.3.block.1.conv1": "encoder.down_blocks.3.resnets.1.conv1",
"encoder.down.3.block.1.norm2": "encoder.down_blocks.3.resnets.1.norm2",
"encoder.down.3.block.1.conv2": "encoder.down_blocks.3.resnets.1.conv2",
"encoder.mid.block_1.norm1": "encoder.mid_block.resnets.0.norm1",
"encoder.mid.block_1.conv1": "encoder.mid_block.resnets.0.conv1",
"encoder.mid.block_1.norm2": "encoder.mid_block.resnets.0.norm2",
"encoder.mid.block_1.conv2": "encoder.mid_block.resnets.0.conv2",
"encoder.mid.attn_1.norm": "encoder.mid_block.attentions.0.group_norm",
"encoder.mid.attn_1.q": "encoder.mid_block.attentions.0.to_q",
"encoder.mid.attn_1.k": "encoder.mid_block.attentions.0.to_k",
"encoder.mid.attn_1.v": "encoder.mid_block.attentions.0.to_v",
"encoder.mid.attn_1.proj_out": "encoder.mid_block.attentions.0.to_out.0",
"encoder.mid.block_2.norm1": "encoder.mid_block.resnets.1.norm1",
"encoder.mid.block_2.conv1": "encoder.mid_block.resnets.1.conv1",
"encoder.mid.block_2.norm2": "encoder.mid_block.resnets.1.norm2",
"encoder.mid.block_2.conv2": "encoder.mid_block.resnets.1.conv2",
"encoder.norm_out": "encoder.conv_norm_out",
"encoder.conv_out": "encoder.conv_out",
"quant_conv": "quant_conv",
"post_quant_conv": "post_quant_conv",
"decoder.conv_in": "decoder.conv_in",
"decoder.mid.block_1.norm1": "decoder.mid_block.resnets.0.norm1",
"decoder.mid.block_1.conv1": "decoder.mid_block.resnets.0.conv1",
"decoder.mid.block_1.norm2": "decoder.mid_block.resnets.0.norm2",
"decoder.mid.block_1.conv2": "decoder.mid_block.resnets.0.conv2",
"decoder.mid.attn_1.norm": "decoder.mid_block.attentions.0.group_norm",
"decoder.mid.attn_1.q": "decoder.mid_block.attentions.0.to_q",
"decoder.mid.attn_1.k": "decoder.mid_block.attentions.0.to_k",
"decoder.mid.attn_1.v": "decoder.mid_block.attentions.0.to_v",
"decoder.mid.attn_1.proj_out": "decoder.mid_block.attentions.0.to_out.0",
"decoder.mid.block_2.norm1": "decoder.mid_block.resnets.1.norm1",
"decoder.mid.block_2.conv1": "decoder.mid_block.resnets.1.conv1",
"decoder.mid.block_2.norm2": "decoder.mid_block.resnets.1.norm2",
"decoder.mid.block_2.conv2": "decoder.mid_block.resnets.1.conv2",
"decoder.up.3.block.0.norm1": "decoder.up_blocks.0.resnets.0.norm1",
"decoder.up.3.block.0.conv1": "decoder.up_blocks.0.resnets.0.conv1",
"decoder.up.3.block.0.norm2": "decoder.up_blocks.0.resnets.0.norm2",
"decoder.up.3.block.0.conv2": "decoder.up_blocks.0.resnets.0.conv2",
"decoder.up.3.block.1.norm1": "decoder.up_blocks.0.resnets.1.norm1",
"decoder.up.3.block.1.conv1": "decoder.up_blocks.0.resnets.1.conv1",
"decoder.up.3.block.1.norm2": "decoder.up_blocks.0.resnets.1.norm2",
"decoder.up.3.block.1.conv2": "decoder.up_blocks.0.resnets.1.conv2",
"decoder.up.3.block.2.norm1": "decoder.up_blocks.0.resnets.2.norm1",
"decoder.up.3.block.2.conv1": "decoder.up_blocks.0.resnets.2.conv1",
"decoder.up.3.block.2.norm2": "decoder.up_blocks.0.resnets.2.norm2",
"decoder.up.3.block.2.conv2": "decoder.up_blocks.0.resnets.2.conv2",
"decoder.up.3.upsample.conv": "decoder.up_blocks.0.upsamplers.0.conv",
"decoder.up.2.block.0.norm1": "decoder.up_blocks.1.resnets.0.norm1",
"decoder.up.2.block.0.conv1": "decoder.up_blocks.1.resnets.0.conv1",
"decoder.up.2.block.0.norm2": "decoder.up_blocks.1.resnets.0.norm2",
"decoder.up.2.block.0.conv2": "decoder.up_blocks.1.resnets.0.conv2",
"decoder.up.2.block.1.norm1": "decoder.up_blocks.1.resnets.1.norm1",
"decoder.up.2.block.1.conv1": "decoder.up_blocks.1.resnets.1.conv1",
"decoder.up.2.block.1.norm2": "decoder.up_blocks.1.resnets.1.norm2",
"decoder.up.2.block.1.conv2": "decoder.up_blocks.1.resnets.1.conv2",
"decoder.up.2.block.2.norm1": "decoder.up_blocks.1.resnets.2.norm1",
"decoder.up.2.block.2.conv1": "decoder.up_blocks.1.resnets.2.conv1",
"decoder.up.2.block.2.norm2": "decoder.up_blocks.1.resnets.2.norm2",
"decoder.up.2.block.2.conv2": "decoder.up_blocks.1.resnets.2.conv2",
"decoder.up.2.upsample.conv": "decoder.up_blocks.1.upsamplers.0.conv",
"decoder.up.1.block.0.norm1": "decoder.up_blocks.2.resnets.0.norm1",
"decoder.up.1.block.0.conv1": "decoder.up_blocks.2.resnets.0.conv1",
"decoder.up.1.block.0.norm2": "decoder.up_blocks.2.resnets.0.norm2",
"decoder.up.1.block.0.conv2": "decoder.up_blocks.2.resnets.0.conv2",
"decoder.up.1.block.0.nin_shortcut": "decoder.up_blocks.2.resnets.0.conv_shortcut",
"decoder.up.1.block.1.norm1": "decoder.up_blocks.2.resnets.1.norm1",
"decoder.up.1.block.1.conv1": "decoder.up_blocks.2.resnets.1.conv1",
"decoder.up.1.block.1.norm2": "decoder.up_blocks.2.resnets.1.norm2",
"decoder.up.1.block.1.conv2": "decoder.up_blocks.2.resnets.1.conv2",
"decoder.up.1.block.2.norm1": "decoder.up_blocks.2.resnets.2.norm1",
"decoder.up.1.block.2.conv1": "decoder.up_blocks.2.resnets.2.conv1",
"decoder.up.1.block.2.norm2": "decoder.up_blocks.2.resnets.2.norm2",
"decoder.up.1.block.2.conv2": "decoder.up_blocks.2.resnets.2.conv2",
"decoder.up.1.upsample.conv": "decoder.up_blocks.2.upsamplers.0.conv",
"decoder.up.0.block.0.norm1": "decoder.up_blocks.3.resnets.0.norm1",
"decoder.up.0.block.0.conv1": "decoder.up_blocks.3.resnets.0.conv1",
"decoder.up.0.block.0.norm2": "decoder.up_blocks.3.resnets.0.norm2",
"decoder.up.0.block.0.conv2": "decoder.up_blocks.3.resnets.0.conv2",
"decoder.up.0.block.0.nin_shortcut": "decoder.up_blocks.3.resnets.0.conv_shortcut",
"decoder.up.0.block.1.norm1": "decoder.up_blocks.3.resnets.1.norm1",
"decoder.up.0.block.1.conv1": "decoder.up_blocks.3.resnets.1.conv1",
"decoder.up.0.block.1.norm2": "decoder.up_blocks.3.resnets.1.norm2",
"decoder.up.0.block.1.conv2": "decoder.up_blocks.3.resnets.1.conv2",
"decoder.up.0.block.2.norm1": "decoder.up_blocks.3.resnets.2.norm1",
"decoder.up.0.block.2.conv1": "decoder.up_blocks.3.resnets.2.conv1",
"decoder.up.0.block.2.norm2": "decoder.up_blocks.3.resnets.2.norm2",
"decoder.up.0.block.2.conv2": "decoder.up_blocks.3.resnets.2.conv2",
"decoder.norm_out": "decoder.conv_norm_out",
"decoder.conv_out": "decoder.conv_out"
},
"source_aliases": {},
"ignorable_prefixes": []
}

View File

@ -0,0 +1,130 @@
{
"mapping": {
"encoder.conv_in": "encoder.conv_in",
"encoder.down_blocks.0.resnets.0.norm1": "encoder.down.0.block.0.norm1",
"encoder.down_blocks.0.resnets.0.conv1": "encoder.down.0.block.0.conv1",
"encoder.down_blocks.0.resnets.0.norm2": "encoder.down.0.block.0.norm2",
"encoder.down_blocks.0.resnets.0.conv2": "encoder.down.0.block.0.conv2",
"encoder.down_blocks.0.resnets.1.norm1": "encoder.down.0.block.1.norm1",
"encoder.down_blocks.0.resnets.1.conv1": "encoder.down.0.block.1.conv1",
"encoder.down_blocks.0.resnets.1.norm2": "encoder.down.0.block.1.norm2",
"encoder.down_blocks.0.resnets.1.conv2": "encoder.down.0.block.1.conv2",
"encoder.down_blocks.0.downsamplers.0.conv": "encoder.down.0.downsample.conv",
"encoder.down_blocks.1.resnets.0.norm1": "encoder.down.1.block.0.norm1",
"encoder.down_blocks.1.resnets.0.conv1": "encoder.down.1.block.0.conv1",
"encoder.down_blocks.1.resnets.0.norm2": "encoder.down.1.block.0.norm2",
"encoder.down_blocks.1.resnets.0.conv2": "encoder.down.1.block.0.conv2",
"encoder.down_blocks.1.resnets.0.conv_shortcut": "encoder.down.1.block.0.nin_shortcut",
"encoder.down_blocks.1.resnets.1.norm1": "encoder.down.1.block.1.norm1",
"encoder.down_blocks.1.resnets.1.conv1": "encoder.down.1.block.1.conv1",
"encoder.down_blocks.1.resnets.1.norm2": "encoder.down.1.block.1.norm2",
"encoder.down_blocks.1.resnets.1.conv2": "encoder.down.1.block.1.conv2",
"encoder.down_blocks.1.downsamplers.0.conv": "encoder.down.1.downsample.conv",
"encoder.down_blocks.2.resnets.0.norm1": "encoder.down.2.block.0.norm1",
"encoder.down_blocks.2.resnets.0.conv1": "encoder.down.2.block.0.conv1",
"encoder.down_blocks.2.resnets.0.norm2": "encoder.down.2.block.0.norm2",
"encoder.down_blocks.2.resnets.0.conv2": "encoder.down.2.block.0.conv2",
"encoder.down_blocks.2.resnets.0.conv_shortcut": "encoder.down.2.block.0.nin_shortcut",
"encoder.down_blocks.2.resnets.1.norm1": "encoder.down.2.block.1.norm1",
"encoder.down_blocks.2.resnets.1.conv1": "encoder.down.2.block.1.conv1",
"encoder.down_blocks.2.resnets.1.norm2": "encoder.down.2.block.1.norm2",
"encoder.down_blocks.2.resnets.1.conv2": "encoder.down.2.block.1.conv2",
"encoder.down_blocks.2.downsamplers.0.conv": "encoder.down.2.downsample.conv",
"encoder.down_blocks.3.resnets.0.norm1": "encoder.down.3.block.0.norm1",
"encoder.down_blocks.3.resnets.0.conv1": "encoder.down.3.block.0.conv1",
"encoder.down_blocks.3.resnets.0.norm2": "encoder.down.3.block.0.norm2",
"encoder.down_blocks.3.resnets.0.conv2": "encoder.down.3.block.0.conv2",
"encoder.down_blocks.3.resnets.1.norm1": "encoder.down.3.block.1.norm1",
"encoder.down_blocks.3.resnets.1.conv1": "encoder.down.3.block.1.conv1",
"encoder.down_blocks.3.resnets.1.norm2": "encoder.down.3.block.1.norm2",
"encoder.down_blocks.3.resnets.1.conv2": "encoder.down.3.block.1.conv2",
"encoder.mid_block.resnets.0.norm1": "encoder.mid.block_1.norm1",
"encoder.mid_block.resnets.0.conv1": "encoder.mid.block_1.conv1",
"encoder.mid_block.resnets.0.norm2": "encoder.mid.block_1.norm2",
"encoder.mid_block.resnets.0.conv2": "encoder.mid.block_1.conv2",
"encoder.mid_block.attentions.0.group_norm": "encoder.mid.attn_1.norm",
"encoder.mid_block.attentions.0.to_q": "encoder.mid.attn_1.q",
"encoder.mid_block.attentions.0.to_k": "encoder.mid.attn_1.k",
"encoder.mid_block.attentions.0.to_v": "encoder.mid.attn_1.v",
"encoder.mid_block.attentions.0.to_out.0": "encoder.mid.attn_1.proj_out",
"encoder.mid_block.resnets.1.norm1": "encoder.mid.block_2.norm1",
"encoder.mid_block.resnets.1.conv1": "encoder.mid.block_2.conv1",
"encoder.mid_block.resnets.1.norm2": "encoder.mid.block_2.norm2",
"encoder.mid_block.resnets.1.conv2": "encoder.mid.block_2.conv2",
"encoder.conv_norm_out": "encoder.norm_out",
"encoder.conv_out": "encoder.conv_out",
"quant_conv": "quant_conv",
"post_quant_conv": "post_quant_conv",
"decoder.conv_in": "decoder.conv_in",
"decoder.mid_block.resnets.0.norm1": "decoder.mid.block_1.norm1",
"decoder.mid_block.resnets.0.conv1": "decoder.mid.block_1.conv1",
"decoder.mid_block.resnets.0.norm2": "decoder.mid.block_1.norm2",
"decoder.mid_block.resnets.0.conv2": "decoder.mid.block_1.conv2",
"decoder.mid_block.attentions.0.group_norm": "decoder.mid.attn_1.norm",
"decoder.mid_block.attentions.0.to_q": "decoder.mid.attn_1.q",
"decoder.mid_block.attentions.0.to_k": "decoder.mid.attn_1.k",
"decoder.mid_block.attentions.0.to_v": "decoder.mid.attn_1.v",
"decoder.mid_block.attentions.0.to_out.0": "decoder.mid.attn_1.proj_out",
"decoder.mid_block.resnets.1.norm1": "decoder.mid.block_2.norm1",
"decoder.mid_block.resnets.1.conv1": "decoder.mid.block_2.conv1",
"decoder.mid_block.resnets.1.norm2": "decoder.mid.block_2.norm2",
"decoder.mid_block.resnets.1.conv2": "decoder.mid.block_2.conv2",
"decoder.up_blocks.0.resnets.0.norm1": "decoder.up.3.block.0.norm1",
"decoder.up_blocks.0.resnets.0.conv1": "decoder.up.3.block.0.conv1",
"decoder.up_blocks.0.resnets.0.norm2": "decoder.up.3.block.0.norm2",
"decoder.up_blocks.0.resnets.0.conv2": "decoder.up.3.block.0.conv2",
"decoder.up_blocks.0.resnets.1.norm1": "decoder.up.3.block.1.norm1",
"decoder.up_blocks.0.resnets.1.conv1": "decoder.up.3.block.1.conv1",
"decoder.up_blocks.0.resnets.1.norm2": "decoder.up.3.block.1.norm2",
"decoder.up_blocks.0.resnets.1.conv2": "decoder.up.3.block.1.conv2",
"decoder.up_blocks.0.resnets.2.norm1": "decoder.up.3.block.2.norm1",
"decoder.up_blocks.0.resnets.2.conv1": "decoder.up.3.block.2.conv1",
"decoder.up_blocks.0.resnets.2.norm2": "decoder.up.3.block.2.norm2",
"decoder.up_blocks.0.resnets.2.conv2": "decoder.up.3.block.2.conv2",
"decoder.up_blocks.0.upsamplers.0.conv": "decoder.up.3.upsample.conv",
"decoder.up_blocks.1.resnets.0.norm1": "decoder.up.2.block.0.norm1",
"decoder.up_blocks.1.resnets.0.conv1": "decoder.up.2.block.0.conv1",
"decoder.up_blocks.1.resnets.0.norm2": "decoder.up.2.block.0.norm2",
"decoder.up_blocks.1.resnets.0.conv2": "decoder.up.2.block.0.conv2",
"decoder.up_blocks.1.resnets.1.norm1": "decoder.up.2.block.1.norm1",
"decoder.up_blocks.1.resnets.1.conv1": "decoder.up.2.block.1.conv1",
"decoder.up_blocks.1.resnets.1.norm2": "decoder.up.2.block.1.norm2",
"decoder.up_blocks.1.resnets.1.conv2": "decoder.up.2.block.1.conv2",
"decoder.up_blocks.1.resnets.2.norm1": "decoder.up.2.block.2.norm1",
"decoder.up_blocks.1.resnets.2.conv1": "decoder.up.2.block.2.conv1",
"decoder.up_blocks.1.resnets.2.norm2": "decoder.up.2.block.2.norm2",
"decoder.up_blocks.1.resnets.2.conv2": "decoder.up.2.block.2.conv2",
"decoder.up_blocks.1.upsamplers.0.conv": "decoder.up.2.upsample.conv",
"decoder.up_blocks.2.resnets.0.norm1": "decoder.up.1.block.0.norm1",
"decoder.up_blocks.2.resnets.0.conv1": "decoder.up.1.block.0.conv1",
"decoder.up_blocks.2.resnets.0.norm2": "decoder.up.1.block.0.norm2",
"decoder.up_blocks.2.resnets.0.conv2": "decoder.up.1.block.0.conv2",
"decoder.up_blocks.2.resnets.0.conv_shortcut": "decoder.up.1.block.0.nin_shortcut",
"decoder.up_blocks.2.resnets.1.norm1": "decoder.up.1.block.1.norm1",
"decoder.up_blocks.2.resnets.1.conv1": "decoder.up.1.block.1.conv1",
"decoder.up_blocks.2.resnets.1.norm2": "decoder.up.1.block.1.norm2",
"decoder.up_blocks.2.resnets.1.conv2": "decoder.up.1.block.1.conv2",
"decoder.up_blocks.2.resnets.2.norm1": "decoder.up.1.block.2.norm1",
"decoder.up_blocks.2.resnets.2.conv1": "decoder.up.1.block.2.conv1",
"decoder.up_blocks.2.resnets.2.norm2": "decoder.up.1.block.2.norm2",
"decoder.up_blocks.2.resnets.2.conv2": "decoder.up.1.block.2.conv2",
"decoder.up_blocks.2.upsamplers.0.conv": "decoder.up.1.upsample.conv",
"decoder.up_blocks.3.resnets.0.norm1": "decoder.up.0.block.0.norm1",
"decoder.up_blocks.3.resnets.0.conv1": "decoder.up.0.block.0.conv1",
"decoder.up_blocks.3.resnets.0.norm2": "decoder.up.0.block.0.norm2",
"decoder.up_blocks.3.resnets.0.conv2": "decoder.up.0.block.0.conv2",
"decoder.up_blocks.3.resnets.0.conv_shortcut": "decoder.up.0.block.0.nin_shortcut",
"decoder.up_blocks.3.resnets.1.norm1": "decoder.up.0.block.1.norm1",
"decoder.up_blocks.3.resnets.1.conv1": "decoder.up.0.block.1.conv1",
"decoder.up_blocks.3.resnets.1.norm2": "decoder.up.0.block.1.norm2",
"decoder.up_blocks.3.resnets.1.conv2": "decoder.up.0.block.1.conv2",
"decoder.up_blocks.3.resnets.2.norm1": "decoder.up.0.block.2.norm1",
"decoder.up_blocks.3.resnets.2.conv1": "decoder.up.0.block.2.conv1",
"decoder.up_blocks.3.resnets.2.norm2": "decoder.up.0.block.2.norm2",
"decoder.up_blocks.3.resnets.2.conv2": "decoder.up.0.block.2.conv2",
"decoder.conv_norm_out": "decoder.norm_out",
"decoder.conv_out": "decoder.conv_out"
},
"source_aliases": {},
"ignorable_prefixes": []
}

View File

@ -0,0 +1,149 @@
{
"ignorable_prefixes": [],
"mapping": {
"encoder.conv_in": "Encoder.Conv2d",
"encoder.down_blocks.0.resnets.0.norm1": "Encoder.Chain_1.Chain_1.Resnet_1.Chain.GroupNorm_1",
"encoder.down_blocks.0.resnets.0.norm2": "Encoder.Chain_1.Chain_1.Resnet_1.Chain.GroupNorm_2",
"encoder.down_blocks.0.resnets.1.norm1": "Encoder.Chain_1.Chain_1.Resnet_2.Chain.GroupNorm_1",
"encoder.down_blocks.0.resnets.1.norm2": "Encoder.Chain_1.Chain_1.Resnet_2.Chain.GroupNorm_2",
"encoder.down_blocks.1.resnets.0.norm1": "Encoder.Chain_1.Chain_2.Resnet_1.Chain.GroupNorm_1",
"decoder.up_blocks.3.resnets.0.norm2": "Decoder.Chain_1.Chain_5.Resnet_1.Chain.GroupNorm_2",
"decoder.up_blocks.3.resnets.1.norm1": "Decoder.Chain_1.Chain_5.Resnet_2.Chain.GroupNorm_1",
"decoder.up_blocks.3.resnets.1.norm2": "Decoder.Chain_1.Chain_5.Resnet_2.Chain.GroupNorm_2",
"decoder.up_blocks.3.resnets.2.norm1": "Decoder.Chain_1.Chain_5.Resnet_3.Chain.GroupNorm_1",
"decoder.up_blocks.3.resnets.2.norm2": "Decoder.Chain_1.Chain_5.Resnet_3.Chain.GroupNorm_2",
"decoder.conv_norm_out": "Decoder.Chain_2.GroupNorm",
"encoder.down_blocks.0.resnets.0.conv1": "Encoder.Chain_1.Chain_1.Resnet_1.Chain.Conv2d_1",
"encoder.down_blocks.0.resnets.0.conv2": "Encoder.Chain_1.Chain_1.Resnet_1.Chain.Conv2d_2",
"encoder.down_blocks.0.resnets.1.conv1": "Encoder.Chain_1.Chain_1.Resnet_2.Chain.Conv2d_1",
"encoder.down_blocks.0.resnets.1.conv2": "Encoder.Chain_1.Chain_1.Resnet_2.Chain.Conv2d_2",
"encoder.down_blocks.0.downsamplers.0.conv": "Encoder.Chain_1.Chain_1.Downsample.Conv2d",
"decoder.up_blocks.3.resnets.0.conv2": "Decoder.Chain_1.Chain_5.Resnet_1.Chain.Conv2d_2",
"decoder.up_blocks.3.resnets.1.conv1": "Decoder.Chain_1.Chain_5.Resnet_2.Chain.Conv2d_1",
"decoder.up_blocks.3.resnets.1.conv2": "Decoder.Chain_1.Chain_5.Resnet_2.Chain.Conv2d_2",
"decoder.up_blocks.3.resnets.2.conv1": "Decoder.Chain_1.Chain_5.Resnet_3.Chain.Conv2d_1",
"decoder.up_blocks.3.resnets.2.conv2": "Decoder.Chain_1.Chain_5.Resnet_3.Chain.Conv2d_2",
"encoder.down_blocks.1.resnets.0.conv1": "Encoder.Chain_1.Chain_2.Resnet_1.Chain.Conv2d_1",
"encoder.down_blocks.1.resnets.0.norm2": "Encoder.Chain_1.Chain_2.Resnet_1.Chain.GroupNorm_2",
"encoder.down_blocks.1.resnets.1.norm1": "Encoder.Chain_1.Chain_2.Resnet_2.Chain.GroupNorm_1",
"encoder.down_blocks.1.resnets.1.norm2": "Encoder.Chain_1.Chain_2.Resnet_2.Chain.GroupNorm_2",
"encoder.down_blocks.2.resnets.0.norm1": "Encoder.Chain_1.Chain_3.Resnet_1.Chain.GroupNorm_1",
"decoder.up_blocks.2.resnets.0.norm2": "Decoder.Chain_1.Chain_4.Resnet_1.Chain.GroupNorm_2",
"decoder.up_blocks.2.resnets.1.norm1": "Decoder.Chain_1.Chain_4.Resnet_2.Chain.GroupNorm_1",
"decoder.up_blocks.2.resnets.1.norm2": "Decoder.Chain_1.Chain_4.Resnet_2.Chain.GroupNorm_2",
"decoder.up_blocks.2.resnets.2.norm1": "Decoder.Chain_1.Chain_4.Resnet_3.Chain.GroupNorm_1",
"decoder.up_blocks.2.resnets.2.norm2": "Decoder.Chain_1.Chain_4.Resnet_3.Chain.GroupNorm_2",
"decoder.up_blocks.3.resnets.0.norm1": "Decoder.Chain_1.Chain_5.Resnet_1.Chain.GroupNorm_1",
"encoder.down_blocks.1.resnets.0.conv2": "Encoder.Chain_1.Chain_2.Resnet_1.Chain.Conv2d_2",
"encoder.down_blocks.1.resnets.1.conv1": "Encoder.Chain_1.Chain_2.Resnet_2.Chain.Conv2d_1",
"encoder.down_blocks.1.resnets.1.conv2": "Encoder.Chain_1.Chain_2.Resnet_2.Chain.Conv2d_2",
"encoder.down_blocks.1.downsamplers.0.conv": "Encoder.Chain_1.Chain_2.Downsample.Conv2d",
"decoder.up_blocks.2.resnets.0.conv2": "Decoder.Chain_1.Chain_4.Resnet_1.Chain.Conv2d_2",
"decoder.up_blocks.2.resnets.1.conv1": "Decoder.Chain_1.Chain_4.Resnet_2.Chain.Conv2d_1",
"decoder.up_blocks.2.resnets.1.conv2": "Decoder.Chain_1.Chain_4.Resnet_2.Chain.Conv2d_2",
"decoder.up_blocks.2.resnets.2.conv1": "Decoder.Chain_1.Chain_4.Resnet_3.Chain.Conv2d_1",
"decoder.up_blocks.2.resnets.2.conv2": "Decoder.Chain_1.Chain_4.Resnet_3.Chain.Conv2d_2",
"decoder.up_blocks.2.upsamplers.0.conv": "Decoder.Chain_1.Chain_4.Upsample.Conv2d",
"encoder.down_blocks.1.resnets.0.conv_shortcut": "Encoder.Chain_1.Chain_2.Resnet_1.Conv2d",
"encoder.down_blocks.2.resnets.0.conv1": "Encoder.Chain_1.Chain_3.Resnet_1.Chain.Conv2d_1",
"encoder.down_blocks.2.resnets.0.norm2": "Encoder.Chain_1.Chain_3.Resnet_1.Chain.GroupNorm_2",
"encoder.down_blocks.2.resnets.1.norm1": "Encoder.Chain_1.Chain_3.Resnet_2.Chain.GroupNorm_1",
"encoder.down_blocks.2.resnets.1.norm2": "Encoder.Chain_1.Chain_3.Resnet_2.Chain.GroupNorm_2",
"encoder.down_blocks.3.resnets.0.norm1": "Encoder.Chain_1.Chain_4.Resnet_1.Chain.GroupNorm_1",
"encoder.down_blocks.3.resnets.0.norm2": "Encoder.Chain_1.Chain_4.Resnet_1.Chain.GroupNorm_2",
"encoder.down_blocks.3.resnets.1.norm1": "Encoder.Chain_1.Chain_4.Resnet_2.Chain.GroupNorm_1",
"encoder.down_blocks.3.resnets.1.norm2": "Encoder.Chain_1.Chain_4.Resnet_2.Chain.GroupNorm_2",
"encoder.mid_block.resnets.0.norm1": "Encoder.Chain_1.Chain_5.Resnet_1.Chain.GroupNorm_1",
"encoder.mid_block.resnets.0.norm2": "Encoder.Chain_1.Chain_5.Resnet_1.Chain.GroupNorm_2",
"encoder.mid_block.attentions.0.group_norm": "Encoder.Chain_1.Chain_5.Residual.GroupNorm",
"encoder.mid_block.resnets.1.norm1": "Encoder.Chain_1.Chain_5.Resnet_2.Chain.GroupNorm_1",
"encoder.mid_block.resnets.1.norm2": "Encoder.Chain_1.Chain_5.Resnet_2.Chain.GroupNorm_2",
"encoder.conv_norm_out": "Encoder.Chain_2.GroupNorm",
"decoder.mid_block.resnets.0.norm1": "Decoder.Chain_1.Chain_1.Resnet_1.Chain.GroupNorm_1",
"decoder.mid_block.resnets.0.norm2": "Decoder.Chain_1.Chain_1.Resnet_1.Chain.GroupNorm_2",
"decoder.mid_block.attentions.0.group_norm": "Decoder.Chain_1.Chain_1.Residual.GroupNorm",
"decoder.mid_block.resnets.1.norm1": "Decoder.Chain_1.Chain_1.Resnet_2.Chain.GroupNorm_1",
"decoder.mid_block.resnets.1.norm2": "Decoder.Chain_1.Chain_1.Resnet_2.Chain.GroupNorm_2",
"decoder.up_blocks.0.resnets.0.norm1": "Decoder.Chain_1.Chain_2.Resnet_1.Chain.GroupNorm_1",
"decoder.up_blocks.0.resnets.0.norm2": "Decoder.Chain_1.Chain_2.Resnet_1.Chain.GroupNorm_2",
"decoder.up_blocks.0.resnets.1.norm1": "Decoder.Chain_1.Chain_2.Resnet_2.Chain.GroupNorm_1",
"decoder.up_blocks.0.resnets.1.norm2": "Decoder.Chain_1.Chain_2.Resnet_2.Chain.GroupNorm_2",
"decoder.up_blocks.0.resnets.2.norm1": "Decoder.Chain_1.Chain_2.Resnet_3.Chain.GroupNorm_1",
"decoder.up_blocks.0.resnets.2.norm2": "Decoder.Chain_1.Chain_2.Resnet_3.Chain.GroupNorm_2",
"decoder.up_blocks.1.resnets.0.norm1": "Decoder.Chain_1.Chain_3.Resnet_1.Chain.GroupNorm_1",
"decoder.up_blocks.1.resnets.0.norm2": "Decoder.Chain_1.Chain_3.Resnet_1.Chain.GroupNorm_2",
"decoder.up_blocks.1.resnets.1.norm1": "Decoder.Chain_1.Chain_3.Resnet_2.Chain.GroupNorm_1",
"decoder.up_blocks.1.resnets.1.norm2": "Decoder.Chain_1.Chain_3.Resnet_2.Chain.GroupNorm_2",
"decoder.up_blocks.1.resnets.2.norm1": "Decoder.Chain_1.Chain_3.Resnet_3.Chain.GroupNorm_1",
"decoder.up_blocks.1.resnets.2.norm2": "Decoder.Chain_1.Chain_3.Resnet_3.Chain.GroupNorm_2",
"decoder.up_blocks.2.resnets.0.norm1": "Decoder.Chain_1.Chain_4.Resnet_1.Chain.GroupNorm_1",
"encoder.down_blocks.2.resnets.0.conv2": "Encoder.Chain_1.Chain_3.Resnet_1.Chain.Conv2d_2",
"encoder.down_blocks.2.resnets.1.conv1": "Encoder.Chain_1.Chain_3.Resnet_2.Chain.Conv2d_1",
"encoder.down_blocks.2.resnets.1.conv2": "Encoder.Chain_1.Chain_3.Resnet_2.Chain.Conv2d_2",
"encoder.down_blocks.2.downsamplers.0.conv": "Encoder.Chain_1.Chain_3.Downsample.Conv2d",
"encoder.down_blocks.3.resnets.0.conv1": "Encoder.Chain_1.Chain_4.Resnet_1.Chain.Conv2d_1",
"encoder.down_blocks.3.resnets.0.conv2": "Encoder.Chain_1.Chain_4.Resnet_1.Chain.Conv2d_2",
"encoder.down_blocks.3.resnets.1.conv1": "Encoder.Chain_1.Chain_4.Resnet_2.Chain.Conv2d_1",
"encoder.down_blocks.3.resnets.1.conv2": "Encoder.Chain_1.Chain_4.Resnet_2.Chain.Conv2d_2",
"encoder.mid_block.resnets.0.conv1": "Encoder.Chain_1.Chain_5.Resnet_1.Chain.Conv2d_1",
"encoder.mid_block.resnets.0.conv2": "Encoder.Chain_1.Chain_5.Resnet_1.Chain.Conv2d_2",
"encoder.mid_block.resnets.1.conv1": "Encoder.Chain_1.Chain_5.Resnet_2.Chain.Conv2d_1",
"encoder.mid_block.resnets.1.conv2": "Encoder.Chain_1.Chain_5.Resnet_2.Chain.Conv2d_2",
"decoder.mid_block.resnets.0.conv1": "Decoder.Chain_1.Chain_1.Resnet_1.Chain.Conv2d_1",
"decoder.mid_block.resnets.0.conv2": "Decoder.Chain_1.Chain_1.Resnet_1.Chain.Conv2d_2",
"decoder.mid_block.resnets.1.conv1": "Decoder.Chain_1.Chain_1.Resnet_2.Chain.Conv2d_1",
"decoder.mid_block.resnets.1.conv2": "Decoder.Chain_1.Chain_1.Resnet_2.Chain.Conv2d_2",
"decoder.up_blocks.0.resnets.0.conv1": "Decoder.Chain_1.Chain_2.Resnet_1.Chain.Conv2d_1",
"decoder.up_blocks.0.resnets.0.conv2": "Decoder.Chain_1.Chain_2.Resnet_1.Chain.Conv2d_2",
"decoder.up_blocks.0.resnets.1.conv1": "Decoder.Chain_1.Chain_2.Resnet_2.Chain.Conv2d_1",
"decoder.up_blocks.0.resnets.1.conv2": "Decoder.Chain_1.Chain_2.Resnet_2.Chain.Conv2d_2",
"decoder.up_blocks.0.resnets.2.conv1": "Decoder.Chain_1.Chain_2.Resnet_3.Chain.Conv2d_1",
"decoder.up_blocks.0.resnets.2.conv2": "Decoder.Chain_1.Chain_2.Resnet_3.Chain.Conv2d_2",
"decoder.up_blocks.0.upsamplers.0.conv": "Decoder.Chain_1.Chain_2.Upsample.Conv2d",
"decoder.up_blocks.1.resnets.0.conv1": "Decoder.Chain_1.Chain_3.Resnet_1.Chain.Conv2d_1",
"decoder.up_blocks.1.resnets.0.conv2": "Decoder.Chain_1.Chain_3.Resnet_1.Chain.Conv2d_2",
"decoder.up_blocks.1.resnets.1.conv1": "Decoder.Chain_1.Chain_3.Resnet_2.Chain.Conv2d_1",
"decoder.up_blocks.1.resnets.1.conv2": "Decoder.Chain_1.Chain_3.Resnet_2.Chain.Conv2d_2",
"decoder.up_blocks.1.resnets.2.conv1": "Decoder.Chain_1.Chain_3.Resnet_3.Chain.Conv2d_1",
"decoder.up_blocks.1.resnets.2.conv2": "Decoder.Chain_1.Chain_3.Resnet_3.Chain.Conv2d_2",
"decoder.up_blocks.1.upsamplers.0.conv": "Decoder.Chain_1.Chain_3.Upsample.Conv2d",
"encoder.down_blocks.2.resnets.0.conv_shortcut": "Encoder.Chain_1.Chain_3.Resnet_1.Conv2d",
"encoder.mid_block.attentions.0.to_q": "Encoder.Chain_1.Chain_5.Residual.SelfAttention2d.Distribute.Linear_1",
"encoder.mid_block.attentions.0.to_k": "Encoder.Chain_1.Chain_5.Residual.SelfAttention2d.Distribute.Linear_2",
"encoder.mid_block.attentions.0.to_v": "Encoder.Chain_1.Chain_5.Residual.SelfAttention2d.Distribute.Linear_3",
"encoder.mid_block.attentions.0.to_out.0": "Encoder.Chain_1.Chain_5.Residual.SelfAttention2d.Linear",
"decoder.mid_block.attentions.0.to_q": "Decoder.Chain_1.Chain_1.Residual.SelfAttention2d.Distribute.Linear_1",
"decoder.mid_block.attentions.0.to_k": "Decoder.Chain_1.Chain_1.Residual.SelfAttention2d.Distribute.Linear_2",
"decoder.mid_block.attentions.0.to_v": "Decoder.Chain_1.Chain_1.Residual.SelfAttention2d.Distribute.Linear_3",
"decoder.mid_block.attentions.0.to_out.0": "Decoder.Chain_1.Chain_1.Residual.SelfAttention2d.Linear",
"encoder.conv_out": "Encoder.Chain_2.Conv2d",
"quant_conv": "Encoder.Chain_3.Conv2d",
"post_quant_conv": "Decoder.Conv2d_1",
"decoder.conv_in": "Decoder.Conv2d_2",
"decoder.up_blocks.2.resnets.0.conv1": "Decoder.Chain_1.Chain_4.Resnet_1.Chain.Conv2d_1",
"decoder.up_blocks.2.resnets.0.conv_shortcut": "Decoder.Chain_1.Chain_4.Resnet_1.Conv2d",
"decoder.up_blocks.3.resnets.0.conv1": "Decoder.Chain_1.Chain_5.Resnet_1.Chain.Conv2d_1",
"decoder.up_blocks.3.resnets.0.conv_shortcut": "Decoder.Chain_1.Chain_5.Resnet_1.Conv2d",
"decoder.conv_out": "Decoder.Chain_2.Conv2d"
},
"source_aliases": {
"encoder.mid_block.attentions.0.value": "encoder.mid_block.attentions.0.to_v",
"decoder.mid_block.attentions.0.value": "decoder.mid_block.attentions.0.to_v",
"decoder.mid_block.attentions.0.proj_attn": "decoder.mid_block.attentions.0.to_out.0",
"encoder.mid_block.attentions.0.proj_attn": "encoder.mid_block.attentions.0.to_out.0",
"encoder.mid_block.attentions.0.key": "encoder.mid_block.attentions.0.to_k",
"decoder.mid_block.attentions.0.key": "decoder.mid_block.attentions.0.to_k",
"decoder.mid_block.attentions.0.query": "decoder.mid_block.attentions.0.to_q",
"encoder.mid_block.attentions.0.query": "encoder.mid_block.attentions.0.to_q"
},
"reshapes": {
"Encoder.Chain_1.Chain_5.Residual.SelfAttention2d.Distribute.Linear_1.weight": [512, 512],
"Encoder.Chain_1.Chain_5.Residual.SelfAttention2d.Distribute.Linear_2.weight": [512, 512],
"Encoder.Chain_1.Chain_5.Residual.SelfAttention2d.Distribute.Linear_3.weight": [512, 512],
"Encoder.Chain_1.Chain_5.Residual.SelfAttention2d.Linear.weight": [512, 512],
"Decoder.Chain_1.Chain_1.Residual.SelfAttention2d.Distribute.Linear_1.weight": [512, 512],
"Decoder.Chain_1.Chain_1.Residual.SelfAttention2d.Distribute.Linear_2.weight": [512, 512],
"Decoder.Chain_1.Chain_1.Residual.SelfAttention2d.Distribute.Linear_3.weight": [512, 512],
"Decoder.Chain_1.Chain_1.Residual.SelfAttention2d.Linear.weight": [512, 512]
}
}
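
Note: the reshapes block above presumably exists because some source checkpoints store these attention projections as 1x1 convolutions (weights shaped [512, 512, 1, 1]) rather than the flat [512, 512] Linear weights refiners expects. A rough sketch of applying such a spec (the helper name is hypothetical):

def apply_reshapes(state_dict, reshapes):
    # Hypothetical sketch: reshape the listed target keys to the shapes given above.
    for key, shape in reshapes.items():
        if key in state_dict:
            state_dict[key] = state_dict[key].reshape(*shape)
    return state_dict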

View File

@ -0,0 +1,111 @@
def find_state_dict_key_patterns(patterns):
    """Given a list of state_dict keys, collapse similar keys into patterns.

    For example, if the keys are:
        foo.bar.0.baz
        foo.bar.1.baz
    Then the pattern will be:
        foo.bar.(0|1).baz
    """
    prev_pattern_count = len(patterns) + 1
    # keep running the pattern collapse function until the list of patterns doesn't get any smaller
    while prev_pattern_count > len(patterns):
        prev_pattern_count = len(patterns)

        prev_pattern_count_sub = len(patterns) + 1
        while prev_pattern_count_sub > len(patterns):
            prev_pattern_count_sub = len(patterns)
            patterns = _collapse_patterns(patterns)

        prev_pattern_count_sub = len(patterns) + 1
        while prev_pattern_count_sub > len(patterns):
            prev_pattern_count_sub = len(patterns)
            patterns = _collapse_patterns(patterns, reverse_sort=True)

    return patterns


def prefix_only(k):
    return k.rsplit(".", 1)[0]


def nested_dict_from_keys(keys):
    output = {}
    for key in keys:
        parts = key.split(".")
        # Start from the root of the output and iteratively go deeper
        current_level = output
        for part in parts:
            # If the key doesn't exist at the current level, create a new dict
            if part not in current_level:
                current_level[part] = {}
            # Go one level deeper
            current_level = current_level[part]
    return output


def _collapse_patterns(keys, reverse_sort=False):
    keys = keys.copy()
    keys = [k.split(".") for k in keys]
    if reverse_sort:
        keys.sort(key=lambda k: (len(k), list(reversed(str(k)))))
    else:
        keys.sort(key=lambda k: (len(k), k))

    new_key_patterns = []
    curr_key = None
    for k in keys:
        if curr_key is None:
            curr_key = k
            continue
        single_diff_index = get_single_difference(curr_key, k)
        if single_diff_index is None:
            new_key_patterns.append(curr_key)
            curr_key = k
        else:
            cur_part_val = curr_key[single_diff_index]
            key_part_val = k[single_diff_index]
            if "(" in key_part_val:
                key_vals = key_part_val.strip("()").split("|")
            else:
                key_vals = [key_part_val]
            if "(" in cur_part_val:
                vals = cur_part_val.strip("()").split("|")
            else:
                vals = [cur_part_val]
            vals.extend(key_vals)
            vals.sort()
            try:
                vals = [int(v) for v in vals]
                vals.sort()
                vals = [str(v) for v in vals]
            except ValueError:
                pass
            new_cur_part_val = "(" + "|".join(vals) + ")"
            curr_key[single_diff_index] = new_cur_part_val
    new_key_patterns.append(curr_key)

    new_key_patterns = [".".join(p) for p in new_key_patterns]
    new_key_patterns.sort()
    return new_key_patterns
def get_single_difference(a, b):
    """
    Given two lists of strings, if only a single string differs between the two lists,
    return the index of the differing string.
    """
    if len(a) != len(b):
        return None
    diff_count = 0
    diff_index = None
    for i, (asub, bsub) in enumerate(zip(a, b)):
        if asub != bsub:
            diff_count += 1
            diff_index = i
            if diff_count > 1:
                break
    if diff_count == 1:
        return diff_index
    return None
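
A minimal usage sketch of the pattern collapse above, using the toy keys from the docstring (the expected output is what the docstring implies):

keys = [
    "foo.bar.0.baz",
    "foo.bar.1.baz",
    "foo.qux",
]
patterns = find_state_dict_key_patterns(keys)
# expected, per the docstring: ["foo.bar.(0|1).baz", "foo.qux"]
print(patterns)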

View File

@ -0,0 +1,48 @@
import os.path

_base_dir = os.path.dirname(os.path.realpath(__file__))

WEIGHT_MAPS_PATH = os.path.join(_base_dir, "maps")
WEIGHT_INFO_PATH = os.path.join(_base_dir, "weight_info")


class MODEL_NAMES:
    SD15 = "stable-diffusion-1-5"


class COMPONENT_NAMES:
    VAE = "vae"
    TEXT_ENCODER = "text"
    UNET = "unet"
    LORA = "lora"


class FORMAT_NAMES:
    COMPVIS = "compvis"
    DIFFUSERS = "diffusers"
    REFINERS = "refiners"


def save_model_info(model_name, component_name, format_name, info_type, data):
    import json

    model_name = model_name.replace("_", "-")
    component_name = component_name.replace("_", "-")
    format_name = format_name.replace("_", "-")
    filename = os.path.join(
        WEIGHT_INFO_PATH,
        f"{model_name}_{component_name}_{format_name}.{info_type}.json",
    )
    with open(filename, "w") as f:
        f.write(json.dumps(data, indent=2))


def prefixes_only(keys):
    new_keys = []
    prev_key = None
    for k in keys:
        new_key = k.rsplit(".", 1)[0]
        if new_key != prev_key:
            new_keys.append(new_key)
        prev_key = new_key
    return new_keys
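
As a small illustration, a hypothetical call to save_model_info (the info_type string and data value here are made-up examples) would write its JSON under WEIGHT_INFO_PATH with a name like stable-diffusion-1-5_unet_diffusers.keys.json:

save_model_info(
    model_name=MODEL_NAMES.SD15,
    component_name=COMPONENT_NAMES.UNET,
    format_name=FORMAT_NAMES.DIFFUSERS,
    info_type="keys",  # hypothetical info_type
    data=["down_blocks.0.attentions.0.proj_in"],
)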

View File

@ -0,0 +1,5 @@
[
"down_blocks.(0|1|2).attentions.(0|1).transformer_blocks.0.(attn1|attn2).processor.(to_k_lora|to_out_lora|to_q_lora|to_v_lora).(down|up).weight",
"mid_block.attentions.0.transformer_blocks.0.(attn1|attn2).processor.(to_k_lora|to_out_lora|to_q_lora|to_v_lora).(down|up).weight",
"up_blocks.(1|2|3).attentions.(0|1|2).transformer_blocks.0.(attn1|attn2).processor.(to_k_lora|to_out_lora|to_q_lora|to_v_lora).(down|up).weight"
]
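
The three patterns above look like the collapsed form (as produced by find_state_dict_key_patterns) of the full diffusers LoRA key list that follows. A rough sketch of matching a raw key against such a pattern, treating everything outside the (a|b) groups as literal text (the helper name and approach are assumptions):

import re

def pattern_matches(pattern, key):
    # Keep "(a|b)" groups as regex alternations; escape everything else (dots included).
    parts = re.split(r"(\([^)]*\))", pattern)
    regex = "".join(p if p.startswith("(") else re.escape(p) for p in parts)
    return re.fullmatch(regex, key) is not None

# e.g. the mid_block pattern above matches
# "mid_block.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight"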

View File

@ -0,0 +1,258 @@
[
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.processor.to_out_lora.up.weight",
"mid_block.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight",
"mid_block.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.up.weight",
"mid_block.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.down.weight",
"mid_block.attentions.0.transformer_blocks.0.attn1.processor.to_k_lora.up.weight",
"mid_block.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.down.weight",
"mid_block.attentions.0.transformer_blocks.0.attn1.processor.to_v_lora.up.weight",
"mid_block.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.down.weight",
"mid_block.attentions.0.transformer_blocks.0.attn1.processor.to_out_lora.up.weight",
"mid_block.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.down.weight",
"mid_block.attentions.0.transformer_blocks.0.attn2.processor.to_q_lora.up.weight",
"mid_block.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.down.weight",
"mid_block.attentions.0.transformer_blocks.0.attn2.processor.to_k_lora.up.weight",
"mid_block.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.down.weight",
"mid_block.attentions.0.transformer_blocks.0.attn2.processor.to_v_lora.up.weight",
"mid_block.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.down.weight",
"mid_block.attentions.0.transformer_blocks.0.attn2.processor.to_out_lora.up.weight"
]

View File

@ -0,0 +1,3 @@
[
"unet.(0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58|59|60|61|62|63|64|65|66|67|68|69|70|71|72|73|74|75|76|77|78|79|80|81|82|83|84|85|86|87|88|89|90|91|92|93|94|95|96|97|98|99|100|101|102|103|104|105|106|107|108|109|110|111|112|113|114|115|116|117|118|119|120|121|122|123|124|125|126|127|128|129|130|131|132|133|134|135|136|137|138|139|140|141|142|143|144|145|146|147|148|149|150|151|152|153|154|155|156|157|158|159|160|161|162|163|164|165|166|167|168|169|170|171|172|173|174|175|176|177|178|179|180|181|182|183|184|185|186|187|188|189|190|191|192|193|194|195|196|197|198|199|200|201|202|203|204|205|206|207|208|209|210|211|212|213|214|215|216|217|218|219|220|221|222|223|224|225|226|227|228|229|230|231|232|233|234|235|236|237|238|239|240|241|242|243|244|245|246|247|248|249|250|251|252|253|254|255|256|257|258|259|260|261|262|263|264|265|266|267|268|269|270|271|272|273|274|275|276|277|278|279|280|281|282|283|284|285|286|287|288|289|290|291|292|293|294|295|296|297|298|299|300|301|302|303|304|305|306|307|308|309|310|311|312|313|314|315|316|317|318|319)"
]

View File

@ -0,0 +1,322 @@
[
"unet.000",
"unet.001",
"unet.002",
"unet.003",
"unet.004",
"unet.005",
"unet.006",
"unet.007",
"unet.008",
"unet.009",
"unet.010",
"unet.011",
"unet.012",
"unet.013",
"unet.014",
"unet.015",
"unet.016",
"unet.017",
"unet.018",
"unet.019",
"unet.020",
"unet.021",
"unet.022",
"unet.023",
"unet.024",
"unet.025",
"unet.026",
"unet.027",
"unet.028",
"unet.029",
"unet.030",
"unet.031",
"unet.032",
"unet.033",
"unet.034",
"unet.035",
"unet.036",
"unet.037",
"unet.038",
"unet.039",
"unet.040",
"unet.041",
"unet.042",
"unet.043",
"unet.044",
"unet.045",
"unet.046",
"unet.047",
"unet.048",
"unet.049",
"unet.050",
"unet.051",
"unet.052",
"unet.053",
"unet.054",
"unet.055",
"unet.056",
"unet.057",
"unet.058",
"unet.059",
"unet.060",
"unet.061",
"unet.062",
"unet.063",
"unet.064",
"unet.065",
"unet.066",
"unet.067",
"unet.068",
"unet.069",
"unet.070",
"unet.071",
"unet.072",
"unet.073",
"unet.074",
"unet.075",
"unet.076",
"unet.077",
"unet.078",
"unet.079",
"unet.080",
"unet.081",
"unet.082",
"unet.083",
"unet.084",
"unet.085",
"unet.086",
"unet.087",
"unet.088",
"unet.089",
"unet.090",
"unet.091",
"unet.092",
"unet.093",
"unet.094",
"unet.095",
"unet.096",
"unet.097",
"unet.098",
"unet.099",
"unet.100",
"unet.101",
"unet.102",
"unet.103",
"unet.104",
"unet.105",
"unet.106",
"unet.107",
"unet.108",
"unet.109",
"unet.110",
"unet.111",
"unet.112",
"unet.113",
"unet.114",
"unet.115",
"unet.116",
"unet.117",
"unet.118",
"unet.119",
"unet.120",
"unet.121",
"unet.122",
"unet.123",
"unet.124",
"unet.125",
"unet.126",
"unet.127",
"unet.128",
"unet.129",
"unet.130",
"unet.131",
"unet.132",
"unet.133",
"unet.134",
"unet.135",
"unet.136",
"unet.137",
"unet.138",
"unet.139",
"unet.140",
"unet.141",
"unet.142",
"unet.143",
"unet.144",
"unet.145",
"unet.146",
"unet.147",
"unet.148",
"unet.149",
"unet.150",
"unet.151",
"unet.152",
"unet.153",
"unet.154",
"unet.155",
"unet.156",
"unet.157",
"unet.158",
"unet.159",
"unet.160",
"unet.161",
"unet.162",
"unet.163",
"unet.164",
"unet.165",
"unet.166",
"unet.167",
"unet.168",
"unet.169",
"unet.170",
"unet.171",
"unet.172",
"unet.173",
"unet.174",
"unet.175",
"unet.176",
"unet.177",
"unet.178",
"unet.179",
"unet.180",
"unet.181",
"unet.182",
"unet.183",
"unet.184",
"unet.185",
"unet.186",
"unet.187",
"unet.188",
"unet.189",
"unet.190",
"unet.191",
"unet.192",
"unet.193",
"unet.194",
"unet.195",
"unet.196",
"unet.197",
"unet.198",
"unet.199",
"unet.200",
"unet.201",
"unet.202",
"unet.203",
"unet.204",
"unet.205",
"unet.206",
"unet.207",
"unet.208",
"unet.209",
"unet.210",
"unet.211",
"unet.212",
"unet.213",
"unet.214",
"unet.215",
"unet.216",
"unet.217",
"unet.218",
"unet.219",
"unet.220",
"unet.221",
"unet.222",
"unet.223",
"unet.224",
"unet.225",
"unet.226",
"unet.227",
"unet.228",
"unet.229",
"unet.230",
"unet.231",
"unet.232",
"unet.233",
"unet.234",
"unet.235",
"unet.236",
"unet.237",
"unet.238",
"unet.239",
"unet.240",
"unet.241",
"unet.242",
"unet.243",
"unet.244",
"unet.245",
"unet.246",
"unet.247",
"unet.248",
"unet.249",
"unet.250",
"unet.251",
"unet.252",
"unet.253",
"unet.254",
"unet.255",
"unet.256",
"unet.257",
"unet.258",
"unet.259",
"unet.260",
"unet.261",
"unet.262",
"unet.263",
"unet.264",
"unet.265",
"unet.266",
"unet.267",
"unet.268",
"unet.269",
"unet.270",
"unet.271",
"unet.272",
"unet.273",
"unet.274",
"unet.275",
"unet.276",
"unet.277",
"unet.278",
"unet.279",
"unet.280",
"unet.281",
"unet.282",
"unet.283",
"unet.284",
"unet.285",
"unet.286",
"unet.287",
"unet.288",
"unet.289",
"unet.290",
"unet.291",
"unet.292",
"unet.293",
"unet.294",
"unet.295",
"unet.296",
"unet.297",
"unet.298",
"unet.299",
"unet.300",
"unet.301",
"unet.302",
"unet.303",
"unet.304",
"unet.305",
"unet.306",
"unet.307",
"unet.308",
"unet.309",
"unet.310",
"unet.311",
"unet.312",
"unet.313",
"unet.314",
"unet.315",
"unet.316",
"unet.317",
"unet.318",
"unet.319"
]

View File

@ -0,0 +1,7 @@
[
"cond_stage_model.transformer.text_model.(embeddings|final_layer_norm)",
"cond_stage_model.transformer.text_model.embeddings.(position_embedding|token_embedding)",
"cond_stage_model.transformer.text_model.encoder.layers.(0|1|2|3|4|5|6|7|8|9|10|11).(layer_norm1|layer_norm2)",
"cond_stage_model.transformer.text_model.encoder.layers.(0|1|2|3|4|5|6|7|8|9|10|11).mlp.(fc1|fc2)",
"cond_stage_model.transformer.text_model.encoder.layers.(0|1|2|3|4|5|6|7|8|9|10|11).self_attn.(k_proj|out_proj|q_proj|v_proj)"
]

View File

@ -0,0 +1,102 @@
[
"transformer.text_model.embeddings.token_embedding",
"transformer.text_model.embeddings.position_embedding",
"transformer.text_model.embeddings",
"transformer.text_model.encoder.layers.0.layer_norm1",
"transformer.text_model.encoder.layers.0.self_attn.q_proj",
"transformer.text_model.encoder.layers.0.self_attn.k_proj",
"transformer.text_model.encoder.layers.0.self_attn.v_proj",
"transformer.text_model.encoder.layers.0.self_attn.out_proj",
"transformer.text_model.encoder.layers.0.layer_norm2",
"transformer.text_model.encoder.layers.0.mlp.fc1",
"transformer.text_model.encoder.layers.0.mlp.fc2",
"transformer.text_model.encoder.layers.1.layer_norm1",
"transformer.text_model.encoder.layers.1.self_attn.q_proj",
"transformer.text_model.encoder.layers.1.self_attn.k_proj",
"transformer.text_model.encoder.layers.1.self_attn.v_proj",
"transformer.text_model.encoder.layers.1.self_attn.out_proj",
"transformer.text_model.encoder.layers.1.layer_norm2",
"transformer.text_model.encoder.layers.1.mlp.fc1",
"transformer.text_model.encoder.layers.1.mlp.fc2",
"transformer.text_model.encoder.layers.2.layer_norm1",
"transformer.text_model.encoder.layers.2.self_attn.q_proj",
"transformer.text_model.encoder.layers.2.self_attn.k_proj",
"transformer.text_model.encoder.layers.2.self_attn.v_proj",
"transformer.text_model.encoder.layers.2.self_attn.out_proj",
"transformer.text_model.encoder.layers.2.layer_norm2",
"transformer.text_model.encoder.layers.2.mlp.fc1",
"transformer.text_model.encoder.layers.2.mlp.fc2",
"transformer.text_model.encoder.layers.3.layer_norm1",
"transformer.text_model.encoder.layers.3.self_attn.q_proj",
"transformer.text_model.encoder.layers.3.self_attn.k_proj",
"transformer.text_model.encoder.layers.3.self_attn.v_proj",
"transformer.text_model.encoder.layers.3.self_attn.out_proj",
"transformer.text_model.encoder.layers.3.layer_norm2",
"transformer.text_model.encoder.layers.3.mlp.fc1",
"transformer.text_model.encoder.layers.3.mlp.fc2",
"transformer.text_model.encoder.layers.4.layer_norm1",
"transformer.text_model.encoder.layers.4.self_attn.q_proj",
"transformer.text_model.encoder.layers.4.self_attn.k_proj",
"transformer.text_model.encoder.layers.4.self_attn.v_proj",
"transformer.text_model.encoder.layers.4.self_attn.out_proj",
"transformer.text_model.encoder.layers.4.layer_norm2",
"transformer.text_model.encoder.layers.4.mlp.fc1",
"transformer.text_model.encoder.layers.4.mlp.fc2",
"transformer.text_model.encoder.layers.5.layer_norm1",
"transformer.text_model.encoder.layers.5.self_attn.q_proj",
"transformer.text_model.encoder.layers.5.self_attn.k_proj",
"transformer.text_model.encoder.layers.5.self_attn.v_proj",
"transformer.text_model.encoder.layers.5.self_attn.out_proj",
"transformer.text_model.encoder.layers.5.layer_norm2",
"transformer.text_model.encoder.layers.5.mlp.fc1",
"transformer.text_model.encoder.layers.5.mlp.fc2",
"transformer.text_model.encoder.layers.6.layer_norm1",
"transformer.text_model.encoder.layers.6.self_attn.q_proj",
"transformer.text_model.encoder.layers.6.self_attn.k_proj",
"transformer.text_model.encoder.layers.6.self_attn.v_proj",
"transformer.text_model.encoder.layers.6.self_attn.out_proj",
"transformer.text_model.encoder.layers.6.layer_norm2",
"transformer.text_model.encoder.layers.6.mlp.fc1",
"transformer.text_model.encoder.layers.6.mlp.fc2",
"transformer.text_model.encoder.layers.7.layer_norm1",
"transformer.text_model.encoder.layers.7.self_attn.q_proj",
"transformer.text_model.encoder.layers.7.self_attn.k_proj",
"transformer.text_model.encoder.layers.7.self_attn.v_proj",
"transformer.text_model.encoder.layers.7.self_attn.out_proj",
"transformer.text_model.encoder.layers.7.layer_norm2",
"transformer.text_model.encoder.layers.7.mlp.fc1",
"transformer.text_model.encoder.layers.7.mlp.fc2",
"transformer.text_model.encoder.layers.8.layer_norm1",
"transformer.text_model.encoder.layers.8.self_attn.q_proj",
"transformer.text_model.encoder.layers.8.self_attn.k_proj",
"transformer.text_model.encoder.layers.8.self_attn.v_proj",
"transformer.text_model.encoder.layers.8.self_attn.out_proj",
"transformer.text_model.encoder.layers.8.layer_norm2",
"transformer.text_model.encoder.layers.8.mlp.fc1",
"transformer.text_model.encoder.layers.8.mlp.fc2",
"transformer.text_model.encoder.layers.9.layer_norm1",
"transformer.text_model.encoder.layers.9.self_attn.q_proj",
"transformer.text_model.encoder.layers.9.self_attn.k_proj",
"transformer.text_model.encoder.layers.9.self_attn.v_proj",
"transformer.text_model.encoder.layers.9.self_attn.out_proj",
"transformer.text_model.encoder.layers.9.layer_norm2",
"transformer.text_model.encoder.layers.9.mlp.fc1",
"transformer.text_model.encoder.layers.9.mlp.fc2",
"transformer.text_model.encoder.layers.10.layer_norm1",
"transformer.text_model.encoder.layers.10.self_attn.q_proj",
"transformer.text_model.encoder.layers.10.self_attn.k_proj",
"transformer.text_model.encoder.layers.10.self_attn.v_proj",
"transformer.text_model.encoder.layers.10.self_attn.out_proj",
"transformer.text_model.encoder.layers.10.layer_norm2",
"transformer.text_model.encoder.layers.10.mlp.fc1",
"transformer.text_model.encoder.layers.10.mlp.fc2",
"transformer.text_model.encoder.layers.11.layer_norm1",
"transformer.text_model.encoder.layers.11.self_attn.q_proj",
"transformer.text_model.encoder.layers.11.self_attn.k_proj",
"transformer.text_model.encoder.layers.11.self_attn.v_proj",
"transformer.text_model.encoder.layers.11.self_attn.out_proj",
"transformer.text_model.encoder.layers.11.layer_norm2",
"transformer.text_model.encoder.layers.11.mlp.fc1",
"transformer.text_model.encoder.layers.11.mlp.fc2",
"transformer.text_model.final_layer_norm"
]

View File

@ -0,0 +1,102 @@
[
"cond_stage_model.transformer.text_model.embeddings.position_embedding",
"cond_stage_model.transformer.text_model.embeddings",
"cond_stage_model.transformer.text_model.embeddings.token_embedding",
"cond_stage_model.transformer.text_model.encoder.layers.0.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.0.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.0.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.0.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.0.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.0.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.0.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.0.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.1.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.1.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.1.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.1.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.1.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.1.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.1.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.1.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.10.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.10.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.10.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.10.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.10.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.10.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.10.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.10.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.11.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.11.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.11.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.11.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.11.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.11.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.11.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.11.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.2.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.2.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.2.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.2.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.2.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.2.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.2.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.2.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.3.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.3.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.3.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.3.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.3.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.3.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.3.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.3.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.4.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.4.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.4.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.4.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.4.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.4.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.4.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.4.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.5.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.5.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.5.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.5.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.5.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.5.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.5.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.5.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.6.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.6.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.6.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.6.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.6.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.6.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.6.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.6.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.7.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.7.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.7.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.7.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.7.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.7.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.7.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.7.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.8.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.8.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.8.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.8.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.8.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.8.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.8.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.8.self_attn.v_proj",
"cond_stage_model.transformer.text_model.encoder.layers.9.layer_norm1",
"cond_stage_model.transformer.text_model.encoder.layers.9.layer_norm2",
"cond_stage_model.transformer.text_model.encoder.layers.9.mlp.fc1",
"cond_stage_model.transformer.text_model.encoder.layers.9.mlp.fc2",
"cond_stage_model.transformer.text_model.encoder.layers.9.self_attn.k_proj",
"cond_stage_model.transformer.text_model.encoder.layers.9.self_attn.out_proj",
"cond_stage_model.transformer.text_model.encoder.layers.9.self_attn.q_proj",
"cond_stage_model.transformer.text_model.encoder.layers.9.self_attn.v_proj",
"cond_stage_model.transformer.text_model.final_layer_norm"
]

View File

@ -0,0 +1,7 @@
[
"text_model.(embeddings|final_layer_norm)",
"text_model.embeddings.(position_embedding|token_embedding)",
"text_model.encoder.layers.(0|1|2|3|4|5|6|7|8|9|10|11).(layer_norm1|layer_norm2)",
"text_model.encoder.layers.(0|1|2|3|4|5|6|7|8|9|10|11).mlp.(fc1|fc2)",
"text_model.encoder.layers.(0|1|2|3|4|5|6|7|8|9|10|11).self_attn.(k_proj|out_proj|q_proj|v_proj)"
]

View File

@ -0,0 +1,102 @@
[
"text_model.embeddings.token_embedding",
"text_model.embeddings.position_embedding",
"text_model.embeddings",
"text_model.encoder.layers.0.layer_norm1",
"text_model.encoder.layers.0.self_attn.q_proj",
"text_model.encoder.layers.0.self_attn.k_proj",
"text_model.encoder.layers.0.self_attn.v_proj",
"text_model.encoder.layers.0.self_attn.out_proj",
"text_model.encoder.layers.0.layer_norm2",
"text_model.encoder.layers.0.mlp.fc1",
"text_model.encoder.layers.0.mlp.fc2",
"text_model.encoder.layers.1.layer_norm1",
"text_model.encoder.layers.1.self_attn.q_proj",
"text_model.encoder.layers.1.self_attn.k_proj",
"text_model.encoder.layers.1.self_attn.v_proj",
"text_model.encoder.layers.1.self_attn.out_proj",
"text_model.encoder.layers.1.layer_norm2",
"text_model.encoder.layers.1.mlp.fc1",
"text_model.encoder.layers.1.mlp.fc2",
"text_model.encoder.layers.2.layer_norm1",
"text_model.encoder.layers.2.self_attn.q_proj",
"text_model.encoder.layers.2.self_attn.k_proj",
"text_model.encoder.layers.2.self_attn.v_proj",
"text_model.encoder.layers.2.self_attn.out_proj",
"text_model.encoder.layers.2.layer_norm2",
"text_model.encoder.layers.2.mlp.fc1",
"text_model.encoder.layers.2.mlp.fc2",
"text_model.encoder.layers.3.layer_norm1",
"text_model.encoder.layers.3.self_attn.q_proj",
"text_model.encoder.layers.3.self_attn.k_proj",
"text_model.encoder.layers.3.self_attn.v_proj",
"text_model.encoder.layers.3.self_attn.out_proj",
"text_model.encoder.layers.3.layer_norm2",
"text_model.encoder.layers.3.mlp.fc1",
"text_model.encoder.layers.3.mlp.fc2",
"text_model.encoder.layers.4.layer_norm1",
"text_model.encoder.layers.4.self_attn.q_proj",
"text_model.encoder.layers.4.self_attn.k_proj",
"text_model.encoder.layers.4.self_attn.v_proj",
"text_model.encoder.layers.4.self_attn.out_proj",
"text_model.encoder.layers.4.layer_norm2",
"text_model.encoder.layers.4.mlp.fc1",
"text_model.encoder.layers.4.mlp.fc2",
"text_model.encoder.layers.5.layer_norm1",
"text_model.encoder.layers.5.self_attn.q_proj",
"text_model.encoder.layers.5.self_attn.k_proj",
"text_model.encoder.layers.5.self_attn.v_proj",
"text_model.encoder.layers.5.self_attn.out_proj",
"text_model.encoder.layers.5.layer_norm2",
"text_model.encoder.layers.5.mlp.fc1",
"text_model.encoder.layers.5.mlp.fc2",
"text_model.encoder.layers.6.layer_norm1",
"text_model.encoder.layers.6.self_attn.q_proj",
"text_model.encoder.layers.6.self_attn.k_proj",
"text_model.encoder.layers.6.self_attn.v_proj",
"text_model.encoder.layers.6.self_attn.out_proj",
"text_model.encoder.layers.6.layer_norm2",
"text_model.encoder.layers.6.mlp.fc1",
"text_model.encoder.layers.6.mlp.fc2",
"text_model.encoder.layers.7.layer_norm1",
"text_model.encoder.layers.7.self_attn.q_proj",
"text_model.encoder.layers.7.self_attn.k_proj",
"text_model.encoder.layers.7.self_attn.v_proj",
"text_model.encoder.layers.7.self_attn.out_proj",
"text_model.encoder.layers.7.layer_norm2",
"text_model.encoder.layers.7.mlp.fc1",
"text_model.encoder.layers.7.mlp.fc2",
"text_model.encoder.layers.8.layer_norm1",
"text_model.encoder.layers.8.self_attn.q_proj",
"text_model.encoder.layers.8.self_attn.k_proj",
"text_model.encoder.layers.8.self_attn.v_proj",
"text_model.encoder.layers.8.self_attn.out_proj",
"text_model.encoder.layers.8.layer_norm2",
"text_model.encoder.layers.8.mlp.fc1",
"text_model.encoder.layers.8.mlp.fc2",
"text_model.encoder.layers.9.layer_norm1",
"text_model.encoder.layers.9.self_attn.q_proj",
"text_model.encoder.layers.9.self_attn.k_proj",
"text_model.encoder.layers.9.self_attn.v_proj",
"text_model.encoder.layers.9.self_attn.out_proj",
"text_model.encoder.layers.9.layer_norm2",
"text_model.encoder.layers.9.mlp.fc1",
"text_model.encoder.layers.9.mlp.fc2",
"text_model.encoder.layers.10.layer_norm1",
"text_model.encoder.layers.10.self_attn.q_proj",
"text_model.encoder.layers.10.self_attn.k_proj",
"text_model.encoder.layers.10.self_attn.v_proj",
"text_model.encoder.layers.10.self_attn.out_proj",
"text_model.encoder.layers.10.layer_norm2",
"text_model.encoder.layers.10.mlp.fc1",
"text_model.encoder.layers.10.mlp.fc2",
"text_model.encoder.layers.11.layer_norm1",
"text_model.encoder.layers.11.self_attn.q_proj",
"text_model.encoder.layers.11.self_attn.k_proj",
"text_model.encoder.layers.11.self_attn.v_proj",
"text_model.encoder.layers.11.self_attn.out_proj",
"text_model.encoder.layers.11.layer_norm2",
"text_model.encoder.layers.11.mlp.fc1",
"text_model.encoder.layers.11.mlp.fc2",
"text_model.final_layer_norm"
]

View File

@ -0,0 +1,102 @@
[
"text_model.embeddings.position_embedding",
"text_model.embeddings",
"text_model.embeddings.token_embedding",
"text_model.encoder.layers.0.layer_norm1",
"text_model.encoder.layers.0.layer_norm2",
"text_model.encoder.layers.0.mlp.fc1",
"text_model.encoder.layers.0.mlp.fc2",
"text_model.encoder.layers.0.self_attn.k_proj",
"text_model.encoder.layers.0.self_attn.out_proj",
"text_model.encoder.layers.0.self_attn.q_proj",
"text_model.encoder.layers.0.self_attn.v_proj",
"text_model.encoder.layers.1.layer_norm1",
"text_model.encoder.layers.1.layer_norm2",
"text_model.encoder.layers.1.mlp.fc1",
"text_model.encoder.layers.1.mlp.fc2",
"text_model.encoder.layers.1.self_attn.k_proj",
"text_model.encoder.layers.1.self_attn.out_proj",
"text_model.encoder.layers.1.self_attn.q_proj",
"text_model.encoder.layers.1.self_attn.v_proj",
"text_model.encoder.layers.10.layer_norm1",
"text_model.encoder.layers.10.layer_norm2",
"text_model.encoder.layers.10.mlp.fc1",
"text_model.encoder.layers.10.mlp.fc2",
"text_model.encoder.layers.10.self_attn.k_proj",
"text_model.encoder.layers.10.self_attn.out_proj",
"text_model.encoder.layers.10.self_attn.q_proj",
"text_model.encoder.layers.10.self_attn.v_proj",
"text_model.encoder.layers.11.layer_norm1",
"text_model.encoder.layers.11.layer_norm2",
"text_model.encoder.layers.11.mlp.fc1",
"text_model.encoder.layers.11.mlp.fc2",
"text_model.encoder.layers.11.self_attn.k_proj",
"text_model.encoder.layers.11.self_attn.out_proj",
"text_model.encoder.layers.11.self_attn.q_proj",
"text_model.encoder.layers.11.self_attn.v_proj",
"text_model.encoder.layers.2.layer_norm1",
"text_model.encoder.layers.2.layer_norm2",
"text_model.encoder.layers.2.mlp.fc1",
"text_model.encoder.layers.2.mlp.fc2",
"text_model.encoder.layers.2.self_attn.k_proj",
"text_model.encoder.layers.2.self_attn.out_proj",
"text_model.encoder.layers.2.self_attn.q_proj",
"text_model.encoder.layers.2.self_attn.v_proj",
"text_model.encoder.layers.3.layer_norm1",
"text_model.encoder.layers.3.layer_norm2",
"text_model.encoder.layers.3.mlp.fc1",
"text_model.encoder.layers.3.mlp.fc2",
"text_model.encoder.layers.3.self_attn.k_proj",
"text_model.encoder.layers.3.self_attn.out_proj",
"text_model.encoder.layers.3.self_attn.q_proj",
"text_model.encoder.layers.3.self_attn.v_proj",
"text_model.encoder.layers.4.layer_norm1",
"text_model.encoder.layers.4.layer_norm2",
"text_model.encoder.layers.4.mlp.fc1",
"text_model.encoder.layers.4.mlp.fc2",
"text_model.encoder.layers.4.self_attn.k_proj",
"text_model.encoder.layers.4.self_attn.out_proj",
"text_model.encoder.layers.4.self_attn.q_proj",
"text_model.encoder.layers.4.self_attn.v_proj",
"text_model.encoder.layers.5.layer_norm1",
"text_model.encoder.layers.5.layer_norm2",
"text_model.encoder.layers.5.mlp.fc1",
"text_model.encoder.layers.5.mlp.fc2",
"text_model.encoder.layers.5.self_attn.k_proj",
"text_model.encoder.layers.5.self_attn.out_proj",
"text_model.encoder.layers.5.self_attn.q_proj",
"text_model.encoder.layers.5.self_attn.v_proj",
"text_model.encoder.layers.6.layer_norm1",
"text_model.encoder.layers.6.layer_norm2",
"text_model.encoder.layers.6.mlp.fc1",
"text_model.encoder.layers.6.mlp.fc2",
"text_model.encoder.layers.6.self_attn.k_proj",
"text_model.encoder.layers.6.self_attn.out_proj",
"text_model.encoder.layers.6.self_attn.q_proj",
"text_model.encoder.layers.6.self_attn.v_proj",
"text_model.encoder.layers.7.layer_norm1",
"text_model.encoder.layers.7.layer_norm2",
"text_model.encoder.layers.7.mlp.fc1",
"text_model.encoder.layers.7.mlp.fc2",
"text_model.encoder.layers.7.self_attn.k_proj",
"text_model.encoder.layers.7.self_attn.out_proj",
"text_model.encoder.layers.7.self_attn.q_proj",
"text_model.encoder.layers.7.self_attn.v_proj",
"text_model.encoder.layers.8.layer_norm1",
"text_model.encoder.layers.8.layer_norm2",
"text_model.encoder.layers.8.mlp.fc1",
"text_model.encoder.layers.8.mlp.fc2",
"text_model.encoder.layers.8.self_attn.k_proj",
"text_model.encoder.layers.8.self_attn.out_proj",
"text_model.encoder.layers.8.self_attn.q_proj",
"text_model.encoder.layers.8.self_attn.v_proj",
"text_model.encoder.layers.9.layer_norm1",
"text_model.encoder.layers.9.layer_norm2",
"text_model.encoder.layers.9.mlp.fc1",
"text_model.encoder.layers.9.mlp.fc2",
"text_model.encoder.layers.9.self_attn.k_proj",
"text_model.encoder.layers.9.self_attn.out_proj",
"text_model.encoder.layers.9.self_attn.q_proj",
"text_model.encoder.layers.9.self_attn.v_proj",
"text_model.final_layer_norm"
]

View File

@ -0,0 +1,42 @@
[
"model.diffusion_model.(input_blocks|output_blocks).(1|2|4|5|7|8|10|11).0.emb_layers.1",
"model.diffusion_model.(input_blocks|output_blocks).(1|2|4|5|7|8|10|11).0.in_layers.(0|2)",
"model.diffusion_model.(input_blocks|output_blocks).(1|2|4|5|7|8|10|11).0.out_layers.(0|3)",
"model.diffusion_model.(input_blocks|output_blocks).(4|5|7|8).1.transformer_blocks.0.(attn1|attn2).to_out.0",
"model.diffusion_model.(input_blocks|output_blocks).(4|5|7|8).1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.(input_blocks|output_blocks).(4|7).0.skip_connection",
"model.diffusion_model.(out|time_embed).(0|2)",
"model.diffusion_model.input_blocks.(1|2|4|5|7|8).1.(norm|proj_in|proj_out)",
"model.diffusion_model.input_blocks.(1|2|4|5|7|8).1.transformer_blocks.0.(attn1|attn2).(to_k|to_q|to_v)",
"model.diffusion_model.input_blocks.(1|2|4|5|7|8).1.transformer_blocks.0.(norm1|norm2|norm3)",
"model.diffusion_model.input_blocks.(1|2|4|5|7|8).1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.input_blocks.(3|6|9).0.op",
"model.diffusion_model.input_blocks.0.0",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.(attn1|attn2).to_out.0",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.(attn1|attn2).to_out.0",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.middle_block.(0|2).emb_layers.1",
"model.diffusion_model.middle_block.(0|2).in_layers.(0|2)",
"model.diffusion_model.middle_block.(0|2).out_layers.(0|3)",
"model.diffusion_model.middle_block.1.(norm|proj_in|proj_out)",
"model.diffusion_model.middle_block.1.transformer_blocks.0.(attn1|attn2).(to_k|to_q|to_v)",
"model.diffusion_model.middle_block.1.transformer_blocks.0.(attn1|attn2).to_out.0",
"model.diffusion_model.middle_block.1.transformer_blocks.0.(norm1|norm2|norm3)",
"model.diffusion_model.middle_block.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.middle_block.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.(0|1|2|3|5|6|8|9|10|11).0.skip_connection",
"model.diffusion_model.output_blocks.(0|3|6|9).0.emb_layers.1",
"model.diffusion_model.output_blocks.(0|3|6|9).0.in_layers.(0|2)",
"model.diffusion_model.output_blocks.(0|3|6|9).0.out_layers.(0|3)",
"model.diffusion_model.output_blocks.(3|4|5|6|7|8|9|10|11).1.(norm|proj_in|proj_out)",
"model.diffusion_model.output_blocks.(3|4|5|6|7|8|9|10|11).1.transformer_blocks.0.(attn1|attn2).(to_k|to_q|to_v)",
"model.diffusion_model.output_blocks.(3|4|5|6|7|8|9|10|11).1.transformer_blocks.0.(norm1|norm2|norm3)",
"model.diffusion_model.output_blocks.(3|4|5|6|7|8|9|10|11).1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.output_blocks.(3|6|9|10).1.transformer_blocks.0.(attn1|attn2).to_out.0",
"model.diffusion_model.output_blocks.(3|6|9|10).1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.(5|8).2.conv",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.(attn1|attn2).to_out.0",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.2.1.conv"
]

View File

@ -0,0 +1,393 @@
[
"diffusion_model.time_embed.0",
"diffusion_model.time_embed.2",
"diffusion_model.input_blocks.0.0",
"diffusion_model.input_blocks.1.0.in_layers.0",
"diffusion_model.input_blocks.1.0.in_layers.2",
"diffusion_model.input_blocks.1.0.emb_layers.1",
"diffusion_model.input_blocks.1.0.out_layers.0",
"diffusion_model.input_blocks.1.0.out_layers.3",
"diffusion_model.input_blocks.1.1.norm",
"diffusion_model.input_blocks.1.1.proj_in",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.1.1.proj_out",
"diffusion_model.input_blocks.2.0.in_layers.0",
"diffusion_model.input_blocks.2.0.in_layers.2",
"diffusion_model.input_blocks.2.0.emb_layers.1",
"diffusion_model.input_blocks.2.0.out_layers.0",
"diffusion_model.input_blocks.2.0.out_layers.3",
"diffusion_model.input_blocks.2.1.norm",
"diffusion_model.input_blocks.2.1.proj_in",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.2.1.proj_out",
"diffusion_model.input_blocks.3.0.op",
"diffusion_model.input_blocks.4.0.in_layers.0",
"diffusion_model.input_blocks.4.0.in_layers.2",
"diffusion_model.input_blocks.4.0.emb_layers.1",
"diffusion_model.input_blocks.4.0.out_layers.0",
"diffusion_model.input_blocks.4.0.out_layers.3",
"diffusion_model.input_blocks.4.0.skip_connection",
"diffusion_model.input_blocks.4.1.norm",
"diffusion_model.input_blocks.4.1.proj_in",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.4.1.proj_out",
"diffusion_model.input_blocks.5.0.in_layers.0",
"diffusion_model.input_blocks.5.0.in_layers.2",
"diffusion_model.input_blocks.5.0.emb_layers.1",
"diffusion_model.input_blocks.5.0.out_layers.0",
"diffusion_model.input_blocks.5.0.out_layers.3",
"diffusion_model.input_blocks.5.1.norm",
"diffusion_model.input_blocks.5.1.proj_in",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.5.1.proj_out",
"diffusion_model.input_blocks.6.0.op",
"diffusion_model.input_blocks.7.0.in_layers.0",
"diffusion_model.input_blocks.7.0.in_layers.2",
"diffusion_model.input_blocks.7.0.emb_layers.1",
"diffusion_model.input_blocks.7.0.out_layers.0",
"diffusion_model.input_blocks.7.0.out_layers.3",
"diffusion_model.input_blocks.7.0.skip_connection",
"diffusion_model.input_blocks.7.1.norm",
"diffusion_model.input_blocks.7.1.proj_in",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.7.1.proj_out",
"diffusion_model.input_blocks.8.0.in_layers.0",
"diffusion_model.input_blocks.8.0.in_layers.2",
"diffusion_model.input_blocks.8.0.emb_layers.1",
"diffusion_model.input_blocks.8.0.out_layers.0",
"diffusion_model.input_blocks.8.0.out_layers.3",
"diffusion_model.input_blocks.8.1.norm",
"diffusion_model.input_blocks.8.1.proj_in",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.norm1",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.norm2",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.norm3",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.2",
"diffusion_model.input_blocks.8.1.proj_out",
"diffusion_model.input_blocks.9.0.op",
"diffusion_model.input_blocks.10.0.in_layers.0",
"diffusion_model.input_blocks.10.0.in_layers.2",
"diffusion_model.input_blocks.10.0.emb_layers.1",
"diffusion_model.input_blocks.10.0.out_layers.0",
"diffusion_model.input_blocks.10.0.out_layers.3",
"diffusion_model.input_blocks.11.0.in_layers.0",
"diffusion_model.input_blocks.11.0.in_layers.2",
"diffusion_model.input_blocks.11.0.emb_layers.1",
"diffusion_model.input_blocks.11.0.out_layers.0",
"diffusion_model.input_blocks.11.0.out_layers.3",
"diffusion_model.middle_block.0.in_layers.0",
"diffusion_model.middle_block.0.in_layers.2",
"diffusion_model.middle_block.0.emb_layers.1",
"diffusion_model.middle_block.0.out_layers.0",
"diffusion_model.middle_block.0.out_layers.3",
"diffusion_model.middle_block.1.norm",
"diffusion_model.middle_block.1.proj_in",
"diffusion_model.middle_block.1.transformer_blocks.0.norm1",
"diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.middle_block.1.transformer_blocks.0.norm2",
"diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.middle_block.1.transformer_blocks.0.norm3",
"diffusion_model.middle_block.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.middle_block.1.transformer_blocks.0.ff.net.2",
"diffusion_model.middle_block.1.proj_out",
"diffusion_model.middle_block.2.in_layers.0",
"diffusion_model.middle_block.2.in_layers.2",
"diffusion_model.middle_block.2.emb_layers.1",
"diffusion_model.middle_block.2.out_layers.0",
"diffusion_model.middle_block.2.out_layers.3",
"diffusion_model.output_blocks.0.0.in_layers.0",
"diffusion_model.output_blocks.0.0.in_layers.2",
"diffusion_model.output_blocks.0.0.emb_layers.1",
"diffusion_model.output_blocks.0.0.out_layers.0",
"diffusion_model.output_blocks.0.0.out_layers.3",
"diffusion_model.output_blocks.0.0.skip_connection",
"diffusion_model.output_blocks.1.0.in_layers.0",
"diffusion_model.output_blocks.1.0.in_layers.2",
"diffusion_model.output_blocks.1.0.emb_layers.1",
"diffusion_model.output_blocks.1.0.out_layers.0",
"diffusion_model.output_blocks.1.0.out_layers.3",
"diffusion_model.output_blocks.1.0.skip_connection",
"diffusion_model.output_blocks.2.0.in_layers.0",
"diffusion_model.output_blocks.2.0.in_layers.2",
"diffusion_model.output_blocks.2.0.emb_layers.1",
"diffusion_model.output_blocks.2.0.out_layers.0",
"diffusion_model.output_blocks.2.0.out_layers.3",
"diffusion_model.output_blocks.2.0.skip_connection",
"diffusion_model.output_blocks.2.1.conv",
"diffusion_model.output_blocks.3.0.in_layers.0",
"diffusion_model.output_blocks.3.0.in_layers.2",
"diffusion_model.output_blocks.3.0.emb_layers.1",
"diffusion_model.output_blocks.3.0.out_layers.0",
"diffusion_model.output_blocks.3.0.out_layers.3",
"diffusion_model.output_blocks.3.0.skip_connection",
"diffusion_model.output_blocks.3.1.norm",
"diffusion_model.output_blocks.3.1.proj_in",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.3.1.proj_out",
"diffusion_model.output_blocks.4.0.in_layers.0",
"diffusion_model.output_blocks.4.0.in_layers.2",
"diffusion_model.output_blocks.4.0.emb_layers.1",
"diffusion_model.output_blocks.4.0.out_layers.0",
"diffusion_model.output_blocks.4.0.out_layers.3",
"diffusion_model.output_blocks.4.0.skip_connection",
"diffusion_model.output_blocks.4.1.norm",
"diffusion_model.output_blocks.4.1.proj_in",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.4.1.proj_out",
"diffusion_model.output_blocks.5.0.in_layers.0",
"diffusion_model.output_blocks.5.0.in_layers.2",
"diffusion_model.output_blocks.5.0.emb_layers.1",
"diffusion_model.output_blocks.5.0.out_layers.0",
"diffusion_model.output_blocks.5.0.out_layers.3",
"diffusion_model.output_blocks.5.0.skip_connection",
"diffusion_model.output_blocks.5.1.norm",
"diffusion_model.output_blocks.5.1.proj_in",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.5.1.proj_out",
"diffusion_model.output_blocks.5.2.conv",
"diffusion_model.output_blocks.6.0.in_layers.0",
"diffusion_model.output_blocks.6.0.in_layers.2",
"diffusion_model.output_blocks.6.0.emb_layers.1",
"diffusion_model.output_blocks.6.0.out_layers.0",
"diffusion_model.output_blocks.6.0.out_layers.3",
"diffusion_model.output_blocks.6.0.skip_connection",
"diffusion_model.output_blocks.6.1.norm",
"diffusion_model.output_blocks.6.1.proj_in",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.6.1.proj_out",
"diffusion_model.output_blocks.7.0.in_layers.0",
"diffusion_model.output_blocks.7.0.in_layers.2",
"diffusion_model.output_blocks.7.0.emb_layers.1",
"diffusion_model.output_blocks.7.0.out_layers.0",
"diffusion_model.output_blocks.7.0.out_layers.3",
"diffusion_model.output_blocks.7.0.skip_connection",
"diffusion_model.output_blocks.7.1.norm",
"diffusion_model.output_blocks.7.1.proj_in",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.7.1.proj_out",
"diffusion_model.output_blocks.8.0.in_layers.0",
"diffusion_model.output_blocks.8.0.in_layers.2",
"diffusion_model.output_blocks.8.0.emb_layers.1",
"diffusion_model.output_blocks.8.0.out_layers.0",
"diffusion_model.output_blocks.8.0.out_layers.3",
"diffusion_model.output_blocks.8.0.skip_connection",
"diffusion_model.output_blocks.8.1.norm",
"diffusion_model.output_blocks.8.1.proj_in",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.8.1.proj_out",
"diffusion_model.output_blocks.8.2.conv",
"diffusion_model.output_blocks.9.0.in_layers.0",
"diffusion_model.output_blocks.9.0.in_layers.2",
"diffusion_model.output_blocks.9.0.emb_layers.1",
"diffusion_model.output_blocks.9.0.out_layers.0",
"diffusion_model.output_blocks.9.0.out_layers.3",
"diffusion_model.output_blocks.9.0.skip_connection",
"diffusion_model.output_blocks.9.1.norm",
"diffusion_model.output_blocks.9.1.proj_in",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.9.1.proj_out",
"diffusion_model.output_blocks.10.0.in_layers.0",
"diffusion_model.output_blocks.10.0.in_layers.2",
"diffusion_model.output_blocks.10.0.emb_layers.1",
"diffusion_model.output_blocks.10.0.out_layers.0",
"diffusion_model.output_blocks.10.0.out_layers.3",
"diffusion_model.output_blocks.10.0.skip_connection",
"diffusion_model.output_blocks.10.1.norm",
"diffusion_model.output_blocks.10.1.proj_in",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.10.1.proj_out",
"diffusion_model.output_blocks.11.0.in_layers.0",
"diffusion_model.output_blocks.11.0.in_layers.2",
"diffusion_model.output_blocks.11.0.emb_layers.1",
"diffusion_model.output_blocks.11.0.out_layers.0",
"diffusion_model.output_blocks.11.0.out_layers.3",
"diffusion_model.output_blocks.11.0.skip_connection",
"diffusion_model.output_blocks.11.1.norm",
"diffusion_model.output_blocks.11.1.proj_in",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_q",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_k",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_v",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_out.0",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.norm2",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_q",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_k",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_v",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_out.0",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.norm3",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.0.proj",
"diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.2",
"diffusion_model.output_blocks.11.1.proj_out",
"diffusion_model.out.0",
"diffusion_model.out.2"
]

View File

@ -0,0 +1,393 @@
[
"model.diffusion_model.input_blocks.0.0",
"model.diffusion_model.input_blocks.1.0.emb_layers.1",
"model.diffusion_model.input_blocks.1.0.in_layers.0",
"model.diffusion_model.input_blocks.1.0.in_layers.2",
"model.diffusion_model.input_blocks.1.0.out_layers.0",
"model.diffusion_model.input_blocks.1.0.out_layers.3",
"model.diffusion_model.input_blocks.1.1.norm",
"model.diffusion_model.input_blocks.1.1.proj_in",
"model.diffusion_model.input_blocks.1.1.proj_out",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm1",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm2",
"model.diffusion_model.input_blocks.1.1.transformer_blocks.0.norm3",
"model.diffusion_model.input_blocks.10.0.emb_layers.1",
"model.diffusion_model.input_blocks.10.0.in_layers.0",
"model.diffusion_model.input_blocks.10.0.in_layers.2",
"model.diffusion_model.input_blocks.10.0.out_layers.0",
"model.diffusion_model.input_blocks.10.0.out_layers.3",
"model.diffusion_model.input_blocks.11.0.emb_layers.1",
"model.diffusion_model.input_blocks.11.0.in_layers.0",
"model.diffusion_model.input_blocks.11.0.in_layers.2",
"model.diffusion_model.input_blocks.11.0.out_layers.0",
"model.diffusion_model.input_blocks.11.0.out_layers.3",
"model.diffusion_model.input_blocks.2.0.emb_layers.1",
"model.diffusion_model.input_blocks.2.0.in_layers.0",
"model.diffusion_model.input_blocks.2.0.in_layers.2",
"model.diffusion_model.input_blocks.2.0.out_layers.0",
"model.diffusion_model.input_blocks.2.0.out_layers.3",
"model.diffusion_model.input_blocks.2.1.norm",
"model.diffusion_model.input_blocks.2.1.proj_in",
"model.diffusion_model.input_blocks.2.1.proj_out",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.norm1",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.norm2",
"model.diffusion_model.input_blocks.2.1.transformer_blocks.0.norm3",
"model.diffusion_model.input_blocks.3.0.op",
"model.diffusion_model.input_blocks.4.0.emb_layers.1",
"model.diffusion_model.input_blocks.4.0.in_layers.0",
"model.diffusion_model.input_blocks.4.0.in_layers.2",
"model.diffusion_model.input_blocks.4.0.out_layers.0",
"model.diffusion_model.input_blocks.4.0.out_layers.3",
"model.diffusion_model.input_blocks.4.0.skip_connection",
"model.diffusion_model.input_blocks.4.1.norm",
"model.diffusion_model.input_blocks.4.1.proj_in",
"model.diffusion_model.input_blocks.4.1.proj_out",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.norm1",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.norm2",
"model.diffusion_model.input_blocks.4.1.transformer_blocks.0.norm3",
"model.diffusion_model.input_blocks.5.0.emb_layers.1",
"model.diffusion_model.input_blocks.5.0.in_layers.0",
"model.diffusion_model.input_blocks.5.0.in_layers.2",
"model.diffusion_model.input_blocks.5.0.out_layers.0",
"model.diffusion_model.input_blocks.5.0.out_layers.3",
"model.diffusion_model.input_blocks.5.1.norm",
"model.diffusion_model.input_blocks.5.1.proj_in",
"model.diffusion_model.input_blocks.5.1.proj_out",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.norm1",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.norm2",
"model.diffusion_model.input_blocks.5.1.transformer_blocks.0.norm3",
"model.diffusion_model.input_blocks.6.0.op",
"model.diffusion_model.input_blocks.7.0.emb_layers.1",
"model.diffusion_model.input_blocks.7.0.in_layers.0",
"model.diffusion_model.input_blocks.7.0.in_layers.2",
"model.diffusion_model.input_blocks.7.0.out_layers.0",
"model.diffusion_model.input_blocks.7.0.out_layers.3",
"model.diffusion_model.input_blocks.7.0.skip_connection",
"model.diffusion_model.input_blocks.7.1.norm",
"model.diffusion_model.input_blocks.7.1.proj_in",
"model.diffusion_model.input_blocks.7.1.proj_out",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.norm1",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.norm2",
"model.diffusion_model.input_blocks.7.1.transformer_blocks.0.norm3",
"model.diffusion_model.input_blocks.8.0.emb_layers.1",
"model.diffusion_model.input_blocks.8.0.in_layers.0",
"model.diffusion_model.input_blocks.8.0.in_layers.2",
"model.diffusion_model.input_blocks.8.0.out_layers.0",
"model.diffusion_model.input_blocks.8.0.out_layers.3",
"model.diffusion_model.input_blocks.8.1.norm",
"model.diffusion_model.input_blocks.8.1.proj_in",
"model.diffusion_model.input_blocks.8.1.proj_out",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.norm1",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.norm2",
"model.diffusion_model.input_blocks.8.1.transformer_blocks.0.norm3",
"model.diffusion_model.input_blocks.9.0.op",
"model.diffusion_model.middle_block.0.emb_layers.1",
"model.diffusion_model.middle_block.0.in_layers.0",
"model.diffusion_model.middle_block.0.in_layers.2",
"model.diffusion_model.middle_block.0.out_layers.0",
"model.diffusion_model.middle_block.0.out_layers.3",
"model.diffusion_model.middle_block.1.norm",
"model.diffusion_model.middle_block.1.proj_in",
"model.diffusion_model.middle_block.1.proj_out",
"model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.middle_block.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.middle_block.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.middle_block.1.transformer_blocks.0.norm1",
"model.diffusion_model.middle_block.1.transformer_blocks.0.norm2",
"model.diffusion_model.middle_block.1.transformer_blocks.0.norm3",
"model.diffusion_model.middle_block.2.emb_layers.1",
"model.diffusion_model.middle_block.2.in_layers.0",
"model.diffusion_model.middle_block.2.in_layers.2",
"model.diffusion_model.middle_block.2.out_layers.0",
"model.diffusion_model.middle_block.2.out_layers.3",
"model.diffusion_model.out.0",
"model.diffusion_model.out.2",
"model.diffusion_model.output_blocks.0.0.emb_layers.1",
"model.diffusion_model.output_blocks.0.0.in_layers.0",
"model.diffusion_model.output_blocks.0.0.in_layers.2",
"model.diffusion_model.output_blocks.0.0.out_layers.0",
"model.diffusion_model.output_blocks.0.0.out_layers.3",
"model.diffusion_model.output_blocks.0.0.skip_connection",
"model.diffusion_model.output_blocks.1.0.emb_layers.1",
"model.diffusion_model.output_blocks.1.0.in_layers.0",
"model.diffusion_model.output_blocks.1.0.in_layers.2",
"model.diffusion_model.output_blocks.1.0.out_layers.0",
"model.diffusion_model.output_blocks.1.0.out_layers.3",
"model.diffusion_model.output_blocks.1.0.skip_connection",
"model.diffusion_model.output_blocks.10.0.emb_layers.1",
"model.diffusion_model.output_blocks.10.0.in_layers.0",
"model.diffusion_model.output_blocks.10.0.in_layers.2",
"model.diffusion_model.output_blocks.10.0.out_layers.0",
"model.diffusion_model.output_blocks.10.0.out_layers.3",
"model.diffusion_model.output_blocks.10.0.skip_connection",
"model.diffusion_model.output_blocks.10.1.norm",
"model.diffusion_model.output_blocks.10.1.proj_in",
"model.diffusion_model.output_blocks.10.1.proj_out",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.norm1",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.norm2",
"model.diffusion_model.output_blocks.10.1.transformer_blocks.0.norm3",
"model.diffusion_model.output_blocks.11.0.emb_layers.1",
"model.diffusion_model.output_blocks.11.0.in_layers.0",
"model.diffusion_model.output_blocks.11.0.in_layers.2",
"model.diffusion_model.output_blocks.11.0.out_layers.0",
"model.diffusion_model.output_blocks.11.0.out_layers.3",
"model.diffusion_model.output_blocks.11.0.skip_connection",
"model.diffusion_model.output_blocks.11.1.norm",
"model.diffusion_model.output_blocks.11.1.proj_in",
"model.diffusion_model.output_blocks.11.1.proj_out",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm1",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm2",
"model.diffusion_model.output_blocks.11.1.transformer_blocks.0.norm3",
"model.diffusion_model.output_blocks.2.0.emb_layers.1",
"model.diffusion_model.output_blocks.2.0.in_layers.0",
"model.diffusion_model.output_blocks.2.0.in_layers.2",
"model.diffusion_model.output_blocks.2.0.out_layers.0",
"model.diffusion_model.output_blocks.2.0.out_layers.3",
"model.diffusion_model.output_blocks.2.0.skip_connection",
"model.diffusion_model.output_blocks.2.1.conv",
"model.diffusion_model.output_blocks.3.0.emb_layers.1",
"model.diffusion_model.output_blocks.3.0.in_layers.0",
"model.diffusion_model.output_blocks.3.0.in_layers.2",
"model.diffusion_model.output_blocks.3.0.out_layers.0",
"model.diffusion_model.output_blocks.3.0.out_layers.3",
"model.diffusion_model.output_blocks.3.0.skip_connection",
"model.diffusion_model.output_blocks.3.1.norm",
"model.diffusion_model.output_blocks.3.1.proj_in",
"model.diffusion_model.output_blocks.3.1.proj_out",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.norm1",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.norm2",
"model.diffusion_model.output_blocks.3.1.transformer_blocks.0.norm3",
"model.diffusion_model.output_blocks.4.0.emb_layers.1",
"model.diffusion_model.output_blocks.4.0.in_layers.0",
"model.diffusion_model.output_blocks.4.0.in_layers.2",
"model.diffusion_model.output_blocks.4.0.out_layers.0",
"model.diffusion_model.output_blocks.4.0.out_layers.3",
"model.diffusion_model.output_blocks.4.0.skip_connection",
"model.diffusion_model.output_blocks.4.1.norm",
"model.diffusion_model.output_blocks.4.1.proj_in",
"model.diffusion_model.output_blocks.4.1.proj_out",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.norm1",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.norm2",
"model.diffusion_model.output_blocks.4.1.transformer_blocks.0.norm3",
"model.diffusion_model.output_blocks.5.0.emb_layers.1",
"model.diffusion_model.output_blocks.5.0.in_layers.0",
"model.diffusion_model.output_blocks.5.0.in_layers.2",
"model.diffusion_model.output_blocks.5.0.out_layers.0",
"model.diffusion_model.output_blocks.5.0.out_layers.3",
"model.diffusion_model.output_blocks.5.0.skip_connection",
"model.diffusion_model.output_blocks.5.1.norm",
"model.diffusion_model.output_blocks.5.1.proj_in",
"model.diffusion_model.output_blocks.5.1.proj_out",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.norm1",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.norm2",
"model.diffusion_model.output_blocks.5.1.transformer_blocks.0.norm3",
"model.diffusion_model.output_blocks.5.2.conv",
"model.diffusion_model.output_blocks.6.0.emb_layers.1",
"model.diffusion_model.output_blocks.6.0.in_layers.0",
"model.diffusion_model.output_blocks.6.0.in_layers.2",
"model.diffusion_model.output_blocks.6.0.out_layers.0",
"model.diffusion_model.output_blocks.6.0.out_layers.3",
"model.diffusion_model.output_blocks.6.0.skip_connection",
"model.diffusion_model.output_blocks.6.1.norm",
"model.diffusion_model.output_blocks.6.1.proj_in",
"model.diffusion_model.output_blocks.6.1.proj_out",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.norm1",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.norm2",
"model.diffusion_model.output_blocks.6.1.transformer_blocks.0.norm3",
"model.diffusion_model.output_blocks.7.0.emb_layers.1",
"model.diffusion_model.output_blocks.7.0.in_layers.0",
"model.diffusion_model.output_blocks.7.0.in_layers.2",
"model.diffusion_model.output_blocks.7.0.out_layers.0",
"model.diffusion_model.output_blocks.7.0.out_layers.3",
"model.diffusion_model.output_blocks.7.0.skip_connection",
"model.diffusion_model.output_blocks.7.1.norm",
"model.diffusion_model.output_blocks.7.1.proj_in",
"model.diffusion_model.output_blocks.7.1.proj_out",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.norm1",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.norm2",
"model.diffusion_model.output_blocks.7.1.transformer_blocks.0.norm3",
"model.diffusion_model.output_blocks.8.0.emb_layers.1",
"model.diffusion_model.output_blocks.8.0.in_layers.0",
"model.diffusion_model.output_blocks.8.0.in_layers.2",
"model.diffusion_model.output_blocks.8.0.out_layers.0",
"model.diffusion_model.output_blocks.8.0.out_layers.3",
"model.diffusion_model.output_blocks.8.0.skip_connection",
"model.diffusion_model.output_blocks.8.1.norm",
"model.diffusion_model.output_blocks.8.1.proj_in",
"model.diffusion_model.output_blocks.8.1.proj_out",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.norm1",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.norm2",
"model.diffusion_model.output_blocks.8.1.transformer_blocks.0.norm3",
"model.diffusion_model.output_blocks.8.2.conv",
"model.diffusion_model.output_blocks.9.0.emb_layers.1",
"model.diffusion_model.output_blocks.9.0.in_layers.0",
"model.diffusion_model.output_blocks.9.0.in_layers.2",
"model.diffusion_model.output_blocks.9.0.out_layers.0",
"model.diffusion_model.output_blocks.9.0.out_layers.3",
"model.diffusion_model.output_blocks.9.0.skip_connection",
"model.diffusion_model.output_blocks.9.1.norm",
"model.diffusion_model.output_blocks.9.1.proj_in",
"model.diffusion_model.output_blocks.9.1.proj_out",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_k",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_out.0",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_q",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn1.to_v",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_k",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_out.0",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_q",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.attn2.to_v",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.0.proj",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.ff.net.2",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.norm1",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.norm2",
"model.diffusion_model.output_blocks.9.1.transformer_blocks.0.norm3",
"model.diffusion_model.time_embed.0",
"model.diffusion_model.time_embed.2"
]

View File

@ -0,0 +1,43 @@
[
"(conv_in|conv_norm_out|conv_out)",
"(down_blocks|up_blocks).2.attentions.0.transformer_blocks.0.(attn1|attn2).to_out.0",
"(down_blocks|up_blocks).2.attentions.0.transformer_blocks.0.ff.net.2",
"(down_blocks|up_blocks).2.attentions.1.transformer_blocks.0.(attn1|attn2).to_out.0",
"(down_blocks|up_blocks).2.attentions.1.transformer_blocks.0.ff.net.2",
"down_blocks.(0|1).attentions.0.transformer_blocks.0.(attn1|attn2).to_out.0",
"down_blocks.(0|1).attentions.0.transformer_blocks.0.ff.net.2",
"down_blocks.(0|1).attentions.1.transformer_blocks.0.(attn1|attn2).to_out.0",
"down_blocks.(0|1).attentions.1.transformer_blocks.0.ff.net.2",
"down_blocks.(0|1|2).attentions.(0|1).(norm|proj_in|proj_out)",
"down_blocks.(0|1|2).attentions.(0|1).transformer_blocks.0.(attn1|attn2).(to_k|to_q|to_v)",
"down_blocks.(0|1|2).attentions.(0|1).transformer_blocks.0.(norm1|norm2|norm3)",
"down_blocks.(0|1|2).attentions.(0|1).transformer_blocks.0.ff.net.0.proj",
"down_blocks.(0|1|2).downsamplers.0.conv",
"down_blocks.(0|3).resnets.(0|1).(conv1|conv2|norm1|norm2|time_emb_proj)",
"down_blocks.(1|2).resnets.0.(conv1|conv2|conv_shortcut|norm1|norm2|time_emb_proj)",
"down_blocks.(1|2).resnets.1.(conv1|conv2|norm1|norm2|time_emb_proj)",
"mid_block.attentions.0.(norm|proj_in|proj_out)",
"mid_block.attentions.0.transformer_blocks.0.(attn1|attn2).(to_k|to_q|to_v)",
"mid_block.attentions.0.transformer_blocks.0.(attn1|attn2).to_out.0",
"mid_block.attentions.0.transformer_blocks.0.(norm1|norm2|norm3)",
"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj",
"mid_block.attentions.0.transformer_blocks.0.ff.net.2",
"mid_block.resnets.(0|1).(conv1|conv2|norm1|norm2|time_emb_proj)",
"time_embedding.(linear_1|linear_2)",
"up_blocks.(0|1|2).upsamplers.0.conv",
"up_blocks.(0|1|2|3).resnets.(0|1|2).(conv1|conv2|conv_shortcut|norm1|norm2|time_emb_proj)",
"up_blocks.(1|2|3).attentions.(0|1|2).(norm|proj_in|proj_out)",
"up_blocks.(1|2|3).attentions.(0|1|2).transformer_blocks.0.(attn1|attn2).(to_k|to_q|to_v)",
"up_blocks.(1|2|3).attentions.(0|1|2).transformer_blocks.0.(norm1|norm2|norm3)",
"up_blocks.(1|2|3).attentions.(0|1|2).transformer_blocks.0.ff.net.0.proj",
"up_blocks.(1|2|3).attentions.2.transformer_blocks.0.(attn1|attn2).to_out.0",
"up_blocks.(1|2|3).attentions.2.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.0.transformer_blocks.0.(attn1|attn2).to_out.0",
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.1.transformer_blocks.0.(attn1|attn2).to_out.0",
"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.0.transformer_blocks.0.(attn1|attn2).to_out.0",
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.1.transformer_blocks.0.(attn1|attn2).to_out.0",
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2"
]
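
The list above encodes layer groups as regex-style alternations instead of one entry per layer. A minimal sketch (assuming Python's re module; the covers helper below is illustrative and not part of this change) of matching one such pattern against a concrete diffusers layer name:

import re

def covers(pattern: str, layer_name: str) -> bool:
    # Dots in the layer names are literal, so escape them; the (a|b) groups
    # already read as regex alternations and can be used as-is.
    return re.fullmatch(pattern.replace(".", r"\."), layer_name) is not None

# e.g. "up_blocks.2.attentions.1.proj_in" is covered by the pattern below
assert covers("up_blocks.(1|2|3).attentions.(0|1|2).(norm|proj_in|proj_out)",
              "up_blocks.2.attentions.1.proj_in")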

View File

@ -0,0 +1,393 @@
[
"time_embedding.linear_1",
"time_embedding.linear_2",
"conv_in",
"down_blocks.0.resnets.0.norm1",
"down_blocks.0.resnets.0.conv1",
"down_blocks.0.resnets.0.time_emb_proj",
"down_blocks.0.resnets.0.norm2",
"down_blocks.0.resnets.0.conv2",
"down_blocks.0.attentions.0.norm",
"down_blocks.0.attentions.0.proj_in",
"down_blocks.0.attentions.0.transformer_blocks.0.norm1",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0",
"down_blocks.0.attentions.0.transformer_blocks.0.norm2",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0",
"down_blocks.0.attentions.0.transformer_blocks.0.norm3",
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj",
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2",
"down_blocks.0.attentions.0.proj_out",
"down_blocks.0.resnets.1.norm1",
"down_blocks.0.resnets.1.conv1",
"down_blocks.0.resnets.1.time_emb_proj",
"down_blocks.0.resnets.1.norm2",
"down_blocks.0.resnets.1.conv2",
"down_blocks.0.attentions.1.norm",
"down_blocks.0.attentions.1.proj_in",
"down_blocks.0.attentions.1.transformer_blocks.0.norm1",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.0.attentions.1.transformer_blocks.0.norm2",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.0.attentions.1.transformer_blocks.0.norm3",
"down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2",
"down_blocks.0.attentions.1.proj_out",
"down_blocks.0.downsamplers.0.conv",
"down_blocks.1.resnets.0.norm1",
"down_blocks.1.resnets.0.conv1",
"down_blocks.1.resnets.0.time_emb_proj",
"down_blocks.1.resnets.0.norm2",
"down_blocks.1.resnets.0.conv2",
"down_blocks.1.resnets.0.conv_shortcut",
"down_blocks.1.attentions.0.norm",
"down_blocks.1.attentions.0.proj_in",
"down_blocks.1.attentions.0.transformer_blocks.0.norm1",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0",
"down_blocks.1.attentions.0.transformer_blocks.0.norm2",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0",
"down_blocks.1.attentions.0.transformer_blocks.0.norm3",
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj",
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2",
"down_blocks.1.attentions.0.proj_out",
"down_blocks.1.resnets.1.norm1",
"down_blocks.1.resnets.1.conv1",
"down_blocks.1.resnets.1.time_emb_proj",
"down_blocks.1.resnets.1.norm2",
"down_blocks.1.resnets.1.conv2",
"down_blocks.1.attentions.1.norm",
"down_blocks.1.attentions.1.proj_in",
"down_blocks.1.attentions.1.transformer_blocks.0.norm1",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.1.attentions.1.transformer_blocks.0.norm2",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.1.attentions.1.transformer_blocks.0.norm3",
"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2",
"down_blocks.1.attentions.1.proj_out",
"down_blocks.1.downsamplers.0.conv",
"down_blocks.2.resnets.0.norm1",
"down_blocks.2.resnets.0.conv1",
"down_blocks.2.resnets.0.time_emb_proj",
"down_blocks.2.resnets.0.norm2",
"down_blocks.2.resnets.0.conv2",
"down_blocks.2.resnets.0.conv_shortcut",
"down_blocks.2.attentions.0.norm",
"down_blocks.2.attentions.0.proj_in",
"down_blocks.2.attentions.0.transformer_blocks.0.norm1",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0",
"down_blocks.2.attentions.0.transformer_blocks.0.norm2",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0",
"down_blocks.2.attentions.0.transformer_blocks.0.norm3",
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj",
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2",
"down_blocks.2.attentions.0.proj_out",
"down_blocks.2.resnets.1.norm1",
"down_blocks.2.resnets.1.conv1",
"down_blocks.2.resnets.1.time_emb_proj",
"down_blocks.2.resnets.1.norm2",
"down_blocks.2.resnets.1.conv2",
"down_blocks.2.attentions.1.norm",
"down_blocks.2.attentions.1.proj_in",
"down_blocks.2.attentions.1.transformer_blocks.0.norm1",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.2.attentions.1.transformer_blocks.0.norm2",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.2.attentions.1.transformer_blocks.0.norm3",
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2",
"down_blocks.2.attentions.1.proj_out",
"down_blocks.2.downsamplers.0.conv",
"down_blocks.3.resnets.0.norm1",
"down_blocks.3.resnets.0.conv1",
"down_blocks.3.resnets.0.time_emb_proj",
"down_blocks.3.resnets.0.norm2",
"down_blocks.3.resnets.0.conv2",
"down_blocks.3.resnets.1.norm1",
"down_blocks.3.resnets.1.conv1",
"down_blocks.3.resnets.1.time_emb_proj",
"down_blocks.3.resnets.1.norm2",
"down_blocks.3.resnets.1.conv2",
"mid_block.resnets.0.norm1",
"mid_block.resnets.0.conv1",
"mid_block.resnets.0.time_emb_proj",
"mid_block.resnets.0.norm2",
"mid_block.resnets.0.conv2",
"mid_block.attentions.0.norm",
"mid_block.attentions.0.proj_in",
"mid_block.attentions.0.transformer_blocks.0.norm1",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_q",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_k",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_v",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0",
"mid_block.attentions.0.transformer_blocks.0.norm2",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_q",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_k",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_v",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0",
"mid_block.attentions.0.transformer_blocks.0.norm3",
"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj",
"mid_block.attentions.0.transformer_blocks.0.ff.net.2",
"mid_block.attentions.0.proj_out",
"mid_block.resnets.1.norm1",
"mid_block.resnets.1.conv1",
"mid_block.resnets.1.time_emb_proj",
"mid_block.resnets.1.norm2",
"mid_block.resnets.1.conv2",
"up_blocks.0.resnets.0.norm1",
"up_blocks.0.resnets.0.conv1",
"up_blocks.0.resnets.0.time_emb_proj",
"up_blocks.0.resnets.0.norm2",
"up_blocks.0.resnets.0.conv2",
"up_blocks.0.resnets.0.conv_shortcut",
"up_blocks.0.resnets.1.norm1",
"up_blocks.0.resnets.1.conv1",
"up_blocks.0.resnets.1.time_emb_proj",
"up_blocks.0.resnets.1.norm2",
"up_blocks.0.resnets.1.conv2",
"up_blocks.0.resnets.1.conv_shortcut",
"up_blocks.0.resnets.2.norm1",
"up_blocks.0.resnets.2.conv1",
"up_blocks.0.resnets.2.time_emb_proj",
"up_blocks.0.resnets.2.norm2",
"up_blocks.0.resnets.2.conv2",
"up_blocks.0.resnets.2.conv_shortcut",
"up_blocks.0.upsamplers.0.conv",
"up_blocks.1.resnets.0.norm1",
"up_blocks.1.resnets.0.conv1",
"up_blocks.1.resnets.0.time_emb_proj",
"up_blocks.1.resnets.0.norm2",
"up_blocks.1.resnets.0.conv2",
"up_blocks.1.resnets.0.conv_shortcut",
"up_blocks.1.attentions.0.norm",
"up_blocks.1.attentions.0.proj_in",
"up_blocks.1.attentions.0.transformer_blocks.0.norm1",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0",
"up_blocks.1.attentions.0.transformer_blocks.0.norm2",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0",
"up_blocks.1.attentions.0.transformer_blocks.0.norm3",
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj",
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.0.proj_out",
"up_blocks.1.resnets.1.norm1",
"up_blocks.1.resnets.1.conv1",
"up_blocks.1.resnets.1.time_emb_proj",
"up_blocks.1.resnets.1.norm2",
"up_blocks.1.resnets.1.conv2",
"up_blocks.1.resnets.1.conv_shortcut",
"up_blocks.1.attentions.1.norm",
"up_blocks.1.attentions.1.proj_in",
"up_blocks.1.attentions.1.transformer_blocks.0.norm1",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.1.attentions.1.transformer_blocks.0.norm2",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.1.attentions.1.transformer_blocks.0.norm3",
"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.1.proj_out",
"up_blocks.1.resnets.2.norm1",
"up_blocks.1.resnets.2.conv1",
"up_blocks.1.resnets.2.time_emb_proj",
"up_blocks.1.resnets.2.norm2",
"up_blocks.1.resnets.2.conv2",
"up_blocks.1.resnets.2.conv_shortcut",
"up_blocks.1.attentions.2.norm",
"up_blocks.1.attentions.2.proj_in",
"up_blocks.1.attentions.2.transformer_blocks.0.norm1",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0",
"up_blocks.1.attentions.2.transformer_blocks.0.norm2",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0",
"up_blocks.1.attentions.2.transformer_blocks.0.norm3",
"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj",
"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.2.proj_out",
"up_blocks.1.upsamplers.0.conv",
"up_blocks.2.resnets.0.norm1",
"up_blocks.2.resnets.0.conv1",
"up_blocks.2.resnets.0.time_emb_proj",
"up_blocks.2.resnets.0.norm2",
"up_blocks.2.resnets.0.conv2",
"up_blocks.2.resnets.0.conv_shortcut",
"up_blocks.2.attentions.0.norm",
"up_blocks.2.attentions.0.proj_in",
"up_blocks.2.attentions.0.transformer_blocks.0.norm1",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0",
"up_blocks.2.attentions.0.transformer_blocks.0.norm2",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0",
"up_blocks.2.attentions.0.transformer_blocks.0.norm3",
"up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj",
"up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2",
"up_blocks.2.attentions.0.proj_out",
"up_blocks.2.resnets.1.norm1",
"up_blocks.2.resnets.1.conv1",
"up_blocks.2.resnets.1.time_emb_proj",
"up_blocks.2.resnets.1.norm2",
"up_blocks.2.resnets.1.conv2",
"up_blocks.2.resnets.1.conv_shortcut",
"up_blocks.2.attentions.1.norm",
"up_blocks.2.attentions.1.proj_in",
"up_blocks.2.attentions.1.transformer_blocks.0.norm1",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.2.attentions.1.transformer_blocks.0.norm2",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.2.attentions.1.transformer_blocks.0.norm3",
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2",
"up_blocks.2.attentions.1.proj_out",
"up_blocks.2.resnets.2.norm1",
"up_blocks.2.resnets.2.conv1",
"up_blocks.2.resnets.2.time_emb_proj",
"up_blocks.2.resnets.2.norm2",
"up_blocks.2.resnets.2.conv2",
"up_blocks.2.resnets.2.conv_shortcut",
"up_blocks.2.attentions.2.norm",
"up_blocks.2.attentions.2.proj_in",
"up_blocks.2.attentions.2.transformer_blocks.0.norm1",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0",
"up_blocks.2.attentions.2.transformer_blocks.0.norm2",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0",
"up_blocks.2.attentions.2.transformer_blocks.0.norm3",
"up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj",
"up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2",
"up_blocks.2.attentions.2.proj_out",
"up_blocks.2.upsamplers.0.conv",
"up_blocks.3.resnets.0.norm1",
"up_blocks.3.resnets.0.conv1",
"up_blocks.3.resnets.0.time_emb_proj",
"up_blocks.3.resnets.0.norm2",
"up_blocks.3.resnets.0.conv2",
"up_blocks.3.resnets.0.conv_shortcut",
"up_blocks.3.attentions.0.norm",
"up_blocks.3.attentions.0.proj_in",
"up_blocks.3.attentions.0.transformer_blocks.0.norm1",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0",
"up_blocks.3.attentions.0.transformer_blocks.0.norm2",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0",
"up_blocks.3.attentions.0.transformer_blocks.0.norm3",
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj",
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.0.proj_out",
"up_blocks.3.resnets.1.norm1",
"up_blocks.3.resnets.1.conv1",
"up_blocks.3.resnets.1.time_emb_proj",
"up_blocks.3.resnets.1.norm2",
"up_blocks.3.resnets.1.conv2",
"up_blocks.3.resnets.1.conv_shortcut",
"up_blocks.3.attentions.1.norm",
"up_blocks.3.attentions.1.proj_in",
"up_blocks.3.attentions.1.transformer_blocks.0.norm1",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.3.attentions.1.transformer_blocks.0.norm2",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.3.attentions.1.transformer_blocks.0.norm3",
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.1.proj_out",
"up_blocks.3.resnets.2.norm1",
"up_blocks.3.resnets.2.conv1",
"up_blocks.3.resnets.2.time_emb_proj",
"up_blocks.3.resnets.2.norm2",
"up_blocks.3.resnets.2.conv2",
"up_blocks.3.resnets.2.conv_shortcut",
"up_blocks.3.attentions.2.norm",
"up_blocks.3.attentions.2.proj_in",
"up_blocks.3.attentions.2.transformer_blocks.0.norm1",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0",
"up_blocks.3.attentions.2.transformer_blocks.0.norm2",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0",
"up_blocks.3.attentions.2.transformer_blocks.0.norm3",
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj",
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.2.proj_out",
"conv_norm_out",
"conv_out"
]

View File

@ -0,0 +1,393 @@
[
"conv_in",
"conv_norm_out",
"conv_out",
"down_blocks.0.attentions.0.norm",
"down_blocks.0.attentions.0.proj_in",
"down_blocks.0.attentions.0.proj_out",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q",
"down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q",
"down_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v",
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj",
"down_blocks.0.attentions.0.transformer_blocks.0.ff.net.2",
"down_blocks.0.attentions.0.transformer_blocks.0.norm1",
"down_blocks.0.attentions.0.transformer_blocks.0.norm2",
"down_blocks.0.attentions.0.transformer_blocks.0.norm3",
"down_blocks.0.attentions.1.norm",
"down_blocks.0.attentions.1.proj_in",
"down_blocks.0.attentions.1.proj_out",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q",
"down_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q",
"down_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v",
"down_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.0.attentions.1.transformer_blocks.0.ff.net.2",
"down_blocks.0.attentions.1.transformer_blocks.0.norm1",
"down_blocks.0.attentions.1.transformer_blocks.0.norm2",
"down_blocks.0.attentions.1.transformer_blocks.0.norm3",
"down_blocks.0.downsamplers.0.conv",
"down_blocks.0.resnets.0.conv1",
"down_blocks.0.resnets.0.conv2",
"down_blocks.0.resnets.0.norm1",
"down_blocks.0.resnets.0.norm2",
"down_blocks.0.resnets.0.time_emb_proj",
"down_blocks.0.resnets.1.conv1",
"down_blocks.0.resnets.1.conv2",
"down_blocks.0.resnets.1.norm1",
"down_blocks.0.resnets.1.norm2",
"down_blocks.0.resnets.1.time_emb_proj",
"down_blocks.1.attentions.0.norm",
"down_blocks.1.attentions.0.proj_in",
"down_blocks.1.attentions.0.proj_out",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q",
"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q",
"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v",
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj",
"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2",
"down_blocks.1.attentions.0.transformer_blocks.0.norm1",
"down_blocks.1.attentions.0.transformer_blocks.0.norm2",
"down_blocks.1.attentions.0.transformer_blocks.0.norm3",
"down_blocks.1.attentions.1.norm",
"down_blocks.1.attentions.1.proj_in",
"down_blocks.1.attentions.1.proj_out",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q",
"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q",
"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v",
"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2",
"down_blocks.1.attentions.1.transformer_blocks.0.norm1",
"down_blocks.1.attentions.1.transformer_blocks.0.norm2",
"down_blocks.1.attentions.1.transformer_blocks.0.norm3",
"down_blocks.1.downsamplers.0.conv",
"down_blocks.1.resnets.0.conv1",
"down_blocks.1.resnets.0.conv2",
"down_blocks.1.resnets.0.conv_shortcut",
"down_blocks.1.resnets.0.norm1",
"down_blocks.1.resnets.0.norm2",
"down_blocks.1.resnets.0.time_emb_proj",
"down_blocks.1.resnets.1.conv1",
"down_blocks.1.resnets.1.conv2",
"down_blocks.1.resnets.1.norm1",
"down_blocks.1.resnets.1.norm2",
"down_blocks.1.resnets.1.time_emb_proj",
"down_blocks.2.attentions.0.norm",
"down_blocks.2.attentions.0.proj_in",
"down_blocks.2.attentions.0.proj_out",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q",
"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q",
"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v",
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj",
"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2",
"down_blocks.2.attentions.0.transformer_blocks.0.norm1",
"down_blocks.2.attentions.0.transformer_blocks.0.norm2",
"down_blocks.2.attentions.0.transformer_blocks.0.norm3",
"down_blocks.2.attentions.1.norm",
"down_blocks.2.attentions.1.proj_in",
"down_blocks.2.attentions.1.proj_out",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q",
"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q",
"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v",
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj",
"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2",
"down_blocks.2.attentions.1.transformer_blocks.0.norm1",
"down_blocks.2.attentions.1.transformer_blocks.0.norm2",
"down_blocks.2.attentions.1.transformer_blocks.0.norm3",
"down_blocks.2.downsamplers.0.conv",
"down_blocks.2.resnets.0.conv1",
"down_blocks.2.resnets.0.conv2",
"down_blocks.2.resnets.0.conv_shortcut",
"down_blocks.2.resnets.0.norm1",
"down_blocks.2.resnets.0.norm2",
"down_blocks.2.resnets.0.time_emb_proj",
"down_blocks.2.resnets.1.conv1",
"down_blocks.2.resnets.1.conv2",
"down_blocks.2.resnets.1.norm1",
"down_blocks.2.resnets.1.norm2",
"down_blocks.2.resnets.1.time_emb_proj",
"down_blocks.3.resnets.0.conv1",
"down_blocks.3.resnets.0.conv2",
"down_blocks.3.resnets.0.norm1",
"down_blocks.3.resnets.0.norm2",
"down_blocks.3.resnets.0.time_emb_proj",
"down_blocks.3.resnets.1.conv1",
"down_blocks.3.resnets.1.conv2",
"down_blocks.3.resnets.1.norm1",
"down_blocks.3.resnets.1.norm2",
"down_blocks.3.resnets.1.time_emb_proj",
"mid_block.attentions.0.norm",
"mid_block.attentions.0.proj_in",
"mid_block.attentions.0.proj_out",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_k",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_q",
"mid_block.attentions.0.transformer_blocks.0.attn1.to_v",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_k",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_q",
"mid_block.attentions.0.transformer_blocks.0.attn2.to_v",
"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj",
"mid_block.attentions.0.transformer_blocks.0.ff.net.2",
"mid_block.attentions.0.transformer_blocks.0.norm1",
"mid_block.attentions.0.transformer_blocks.0.norm2",
"mid_block.attentions.0.transformer_blocks.0.norm3",
"mid_block.resnets.0.conv1",
"mid_block.resnets.0.conv2",
"mid_block.resnets.0.norm1",
"mid_block.resnets.0.norm2",
"mid_block.resnets.0.time_emb_proj",
"mid_block.resnets.1.conv1",
"mid_block.resnets.1.conv2",
"mid_block.resnets.1.norm1",
"mid_block.resnets.1.norm2",
"mid_block.resnets.1.time_emb_proj",
"time_embedding.linear_1",
"time_embedding.linear_2",
"up_blocks.0.resnets.0.conv1",
"up_blocks.0.resnets.0.conv2",
"up_blocks.0.resnets.0.conv_shortcut",
"up_blocks.0.resnets.0.norm1",
"up_blocks.0.resnets.0.norm2",
"up_blocks.0.resnets.0.time_emb_proj",
"up_blocks.0.resnets.1.conv1",
"up_blocks.0.resnets.1.conv2",
"up_blocks.0.resnets.1.conv_shortcut",
"up_blocks.0.resnets.1.norm1",
"up_blocks.0.resnets.1.norm2",
"up_blocks.0.resnets.1.time_emb_proj",
"up_blocks.0.resnets.2.conv1",
"up_blocks.0.resnets.2.conv2",
"up_blocks.0.resnets.2.conv_shortcut",
"up_blocks.0.resnets.2.norm1",
"up_blocks.0.resnets.2.norm2",
"up_blocks.0.resnets.2.time_emb_proj",
"up_blocks.0.upsamplers.0.conv",
"up_blocks.1.attentions.0.norm",
"up_blocks.1.attentions.0.proj_in",
"up_blocks.1.attentions.0.proj_out",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q",
"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q",
"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v",
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj",
"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.0.transformer_blocks.0.norm1",
"up_blocks.1.attentions.0.transformer_blocks.0.norm2",
"up_blocks.1.attentions.0.transformer_blocks.0.norm3",
"up_blocks.1.attentions.1.norm",
"up_blocks.1.attentions.1.proj_in",
"up_blocks.1.attentions.1.proj_out",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q",
"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q",
"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v",
"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.1.transformer_blocks.0.norm1",
"up_blocks.1.attentions.1.transformer_blocks.0.norm2",
"up_blocks.1.attentions.1.transformer_blocks.0.norm3",
"up_blocks.1.attentions.2.norm",
"up_blocks.1.attentions.2.proj_in",
"up_blocks.1.attentions.2.proj_out",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q",
"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q",
"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v",
"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj",
"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2",
"up_blocks.1.attentions.2.transformer_blocks.0.norm1",
"up_blocks.1.attentions.2.transformer_blocks.0.norm2",
"up_blocks.1.attentions.2.transformer_blocks.0.norm3",
"up_blocks.1.resnets.0.conv1",
"up_blocks.1.resnets.0.conv2",
"up_blocks.1.resnets.0.conv_shortcut",
"up_blocks.1.resnets.0.norm1",
"up_blocks.1.resnets.0.norm2",
"up_blocks.1.resnets.0.time_emb_proj",
"up_blocks.1.resnets.1.conv1",
"up_blocks.1.resnets.1.conv2",
"up_blocks.1.resnets.1.conv_shortcut",
"up_blocks.1.resnets.1.norm1",
"up_blocks.1.resnets.1.norm2",
"up_blocks.1.resnets.1.time_emb_proj",
"up_blocks.1.resnets.2.conv1",
"up_blocks.1.resnets.2.conv2",
"up_blocks.1.resnets.2.conv_shortcut",
"up_blocks.1.resnets.2.norm1",
"up_blocks.1.resnets.2.norm2",
"up_blocks.1.resnets.2.time_emb_proj",
"up_blocks.1.upsamplers.0.conv",
"up_blocks.2.attentions.0.norm",
"up_blocks.2.attentions.0.proj_in",
"up_blocks.2.attentions.0.proj_out",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q",
"up_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q",
"up_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v",
"up_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj",
"up_blocks.2.attentions.0.transformer_blocks.0.ff.net.2",
"up_blocks.2.attentions.0.transformer_blocks.0.norm1",
"up_blocks.2.attentions.0.transformer_blocks.0.norm2",
"up_blocks.2.attentions.0.transformer_blocks.0.norm3",
"up_blocks.2.attentions.1.norm",
"up_blocks.2.attentions.1.proj_in",
"up_blocks.2.attentions.1.proj_out",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q",
"up_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q",
"up_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v",
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.2.attentions.1.transformer_blocks.0.ff.net.2",
"up_blocks.2.attentions.1.transformer_blocks.0.norm1",
"up_blocks.2.attentions.1.transformer_blocks.0.norm2",
"up_blocks.2.attentions.1.transformer_blocks.0.norm3",
"up_blocks.2.attentions.2.norm",
"up_blocks.2.attentions.2.proj_in",
"up_blocks.2.attentions.2.proj_out",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_k",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_out.0",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_q",
"up_blocks.2.attentions.2.transformer_blocks.0.attn1.to_v",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_k",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_out.0",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_q",
"up_blocks.2.attentions.2.transformer_blocks.0.attn2.to_v",
"up_blocks.2.attentions.2.transformer_blocks.0.ff.net.0.proj",
"up_blocks.2.attentions.2.transformer_blocks.0.ff.net.2",
"up_blocks.2.attentions.2.transformer_blocks.0.norm1",
"up_blocks.2.attentions.2.transformer_blocks.0.norm2",
"up_blocks.2.attentions.2.transformer_blocks.0.norm3",
"up_blocks.2.resnets.0.conv1",
"up_blocks.2.resnets.0.conv2",
"up_blocks.2.resnets.0.conv_shortcut",
"up_blocks.2.resnets.0.norm1",
"up_blocks.2.resnets.0.norm2",
"up_blocks.2.resnets.0.time_emb_proj",
"up_blocks.2.resnets.1.conv1",
"up_blocks.2.resnets.1.conv2",
"up_blocks.2.resnets.1.conv_shortcut",
"up_blocks.2.resnets.1.norm1",
"up_blocks.2.resnets.1.norm2",
"up_blocks.2.resnets.1.time_emb_proj",
"up_blocks.2.resnets.2.conv1",
"up_blocks.2.resnets.2.conv2",
"up_blocks.2.resnets.2.conv_shortcut",
"up_blocks.2.resnets.2.norm1",
"up_blocks.2.resnets.2.norm2",
"up_blocks.2.resnets.2.time_emb_proj",
"up_blocks.2.upsamplers.0.conv",
"up_blocks.3.attentions.0.norm",
"up_blocks.3.attentions.0.proj_in",
"up_blocks.3.attentions.0.proj_out",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_k",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_out.0",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_q",
"up_blocks.3.attentions.0.transformer_blocks.0.attn1.to_v",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_k",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_out.0",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_q",
"up_blocks.3.attentions.0.transformer_blocks.0.attn2.to_v",
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.0.proj",
"up_blocks.3.attentions.0.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.0.transformer_blocks.0.norm1",
"up_blocks.3.attentions.0.transformer_blocks.0.norm2",
"up_blocks.3.attentions.0.transformer_blocks.0.norm3",
"up_blocks.3.attentions.1.norm",
"up_blocks.3.attentions.1.proj_in",
"up_blocks.3.attentions.1.proj_out",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_k",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_out.0",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_q",
"up_blocks.3.attentions.1.transformer_blocks.0.attn1.to_v",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_k",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_out.0",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_q",
"up_blocks.3.attentions.1.transformer_blocks.0.attn2.to_v",
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.0.proj",
"up_blocks.3.attentions.1.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.1.transformer_blocks.0.norm1",
"up_blocks.3.attentions.1.transformer_blocks.0.norm2",
"up_blocks.3.attentions.1.transformer_blocks.0.norm3",
"up_blocks.3.attentions.2.norm",
"up_blocks.3.attentions.2.proj_in",
"up_blocks.3.attentions.2.proj_out",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_k",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_out.0",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_q",
"up_blocks.3.attentions.2.transformer_blocks.0.attn1.to_v",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_k",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_out.0",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_q",
"up_blocks.3.attentions.2.transformer_blocks.0.attn2.to_v",
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.0.proj",
"up_blocks.3.attentions.2.transformer_blocks.0.ff.net.2",
"up_blocks.3.attentions.2.transformer_blocks.0.norm1",
"up_blocks.3.attentions.2.transformer_blocks.0.norm2",
"up_blocks.3.attentions.2.transformer_blocks.0.norm3",
"up_blocks.3.resnets.0.conv1",
"up_blocks.3.resnets.0.conv2",
"up_blocks.3.resnets.0.conv_shortcut",
"up_blocks.3.resnets.0.norm1",
"up_blocks.3.resnets.0.norm2",
"up_blocks.3.resnets.0.time_emb_proj",
"up_blocks.3.resnets.1.conv1",
"up_blocks.3.resnets.1.conv2",
"up_blocks.3.resnets.1.conv_shortcut",
"up_blocks.3.resnets.1.norm1",
"up_blocks.3.resnets.1.norm2",
"up_blocks.3.resnets.1.time_emb_proj",
"up_blocks.3.resnets.2.conv1",
"up_blocks.3.resnets.2.conv2",
"up_blocks.3.resnets.2.conv_shortcut",
"up_blocks.3.resnets.2.norm1",
"up_blocks.3.resnets.2.norm2",
"up_blocks.3.resnets.2.time_emb_proj"
]

View File

@ -0,0 +1,14 @@
[
"first_stage_model.(decoder|encoder).(conv_in|conv_out|norm_out)",
"first_stage_model.(decoder|encoder).mid.(block_1|block_2).(conv1|conv2|norm1|norm2)",
"first_stage_model.(decoder|encoder).mid.attn_1.(k|norm|proj_out|q|v)",
"first_stage_model.(post_quant_conv|quant_conv)",
"first_stage_model.decoder.up.(0|1).block.(1|2).(conv1|conv2|norm1|norm2)",
"first_stage_model.decoder.up.(0|1).block.0.(conv1|conv2|nin_shortcut|norm1|norm2)",
"first_stage_model.decoder.up.(1|2|3).upsample.conv",
"first_stage_model.decoder.up.(2|3).block.(0|1|2).(conv1|conv2|norm1|norm2)",
"first_stage_model.encoder.down.(0|1|2).downsample.conv",
"first_stage_model.encoder.down.(0|3).block.(0|1).(conv1|conv2|norm1|norm2)",
"first_stage_model.encoder.down.(1|2).block.0.(conv1|conv2|nin_shortcut|norm1|norm2)",
"first_stage_model.encoder.down.(1|2).block.1.(conv1|conv2|norm1|norm2)"
]

View File

@ -0,0 +1,126 @@
[
"encoder.conv_in",
"encoder.down.0.block.0.norm1",
"encoder.down.0.block.0.conv1",
"encoder.down.0.block.0.norm2",
"encoder.down.0.block.0.conv2",
"encoder.down.0.block.1.norm1",
"encoder.down.0.block.1.conv1",
"encoder.down.0.block.1.norm2",
"encoder.down.0.block.1.conv2",
"encoder.down.0.downsample.conv",
"encoder.down.1.block.0.norm1",
"encoder.down.1.block.0.conv1",
"encoder.down.1.block.0.norm2",
"encoder.down.1.block.0.conv2",
"encoder.down.1.block.0.nin_shortcut",
"encoder.down.1.block.1.norm1",
"encoder.down.1.block.1.conv1",
"encoder.down.1.block.1.norm2",
"encoder.down.1.block.1.conv2",
"encoder.down.1.downsample.conv",
"encoder.down.2.block.0.norm1",
"encoder.down.2.block.0.conv1",
"encoder.down.2.block.0.norm2",
"encoder.down.2.block.0.conv2",
"encoder.down.2.block.0.nin_shortcut",
"encoder.down.2.block.1.norm1",
"encoder.down.2.block.1.conv1",
"encoder.down.2.block.1.norm2",
"encoder.down.2.block.1.conv2",
"encoder.down.2.downsample.conv",
"encoder.down.3.block.0.norm1",
"encoder.down.3.block.0.conv1",
"encoder.down.3.block.0.norm2",
"encoder.down.3.block.0.conv2",
"encoder.down.3.block.1.norm1",
"encoder.down.3.block.1.conv1",
"encoder.down.3.block.1.norm2",
"encoder.down.3.block.1.conv2",
"encoder.mid.block_1.norm1",
"encoder.mid.block_1.conv1",
"encoder.mid.block_1.norm2",
"encoder.mid.block_1.conv2",
"encoder.mid.attn_1.norm",
"encoder.mid.attn_1.q",
"encoder.mid.attn_1.k",
"encoder.mid.attn_1.v",
"encoder.mid.attn_1.proj_out",
"encoder.mid.block_2.norm1",
"encoder.mid.block_2.conv1",
"encoder.mid.block_2.norm2",
"encoder.mid.block_2.conv2",
"encoder.norm_out",
"encoder.conv_out",
"quant_conv",
"post_quant_conv",
"decoder.conv_in",
"decoder.mid.block_1.norm1",
"decoder.mid.block_1.conv1",
"decoder.mid.block_1.norm2",
"decoder.mid.block_1.conv2",
"decoder.mid.attn_1.norm",
"decoder.mid.attn_1.q",
"decoder.mid.attn_1.k",
"decoder.mid.attn_1.v",
"decoder.mid.attn_1.proj_out",
"decoder.mid.block_2.norm1",
"decoder.mid.block_2.conv1",
"decoder.mid.block_2.norm2",
"decoder.mid.block_2.conv2",
"decoder.up.3.block.0.norm1",
"decoder.up.3.block.0.conv1",
"decoder.up.3.block.0.norm2",
"decoder.up.3.block.0.conv2",
"decoder.up.3.block.1.norm1",
"decoder.up.3.block.1.conv1",
"decoder.up.3.block.1.norm2",
"decoder.up.3.block.1.conv2",
"decoder.up.3.block.2.norm1",
"decoder.up.3.block.2.conv1",
"decoder.up.3.block.2.norm2",
"decoder.up.3.block.2.conv2",
"decoder.up.3.upsample.conv",
"decoder.up.2.block.0.norm1",
"decoder.up.2.block.0.conv1",
"decoder.up.2.block.0.norm2",
"decoder.up.2.block.0.conv2",
"decoder.up.2.block.1.norm1",
"decoder.up.2.block.1.conv1",
"decoder.up.2.block.1.norm2",
"decoder.up.2.block.1.conv2",
"decoder.up.2.block.2.norm1",
"decoder.up.2.block.2.conv1",
"decoder.up.2.block.2.norm2",
"decoder.up.2.block.2.conv2",
"decoder.up.2.upsample.conv",
"decoder.up.1.block.0.norm1",
"decoder.up.1.block.0.conv1",
"decoder.up.1.block.0.norm2",
"decoder.up.1.block.0.conv2",
"decoder.up.1.block.0.nin_shortcut",
"decoder.up.1.block.1.norm1",
"decoder.up.1.block.1.conv1",
"decoder.up.1.block.1.norm2",
"decoder.up.1.block.1.conv2",
"decoder.up.1.block.2.norm1",
"decoder.up.1.block.2.conv1",
"decoder.up.1.block.2.norm2",
"decoder.up.1.block.2.conv2",
"decoder.up.1.upsample.conv",
"decoder.up.0.block.0.norm1",
"decoder.up.0.block.0.conv1",
"decoder.up.0.block.0.norm2",
"decoder.up.0.block.0.conv2",
"decoder.up.0.block.0.nin_shortcut",
"decoder.up.0.block.1.norm1",
"decoder.up.0.block.1.conv1",
"decoder.up.0.block.1.norm2",
"decoder.up.0.block.1.conv2",
"decoder.up.0.block.2.norm1",
"decoder.up.0.block.2.conv1",
"decoder.up.0.block.2.norm2",
"decoder.up.0.block.2.conv2",
"decoder.norm_out",
"decoder.conv_out"
]

View File

@ -0,0 +1,126 @@
[
"first_stage_model.decoder.conv_in",
"first_stage_model.decoder.conv_out",
"first_stage_model.decoder.mid.attn_1.k",
"first_stage_model.decoder.mid.attn_1.norm",
"first_stage_model.decoder.mid.attn_1.proj_out",
"first_stage_model.decoder.mid.attn_1.q",
"first_stage_model.decoder.mid.attn_1.v",
"first_stage_model.decoder.mid.block_1.conv1",
"first_stage_model.decoder.mid.block_1.conv2",
"first_stage_model.decoder.mid.block_1.norm1",
"first_stage_model.decoder.mid.block_1.norm2",
"first_stage_model.decoder.mid.block_2.conv1",
"first_stage_model.decoder.mid.block_2.conv2",
"first_stage_model.decoder.mid.block_2.norm1",
"first_stage_model.decoder.mid.block_2.norm2",
"first_stage_model.decoder.norm_out",
"first_stage_model.decoder.up.0.block.0.conv1",
"first_stage_model.decoder.up.0.block.0.conv2",
"first_stage_model.decoder.up.0.block.0.nin_shortcut",
"first_stage_model.decoder.up.0.block.0.norm1",
"first_stage_model.decoder.up.0.block.0.norm2",
"first_stage_model.decoder.up.0.block.1.conv1",
"first_stage_model.decoder.up.0.block.1.conv2",
"first_stage_model.decoder.up.0.block.1.norm1",
"first_stage_model.decoder.up.0.block.1.norm2",
"first_stage_model.decoder.up.0.block.2.conv1",
"first_stage_model.decoder.up.0.block.2.conv2",
"first_stage_model.decoder.up.0.block.2.norm1",
"first_stage_model.decoder.up.0.block.2.norm2",
"first_stage_model.decoder.up.1.block.0.conv1",
"first_stage_model.decoder.up.1.block.0.conv2",
"first_stage_model.decoder.up.1.block.0.nin_shortcut",
"first_stage_model.decoder.up.1.block.0.norm1",
"first_stage_model.decoder.up.1.block.0.norm2",
"first_stage_model.decoder.up.1.block.1.conv1",
"first_stage_model.decoder.up.1.block.1.conv2",
"first_stage_model.decoder.up.1.block.1.norm1",
"first_stage_model.decoder.up.1.block.1.norm2",
"first_stage_model.decoder.up.1.block.2.conv1",
"first_stage_model.decoder.up.1.block.2.conv2",
"first_stage_model.decoder.up.1.block.2.norm1",
"first_stage_model.decoder.up.1.block.2.norm2",
"first_stage_model.decoder.up.1.upsample.conv",
"first_stage_model.decoder.up.2.block.0.conv1",
"first_stage_model.decoder.up.2.block.0.conv2",
"first_stage_model.decoder.up.2.block.0.norm1",
"first_stage_model.decoder.up.2.block.0.norm2",
"first_stage_model.decoder.up.2.block.1.conv1",
"first_stage_model.decoder.up.2.block.1.conv2",
"first_stage_model.decoder.up.2.block.1.norm1",
"first_stage_model.decoder.up.2.block.1.norm2",
"first_stage_model.decoder.up.2.block.2.conv1",
"first_stage_model.decoder.up.2.block.2.conv2",
"first_stage_model.decoder.up.2.block.2.norm1",
"first_stage_model.decoder.up.2.block.2.norm2",
"first_stage_model.decoder.up.2.upsample.conv",
"first_stage_model.decoder.up.3.block.0.conv1",
"first_stage_model.decoder.up.3.block.0.conv2",
"first_stage_model.decoder.up.3.block.0.norm1",
"first_stage_model.decoder.up.3.block.0.norm2",
"first_stage_model.decoder.up.3.block.1.conv1",
"first_stage_model.decoder.up.3.block.1.conv2",
"first_stage_model.decoder.up.3.block.1.norm1",
"first_stage_model.decoder.up.3.block.1.norm2",
"first_stage_model.decoder.up.3.block.2.conv1",
"first_stage_model.decoder.up.3.block.2.conv2",
"first_stage_model.decoder.up.3.block.2.norm1",
"first_stage_model.decoder.up.3.block.2.norm2",
"first_stage_model.decoder.up.3.upsample.conv",
"first_stage_model.encoder.conv_in",
"first_stage_model.encoder.conv_out",
"first_stage_model.encoder.down.0.block.0.conv1",
"first_stage_model.encoder.down.0.block.0.conv2",
"first_stage_model.encoder.down.0.block.0.norm1",
"first_stage_model.encoder.down.0.block.0.norm2",
"first_stage_model.encoder.down.0.block.1.conv1",
"first_stage_model.encoder.down.0.block.1.conv2",
"first_stage_model.encoder.down.0.block.1.norm1",
"first_stage_model.encoder.down.0.block.1.norm2",
"first_stage_model.encoder.down.0.downsample.conv",
"first_stage_model.encoder.down.1.block.0.conv1",
"first_stage_model.encoder.down.1.block.0.conv2",
"first_stage_model.encoder.down.1.block.0.nin_shortcut",
"first_stage_model.encoder.down.1.block.0.norm1",
"first_stage_model.encoder.down.1.block.0.norm2",
"first_stage_model.encoder.down.1.block.1.conv1",
"first_stage_model.encoder.down.1.block.1.conv2",
"first_stage_model.encoder.down.1.block.1.norm1",
"first_stage_model.encoder.down.1.block.1.norm2",
"first_stage_model.encoder.down.1.downsample.conv",
"first_stage_model.encoder.down.2.block.0.conv1",
"first_stage_model.encoder.down.2.block.0.conv2",
"first_stage_model.encoder.down.2.block.0.nin_shortcut",
"first_stage_model.encoder.down.2.block.0.norm1",
"first_stage_model.encoder.down.2.block.0.norm2",
"first_stage_model.encoder.down.2.block.1.conv1",
"first_stage_model.encoder.down.2.block.1.conv2",
"first_stage_model.encoder.down.2.block.1.norm1",
"first_stage_model.encoder.down.2.block.1.norm2",
"first_stage_model.encoder.down.2.downsample.conv",
"first_stage_model.encoder.down.3.block.0.conv1",
"first_stage_model.encoder.down.3.block.0.conv2",
"first_stage_model.encoder.down.3.block.0.norm1",
"first_stage_model.encoder.down.3.block.0.norm2",
"first_stage_model.encoder.down.3.block.1.conv1",
"first_stage_model.encoder.down.3.block.1.conv2",
"first_stage_model.encoder.down.3.block.1.norm1",
"first_stage_model.encoder.down.3.block.1.norm2",
"first_stage_model.encoder.mid.attn_1.k",
"first_stage_model.encoder.mid.attn_1.norm",
"first_stage_model.encoder.mid.attn_1.proj_out",
"first_stage_model.encoder.mid.attn_1.q",
"first_stage_model.encoder.mid.attn_1.v",
"first_stage_model.encoder.mid.block_1.conv1",
"first_stage_model.encoder.mid.block_1.conv2",
"first_stage_model.encoder.mid.block_1.norm1",
"first_stage_model.encoder.mid.block_1.norm2",
"first_stage_model.encoder.mid.block_2.conv1",
"first_stage_model.encoder.mid.block_2.conv2",
"first_stage_model.encoder.mid.block_2.norm1",
"first_stage_model.encoder.mid.block_2.norm2",
"first_stage_model.encoder.norm_out",
"first_stage_model.post_quant_conv",
"first_stage_model.quant_conv"
]

View File

@ -0,0 +1,15 @@
[
"(decoder|encoder).(conv_in|conv_norm_out|conv_out)",
"(decoder|encoder).mid_block.attentions.0.(group_norm|to_k|to_q|to_v)",
"(decoder|encoder).mid_block.attentions.0.to_out.0",
"(decoder|encoder).mid_block.resnets.(0|1).(conv1|conv2|norm1|norm2)",
"(post_quant_conv|quant_conv)",
"decoder.up_blocks.(0|1).resnets.(0|1|2).(conv1|conv2|norm1|norm2)",
"decoder.up_blocks.(0|1|2).upsamplers.0.conv",
"decoder.up_blocks.(2|3).resnets.(1|2).(conv1|conv2|norm1|norm2)",
"decoder.up_blocks.(2|3).resnets.0.(conv1|conv2|conv_shortcut|norm1|norm2)",
"encoder.down_blocks.(0|1|2).downsamplers.0.conv",
"encoder.down_blocks.(0|3).resnets.(0|1).(conv1|conv2|norm1|norm2)",
"encoder.down_blocks.(1|2).resnets.0.(conv1|conv2|conv_shortcut|norm1|norm2)",
"encoder.down_blocks.(1|2).resnets.1.(conv1|conv2|norm1|norm2)"
]
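The compact entries above look like the exhaustive per-layer listings collapsed with `(a|b)` alternation groups. As an illustration only (the `expand` helper below is made up, not code from this commit), such a pattern can be unrolled back into concrete module paths with the standard library:

```python
import itertools
import re


def expand(pattern: str) -> list[str]:
    """Expand '(a|b|c)' alternation groups into every concrete dotted path."""
    # Split into literal text and alternation groups, keeping the groups.
    parts = re.split(r"(\([^)]*\))", pattern)
    choices = [p[1:-1].split("|") if p.startswith("(") else [p] for p in parts]
    return ["".join(combo) for combo in itertools.product(*choices)]


print(expand("decoder.up_blocks.(0|1|2).upsamplers.0.conv"))
# ['decoder.up_blocks.0.upsamplers.0.conv',
#  'decoder.up_blocks.1.upsamplers.0.conv',
#  'decoder.up_blocks.2.upsamplers.0.conv']
```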

View File

@ -0,0 +1,126 @@
[
"encoder.conv_in",
"encoder.down_blocks.0.resnets.0.norm1",
"encoder.down_blocks.0.resnets.0.conv1",
"encoder.down_blocks.0.resnets.0.norm2",
"encoder.down_blocks.0.resnets.0.conv2",
"encoder.down_blocks.0.resnets.1.norm1",
"encoder.down_blocks.0.resnets.1.conv1",
"encoder.down_blocks.0.resnets.1.norm2",
"encoder.down_blocks.0.resnets.1.conv2",
"encoder.down_blocks.0.downsamplers.0.conv",
"encoder.down_blocks.1.resnets.0.norm1",
"encoder.down_blocks.1.resnets.0.conv1",
"encoder.down_blocks.1.resnets.0.norm2",
"encoder.down_blocks.1.resnets.0.conv2",
"encoder.down_blocks.1.resnets.0.conv_shortcut",
"encoder.down_blocks.1.resnets.1.norm1",
"encoder.down_blocks.1.resnets.1.conv1",
"encoder.down_blocks.1.resnets.1.norm2",
"encoder.down_blocks.1.resnets.1.conv2",
"encoder.down_blocks.1.downsamplers.0.conv",
"encoder.down_blocks.2.resnets.0.norm1",
"encoder.down_blocks.2.resnets.0.conv1",
"encoder.down_blocks.2.resnets.0.norm2",
"encoder.down_blocks.2.resnets.0.conv2",
"encoder.down_blocks.2.resnets.0.conv_shortcut",
"encoder.down_blocks.2.resnets.1.norm1",
"encoder.down_blocks.2.resnets.1.conv1",
"encoder.down_blocks.2.resnets.1.norm2",
"encoder.down_blocks.2.resnets.1.conv2",
"encoder.down_blocks.2.downsamplers.0.conv",
"encoder.down_blocks.3.resnets.0.norm1",
"encoder.down_blocks.3.resnets.0.conv1",
"encoder.down_blocks.3.resnets.0.norm2",
"encoder.down_blocks.3.resnets.0.conv2",
"encoder.down_blocks.3.resnets.1.norm1",
"encoder.down_blocks.3.resnets.1.conv1",
"encoder.down_blocks.3.resnets.1.norm2",
"encoder.down_blocks.3.resnets.1.conv2",
"encoder.mid_block.resnets.0.norm1",
"encoder.mid_block.resnets.0.conv1",
"encoder.mid_block.resnets.0.norm2",
"encoder.mid_block.resnets.0.conv2",
"encoder.mid_block.attentions.0.group_norm",
"encoder.mid_block.attentions.0.to_q",
"encoder.mid_block.attentions.0.to_k",
"encoder.mid_block.attentions.0.to_v",
"encoder.mid_block.attentions.0.to_out.0",
"encoder.mid_block.resnets.1.norm1",
"encoder.mid_block.resnets.1.conv1",
"encoder.mid_block.resnets.1.norm2",
"encoder.mid_block.resnets.1.conv2",
"encoder.conv_norm_out",
"encoder.conv_out",
"quant_conv",
"post_quant_conv",
"decoder.conv_in",
"decoder.mid_block.resnets.0.norm1",
"decoder.mid_block.resnets.0.conv1",
"decoder.mid_block.resnets.0.norm2",
"decoder.mid_block.resnets.0.conv2",
"decoder.mid_block.attentions.0.group_norm",
"decoder.mid_block.attentions.0.to_q",
"decoder.mid_block.attentions.0.to_k",
"decoder.mid_block.attentions.0.to_v",
"decoder.mid_block.attentions.0.to_out.0",
"decoder.mid_block.resnets.1.norm1",
"decoder.mid_block.resnets.1.conv1",
"decoder.mid_block.resnets.1.norm2",
"decoder.mid_block.resnets.1.conv2",
"decoder.up_blocks.0.resnets.0.norm1",
"decoder.up_blocks.0.resnets.0.conv1",
"decoder.up_blocks.0.resnets.0.norm2",
"decoder.up_blocks.0.resnets.0.conv2",
"decoder.up_blocks.0.resnets.1.norm1",
"decoder.up_blocks.0.resnets.1.conv1",
"decoder.up_blocks.0.resnets.1.norm2",
"decoder.up_blocks.0.resnets.1.conv2",
"decoder.up_blocks.0.resnets.2.norm1",
"decoder.up_blocks.0.resnets.2.conv1",
"decoder.up_blocks.0.resnets.2.norm2",
"decoder.up_blocks.0.resnets.2.conv2",
"decoder.up_blocks.0.upsamplers.0.conv",
"decoder.up_blocks.1.resnets.0.norm1",
"decoder.up_blocks.1.resnets.0.conv1",
"decoder.up_blocks.1.resnets.0.norm2",
"decoder.up_blocks.1.resnets.0.conv2",
"decoder.up_blocks.1.resnets.1.norm1",
"decoder.up_blocks.1.resnets.1.conv1",
"decoder.up_blocks.1.resnets.1.norm2",
"decoder.up_blocks.1.resnets.1.conv2",
"decoder.up_blocks.1.resnets.2.norm1",
"decoder.up_blocks.1.resnets.2.conv1",
"decoder.up_blocks.1.resnets.2.norm2",
"decoder.up_blocks.1.resnets.2.conv2",
"decoder.up_blocks.1.upsamplers.0.conv",
"decoder.up_blocks.2.resnets.0.norm1",
"decoder.up_blocks.2.resnets.0.conv1",
"decoder.up_blocks.2.resnets.0.norm2",
"decoder.up_blocks.2.resnets.0.conv2",
"decoder.up_blocks.2.resnets.0.conv_shortcut",
"decoder.up_blocks.2.resnets.1.norm1",
"decoder.up_blocks.2.resnets.1.conv1",
"decoder.up_blocks.2.resnets.1.norm2",
"decoder.up_blocks.2.resnets.1.conv2",
"decoder.up_blocks.2.resnets.2.norm1",
"decoder.up_blocks.2.resnets.2.conv1",
"decoder.up_blocks.2.resnets.2.norm2",
"decoder.up_blocks.2.resnets.2.conv2",
"decoder.up_blocks.2.upsamplers.0.conv",
"decoder.up_blocks.3.resnets.0.norm1",
"decoder.up_blocks.3.resnets.0.conv1",
"decoder.up_blocks.3.resnets.0.norm2",
"decoder.up_blocks.3.resnets.0.conv2",
"decoder.up_blocks.3.resnets.0.conv_shortcut",
"decoder.up_blocks.3.resnets.1.norm1",
"decoder.up_blocks.3.resnets.1.conv1",
"decoder.up_blocks.3.resnets.1.norm2",
"decoder.up_blocks.3.resnets.1.conv2",
"decoder.up_blocks.3.resnets.2.norm1",
"decoder.up_blocks.3.resnets.2.conv1",
"decoder.up_blocks.3.resnets.2.norm2",
"decoder.up_blocks.3.resnets.2.conv2",
"decoder.conv_norm_out",
"decoder.conv_out"
]

View File

@ -0,0 +1,126 @@
[
"decoder.conv_in",
"decoder.conv_norm_out",
"decoder.conv_out",
"decoder.mid_block.attentions.0.group_norm",
"decoder.mid_block.attentions.0.to_k",
"decoder.mid_block.attentions.0.to_out.0",
"decoder.mid_block.attentions.0.to_q",
"decoder.mid_block.attentions.0.to_v",
"decoder.mid_block.resnets.0.conv1",
"decoder.mid_block.resnets.0.conv2",
"decoder.mid_block.resnets.0.norm1",
"decoder.mid_block.resnets.0.norm2",
"decoder.mid_block.resnets.1.conv1",
"decoder.mid_block.resnets.1.conv2",
"decoder.mid_block.resnets.1.norm1",
"decoder.mid_block.resnets.1.norm2",
"decoder.up_blocks.0.resnets.0.conv1",
"decoder.up_blocks.0.resnets.0.conv2",
"decoder.up_blocks.0.resnets.0.norm1",
"decoder.up_blocks.0.resnets.0.norm2",
"decoder.up_blocks.0.resnets.1.conv1",
"decoder.up_blocks.0.resnets.1.conv2",
"decoder.up_blocks.0.resnets.1.norm1",
"decoder.up_blocks.0.resnets.1.norm2",
"decoder.up_blocks.0.resnets.2.conv1",
"decoder.up_blocks.0.resnets.2.conv2",
"decoder.up_blocks.0.resnets.2.norm1",
"decoder.up_blocks.0.resnets.2.norm2",
"decoder.up_blocks.0.upsamplers.0.conv",
"decoder.up_blocks.1.resnets.0.conv1",
"decoder.up_blocks.1.resnets.0.conv2",
"decoder.up_blocks.1.resnets.0.norm1",
"decoder.up_blocks.1.resnets.0.norm2",
"decoder.up_blocks.1.resnets.1.conv1",
"decoder.up_blocks.1.resnets.1.conv2",
"decoder.up_blocks.1.resnets.1.norm1",
"decoder.up_blocks.1.resnets.1.norm2",
"decoder.up_blocks.1.resnets.2.conv1",
"decoder.up_blocks.1.resnets.2.conv2",
"decoder.up_blocks.1.resnets.2.norm1",
"decoder.up_blocks.1.resnets.2.norm2",
"decoder.up_blocks.1.upsamplers.0.conv",
"decoder.up_blocks.2.resnets.0.conv1",
"decoder.up_blocks.2.resnets.0.conv2",
"decoder.up_blocks.2.resnets.0.conv_shortcut",
"decoder.up_blocks.2.resnets.0.norm1",
"decoder.up_blocks.2.resnets.0.norm2",
"decoder.up_blocks.2.resnets.1.conv1",
"decoder.up_blocks.2.resnets.1.conv2",
"decoder.up_blocks.2.resnets.1.norm1",
"decoder.up_blocks.2.resnets.1.norm2",
"decoder.up_blocks.2.resnets.2.conv1",
"decoder.up_blocks.2.resnets.2.conv2",
"decoder.up_blocks.2.resnets.2.norm1",
"decoder.up_blocks.2.resnets.2.norm2",
"decoder.up_blocks.2.upsamplers.0.conv",
"decoder.up_blocks.3.resnets.0.conv1",
"decoder.up_blocks.3.resnets.0.conv2",
"decoder.up_blocks.3.resnets.0.conv_shortcut",
"decoder.up_blocks.3.resnets.0.norm1",
"decoder.up_blocks.3.resnets.0.norm2",
"decoder.up_blocks.3.resnets.1.conv1",
"decoder.up_blocks.3.resnets.1.conv2",
"decoder.up_blocks.3.resnets.1.norm1",
"decoder.up_blocks.3.resnets.1.norm2",
"decoder.up_blocks.3.resnets.2.conv1",
"decoder.up_blocks.3.resnets.2.conv2",
"decoder.up_blocks.3.resnets.2.norm1",
"decoder.up_blocks.3.resnets.2.norm2",
"encoder.conv_in",
"encoder.conv_norm_out",
"encoder.conv_out",
"encoder.down_blocks.0.downsamplers.0.conv",
"encoder.down_blocks.0.resnets.0.conv1",
"encoder.down_blocks.0.resnets.0.conv2",
"encoder.down_blocks.0.resnets.0.norm1",
"encoder.down_blocks.0.resnets.0.norm2",
"encoder.down_blocks.0.resnets.1.conv1",
"encoder.down_blocks.0.resnets.1.conv2",
"encoder.down_blocks.0.resnets.1.norm1",
"encoder.down_blocks.0.resnets.1.norm2",
"encoder.down_blocks.1.downsamplers.0.conv",
"encoder.down_blocks.1.resnets.0.conv1",
"encoder.down_blocks.1.resnets.0.conv2",
"encoder.down_blocks.1.resnets.0.conv_shortcut",
"encoder.down_blocks.1.resnets.0.norm1",
"encoder.down_blocks.1.resnets.0.norm2",
"encoder.down_blocks.1.resnets.1.conv1",
"encoder.down_blocks.1.resnets.1.conv2",
"encoder.down_blocks.1.resnets.1.norm1",
"encoder.down_blocks.1.resnets.1.norm2",
"encoder.down_blocks.2.downsamplers.0.conv",
"encoder.down_blocks.2.resnets.0.conv1",
"encoder.down_blocks.2.resnets.0.conv2",
"encoder.down_blocks.2.resnets.0.conv_shortcut",
"encoder.down_blocks.2.resnets.0.norm1",
"encoder.down_blocks.2.resnets.0.norm2",
"encoder.down_blocks.2.resnets.1.conv1",
"encoder.down_blocks.2.resnets.1.conv2",
"encoder.down_blocks.2.resnets.1.norm1",
"encoder.down_blocks.2.resnets.1.norm2",
"encoder.down_blocks.3.resnets.0.conv1",
"encoder.down_blocks.3.resnets.0.conv2",
"encoder.down_blocks.3.resnets.0.norm1",
"encoder.down_blocks.3.resnets.0.norm2",
"encoder.down_blocks.3.resnets.1.conv1",
"encoder.down_blocks.3.resnets.1.conv2",
"encoder.down_blocks.3.resnets.1.norm1",
"encoder.down_blocks.3.resnets.1.norm2",
"encoder.mid_block.attentions.0.group_norm",
"encoder.mid_block.attentions.0.to_k",
"encoder.mid_block.attentions.0.to_out.0",
"encoder.mid_block.attentions.0.to_q",
"encoder.mid_block.attentions.0.to_v",
"encoder.mid_block.resnets.0.conv1",
"encoder.mid_block.resnets.0.conv2",
"encoder.mid_block.resnets.0.norm1",
"encoder.mid_block.resnets.0.norm2",
"encoder.mid_block.resnets.1.conv1",
"encoder.mid_block.resnets.1.conv2",
"encoder.mid_block.resnets.1.norm1",
"encoder.mid_block.resnets.1.norm2",
"post_quant_conv",
"quant_conv"
]

View File

@ -1,13 +1,14 @@
#
# This file is autogenerated by pip-compile with Python 3.10
# by the following command:
#
# pip-compile --output-file=requirements-dev.txt requirements-dev.in setup.py
#
aiohttp==3.8.5
aiohttp==3.9.0
# via fsspec
aiosignal==1.3.1
# via aiohttp
annotated-types==0.5.0
annotated-types==0.6.0
# via pydantic
antlr4-python3-runtime==4.9.3
# via omegaconf
@ -19,14 +20,12 @@ async-timeout==4.0.3
# via aiohttp
attrs==23.1.0
# via aiohttp
black==23.9.1
black==23.11.0
# via -r requirements-dev.in
certifi==2023.7.22
certifi==2023.11.17
# via requests
charset-normalizer==3.3.2
# via requests
charset-normalizer==3.2.0
# via
# aiohttp
# requests
click==8.1.7
# via
# black
@ -34,21 +33,21 @@ click==8.1.7
# click-shell
# imaginAIry (setup.py)
# uvicorn
click-help-colors==0.9.2
click-help-colors==0.9.4
# via imaginAIry (setup.py)
click-shell==2.1
# via imaginAIry (setup.py)
contourpy==1.1.1
contourpy==1.2.0
# via matplotlib
coverage==7.3.1
coverage==7.3.2
# via -r requirements-dev.in
cycler==0.12.0
cycler==0.12.1
# via matplotlib
diffusers==0.21.4
diffusers==0.23.1
# via imaginAIry (setup.py)
einops==0.6.1
einops==0.7.0
# via imaginAIry (setup.py)
exceptiongroup==1.1.3
exceptiongroup==1.2.0
# via
# anyio
# pytest
@ -56,69 +55,82 @@ facexlib==0.3.0
# via imaginAIry (setup.py)
fairscale==0.4.13
# via imaginAIry (setup.py)
fastapi==0.103.2
fastapi==0.104.1
# via imaginAIry (setup.py)
filelock==3.12.4
filelock==3.13.1
# via
# diffusers
# huggingface-hub
# torch
# transformers
filterpy==1.4.5
# via facexlib
fonttools==4.43.0
fonttools==4.45.0
# via matplotlib
frozenlist==1.4.0
# via
# aiohttp
# aiosignal
fsspec[http]==2023.9.2
fsspec[http]==2023.10.0
# via
# huggingface-hub
# pytorch-lightning
ftfy==6.1.1
# torch
ftfy==6.1.3
# via
# imaginAIry (setup.py)
# open-clip-torch
h11==0.14.0
# via uvicorn
huggingface-hub==0.17.3
huggingface-hub==0.19.4
# via
# diffusers
# open-clip-torch
# timm
# tokenizers
# transformers
idna==3.4
# via
# anyio
# requests
# yarl
imageio==2.31.4
imageio==2.33.0
# via imaginAIry (setup.py)
importlib-metadata==6.8.0
# via diffusers
iniconfig==2.0.0
# via pytest
jaxtyping==0.2.23
# via refiners
jinja2==3.1.2
# via torch
kiwisolver==1.4.5
# via matplotlib
kornia==0.7.0
# via imaginAIry (setup.py)
lightning-utilities==0.9.0
lightning-utilities==0.10.0
# via
# pytorch-lightning
# torchmetrics
llvmlite==0.41.0
llvmlite==0.41.1
# via numba
matplotlib==3.7.3
markupsafe==2.1.3
# via jinja2
matplotlib==3.7.4
# via
# -c tests/constraints.txt
# filterpy
mpmath==1.3.0
# via sympy
multidict==6.0.4
# via
# aiohttp
# yarl
mypy-extensions==1.0.0
# via black
numba==0.58.0
networkx==3.2.1
# via torch
numba==0.58.1
# via facexlib
numpy==1.24.4
# via
@ -130,23 +142,25 @@ numpy==1.24.4
# filterpy
# imageio
# imaginAIry (setup.py)
# jaxtyping
# matplotlib
# numba
# opencv-python
# pytorch-lightning
# refiners
# scipy
# torchmetrics
# torchvision
# transformers
omegaconf==2.3.0
# via imaginAIry (setup.py)
open-clip-torch==2.20.0
open-clip-torch==2.23.0
# via imaginAIry (setup.py)
opencv-python==4.8.1.78
# via
# facexlib
# imaginAIry (setup.py)
packaging==23.1
packaging==23.2
# via
# black
# huggingface-hub
@ -159,33 +173,34 @@ packaging==23.1
# transformers
pathspec==0.11.2
# via black
pillow==10.0.1
pillow==10.1.0
# via
# diffusers
# facexlib
# imageio
# imaginAIry (setup.py)
# matplotlib
# refiners
# torchvision
platformdirs==3.10.0
platformdirs==4.0.0
# via black
pluggy==1.3.0
# via pytest
protobuf==3.20.3
protobuf==4.25.1
# via
# imaginAIry (setup.py)
# open-clip-torch
psutil==5.9.5
psutil==5.9.6
# via imaginAIry (setup.py)
pydantic==2.4.2
pydantic==2.5.2
# via
# fastapi
# imaginAIry (setup.py)
pydantic-core==2.10.1
pydantic-core==2.14.5
# via pydantic
pyparsing==3.1.1
# via matplotlib
pytest==7.4.2
pytest==7.4.3
# via
# -r requirements-dev.in
# pytest-randomly
@ -206,7 +221,9 @@ pyyaml==6.0.1
# responses
# timm
# transformers
regex==2023.8.8
refiners==0.2.0
# via imaginAIry (setup.py)
regex==2023.10.3
# via
# diffusers
# open-clip-torch
@ -220,14 +237,15 @@ requests==2.31.0
# responses
# torchvision
# transformers
responses==0.23.3
responses==0.24.1
# via -r requirements-dev.in
ruff==0.0.291
ruff==0.1.6
# via -r requirements-dev.in
safetensors==0.3.3
# via
# diffusers
# imaginAIry (setup.py)
# refiners
# timm
# transformers
scipy==1.10.1
@ -244,19 +262,21 @@ sniffio==1.3.0
# via anyio
starlette==0.27.0
# via fastapi
sympy==1.12
# via torch
termcolor==2.3.0
# via pytest-sugar
timm==0.9.7
timm==0.9.11
# via
# imaginAIry (setup.py)
# open-clip-torch
tokenizers==0.13.3
tokenizers==0.15.0
# via transformers
tomli==2.0.1
# via
# black
# pytest
torch==1.13.1
torch==2.1.1
# via
# facexlib
# fairscale
@ -264,6 +284,7 @@ torch==1.13.1
# kornia
# open-clip-torch
# pytorch-lightning
# refiners
# timm
# torchdiffeq
# torchmetrics
@ -274,7 +295,7 @@ torchmetrics==1.2.0
# via
# imaginAIry (setup.py)
# pytorch-lightning
torchvision==0.14.1
torchvision==0.16.1
# via
# facexlib
# imaginAIry (setup.py)
@ -288,33 +309,36 @@ tqdm==4.66.1
# open-clip-torch
# pytorch-lightning
# transformers
transformers==4.33.3
transformers==4.35.2
# via imaginAIry (setup.py)
types-pyyaml==6.0.12.12
# via responses
typeguard==2.13.3
# via jaxtyping
typing-extensions==4.8.0
# via
# black
# fastapi
# huggingface-hub
# jaxtyping
# lightning-utilities
# pydantic
# pydantic-core
# pytorch-lightning
# torch
# torchvision
# uvicorn
urllib3==2.0.5
urllib3==2.1.0
# via
# requests
# responses
uvicorn==0.23.2
uvicorn==0.24.0.post1
# via imaginAIry (setup.py)
wcwidth==0.2.7
wcwidth==0.2.12
# via ftfy
wheel==0.41.2
wheel==0.41.3
# via -r requirements-dev.in
yarl==1.9.2
yarl==1.9.3
# via aiohttp
zipp==3.17.0
# via importlib-metadata
# The following packages are considered to be unsafe in a requirements file:
# setuptools

View File

@ -81,7 +81,7 @@ setup(
"fastapi>=0.70.0",
"ftfy>=6.0.1", # for vendored clip
# 2.0.0 produced garbage images on macOS
"torch>=1.13.1,<2.0.0",
"torch>=2.1.0",
# https://numpy.org/neps/nep-0029-deprecation_policy.html
"numpy>=1.19.0,<1.26.0",
"tqdm>=4.64.0",
@ -97,6 +97,7 @@ setup(
# need to migrate to 2.0
"pydantic>=2.3.0",
"requests>=2.28.1",
"refiners>=0.2.0",
"einops>=0.3.0",
"safetensors>=0.2.1",
# scipy is a sub dependency but v1.11 doesn't support python 3.8. https://docs.scipy.org/doc/scipy/dev/toolchain.html#numpy
@ -106,10 +107,9 @@ setup(
"torchmetrics>=0.6.0",
"torchvision>=0.13.1",
"transformers>=4.19.2",
"triton>=2.0.0; sys_platform!='darwin' and platform_machine!='aarch64'",
# "triton>=2.0.0; sys_platform!='darwin' and platform_machine!='aarch64'",
"kornia>=0.6",
"uvicorn>=0.16.0",
"xformers>=0.0.16; sys_platform!='darwin' and platform_machine!='aarch64'",
],
# don't specify maximum python versions as it can cause very long dependency resolution issues as the resolver
# goes back to older versions of packages that didn't specify a maximum

View File

@ -32,6 +32,8 @@ if get_device() == "mps:0":
elif get_device() == "cpu":
SAMPLERS_FOR_TESTING = []
SAMPLERS_FOR_TESTING = ["ddim", "k_dpmpp_2m"]
@pytest.fixture(scope="session", autouse=True)
def _pre_setup():

Binary files not shown: 22 expected-output test images were regenerated, with before/after file sizes shifting only slightly (most between roughly 240 KiB and 580 KiB; one grows from 2.1 MiB to 2.4 MiB).

View File

@ -40,9 +40,7 @@ compare_prompts = [
@pytest.mark.skipif(get_device() != "cuda", reason="Too slow to run on CPU or MPS")
@pytest.mark.parametrize(
"model_version", ["SD-1.4", "SD-1.5", "SD-2.0", "SD-2.0-v", "SD-2.1", "SD-2.1-v"]
)
@pytest.mark.parametrize("model_version", ["SD-1.5"])
def test_model_versions(filename_base_for_orig_outputs, model_version):
"""Test that we can switch between model versions."""
prompts = []
@ -218,7 +216,6 @@ def test_img_to_img_fruit_2_gold_repeat():
"mask_mode": "replace",
"steps": 20,
"seed": 946188797,
"sampler_type": "plms",
"fix_faces": True,
"upscale": True,
}
@ -229,7 +226,7 @@ def test_img_to_img_fruit_2_gold_repeat():
]
for result in imagine(prompts, debug_img_callback=None):
result.img.save(
f"{TESTS_FOLDER}/test_output/img2img_fruit_2_gold_plms_{get_device()}_run-{run_count:02}.jpg"
f"{TESTS_FOLDER}/test_output/img2img_fruit_2_gold_{result.prompt.sampler_type}_{get_device()}_run-{run_count:02}.jpg"
)
run_count += 1
@ -242,7 +239,6 @@ def test_img_to_file():
height=512 - 64,
steps=20,
seed=2,
sampler_type="PLMS",
upscale=True,
)
out_folder = f"{TESTS_FOLDER}/test_output"
@ -261,7 +257,6 @@ def test_inpainting_bench(filename_base_for_outputs, filename_base_for_orig_outp
height=512,
steps=40,
seed=1,
sampler_type="plms",
)
result = next(imagine(prompt))
@ -287,7 +282,6 @@ def test_cliptext_inpainting_pearl_doctor(
width=512,
height=512,
steps=40,
sampler_type="plms",
seed=181509347,
)
result = next(imagine(prompt))
@ -355,7 +349,7 @@ def test_large_image(filename_base_for_outputs):
prompt_text,
width=1920,
height=1080,
steps=15,
steps=30,
seed=0,
)
result = next(imagine(prompt))

View File

@ -25,8 +25,8 @@ def test_imagine_cmd(monkeypatch):
f"{TESTS_FOLDER}/test_output",
"--seed",
"703425280",
"--model",
"empty",
# "--model",
# "empty",
"--outdir",
f"{TESTS_FOLDER}/test_output",
],

View File

@ -48,7 +48,7 @@ def test_clip_masking(filename_base_for_outputs):
assert_image_similar_to_expectation(pred_bin, img_path=img_path, threshold=10)
prompt = ImaginePrompt(
"",
"woman in sparkly gold jacket",
init_image=img,
init_image_strength=0.5,
# lower steps for faster tests
@ -58,7 +58,7 @@ def test_clip_masking(filename_base_for_outputs):
upscale=False,
fix_faces=True,
seed=42,
sampler_type="plms",
# sampler_type="plms",
)
result = next(imagine(prompt))

View File

@ -26,12 +26,12 @@ def create_model_of_n_bytes(n):
@pytest.mark.parametrize(
"model_version",
[
"SD-1.4",
# "SD-1.4",
"SD-1.5",
"SD-2.0",
"SD-2.0-v",
"SD-2.1",
"SD-2.1-v",
# "SD-2.0",
# "SD-2.0-v",
# "SD-2.1",
# "SD-2.1-v",
"openjourney-v1",
"openjourney-v2",
"openjourney-v4",

View File

@ -34,7 +34,7 @@ def test_get_device(monkeypatch):
get_device.cache_clear()
m_cuda_is_available.side_effect = lambda: False
m_mps_is_available.side_effect = lambda: True
assert get_device() == "mps:0"
assert get_device() == "mps"
get_device.cache_clear()
m_cuda_is_available.side_effect = lambda: False