Mirror of https://github.com/brycedrennan/imaginAIry (synced 2024-10-31 03:20:40 +00:00)

commit e8fe8d7d6c (parent 80ff006604)
feature: stable diffusion video (SVD)

Makefile (2 lines changed)

@@ -1,5 +1,5 @@
 SHELL := /bin/bash
-python_version = 3.10.10
+python_version = 3.10.13
 venv_prefix = imaginairy
 venv_name = $(venv_prefix)-$(python_version)
 pyenv_instructions=https://github.com/pyenv/pyenv#installation

assets/rocket-wide.png (new binary file, 482 KiB)
Binary file not shown.

@@ -11,6 +11,7 @@ from imaginairy.cli.imagine import imagine_cmd
 from imaginairy.cli.run_api import run_server_cmd
 from imaginairy.cli.train import prep_images_cmd, prune_ckpt_cmd, train_concept_cmd
 from imaginairy.cli.upscale import upscale_cmd
+from imaginairy.cli.videogen import videogen_cmd

 logger = logging.getLogger(__name__)

@@ -50,6 +51,7 @@ aimg.add_command(prune_ckpt_cmd, name="prune-ckpt")
 aimg.add_command(train_concept_cmd, name="train-concept")
 aimg.add_command(upscale_cmd, name="upscale")
 aimg.add_command(run_server_cmd, name="server")
+aimg.add_command(videogen_cmd, name="videogen")


 @aimg.command()

imaginairy/cli/videogen.py (new file, 92 lines)

@@ -0,0 +1,92 @@
import logging

import click

logger = logging.getLogger(__name__)


@click.command()
@click.option(
    "--start-image",
    default="other/images/sound-music.jpg",
    help="Input path for image file.",
)
@click.option("--num-frames", default=None, type=int, help="Number of frames.")
@click.option("--num-steps", default=None, type=int, help="Number of steps.")
@click.option(
    "--model",
    default="svd",
    help="Model to use. One of: svd, svd_xt, svd_image_decoder, svd_xt_image_decoder",
)
@click.option(
    "--fps", default=6, type=int, help="FPS for the AI to target when generating video"
)
@click.option("--output-fps", default=None, type=int, help="FPS for the output video")
@click.option(
    "--motion-amount",
    default=127,
    type=int,
    help="How much motion to generate. value between 0 and 255.",
)
@click.option(
    "-r",
    "--repeats",
    default=1,
    show_default=True,
    type=int,
    help="How many times to repeat the renders. ",
)
@click.option("--cond-aug", default=0.02, type=float, help="Conditional augmentation.")
@click.option(
    "--seed", default=None, type=int, help="Seed for random number generator."
)
@click.option(
    "--decoding_t", default=1, type=int, help="Number of frames decoded at a time."
)
@click.option("--device", default=None, help="Device to use.")
@click.option("--output_folder", default=None, help="Output folder.")
def videogen_cmd(
    start_image,
    num_frames,
    num_steps,
    model,
    fps,
    output_fps,
    motion_amount,
    repeats,
    cond_aug,
    seed,
    decoding_t,
    device,
    output_folder,
):
    """
    AI generate a video from an image

    Example:

    aimg videogen --start-image assets/rocket-wide.png

    """
    from imaginairy.log_utils import configure_logging
    from imaginairy.video_sample import generate_video

    configure_logging()

    output_fps = output_fps or fps
    for i in range(repeats):
        logger.info(f"Generating video from image {start_image}")
        generate_video(
            input_path=start_image,
            num_frames=num_frames,
            num_steps=num_steps,
            model_name=model,
            fps_id=fps,
            output_fps=output_fps,
            motion_bucket_id=motion_amount,
            cond_aug=cond_aug,
            seed=seed,
            decoding_t=decoding_t,
            device=device,
            output_folder=output_folder,
        )
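
Editor's note: for orientation, a minimal Python sketch of the code path the command above wraps. It only uses the keyword arguments that videogen_cmd passes to generate_video in this diff; how None defaults (frame/step counts, device, output folder) get resolved happens inside imaginairy.video_sample, which is not shown here, so treat the values as illustrative.

# Sketch only: call the function the CLI wraps, with the same keyword
# arguments used by videogen_cmd above. Nothing beyond those is implied.
from imaginairy.log_utils import configure_logging
from imaginairy.video_sample import generate_video

configure_logging()

generate_video(
    input_path="assets/rocket-wide.png",  # image added in this commit
    num_frames=None,       # None -> per-model default (14 for svd, 25 for svd_xt)
    num_steps=None,        # None -> per-model default (25 for svd, 30 for svd_xt)
    model_name="svd",      # or svd_xt / svd_image_decoder / svd_xt_image_decoder
    fps_id=6,              # FPS the model is asked to target
    output_fps=6,
    motion_bucket_id=127,  # 0-255; higher means more motion
    cond_aug=0.02,
    seed=None,
    decoding_t=1,          # frames decoded at a time
    device=None,
    output_folder=None,
)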

@@ -86,6 +86,43 @@ MODEL_CONFIGS = [
     ),
 ]

+
+video_models = [
+    {
+        "short_name": "svd",
+        "description": "Stable Video Diffusion",
+        "default_frames": 14,
+        "default_steps": 25,
+        "config_path": "configs/svd.yaml",
+        "weights_url": "https://huggingface.co/imaginairy/stable-video-diffusion/resolve/f9dce2757a0713da6262f35438050357c2be7ee6/svd.fp16.safetensors",
+    },
+    {
+        "short_name": "svd_image_decoder",
+        "description": "Stable Video Diffusion - Image Decoder",
+        "default_frames": 14,
+        "default_steps": 25,
+        "config_path": "configs/svd_image_decoder.yaml",
+        "weights_url": "https://huggingface.co/imaginairy/stable-video-diffusion/resolve/f9dce2757a0713da6262f35438050357c2be7ee6/svd_image_decoder.fp16.safetensors",
+    },
+    {
+        "short_name": "svd_xt",
+        "description": "Stable Video Diffusion - XT",
+        "default_frames": 25,
+        "default_steps": 30,
+        "config_path": "configs/svd_xt.yaml",
+        "weights_url": "https://huggingface.co/imaginairy/stable-video-diffusion/resolve/f9dce2757a0713da6262f35438050357c2be7ee6/svd_xt.fp16.safetensors",
+    },
+    {
+        "short_name": "svd_xt_image_decoder",
+        "description": "Stable Video Diffusion - XT - Image Decoder",
+        "default_frames": 25,
+        "default_steps": 30,
+        "config_path": "configs/svd_xt_image_decoder.yaml",
+        "weights_url": "https://huggingface.co/imaginairy/stable-video-diffusion/resolve/f9dce2757a0713da6262f35438050357c2be7ee6/svd_xt_image_decoder.fp16.safetensors",
+    },
+]
+video_models = {m["short_name"]: m for m in video_models}

 MODEL_CONFIG_SHORTCUTS = {m.short_name: m for m in MODEL_CONFIGS}
 for m in MODEL_CONFIGS:
     if m.alias:
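
Editor's note: the dict-of-dicts shape makes per-model defaults easy to resolve. The helper below is hypothetical (not part of this commit) and only reads the keys defined above.

# Hypothetical helper (not in this commit) showing how the video_models
# mapping above can resolve per-model defaults for the CLI.
def resolve_video_defaults(model_name="svd", num_frames=None, num_steps=None):
    try:
        model = video_models[model_name]  # keyed by short_name
    except KeyError:
        msg = f"Unknown video model: {model_name}"
        raise ValueError(msg) from None
    return {
        "config_path": model["config_path"],
        "weights_url": model["weights_url"],
        "num_frames": num_frames or model["default_frames"],
        "num_steps": num_steps or model["default_steps"],
    }

# resolve_video_defaults("svd_xt") -> configs/svd_xt.yaml, 25 frames, 30 steps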

imaginairy/configs/svd.yaml (new file, 146 lines)

@@ -0,0 +1,146 @@
model:
  target: imaginairy.modules.sgm.diffusion.DiffusionEngine
  params:
    scale_factor: 0.18215
    disable_first_stage_autocast: False

    denoiser_config:
      target: imaginairy.modules.sgm.diffusionmodules.denoiser.Denoiser
      params:
        scaling_config:
          target: imaginairy.modules.sgm.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise

    network_config:
      target: imaginairy.modules.sgm.diffusionmodules.video_model.VideoUNet
      params:
        adm_in_channels: 768
        num_classes: sequential
        use_checkpoint: False
        in_channels: 8
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2, 1]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_head_channels: 64
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        spatial_transformer_attn_type: softmax-xformers
        extra_ff_mix_layer: True
        use_spatial_context: True
        merge_strategy: learned_with_images
        video_kernel_size: [3, 1, 1]

    conditioner_config:
      target: imaginairy.modules.sgm.encoders.modules.GeneralConditioner
      params:
        emb_models:
        - is_trainable: False
          input_key: cond_frames_without_noise
          target: imaginairy.modules.sgm.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
          params:
            n_cond_frames: 1
            n_copies: 1
            open_clip_embedding_config:
              target: imaginairy.modules.sgm.encoders.modules.FrozenOpenCLIPImageEmbedder
              params:
                freeze: True

        - input_key: fps_id
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

        - input_key: motion_bucket_id
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

        - input_key: cond_frames
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.VideoPredictionEmbedderWithEncoder
          params:
            disable_encoder_autocast: False
            n_cond_frames: 1
            n_copies: 1
            is_ae: True
            encoder_config:
              target: imaginairy.modules.sgm.autoencoder.AutoencoderKLModeOnly
              params:
                embed_dim: 4
                monitor: val/rec_loss
                ddconfig:
                  attn_type: vanilla-xformers
                  double_z: True
                  z_channels: 4
                  resolution: 256
                  in_channels: 3
                  out_ch: 3
                  ch: 128
                  ch_mult: [1, 2, 4, 4]
                  num_res_blocks: 2
                  attn_resolutions: []
                  dropout: 0.0
                lossconfig:
                  target: torch.nn.Identity

        - input_key: cond_aug
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

    first_stage_config:
      target: imaginairy.modules.sgm.autoencoder.AutoencodingEngine
      params:
        loss_config:
          target: torch.nn.Identity
        regularizer_config:
          target: imaginairy.modules.sgm.autoencoding.regularizers.DiagonalGaussianRegularizer
        encoder_config:
          target: imaginairy.modules.sgm.diffusionmodules.model.Encoder
          params:
            attn_type: vanilla
            double_z: True
            z_channels: 4
            resolution: 256
            in_channels: 3
            out_ch: 3
            ch: 128
            ch_mult: [1, 2, 4, 4]
            num_res_blocks: 2
            attn_resolutions: []
            dropout: 0.0
        decoder_config:
          target: imaginairy.modules.sgm.autoencoding.temporal_ae.VideoDecoder
          params:
            attn_type: vanilla
            double_z: True
            z_channels: 4
            resolution: 256
            in_channels: 3
            out_ch: 3
            ch: 128
            ch_mult: [1, 2, 4, 4]
            num_res_blocks: 2
            attn_resolutions: []
            dropout: 0.0
            video_kernel_size: [3, 1, 1]

    sampler_config:
      target: imaginairy.modules.sgm.diffusionmodules.sampling.EulerEDMSampler
      params:
        discretization_config:
          target: imaginairy.modules.sgm.diffusionmodules.discretizer.EDMDiscretization
          params:
            sigma_max: 700.0

        guider_config:
          target: imaginairy.modules.sgm.diffusionmodules.guiders.LinearPredictionGuider
          params:
            max_scale: 2.5
            min_scale: 1.0

imaginairy/configs/svd_image_decoder.yaml (new file, 129 lines)

@@ -0,0 +1,129 @@
model:
  target: imaginairy.modules.sgm.diffusion.DiffusionEngine
  params:
    scale_factor: 0.18215
    disable_first_stage_autocast: False

    denoiser_config:
      target: imaginairy.modules.sgm.diffusionmodules.denoiser.Denoiser
      params:
        scaling_config:
          target: imaginairy.modules.sgm.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise

    network_config:
      target: imaginairy.modules.sgm.diffusionmodules.video_model.VideoUNet
      params:
        adm_in_channels: 768
        num_classes: sequential
        use_checkpoint: False
        in_channels: 8
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2, 1]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_head_channels: 64
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        spatial_transformer_attn_type: softmax-xformers
        extra_ff_mix_layer: True
        use_spatial_context: True
        merge_strategy: learned_with_images
        video_kernel_size: [3, 1, 1]

    conditioner_config:
      target: imaginairy.modules.sgm.encoders.modules.GeneralConditioner
      params:
        emb_models:
        - is_trainable: False
          input_key: cond_frames_without_noise
          target: imaginairy.modules.sgm.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
          params:
            n_cond_frames: 1
            n_copies: 1
            open_clip_embedding_config:
              target: imaginairy.modules.sgm.encoders.modules.FrozenOpenCLIPImageEmbedder
              params:
                freeze: True

        - input_key: fps_id
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

        - input_key: motion_bucket_id
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

        - input_key: cond_frames
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.VideoPredictionEmbedderWithEncoder
          params:
            disable_encoder_autocast: False
            n_cond_frames: 1
            n_copies: 1
            is_ae: True
            encoder_config:
              target: imaginairy.modules.sgm.autoencoder.AutoencoderKLModeOnly
              params:
                embed_dim: 4
                monitor: val/rec_loss
                ddconfig:
                  attn_type: vanilla-xformers
                  double_z: True
                  z_channels: 4
                  resolution: 256
                  in_channels: 3
                  out_ch: 3
                  ch: 128
                  ch_mult: [1, 2, 4, 4]
                  num_res_blocks: 2
                  attn_resolutions: []
                  dropout: 0.0
                lossconfig:
                  target: torch.nn.Identity

        - input_key: cond_aug
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

    first_stage_config:
      target: imaginairy.modules.sgm.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          attn_type: vanilla-xformers
          double_z: True
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    sampler_config:
      target: imaginairy.modules.sgm.diffusionmodules.sampling.EulerEDMSampler
      params:
        discretization_config:
          target: imaginairy.modules.sgm.diffusionmodules.discretizer.EDMDiscretization
          params:
            sigma_max: 700.0

        guider_config:
          target: imaginairy.modules.sgm.diffusionmodules.guiders.LinearPredictionGuider
          params:
            max_scale: 2.5
            min_scale: 1.0

imaginairy/configs/svd_xt.yaml (new file, 146 lines)

@@ -0,0 +1,146 @@
model:
  target: imaginairy.modules.sgm.diffusion.DiffusionEngine
  params:
    scale_factor: 0.18215
    disable_first_stage_autocast: False

    denoiser_config:
      target: imaginairy.modules.sgm.diffusionmodules.denoiser.Denoiser
      params:
        scaling_config:
          target: imaginairy.modules.sgm.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise

    network_config:
      target: imaginairy.modules.sgm.diffusionmodules.video_model.VideoUNet
      params:
        adm_in_channels: 768
        num_classes: sequential
        use_checkpoint: False
        in_channels: 8
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2, 1]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_head_channels: 64
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        spatial_transformer_attn_type: softmax-xformers
        extra_ff_mix_layer: True
        use_spatial_context: True
        merge_strategy: learned_with_images
        video_kernel_size: [3, 1, 1]

    conditioner_config:
      target: imaginairy.modules.sgm.encoders.modules.GeneralConditioner
      params:
        emb_models:
        - is_trainable: False
          input_key: cond_frames_without_noise
          target: imaginairy.modules.sgm.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
          params:
            n_cond_frames: 1
            n_copies: 1
            open_clip_embedding_config:
              target: imaginairy.modules.sgm.encoders.modules.FrozenOpenCLIPImageEmbedder
              params:
                freeze: True

        - input_key: fps_id
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

        - input_key: motion_bucket_id
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

        - input_key: cond_frames
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.VideoPredictionEmbedderWithEncoder
          params:
            disable_encoder_autocast: False
            n_cond_frames: 1
            n_copies: 1
            is_ae: True
            encoder_config:
              target: imaginairy.modules.sgm.autoencoder.AutoencoderKLModeOnly
              params:
                embed_dim: 4
                monitor: val/rec_loss
                ddconfig:
                  attn_type: vanilla-xformers
                  double_z: True
                  z_channels: 4
                  resolution: 256
                  in_channels: 3
                  out_ch: 3
                  ch: 128
                  ch_mult: [1, 2, 4, 4]
                  num_res_blocks: 2
                  attn_resolutions: []
                  dropout: 0.0
                lossconfig:
                  target: torch.nn.Identity

        - input_key: cond_aug
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

    first_stage_config:
      target: imaginairy.modules.sgm.autoencoder.AutoencodingEngine
      params:
        loss_config:
          target: torch.nn.Identity
        regularizer_config:
          target: imaginairy.modules.sgm.autoencoding.regularizers.DiagonalGaussianRegularizer
        encoder_config:
          target: imaginairy.modules.sgm.diffusionmodules.model.Encoder
          params:
            attn_type: vanilla
            double_z: True
            z_channels: 4
            resolution: 256
            in_channels: 3
            out_ch: 3
            ch: 128
            ch_mult: [1, 2, 4, 4]
            num_res_blocks: 2
            attn_resolutions: []
            dropout: 0.0
        decoder_config:
          target: imaginairy.modules.sgm.autoencoding.temporal_ae.VideoDecoder
          params:
            attn_type: vanilla
            double_z: True
            z_channels: 4
            resolution: 256
            in_channels: 3
            out_ch: 3
            ch: 128
            ch_mult: [1, 2, 4, 4]
            num_res_blocks: 2
            attn_resolutions: []
            dropout: 0.0
            video_kernel_size: [3, 1, 1]

    sampler_config:
      target: imaginairy.modules.sgm.diffusionmodules.sampling.EulerEDMSampler
      params:
        discretization_config:
          target: imaginairy.modules.sgm.diffusionmodules.discretizer.EDMDiscretization
          params:
            sigma_max: 700.0

        guider_config:
          target: imaginairy.modules.sgm.diffusionmodules.guiders.LinearPredictionGuider
          params:
            max_scale: 3.0
            min_scale: 1.5

imaginairy/configs/svd_xt_image_decoder.yaml (new file, 129 lines)

@@ -0,0 +1,129 @@
model:
  target: imaginairy.modules.sgm.diffusion.DiffusionEngine
  params:
    scale_factor: 0.18215
    disable_first_stage_autocast: False

    denoiser_config:
      target: imaginairy.modules.sgm.diffusionmodules.denoiser.Denoiser
      params:
        scaling_config:
          target: imaginairy.modules.sgm.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise

    network_config:
      target: imaginairy.modules.sgm.diffusionmodules.video_model.VideoUNet
      params:
        adm_in_channels: 768
        num_classes: sequential
        use_checkpoint: False
        in_channels: 8
        out_channels: 4
        model_channels: 320
        attention_resolutions: [4, 2, 1]
        num_res_blocks: 2
        channel_mult: [1, 2, 4, 4]
        num_head_channels: 64
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        spatial_transformer_attn_type: softmax-xformers
        extra_ff_mix_layer: True
        use_spatial_context: True
        merge_strategy: learned_with_images
        video_kernel_size: [3, 1, 1]

    conditioner_config:
      target: imaginairy.modules.sgm.encoders.modules.GeneralConditioner
      params:
        emb_models:
        - is_trainable: False
          input_key: cond_frames_without_noise
          target: imaginairy.modules.sgm.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
          params:
            n_cond_frames: 1
            n_copies: 1
            open_clip_embedding_config:
              target: imaginairy.modules.sgm.encoders.modules.FrozenOpenCLIPImageEmbedder
              params:
                freeze: True

        - input_key: fps_id
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

        - input_key: motion_bucket_id
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

        - input_key: cond_frames
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.VideoPredictionEmbedderWithEncoder
          params:
            disable_encoder_autocast: False
            n_cond_frames: 1
            n_copies: 1
            is_ae: True
            encoder_config:
              target: imaginairy.modules.sgm.autoencoder.AutoencoderKLModeOnly
              params:
                embed_dim: 4
                monitor: val/rec_loss
                ddconfig:
                  attn_type: vanilla-xformers
                  double_z: True
                  z_channels: 4
                  resolution: 256
                  in_channels: 3
                  out_ch: 3
                  ch: 128
                  ch_mult: [1, 2, 4, 4]
                  num_res_blocks: 2
                  attn_resolutions: []
                  dropout: 0.0
                lossconfig:
                  target: torch.nn.Identity

        - input_key: cond_aug
          is_trainable: False
          target: imaginairy.modules.sgm.encoders.modules.ConcatTimestepEmbedderND
          params:
            outdim: 256

    first_stage_config:
      target: imaginairy.modules.sgm.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          attn_type: vanilla-xformers
          double_z: True
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult: [1, 2, 4, 4]
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    sampler_config:
      target: imaginairy.modules.sgm.diffusionmodules.sampling.EulerEDMSampler
      params:
        discretization_config:
          target: imaginairy.modules.sgm.diffusionmodules.discretizer.EDMDiscretization
          params:
            sigma_max: 700.0

        guider_config:
          target: imaginairy.modules.sgm.diffusionmodules.guiders.LinearPredictionGuider
          params:
            max_scale: 3.0
            min_scale: 1.5

imaginairy/modules/sgm/__init__.py (new empty file)

imaginairy/modules/sgm/attention.py (new file, 758 lines)
@ -0,0 +1,758 @@
|
||||
import logging
|
||||
import math
|
||||
from inspect import isfunction
|
||||
from typing import Any, Optional
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
from einops import rearrange, repeat
|
||||
from packaging import version
|
||||
from torch import nn
|
||||
from torch.utils.checkpoint import checkpoint
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if version.parse(torch.__version__) >= version.parse("2.0.0"):
|
||||
SDP_IS_AVAILABLE = True
|
||||
from torch.backends.cuda import SDPBackend, sdp_kernel
|
||||
|
||||
BACKEND_MAP = {
|
||||
SDPBackend.MATH: {
|
||||
"enable_math": True,
|
||||
"enable_flash": False,
|
||||
"enable_mem_efficient": False,
|
||||
},
|
||||
SDPBackend.FLASH_ATTENTION: {
|
||||
"enable_math": False,
|
||||
"enable_flash": True,
|
||||
"enable_mem_efficient": False,
|
||||
},
|
||||
SDPBackend.EFFICIENT_ATTENTION: {
|
||||
"enable_math": False,
|
||||
"enable_flash": False,
|
||||
"enable_mem_efficient": True,
|
||||
},
|
||||
None: {"enable_math": True, "enable_flash": True, "enable_mem_efficient": True},
|
||||
}
|
||||
else:
|
||||
from contextlib import nullcontext
|
||||
|
||||
SDP_IS_AVAILABLE = False
|
||||
sdp_kernel = nullcontext
|
||||
BACKEND_MAP = {}
|
||||
logger.warning(
|
||||
f"No SDP backend available, likely because you are running in pytorch "
|
||||
f"versions < 2.0. In fact, you are using PyTorch {torch.__version__}. "
|
||||
f"You might want to consider upgrading."
|
||||
)
|
||||
|
||||
try:
|
||||
import xformers
|
||||
import xformers.ops
|
||||
|
||||
XFORMERS_IS_AVAILABLE = True
|
||||
except ImportError:
|
||||
XFORMERS_IS_AVAILABLE = False
|
||||
logger.debug("no module 'xformers'. Processing without...")
|
||||
|
||||
# from .diffusionmodules.util import mixed_checkpoint as checkpoint
|
||||
|
||||
|
||||
def exists(val):
|
||||
return val is not None
|
||||
|
||||
|
||||
def uniq(arr):
|
||||
return {el: True for el in arr}.keys()
|
||||
|
||||
|
||||
def default(val, d):
|
||||
if exists(val):
|
||||
return val
|
||||
return d() if isfunction(d) else d
|
||||
|
||||
|
||||
def max_neg_value(t):
|
||||
return -torch.finfo(t.dtype).max
|
||||
|
||||
|
||||
def init_(tensor):
|
||||
dim = tensor.shape[-1]
|
||||
std = 1 / math.sqrt(dim)
|
||||
tensor.uniform_(-std, std)
|
||||
return tensor
|
||||
|
||||
|
||||
# feedforward
|
||||
class GEGLU(nn.Module):
|
||||
def __init__(self, dim_in, dim_out):
|
||||
super().__init__()
|
||||
self.proj = nn.Linear(dim_in, dim_out * 2)
|
||||
|
||||
def forward(self, x):
|
||||
x, gate = self.proj(x).chunk(2, dim=-1)
|
||||
return x * F.gelu(gate)
|
||||
|
||||
|
||||
class FeedForward(nn.Module):
|
||||
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
|
||||
super().__init__()
|
||||
inner_dim = int(dim * mult)
|
||||
dim_out = default(dim_out, dim)
|
||||
project_in = (
|
||||
nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
|
||||
if not glu
|
||||
else GEGLU(dim, inner_dim)
|
||||
)
|
||||
|
||||
self.net = nn.Sequential(
|
||||
project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
return self.net(x)
|
||||
|
||||
|
||||
def zero_module(module):
|
||||
"""
|
||||
Zero out the parameters of a module and return it.
|
||||
"""
|
||||
for p in module.parameters():
|
||||
p.detach().zero_()
|
||||
return module
|
||||
|
||||
|
||||
def Normalize(in_channels):
|
||||
return torch.nn.GroupNorm(
|
||||
num_groups=32, num_channels=in_channels, eps=1e-6, affine=True
|
||||
)
|
||||
|
||||
|
||||
class LinearAttention(nn.Module):
|
||||
def __init__(self, dim, heads=4, dim_head=32):
|
||||
super().__init__()
|
||||
self.heads = heads
|
||||
hidden_dim = dim_head * heads
|
||||
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
|
||||
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
|
||||
|
||||
def forward(self, x):
|
||||
b, c, h, w = x.shape
|
||||
qkv = self.to_qkv(x)
|
||||
q, k, v = rearrange(
|
||||
qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3
|
||||
)
|
||||
k = k.softmax(dim=-1)
|
||||
context = torch.einsum("bhdn,bhen->bhde", k, v)
|
||||
out = torch.einsum("bhde,bhdn->bhen", context, q)
|
||||
out = rearrange(
|
||||
out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w
|
||||
)
|
||||
return self.to_out(out)
|
||||
|
||||
|
||||
class SelfAttention(nn.Module):
|
||||
ATTENTION_MODES = ("xformers", "torch", "math")
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
num_heads: int = 8,
|
||||
qkv_bias: bool = False,
|
||||
qk_scale: Optional[float] = None,
|
||||
attn_drop: float = 0.0,
|
||||
proj_drop: float = 0.0,
|
||||
attn_mode: str = "xformers",
|
||||
):
|
||||
super().__init__()
|
||||
self.num_heads = num_heads
|
||||
head_dim = dim // num_heads
|
||||
self.scale = qk_scale or head_dim**-0.5
|
||||
|
||||
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
||||
self.attn_drop = nn.Dropout(attn_drop)
|
||||
self.proj = nn.Linear(dim, dim)
|
||||
self.proj_drop = nn.Dropout(proj_drop)
|
||||
assert attn_mode in self.ATTENTION_MODES
|
||||
self.attn_mode = attn_mode
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
B, L, C = x.shape
|
||||
|
||||
qkv = self.qkv(x)
|
||||
if self.attn_mode == "torch":
|
||||
qkv = rearrange(
|
||||
qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads
|
||||
).float()
|
||||
q, k, v = qkv[0], qkv[1], qkv[2] # B H L D
|
||||
x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
|
||||
x = rearrange(x, "B H L D -> B L (H D)")
|
||||
elif self.attn_mode == "xformers":
|
||||
qkv = rearrange(qkv, "B L (K H D) -> K B L H D", K=3, H=self.num_heads)
|
||||
q, k, v = qkv[0], qkv[1], qkv[2] # B L H D
|
||||
x = xformers.ops.memory_efficient_attention(q, k, v)
|
||||
x = rearrange(x, "B L H D -> B L (H D)", H=self.num_heads)
|
||||
elif self.attn_mode == "math":
|
||||
qkv = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
|
||||
q, k, v = qkv[0], qkv[1], qkv[2] # B H L D
|
||||
attn = (q @ k.transpose(-2, -1)) * self.scale
|
||||
attn = attn.softmax(dim=-1)
|
||||
attn = self.attn_drop(attn)
|
||||
x = (attn @ v).transpose(1, 2).reshape(B, L, C)
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
||||
x = self.proj(x)
|
||||
x = self.proj_drop(x)
|
||||
return x
|
||||
|
||||
|
||||
class SpatialSelfAttention(nn.Module):
|
||||
def __init__(self, in_channels):
|
||||
super().__init__()
|
||||
self.in_channels = in_channels
|
||||
|
||||
self.norm = Normalize(in_channels)
|
||||
self.q = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
self.k = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
self.v = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
self.proj_out = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
h_ = x
|
||||
h_ = self.norm(h_)
|
||||
q = self.q(h_)
|
||||
k = self.k(h_)
|
||||
v = self.v(h_)
|
||||
|
||||
# compute attention
|
||||
b, c, h, w = q.shape
|
||||
q = rearrange(q, "b c h w -> b (h w) c")
|
||||
k = rearrange(k, "b c h w -> b c (h w)")
|
||||
w_ = torch.einsum("bij,bjk->bik", q, k)
|
||||
|
||||
w_ = w_ * (int(c) ** (-0.5))
|
||||
w_ = torch.nn.functional.softmax(w_, dim=2)
|
||||
|
||||
# attend to values
|
||||
v = rearrange(v, "b c h w -> b c (h w)")
|
||||
w_ = rearrange(w_, "b i j -> b j i")
|
||||
h_ = torch.einsum("bij,bjk->bik", v, w_)
|
||||
h_ = rearrange(h_, "b c (h w) -> b c h w", h=h)
|
||||
h_ = self.proj_out(h_)
|
||||
|
||||
return x + h_
|
||||
|
||||
|
||||
class CrossAttention(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
query_dim,
|
||||
context_dim=None,
|
||||
heads=8,
|
||||
dim_head=64,
|
||||
dropout=0.0,
|
||||
backend=None,
|
||||
):
|
||||
super().__init__()
|
||||
inner_dim = dim_head * heads
|
||||
context_dim = default(context_dim, query_dim)
|
||||
|
||||
self.scale = dim_head**-0.5
|
||||
self.heads = heads
|
||||
|
||||
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
|
||||
self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
|
||||
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
|
||||
|
||||
self.to_out = nn.Sequential(
|
||||
nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
|
||||
)
|
||||
self.backend = backend
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x,
|
||||
context=None,
|
||||
mask=None,
|
||||
additional_tokens=None,
|
||||
n_times_crossframe_attn_in_self=0,
|
||||
):
|
||||
h = self.heads
|
||||
|
||||
if additional_tokens is not None:
|
||||
# get the number of masked tokens at the beginning of the output sequence
|
||||
n_tokens_to_mask = additional_tokens.shape[1]
|
||||
# add additional token
|
||||
x = torch.cat([additional_tokens, x], dim=1)
|
||||
|
||||
q = self.to_q(x)
|
||||
context = default(context, x)
|
||||
k = self.to_k(context)
|
||||
v = self.to_v(context)
|
||||
|
||||
if n_times_crossframe_attn_in_self:
|
||||
# reprogramming cross-frame attention as in https://arxiv.org/abs/2303.13439
|
||||
assert x.shape[0] % n_times_crossframe_attn_in_self == 0
|
||||
n_cp = x.shape[0] // n_times_crossframe_attn_in_self
|
||||
k = repeat(
|
||||
k[::n_times_crossframe_attn_in_self], "b ... -> (b n) ...", n=n_cp
|
||||
)
|
||||
v = repeat(
|
||||
v[::n_times_crossframe_attn_in_self], "b ... -> (b n) ...", n=n_cp
|
||||
)
|
||||
|
||||
q, k, v = (rearrange(t, "b n (h d) -> b h n d", h=h) for t in (q, k, v))
|
||||
|
||||
## old
|
||||
"""
|
||||
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
|
||||
del q, k
|
||||
|
||||
if exists(mask):
|
||||
mask = rearrange(mask, 'b ... -> b (...)')
|
||||
max_neg_value = -torch.finfo(sim.dtype).max
|
||||
mask = repeat(mask, 'b j -> (b h) () j', h=h)
|
||||
sim.masked_fill_(~mask, max_neg_value)
|
||||
|
||||
# attention, what we cannot get enough of
|
||||
sim = sim.softmax(dim=-1)
|
||||
|
||||
out = einsum('b i j, b j d -> b i d', sim, v)
|
||||
"""
|
||||
## new
|
||||
with sdp_kernel(**BACKEND_MAP[self.backend]):
|
||||
# print("dispatching into backend", self.backend, "q/k/v shape: ", q.shape, k.shape, v.shape)
|
||||
out = F.scaled_dot_product_attention(
|
||||
q, k, v, attn_mask=mask
|
||||
) # scale is dim_head ** -0.5 per default
|
||||
|
||||
del q, k, v
|
||||
out = rearrange(out, "b h n d -> b n (h d)", h=h)
|
||||
|
||||
if additional_tokens is not None:
|
||||
# remove additional token
|
||||
out = out[:, n_tokens_to_mask:]
|
||||
return self.to_out(out)
|
||||
|
||||
|
||||
class MemoryEfficientCrossAttention(nn.Module):
|
||||
# https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
|
||||
def __init__(
|
||||
self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, **kwargs
|
||||
):
|
||||
super().__init__()
|
||||
logger.debug(
|
||||
f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, "
|
||||
f"context_dim is {context_dim} and using {heads} heads with a "
|
||||
f"dimension of {dim_head}."
|
||||
)
|
||||
inner_dim = dim_head * heads
|
||||
context_dim = default(context_dim, query_dim)
|
||||
|
||||
self.heads = heads
|
||||
self.dim_head = dim_head
|
||||
|
||||
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
|
||||
self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
|
||||
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
|
||||
|
||||
self.to_out = nn.Sequential(
|
||||
nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
|
||||
)
|
||||
self.attention_op: Optional[Any] = None
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x,
|
||||
context=None,
|
||||
mask=None,
|
||||
additional_tokens=None,
|
||||
n_times_crossframe_attn_in_self=0,
|
||||
):
|
||||
if additional_tokens is not None:
|
||||
# get the number of masked tokens at the beginning of the output sequence
|
||||
n_tokens_to_mask = additional_tokens.shape[1]
|
||||
# add additional token
|
||||
x = torch.cat([additional_tokens, x], dim=1)
|
||||
q = self.to_q(x)
|
||||
context = default(context, x)
|
||||
k = self.to_k(context)
|
||||
v = self.to_v(context)
|
||||
|
||||
if n_times_crossframe_attn_in_self:
|
||||
# reprogramming cross-frame attention as in https://arxiv.org/abs/2303.13439
|
||||
assert x.shape[0] % n_times_crossframe_attn_in_self == 0
|
||||
# n_cp = x.shape[0]//n_times_crossframe_attn_in_self
|
||||
k = repeat(
|
||||
k[::n_times_crossframe_attn_in_self],
|
||||
"b ... -> (b n) ...",
|
||||
n=n_times_crossframe_attn_in_self,
|
||||
)
|
||||
v = repeat(
|
||||
v[::n_times_crossframe_attn_in_self],
|
||||
"b ... -> (b n) ...",
|
||||
n=n_times_crossframe_attn_in_self,
|
||||
)
|
||||
|
||||
b, _, _ = q.shape
|
||||
q, k, v = (
|
||||
t.unsqueeze(3)
|
||||
.reshape(b, t.shape[1], self.heads, self.dim_head)
|
||||
.permute(0, 2, 1, 3)
|
||||
.reshape(b * self.heads, t.shape[1], self.dim_head)
|
||||
.contiguous()
|
||||
for t in (q, k, v)
|
||||
)
|
||||
|
||||
# actually compute the attention, what we cannot get enough of
|
||||
if version.parse(xformers.__version__) >= version.parse("0.0.21"):
|
||||
# NOTE: workaround for
|
||||
# https://github.com/facebookresearch/xformers/issues/845
|
||||
max_bs = 32768
|
||||
N = q.shape[0]
|
||||
n_batches = math.ceil(N / max_bs)
|
||||
out = []
|
||||
for i_batch in range(n_batches):
|
||||
batch = slice(i_batch * max_bs, (i_batch + 1) * max_bs)
|
||||
out.append(
|
||||
xformers.ops.memory_efficient_attention(
|
||||
q[batch],
|
||||
k[batch],
|
||||
v[batch],
|
||||
attn_bias=None,
|
||||
op=self.attention_op,
|
||||
)
|
||||
)
|
||||
out = torch.cat(out, 0)
|
||||
else:
|
||||
out = xformers.ops.memory_efficient_attention(
|
||||
q, k, v, attn_bias=None, op=self.attention_op
|
||||
)
|
||||
|
||||
# TODO: Use this directly in the attention operation, as a bias
|
||||
if exists(mask):
|
||||
raise NotImplementedError
|
||||
out = (
|
||||
out.unsqueeze(0)
|
||||
.reshape(b, self.heads, out.shape[1], self.dim_head)
|
||||
.permute(0, 2, 1, 3)
|
||||
.reshape(b, out.shape[1], self.heads * self.dim_head)
|
||||
)
|
||||
if additional_tokens is not None:
|
||||
# remove additional token
|
||||
out = out[:, n_tokens_to_mask:]
|
||||
return self.to_out(out)
|
||||
|
||||
|
||||
class BasicTransformerBlock(nn.Module):
|
||||
ATTENTION_MODES = {
|
||||
"softmax": CrossAttention, # vanilla attention
|
||||
"softmax-xformers": MemoryEfficientCrossAttention, # ampere
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim,
|
||||
n_heads,
|
||||
d_head,
|
||||
dropout=0.0,
|
||||
context_dim=None,
|
||||
gated_ff=True,
|
||||
checkpoint=True,
|
||||
disable_self_attn=False,
|
||||
attn_mode="softmax",
|
||||
sdp_backend=None,
|
||||
):
|
||||
super().__init__()
|
||||
assert attn_mode in self.ATTENTION_MODES
|
||||
if attn_mode != "softmax" and not XFORMERS_IS_AVAILABLE:
|
||||
logger.debug(
|
||||
f"Attention mode '{attn_mode}' is not available. Falling "
|
||||
f"back to native attention. This is not a problem in "
|
||||
f"Pytorch >= 2.0. FYI, you are running with PyTorch "
|
||||
f"version {torch.__version__}."
|
||||
)
|
||||
attn_mode = "softmax"
|
||||
elif attn_mode == "softmax" and not SDP_IS_AVAILABLE:
|
||||
logger.warning(
|
||||
"We do not support vanilla attention anymore, as it is too "
|
||||
"expensive. Sorry."
|
||||
)
|
||||
if not XFORMERS_IS_AVAILABLE:
|
||||
msg = "Please install xformers via e.g. 'pip install xformers==0.0.16'"
|
||||
raise RuntimeError(msg)
|
||||
else:
|
||||
logger.info("Falling back to xformers efficient attention.")
|
||||
attn_mode = "softmax-xformers"
|
||||
attn_cls = self.ATTENTION_MODES[attn_mode]
|
||||
if version.parse(torch.__version__) >= version.parse("2.0.0"):
|
||||
assert sdp_backend is None or isinstance(sdp_backend, SDPBackend)
|
||||
else:
|
||||
assert sdp_backend is None
|
||||
self.disable_self_attn = disable_self_attn
|
||||
self.attn1 = attn_cls(
|
||||
query_dim=dim,
|
||||
heads=n_heads,
|
||||
dim_head=d_head,
|
||||
dropout=dropout,
|
||||
context_dim=context_dim if self.disable_self_attn else None,
|
||||
backend=sdp_backend,
|
||||
) # is a self-attention if not self.disable_self_attn
|
||||
self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
|
||||
self.attn2 = attn_cls(
|
||||
query_dim=dim,
|
||||
context_dim=context_dim,
|
||||
heads=n_heads,
|
||||
dim_head=d_head,
|
||||
dropout=dropout,
|
||||
backend=sdp_backend,
|
||||
) # is self-attn if context is none
|
||||
self.norm1 = nn.LayerNorm(dim)
|
||||
self.norm2 = nn.LayerNorm(dim)
|
||||
self.norm3 = nn.LayerNorm(dim)
|
||||
self.checkpoint = checkpoint
|
||||
if self.checkpoint:
|
||||
logger.debug(f"{self.__class__.__name__} is using checkpointing")
|
||||
|
||||
def forward(
|
||||
self, x, context=None, additional_tokens=None, n_times_crossframe_attn_in_self=0
|
||||
):
|
||||
kwargs = {"x": x}
|
||||
|
||||
if context is not None:
|
||||
kwargs.update({"context": context})
|
||||
|
||||
if additional_tokens is not None:
|
||||
kwargs.update({"additional_tokens": additional_tokens})
|
||||
|
||||
if n_times_crossframe_attn_in_self:
|
||||
kwargs.update(
|
||||
{"n_times_crossframe_attn_in_self": n_times_crossframe_attn_in_self}
|
||||
)
|
||||
|
||||
# return mixed_checkpoint(self._forward, kwargs, self.parameters(), self.checkpoint)
|
||||
if self.checkpoint:
|
||||
# inputs = {"x": x, "context": context}
|
||||
return checkpoint(self._forward, x, context)
|
||||
# return checkpoint(self._forward, inputs, self.parameters(), self.checkpoint)
|
||||
else:
|
||||
return self._forward(**kwargs)
|
||||
|
||||
def _forward(
|
||||
self, x, context=None, additional_tokens=None, n_times_crossframe_attn_in_self=0
|
||||
):
|
||||
x = (
|
||||
self.attn1(
|
||||
self.norm1(x),
|
||||
context=context if self.disable_self_attn else None,
|
||||
additional_tokens=additional_tokens,
|
||||
n_times_crossframe_attn_in_self=n_times_crossframe_attn_in_self
|
||||
if not self.disable_self_attn
|
||||
else 0,
|
||||
)
|
||||
+ x
|
||||
)
|
||||
x = (
|
||||
self.attn2(
|
||||
self.norm2(x), context=context, additional_tokens=additional_tokens
|
||||
)
|
||||
+ x
|
||||
)
|
||||
x = self.ff(self.norm3(x)) + x
|
||||
return x
|
||||
|
||||
|
||||
class BasicTransformerSingleLayerBlock(nn.Module):
|
||||
ATTENTION_MODES = {
|
||||
"softmax": CrossAttention, # vanilla attention
|
||||
"softmax-xformers": MemoryEfficientCrossAttention # on the A100s not quite as fast as the above version
|
||||
# (todo might depend on head_dim, check, falls back to semi-optimized kernels for dim!=[16,32,64,128])
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim,
|
||||
n_heads,
|
||||
d_head,
|
||||
dropout=0.0,
|
||||
context_dim=None,
|
||||
gated_ff=True,
|
||||
checkpoint=True,
|
||||
attn_mode="softmax",
|
||||
):
|
||||
super().__init__()
|
||||
assert attn_mode in self.ATTENTION_MODES
|
||||
attn_cls = self.ATTENTION_MODES[attn_mode]
|
||||
self.attn1 = attn_cls(
|
||||
query_dim=dim,
|
||||
heads=n_heads,
|
||||
dim_head=d_head,
|
||||
dropout=dropout,
|
||||
context_dim=context_dim,
|
||||
)
|
||||
self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
|
||||
self.norm1 = nn.LayerNorm(dim)
|
||||
self.norm2 = nn.LayerNorm(dim)
|
||||
self.checkpoint = checkpoint
|
||||
|
||||
def forward(self, x, context=None):
|
||||
# inputs = {"x": x, "context": context}
|
||||
# return checkpoint(self._forward, inputs, self.parameters(), self.checkpoint)
|
||||
return checkpoint(self._forward, x, context)
|
||||
|
||||
def _forward(self, x, context=None):
|
||||
x = self.attn1(self.norm1(x), context=context) + x
|
||||
x = self.ff(self.norm2(x)) + x
|
||||
return x
|
||||
|
||||
|
||||
class SpatialTransformer(nn.Module):
|
||||
"""
|
||||
Transformer block for image-like data.
|
||||
First, project the input (aka embedding)
|
||||
and reshape to b, t, d.
|
||||
Then apply standard transformer action.
|
||||
Finally, reshape to image
|
||||
NEW: use_linear for more efficiency instead of the 1x1 convs
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
in_channels,
|
||||
n_heads,
|
||||
d_head,
|
||||
depth=1,
|
||||
dropout=0.0,
|
||||
context_dim=None,
|
||||
disable_self_attn=False,
|
||||
use_linear=False,
|
||||
attn_type="softmax",
|
||||
use_checkpoint=True,
|
||||
# sdp_backend=SDPBackend.FLASH_ATTENTION
|
||||
sdp_backend=None,
|
||||
):
|
||||
super().__init__()
|
||||
logger.debug(
|
||||
f"constructing {self.__class__.__name__} of depth {depth} w/ "
|
||||
f"{in_channels} channels and {n_heads} heads."
|
||||
)
|
||||
|
||||
if exists(context_dim) and not isinstance(context_dim, list):
|
||||
context_dim = [context_dim]
|
||||
if exists(context_dim) and isinstance(context_dim, list):
|
||||
if depth != len(context_dim):
|
||||
logger.warning(
|
||||
f"{self.__class__.__name__}: Found context dims "
|
||||
f"{context_dim} of depth {len(context_dim)}, which does not "
|
||||
f"match the specified 'depth' of {depth}. Setting context_dim "
|
||||
f"to {depth * [context_dim[0]]} now."
|
||||
)
|
||||
# depth does not match context dims.
|
||||
assert all(
|
||||
x == context_dim[0] for x in context_dim
|
||||
), "need homogenous context_dim to match depth automatically"
|
||||
context_dim = depth * [context_dim[0]]
|
||||
elif context_dim is None:
|
||||
context_dim = [None] * depth
|
||||
self.in_channels = in_channels
|
||||
inner_dim = n_heads * d_head
|
||||
self.norm = Normalize(in_channels)
|
||||
if not use_linear:
|
||||
self.proj_in = nn.Conv2d(
|
||||
in_channels, inner_dim, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
else:
|
||||
self.proj_in = nn.Linear(in_channels, inner_dim)
|
||||
|
||||
self.transformer_blocks = nn.ModuleList(
|
||||
[
|
||||
BasicTransformerBlock(
|
||||
inner_dim,
|
||||
n_heads,
|
||||
d_head,
|
||||
dropout=dropout,
|
||||
context_dim=context_dim[d],
|
||||
disable_self_attn=disable_self_attn,
|
||||
attn_mode=attn_type,
|
||||
checkpoint=use_checkpoint,
|
||||
sdp_backend=sdp_backend,
|
||||
)
|
||||
for d in range(depth)
|
||||
]
|
||||
)
|
||||
if not use_linear:
|
||||
self.proj_out = zero_module(
|
||||
nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
|
||||
)
|
||||
else:
|
||||
# self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))
|
||||
self.proj_out = zero_module(nn.Linear(inner_dim, in_channels))
|
||||
self.use_linear = use_linear
|
||||
|
||||
def forward(self, x, context=None):
|
||||
# note: if no context is given, cross-attention defaults to self-attention
|
||||
if not isinstance(context, list):
|
||||
context = [context]
|
||||
b, c, h, w = x.shape
|
||||
x_in = x
|
||||
x = self.norm(x)
|
||||
if not self.use_linear:
|
||||
x = self.proj_in(x)
|
||||
x = rearrange(x, "b c h w -> b (h w) c").contiguous()
|
||||
if self.use_linear:
|
||||
x = self.proj_in(x)
|
||||
for i, block in enumerate(self.transformer_blocks):
|
||||
if i > 0 and len(context) == 1:
|
||||
i = 0 # use same context for each block
|
||||
x = block(x, context=context[i])
|
||||
if self.use_linear:
|
||||
x = self.proj_out(x)
|
||||
x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w).contiguous()
|
||||
if not self.use_linear:
|
||||
x = self.proj_out(x)
|
||||
return x + x_in
|
||||
|
||||
|
||||
class SimpleTransformer(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
depth: int,
|
||||
heads: int,
|
||||
dim_head: int,
|
||||
context_dim: Optional[int] = None,
|
||||
dropout: float = 0.0,
|
||||
checkpoint: bool = True,
|
||||
):
|
||||
super().__init__()
|
||||
self.layers = nn.ModuleList([])
|
||||
for _ in range(depth):
|
||||
self.layers.append(
|
||||
BasicTransformerBlock(
|
||||
dim,
|
||||
heads,
|
||||
dim_head,
|
||||
dropout=dropout,
|
||||
context_dim=context_dim,
|
||||
attn_mode="softmax-xformers",
|
||||
checkpoint=checkpoint,
|
||||
)
|
||||
)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
context: Optional[torch.Tensor] = None,
|
||||
) -> torch.Tensor:
|
||||
for layer in self.layers:
|
||||
x = layer(x, context)
|
||||
return x
|
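
Editor's note: to make the new attention module above concrete, here is a small, self-contained sketch that runs SpatialTransformer on a dummy feature map, using parameter values taken from the SVD configs (320 channels, 64-dim heads, context_dim 1024). It assumes PyTorch >= 2.0 or an installed xformers so that one of the attention backends the module selects from is available; it is illustrative only, not code from this commit.

# Illustrative only: exercise SpatialTransformer with SVD-like settings.
import torch

from imaginairy.modules.sgm.attention import SpatialTransformer

block = SpatialTransformer(
    in_channels=320,    # model_channels in the SVD configs
    n_heads=5,          # 320 channels / 64 per head
    d_head=64,          # num_head_channels in the configs
    depth=1,
    context_dim=1024,   # CLIP image-embedding width used for conditioning
    use_linear=True,
    attn_type="softmax-xformers",  # module falls back to native SDP attention if xformers is absent
    use_checkpoint=False,
)

x = torch.randn(1, 320, 32, 32)    # (batch, channels, height, width)
context = torch.randn(1, 1, 1024)  # conditioning tokens
out = block(x, context=context)    # residual output, same shape as x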

imaginairy/modules/sgm/autoencoder.py (new file, 627 lines)
@ -0,0 +1,627 @@
|
||||
import logging
|
||||
import math
|
||||
import re
|
||||
from abc import abstractmethod
|
||||
from contextlib import contextmanager
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
|
||||
|
||||
import pytorch_lightning as pl
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from einops import rearrange
|
||||
from packaging import version
|
||||
|
||||
from imaginairy.modules.ema import LitEma
|
||||
from imaginairy.utils import (
|
||||
default,
|
||||
get_nested_attribute,
|
||||
get_obj_from_str,
|
||||
instantiate_from_config,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .autoencoding.regularizers import AbstractRegularizer
|
||||
|
||||
|
||||
# from .ema import LitEma
|
||||
# from .util import (default, get_nested_attribute, get_obj_from_str,
|
||||
# instantiate_from_config)
|
||||
|
||||
logpy = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AbstractAutoencoder(pl.LightningModule):
|
||||
"""
|
||||
This is the base class for all autoencoders, including image autoencoders, image autoencoders with discriminators,
|
||||
unCLIP models, etc. Hence, it is fairly general, and specific features
|
||||
(e.g. discriminator training, encoding, decoding) must be implemented in subclasses.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
ema_decay: Union[None, float] = None,
|
||||
monitor: Union[None, str] = None,
|
||||
input_key: str = "jpg",
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
self.input_key = input_key
|
||||
self.use_ema = ema_decay is not None
|
||||
if monitor is not None:
|
||||
self.monitor = monitor
|
||||
|
||||
if self.use_ema:
|
||||
self.model_ema = LitEma(self, decay=ema_decay)
|
||||
logpy.info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
|
||||
|
||||
if version.parse(torch.__version__) >= version.parse("2.0.0"):
|
||||
self.automatic_optimization = False
|
||||
|
||||
def apply_ckpt(self, ckpt: Union[None, str, dict]):
|
||||
if ckpt is None:
|
||||
return
|
||||
if isinstance(ckpt, str):
|
||||
ckpt = {
|
||||
"target": "imaginairy.modules.sgm.checkpoint.CheckpointEngine",
|
||||
"params": {"ckpt_path": ckpt},
|
||||
}
|
||||
engine = instantiate_from_config(ckpt)
|
||||
engine(self)
|
||||
|
||||
@abstractmethod
|
||||
def get_input(self, batch) -> Any:
|
||||
raise NotImplementedError()
|
||||
|
||||
def on_train_batch_end(self, *args, **kwargs):
|
||||
# for EMA computation
|
||||
if self.use_ema:
|
||||
self.model_ema(self)
|
||||
|
||||
@contextmanager
|
||||
def ema_scope(self, context=None):
|
||||
if self.use_ema:
|
||||
self.model_ema.store(self.parameters())
|
||||
self.model_ema.copy_to(self)
|
||||
if context is not None:
|
||||
logpy.info(f"{context}: Switched to EMA weights")
|
||||
try:
|
||||
yield None
|
||||
finally:
|
||||
if self.use_ema:
|
||||
self.model_ema.restore(self.parameters())
|
||||
if context is not None:
|
||||
logpy.info(f"{context}: Restored training weights")
|
||||
|
||||
@abstractmethod
|
||||
def encode(self, *args, **kwargs) -> torch.Tensor:
|
||||
raise NotImplementedError("encode()-method of abstract base class called")
|
||||
|
||||
@abstractmethod
|
||||
def decode(self, *args, **kwargs) -> torch.Tensor:
|
||||
raise NotImplementedError("decode()-method of abstract base class called")
|
||||
|
||||
def instantiate_optimizer_from_config(self, params, lr, cfg):
|
||||
logpy.info(f"loading >>> {cfg['target']} <<< optimizer from config")
|
||||
return get_obj_from_str(cfg["target"])(params, lr=lr, **cfg.get("params", {}))
|
||||
|
||||
def configure_optimizers(self) -> Any:
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class AutoencodingEngine(AbstractAutoencoder):
|
||||
"""
|
||||
Base class for all image autoencoders that we train, like VQGAN or AutoencoderKL
|
||||
(we also restore them explicitly as special cases for legacy reasons).
|
||||
Regularizations such as KL or VQ are moved to the regularizer class.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*args,
|
||||
encoder_config: Dict,
|
||||
decoder_config: Dict,
|
||||
loss_config: Dict,
|
||||
regularizer_config: Dict,
|
||||
optimizer_config: Union[Dict, None] = None,
|
||||
lr_g_factor: float = 1.0,
|
||||
trainable_ae_params: Optional[List[List[str]]] = None,
|
||||
ae_optimizer_args: Optional[List[dict]] = None,
|
||||
trainable_disc_params: Optional[List[List[str]]] = None,
|
||||
disc_optimizer_args: Optional[List[dict]] = None,
|
||||
disc_start_iter: int = 0,
|
||||
diff_boost_factor: float = 3.0,
|
||||
ckpt_engine: Union[None, str, dict] = None,
|
||||
ckpt_path: Optional[str] = None,
|
||||
additional_decode_keys: Optional[List[str]] = None,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.automatic_optimization = False # pytorch lightning
|
||||
|
||||
self.encoder: torch.nn.Module = instantiate_from_config(encoder_config)
|
||||
self.decoder: torch.nn.Module = instantiate_from_config(decoder_config)
|
||||
self.loss: torch.nn.Module = instantiate_from_config(loss_config)
|
||||
self.regularization: AbstractRegularizer = instantiate_from_config(
|
||||
regularizer_config
|
||||
)
|
||||
self.optimizer_config = default(
|
||||
optimizer_config, {"target": "torch.optim.Adam"}
|
||||
)
|
||||
self.diff_boost_factor = diff_boost_factor
|
||||
self.disc_start_iter = disc_start_iter
|
||||
self.lr_g_factor = lr_g_factor
|
||||
self.trainable_ae_params = trainable_ae_params
|
||||
if self.trainable_ae_params is not None:
|
||||
self.ae_optimizer_args = default(
|
||||
ae_optimizer_args,
|
||||
[{} for _ in range(len(self.trainable_ae_params))],
|
||||
)
|
||||
assert len(self.ae_optimizer_args) == len(self.trainable_ae_params)
|
||||
else:
|
||||
self.ae_optimizer_args = [{}] # makes type consitent
|
||||
|
||||
self.trainable_disc_params = trainable_disc_params
|
||||
if self.trainable_disc_params is not None:
|
||||
self.disc_optimizer_args = default(
|
||||
disc_optimizer_args,
|
||||
[{} for _ in range(len(self.trainable_disc_params))],
|
||||
)
|
||||
assert len(self.disc_optimizer_args) == len(self.trainable_disc_params)
|
||||
else:
|
||||
self.disc_optimizer_args = [{}] # makes type consitent
|
||||
|
||||
if ckpt_path is not None:
|
||||
assert ckpt_engine is None, "Can't set ckpt_engine and ckpt_path"
|
||||
logpy.warning(
|
||||
"Checkpoint path is deprecated, use `checkpoint_egnine` instead"
|
||||
)
|
||||
self.apply_ckpt(default(ckpt_path, ckpt_engine))
|
||||
self.additional_decode_keys = set(default(additional_decode_keys, []))
|
||||
|
||||
def get_input(self, batch: Dict) -> torch.Tensor:
|
||||
# assuming unified data format, dataloader returns a dict.
|
||||
# image tensors should be scaled to -1 ... 1 and in channels-first
|
||||
# format (e.g., bchw instead if bhwc)
|
||||
return batch[self.input_key]
|
||||
|
||||
def get_autoencoder_params(self) -> list:
|
||||
params = []
|
||||
if hasattr(self.loss, "get_trainable_autoencoder_parameters"):
|
||||
params += list(self.loss.get_trainable_autoencoder_parameters())
|
||||
if hasattr(self.regularization, "get_trainable_parameters"):
|
||||
params += list(self.regularization.get_trainable_parameters())
|
||||
params = params + list(self.encoder.parameters())
|
||||
params = params + list(self.decoder.parameters())
|
||||
return params
|
||||
|
||||
def get_discriminator_params(self) -> list:
|
||||
if hasattr(self.loss, "get_trainable_parameters"):
|
||||
params = list(self.loss.get_trainable_parameters()) # e.g., discriminator
|
||||
else:
|
||||
params = []
|
||||
return params
|
||||
|
||||
def get_last_layer(self):
|
||||
return self.decoder.get_last_layer()
|
||||
|
||||
def encode(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
return_reg_log: bool = False,
|
||||
unregularized: bool = False,
|
||||
) -> Union[torch.Tensor, Tuple[torch.Tensor, dict]]:
|
||||
z = self.encoder(x)
|
||||
if unregularized:
|
||||
return z, {}
|
||||
z, reg_log = self.regularization(z)
|
||||
if return_reg_log:
|
||||
return z, reg_log
|
||||
return z
|
||||
|
||||
def decode(self, z: torch.Tensor, **kwargs) -> torch.Tensor:
|
||||
x = self.decoder(z, **kwargs)
|
||||
return x
|
||||
|
||||
def forward(
|
||||
self, x: torch.Tensor, **additional_decode_kwargs
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, dict]:
|
||||
z, reg_log = self.encode(x, return_reg_log=True)
|
||||
dec = self.decode(z, **additional_decode_kwargs)
|
||||
return z, dec, reg_log
|
||||
|
||||
def inner_training_step(
|
||||
self, batch: dict, batch_idx: int, optimizer_idx: int = 0
|
||||
) -> torch.Tensor:
|
||||
x = self.get_input(batch)
|
||||
additional_decode_kwargs = {
|
||||
key: batch[key] for key in self.additional_decode_keys.intersection(batch)
|
||||
}
|
||||
z, xrec, regularization_log = self(x, **additional_decode_kwargs)
|
||||
if hasattr(self.loss, "forward_keys"):
|
||||
extra_info = {
|
||||
"z": z,
|
||||
"optimizer_idx": optimizer_idx,
|
||||
"global_step": self.global_step,
|
||||
"last_layer": self.get_last_layer(),
|
||||
"split": "train",
|
||||
"regularization_log": regularization_log,
|
||||
"autoencoder": self,
|
||||
}
|
||||
extra_info = {k: extra_info[k] for k in self.loss.forward_keys}
|
||||
else:
|
||||
extra_info = {}
|
||||
|
||||
if optimizer_idx == 0:
|
||||
# autoencode
|
||||
out_loss = self.loss(x, xrec, **extra_info)
|
||||
if isinstance(out_loss, tuple):
|
||||
aeloss, log_dict_ae = out_loss
|
||||
else:
|
||||
# simple loss function
|
||||
aeloss = out_loss
|
||||
log_dict_ae = {"train/loss/rec": aeloss.detach()}
|
||||
|
||||
self.log_dict(
|
||||
log_dict_ae,
|
||||
prog_bar=False,
|
||||
logger=True,
|
||||
on_step=True,
|
||||
on_epoch=True,
|
||||
sync_dist=False,
|
||||
)
|
||||
self.log(
|
||||
"loss",
|
||||
aeloss.mean().detach(),
|
||||
prog_bar=True,
|
||||
logger=False,
|
||||
on_epoch=False,
|
||||
on_step=True,
|
||||
)
|
||||
return aeloss
|
||||
elif optimizer_idx == 1:
|
||||
# discriminator
|
||||
discloss, log_dict_disc = self.loss(x, xrec, **extra_info)
|
||||
# -> discriminator always needs to return a tuple
|
||||
self.log_dict(
|
||||
log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True
|
||||
)
|
||||
return discloss
|
||||
else:
|
||||
msg = f"Unknown optimizer {optimizer_idx}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
def training_step(self, batch: dict, batch_idx: int):
|
||||
opts = self.optimizers()
|
||||
if not isinstance(opts, list):
|
||||
# Non-adversarial case
|
||||
opts = [opts]
|
||||
optimizer_idx = batch_idx % len(opts)
|
||||
if self.global_step < self.disc_start_iter:
|
||||
optimizer_idx = 0
|
||||
opt = opts[optimizer_idx]
|
||||
opt.zero_grad()
|
||||
with opt.toggle_model():
|
||||
loss = self.inner_training_step(
|
||||
batch, batch_idx, optimizer_idx=optimizer_idx
|
||||
)
|
||||
self.manual_backward(loss)
|
||||
opt.step()
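A minimal sketch (editorial illustration, not part of the module) of the alternation logic in `training_step`: with manual optimization the optimizer index cycles with `batch_idx`, but the discriminator optimizer is only engaged once `global_step` reaches `disc_start_iter`.

def pick_optimizer_idx(batch_idx, num_optimizers, global_step, disc_start_iter):
    idx = batch_idx % num_optimizers
    return 0 if global_step < disc_start_iter else idx

assert pick_optimizer_idx(3, 2, global_step=10, disc_start_iter=100) == 0   # warm-up: autoencoder only
assert pick_optimizer_idx(3, 2, global_step=200, disc_start_iter=100) == 1  # afterwards: alternate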
|
||||
|
||||
def validation_step(self, batch: dict, batch_idx: int) -> Dict:
|
||||
log_dict = self._validation_step(batch, batch_idx)
|
||||
with self.ema_scope():
|
||||
log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
|
||||
log_dict.update(log_dict_ema)
|
||||
return log_dict
|
||||
|
||||
def _validation_step(self, batch: dict, batch_idx: int, postfix: str = "") -> Dict:
|
||||
x = self.get_input(batch)
|
||||
|
||||
z, xrec, regularization_log = self(x)
|
||||
if hasattr(self.loss, "forward_keys"):
|
||||
extra_info = {
|
||||
"z": z,
|
||||
"optimizer_idx": 0,
|
||||
"global_step": self.global_step,
|
||||
"last_layer": self.get_last_layer(),
|
||||
"split": "val" + postfix,
|
||||
"regularization_log": regularization_log,
|
||||
"autoencoder": self,
|
||||
}
|
||||
extra_info = {k: extra_info[k] for k in self.loss.forward_keys}
|
||||
else:
|
||||
extra_info = {}
|
||||
out_loss = self.loss(x, xrec, **extra_info)
|
||||
if isinstance(out_loss, tuple):
|
||||
aeloss, log_dict_ae = out_loss
|
||||
else:
|
||||
# simple loss function
|
||||
aeloss = out_loss
|
||||
log_dict_ae = {f"val{postfix}/loss/rec": aeloss.detach()}
|
||||
full_log_dict = log_dict_ae
|
||||
|
||||
if "optimizer_idx" in extra_info:
|
||||
extra_info["optimizer_idx"] = 1
|
||||
discloss, log_dict_disc = self.loss(x, xrec, **extra_info)
|
||||
full_log_dict.update(log_dict_disc)
|
||||
self.log(
|
||||
f"val{postfix}/loss/rec",
|
||||
log_dict_ae[f"val{postfix}/loss/rec"],
|
||||
sync_dist=True,
|
||||
)
|
||||
self.log_dict(full_log_dict, sync_dist=True)
|
||||
return full_log_dict
|
||||
|
||||
def get_param_groups(
|
||||
self, parameter_names: List[List[str]], optimizer_args: List[dict]
|
||||
) -> Tuple[List[Dict[str, Any]], int]:
|
||||
groups = []
|
||||
num_params = 0
|
||||
for names, args in zip(parameter_names, optimizer_args):
|
||||
params = []
|
||||
for pattern_ in names:
|
||||
pattern_params = []
|
||||
pattern = re.compile(pattern_)
|
||||
for p_name, param in self.named_parameters():
|
||||
if re.match(pattern, p_name):
|
||||
pattern_params.append(param)
|
||||
num_params += param.numel()
|
||||
if len(pattern_params) == 0:
|
||||
logpy.warning(f"Did not find parameters for pattern {pattern_}")
|
||||
params.extend(pattern_params)
|
||||
groups.append({"params": params, **args})
|
||||
return groups, num_params
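A hypothetical configuration sketch for `get_param_groups`: each inner list of regex patterns selects named parameters for one optimizer group, and the matching entry of the optimizer-args list supplies that group's settings. The patterns and learning rates below are made up for illustration.

trainable_ae_params = [
    [r"encoder\..*"],                   # group 0: every encoder weight
    [r"decoder\..*", r"quant_conv.*"],  # group 1: decoder plus quant conv
]
ae_optimizer_args = [{"lr": 1e-4}, {"lr": 5e-5}]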
|
||||
|
||||
def configure_optimizers(self) -> List[torch.optim.Optimizer]:
|
||||
if self.trainable_ae_params is None:
|
||||
ae_params = self.get_autoencoder_params()
|
||||
else:
|
||||
ae_params, num_ae_params = self.get_param_groups(
|
||||
self.trainable_ae_params, self.ae_optimizer_args
|
||||
)
|
||||
logpy.info(f"Number of trainable autoencoder parameters: {num_ae_params:,}")
|
||||
if self.trainable_disc_params is None:
|
||||
disc_params = self.get_discriminator_params()
|
||||
else:
|
||||
disc_params, num_disc_params = self.get_param_groups(
|
||||
self.trainable_disc_params, self.disc_optimizer_args
|
||||
)
|
||||
logpy.info(
|
||||
f"Number of trainable discriminator parameters: {num_disc_params:,}"
|
||||
)
|
||||
opt_ae = self.instantiate_optimizer_from_config(
|
||||
ae_params,
|
||||
default(self.lr_g_factor, 1.0) * self.learning_rate,
|
||||
self.optimizer_config,
|
||||
)
|
||||
opts = [opt_ae]
|
||||
if len(disc_params) > 0:
|
||||
opt_disc = self.instantiate_optimizer_from_config(
|
||||
disc_params, self.learning_rate, self.optimizer_config
|
||||
)
|
||||
opts.append(opt_disc)
|
||||
|
||||
return opts
|
||||
|
||||
@torch.no_grad()
|
||||
def log_images(
|
||||
self, batch: dict, additional_log_kwargs: Optional[Dict] = None, **kwargs
|
||||
) -> dict:
|
||||
log = {}
|
||||
additional_decode_kwargs = {}
|
||||
x = self.get_input(batch)
|
||||
additional_decode_kwargs.update(
|
||||
{key: batch[key] for key in self.additional_decode_keys.intersection(batch)}
|
||||
)
|
||||
|
||||
_, xrec, _ = self(x, **additional_decode_kwargs)
|
||||
log["inputs"] = x
|
||||
log["reconstructions"] = xrec
|
||||
diff = 0.5 * torch.abs(torch.clamp(xrec, -1.0, 1.0) - x)
|
||||
diff.clamp_(0, 1.0)
|
||||
log["diff"] = 2.0 * diff - 1.0
|
||||
# diff_boost shows location of small errors, by boosting their
|
||||
# brightness.
|
||||
log["diff_boost"] = (
|
||||
2.0 * torch.clamp(self.diff_boost_factor * diff, 0.0, 1.0) - 1
|
||||
)
|
||||
if hasattr(self.loss, "log_images"):
|
||||
log.update(self.loss.log_images(x, xrec))
|
||||
with self.ema_scope():
|
||||
_, xrec_ema, _ = self(x, **additional_decode_kwargs)
|
||||
log["reconstructions_ema"] = xrec_ema
|
||||
diff_ema = 0.5 * torch.abs(torch.clamp(xrec_ema, -1.0, 1.0) - x)
|
||||
diff_ema.clamp_(0, 1.0)
|
||||
log["diff_ema"] = 2.0 * diff_ema - 1.0
|
||||
log["diff_boost_ema"] = (
|
||||
2.0 * torch.clamp(self.diff_boost_factor * diff_ema, 0.0, 1.0) - 1
|
||||
)
|
||||
if additional_log_kwargs:
|
||||
additional_decode_kwargs.update(additional_log_kwargs)
|
||||
_, xrec_add, _ = self(x, **additional_decode_kwargs)
|
||||
log_str = "reconstructions-" + "-".join(
|
||||
[f"{key}={additional_log_kwargs[key]}" for key in additional_log_kwargs]
|
||||
)
|
||||
log[log_str] = xrec_add
|
||||
return log
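A small numeric sketch (assuming a `diff_boost_factor` of 3.0) of the `diff` and `diff_boost` images logged above: small reconstruction errors are amplified before being mapped back to the [-1, 1] display range, so they stay visible.

import torch

x = torch.zeros(1, 3, 4, 4)
xrec = 0.1 * torch.ones(1, 3, 4, 4)
diff = (0.5 * torch.abs(xrec.clamp(-1, 1) - x)).clamp(0, 1)       # 0.05 everywhere
diff_img = 2.0 * diff - 1.0                                       # -0.9: barely visible
diff_boost = 2.0 * torch.clamp(3.0 * diff, 0.0, 1.0) - 1          # -0.7 with the 3x boost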
|
||||
|
||||
|
||||
class AutoencodingEngineLegacy(AutoencodingEngine):
|
||||
def __init__(self, embed_dim: int, **kwargs):
|
||||
self.max_batch_size = kwargs.pop("max_batch_size", None)
|
||||
ddconfig = kwargs.pop("ddconfig")
|
||||
ckpt_path = kwargs.pop("ckpt_path", None)
|
||||
ckpt_engine = kwargs.pop("ckpt_engine", None)
|
||||
super().__init__(
|
||||
encoder_config={
|
||||
"target": "imaginairy.modules.sgm.diffusionmodules.model.Encoder",
|
||||
"params": ddconfig,
|
||||
},
|
||||
decoder_config={
|
||||
"target": "imaginairy.modules.sgm.diffusionmodules.model.Decoder",
|
||||
"params": ddconfig,
|
||||
},
|
||||
**kwargs,
|
||||
)
|
||||
self.quant_conv = torch.nn.Conv2d(
|
||||
(1 + ddconfig["double_z"]) * ddconfig["z_channels"],
|
||||
(1 + ddconfig["double_z"]) * embed_dim,
|
||||
1,
|
||||
)
|
||||
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
|
||||
self.embed_dim = embed_dim
|
||||
|
||||
self.apply_ckpt(default(ckpt_path, ckpt_engine))
|
||||
|
||||
def get_autoencoder_params(self) -> list:
|
||||
params = super().get_autoencoder_params()
|
||||
return params
|
||||
|
||||
def encode(
|
||||
self, x: torch.Tensor, return_reg_log: bool = False
|
||||
) -> Union[torch.Tensor, Tuple[torch.Tensor, dict]]:
|
||||
if self.max_batch_size is None:
|
||||
z = self.encoder(x)
|
||||
z = self.quant_conv(z)
|
||||
else:
|
||||
N = x.shape[0]
|
||||
bs = self.max_batch_size
|
||||
n_batches = int(math.ceil(N / bs))
|
||||
z = []
|
||||
for i_batch in range(n_batches):
|
||||
z_batch = self.encoder(x[i_batch * bs : (i_batch + 1) * bs])
|
||||
z_batch = self.quant_conv(z_batch)
|
||||
z.append(z_batch)
|
||||
z = torch.cat(z, 0)
|
||||
|
||||
z, reg_log = self.regularization(z)
|
||||
if return_reg_log:
|
||||
return z, reg_log
|
||||
return z
|
||||
|
||||
def decode(self, z: torch.Tensor, **decoder_kwargs) -> torch.Tensor:
|
||||
if self.max_batch_size is None:
|
||||
dec = self.post_quant_conv(z)
|
||||
dec = self.decoder(dec, **decoder_kwargs)
|
||||
else:
|
||||
N = z.shape[0]
|
||||
bs = self.max_batch_size
|
||||
n_batches = int(math.ceil(N / bs))
|
||||
dec = []
|
||||
for i_batch in range(n_batches):
|
||||
dec_batch = self.post_quant_conv(z[i_batch * bs : (i_batch + 1) * bs])
|
||||
dec_batch = self.decoder(dec_batch, **decoder_kwargs)
|
||||
dec.append(dec_batch)
|
||||
dec = torch.cat(dec, 0)
|
||||
|
||||
return dec
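The micro-batching above is equivalent to chunking along the batch dimension; a quick sketch of the same split using `torch.split` (illustrative only, the module keeps the explicit index arithmetic):

import torch

x = torch.randn(10, 3, 64, 64)
chunks = torch.split(x, 4, dim=0)                      # sizes 4, 4, 2 when max_batch_size == 4
assert sum(c.shape[0] for c in chunks) == x.shape[0]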
|
||||
|
||||
|
||||
class AutoencoderKL(AutoencodingEngineLegacy):
|
||||
def __init__(self, **kwargs):
|
||||
if "lossconfig" in kwargs:
|
||||
kwargs["loss_config"] = kwargs.pop("lossconfig")
|
||||
super().__init__(
|
||||
regularizer_config={
|
||||
"target": (
|
||||
"imaginairy.modules.sgm.autoencoding.regularizers"
|
||||
".DiagonalGaussianRegularizer"
|
||||
)
|
||||
},
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
class AutoencoderLegacyVQ(AutoencodingEngineLegacy):
|
||||
def __init__(
|
||||
self,
|
||||
embed_dim: int,
|
||||
n_embed: int,
|
||||
sane_index_shape: bool = False,
|
||||
**kwargs,
|
||||
):
|
||||
if "lossconfig" in kwargs:
|
||||
logpy.warning("Parameter `lossconfig` is deprecated, use `loss_config`.")
|
||||
kwargs["loss_config"] = kwargs.pop("lossconfig")
|
||||
super().__init__(
|
||||
regularizer_config={
|
||||
"target": (
|
||||
"imaginairy.modules.sgm.autoencoding.regularizers.quantize"
|
||||
".VectorQuantizer"
|
||||
),
|
||||
"params": {
|
||||
"n_e": n_embed,
|
||||
"e_dim": embed_dim,
|
||||
"sane_index_shape": sane_index_shape,
|
||||
},
|
||||
},
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
class IdentityFirstStage(AbstractAutoencoder):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def get_input(self, x: Any) -> Any:
|
||||
return x
|
||||
|
||||
def encode(self, x: Any, *args, **kwargs) -> Any:
|
||||
return x
|
||||
|
||||
def decode(self, x: Any, *args, **kwargs) -> Any:
|
||||
return x
|
||||
|
||||
|
||||
class AEIntegerWrapper(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
model: nn.Module,
|
||||
shape: Union[None, Tuple[int, int], List[int]] = (16, 16),
|
||||
regularization_key: str = "regularization",
|
||||
encoder_kwargs: Optional[Dict[str, Any]] = None,
|
||||
):
|
||||
super().__init__()
|
||||
self.model = model
|
||||
if not hasattr(model, "encode") or not hasattr(model, "decode"):
|
||||
raise RuntimeError("Need AE interface")
|
||||
self.regularization = get_nested_attribute(model, regularization_key)
|
||||
self.shape = shape
|
||||
self.encoder_kwargs = default(encoder_kwargs, {"return_reg_log": True})
|
||||
|
||||
def encode(self, x) -> torch.Tensor:
|
||||
assert (
|
||||
not self.training
|
||||
), f"{self.__class__.__name__} only supports inference currently"
|
||||
_, log = self.model.encode(x, **self.encoder_kwargs)
|
||||
assert isinstance(log, dict)
|
||||
inds = log["min_encoding_indices"]
|
||||
return rearrange(inds, "b ... -> b (...)")
|
||||
|
||||
def decode(
|
||||
self, inds: torch.Tensor, shape: Union[None, tuple, list] = None
|
||||
) -> torch.Tensor:
|
||||
# expect inds shape (b, s) with s = h*w
|
||||
shape = default(shape, self.shape) # Optional[(h, w)]
|
||||
if shape is not None:
|
||||
assert len(shape) == 2, f"Unhandled shape {shape}"
|
||||
inds = rearrange(inds, "b (h w) -> b h w", h=shape[0], w=shape[1])
|
||||
h = self.regularization.get_codebook_entry(inds) # (b, h, w, c)
|
||||
h = rearrange(h, "b h w c -> b c h w")
|
||||
return self.model.decode(h)
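A shape sketch of the index handling in `AEIntegerWrapper` (the codebook size 1024 is an arbitrary example): `encode` flattens the spatial grid of indices and `decode` restores it before the codebook lookup.

import torch
from einops import rearrange

inds = torch.randint(0, 1024, (2, 16 * 16))              # (b, h*w) flat codebook indices
grid = rearrange(inds, "b (h w) -> b h w", h=16, w=16)   # back to a (b, h, w) grid
assert grid.shape == (2, 16, 16)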
|
||||
|
||||
|
||||
class AutoencoderKLModeOnly(AutoencodingEngineLegacy):
|
||||
def __init__(self, **kwargs):
|
||||
if "lossconfig" in kwargs:
|
||||
kwargs["loss_config"] = kwargs.pop("lossconfig")
|
||||
super().__init__(
|
||||
regularizer_config={
|
||||
"target": (
|
||||
"imaginairy.modules.sgm.autoencoding.regularizers"
|
||||
".DiagonalGaussianRegularizer"
|
||||
),
|
||||
"params": {"sample": False},
|
||||
},
|
||||
**kwargs,
|
||||
)
|
0
imaginairy/modules/sgm/autoencoding/__init__.py
Normal file
0
imaginairy/modules/sgm/autoencoding/__init__.py
Normal file
7
imaginairy/modules/sgm/autoencoding/losses/__init__.py
Normal file
7
imaginairy/modules/sgm/autoencoding/losses/__init__.py
Normal file
@ -0,0 +1,7 @@
__all__ = [
    "GeneralLPIPSWithDiscriminator",
    "LatentLPIPS",
]

from .discriminator_loss import GeneralLPIPSWithDiscriminator
from .lpips import LatentLPIPS
309
imaginairy/modules/sgm/autoencoding/losses/discriminator_loss.py
Normal file
309
imaginairy/modules/sgm/autoencoding/losses/discriminator_loss.py
Normal file
@ -0,0 +1,309 @@
|
||||
from typing import Dict, Iterator, List, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torchvision
|
||||
from einops import rearrange
|
||||
from matplotlib import colormaps, pyplot as plt
|
||||
|
||||
from imaginairy.modules.sgm.autoencoding.lpips.loss.lpips import LPIPS
|
||||
from imaginairy.modules.sgm.autoencoding.lpips.model.model import weights_init
|
||||
from imaginairy.modules.sgm.autoencoding.lpips.vqperceptual import (
|
||||
hinge_d_loss,
|
||||
vanilla_d_loss,
|
||||
)
|
||||
from imaginairy.modules.util import default, instantiate_from_config
|
||||
|
||||
|
||||
class GeneralLPIPSWithDiscriminator(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
disc_start: int,
|
||||
logvar_init: float = 0.0,
|
||||
disc_num_layers: int = 3,
|
||||
disc_in_channels: int = 3,
|
||||
disc_factor: float = 1.0,
|
||||
disc_weight: float = 1.0,
|
||||
perceptual_weight: float = 1.0,
|
||||
disc_loss: str = "hinge",
|
||||
scale_input_to_tgt_size: bool = False,
|
||||
dims: int = 2,
|
||||
learn_logvar: bool = False,
|
||||
regularization_weights: Union[None, Dict[str, float]] = None,
|
||||
additional_log_keys: Optional[List[str]] = None,
|
||||
discriminator_config: Optional[Dict] = None,
|
||||
):
|
||||
super().__init__()
|
||||
self.dims = dims
|
||||
if self.dims > 2:
|
||||
print(
|
||||
f"running with dims={dims}. This means that for perceptual loss "
|
||||
f"calculation, the LPIPS loss will be applied to each frame "
|
||||
f"independently."
|
||||
)
|
||||
self.scale_input_to_tgt_size = scale_input_to_tgt_size
|
||||
assert disc_loss in ["hinge", "vanilla"]
|
||||
self.perceptual_loss = LPIPS().eval()
|
||||
self.perceptual_weight = perceptual_weight
|
||||
# output log variance
|
||||
self.logvar = nn.Parameter(
|
||||
torch.full((), logvar_init), requires_grad=learn_logvar
|
||||
)
|
||||
self.learn_logvar = learn_logvar
|
||||
|
||||
discriminator_config = default(
|
||||
discriminator_config,
|
||||
{
|
||||
"target": "sgm.modules.autoencoding.lpips.model.model.NLayerDiscriminator",
|
||||
"params": {
|
||||
"input_nc": disc_in_channels,
|
||||
"n_layers": disc_num_layers,
|
||||
"use_actnorm": False,
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
self.discriminator = instantiate_from_config(discriminator_config).apply(
|
||||
weights_init
|
||||
)
|
||||
self.discriminator_iter_start = disc_start
|
||||
self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
|
||||
self.disc_factor = disc_factor
|
||||
self.discriminator_weight = disc_weight
|
||||
self.regularization_weights = default(regularization_weights, {})
|
||||
|
||||
self.forward_keys = [
|
||||
"optimizer_idx",
|
||||
"global_step",
|
||||
"last_layer",
|
||||
"split",
|
||||
"regularization_log",
|
||||
]
|
||||
|
||||
self.additional_log_keys = set(default(additional_log_keys, []))
|
||||
self.additional_log_keys.update(set(self.regularization_weights.keys()))
|
||||
|
||||
def get_trainable_parameters(self) -> Iterator[nn.Parameter]:
|
||||
return self.discriminator.parameters()
|
||||
|
||||
def get_trainable_autoencoder_parameters(self) -> Iterator[nn.Parameter]:
|
||||
if self.learn_logvar:
|
||||
yield self.logvar
|
||||
yield from ()
|
||||
|
||||
@torch.no_grad()
|
||||
def log_images(
|
||||
self, inputs: torch.Tensor, reconstructions: torch.Tensor
|
||||
) -> Dict[str, torch.Tensor]:
|
||||
# calc logits of real/fake
|
||||
logits_real = self.discriminator(inputs.contiguous().detach())
|
||||
if len(logits_real.shape) < 4:
|
||||
# Non patch-discriminator
|
||||
return {}
|
||||
logits_fake = self.discriminator(reconstructions.contiguous().detach())
|
||||
# -> (b, 1, h, w)
|
||||
|
||||
# parameters for colormapping
|
||||
high = max(logits_fake.abs().max(), logits_real.abs().max()).item()
|
||||
cmap = colormaps["PiYG"] # diverging colormap
|
||||
|
||||
def to_colormap(logits: torch.Tensor) -> torch.Tensor:
|
||||
"""(b, 1, ...) -> (b, 3, ...)"""
|
||||
logits = (logits + high) / (2 * high)
|
||||
logits_np = cmap(logits.cpu().numpy())[..., :3] # truncate alpha channel
|
||||
# -> (b, 1, ..., 3)
|
||||
logits = torch.from_numpy(logits_np).to(logits.device)
|
||||
return rearrange(logits, "b 1 ... c -> b c ...")
|
||||
|
||||
logits_real = torch.nn.functional.interpolate(
|
||||
logits_real,
|
||||
size=inputs.shape[-2:],
|
||||
mode="nearest",
|
||||
antialias=False,
|
||||
)
|
||||
logits_fake = torch.nn.functional.interpolate(
|
||||
logits_fake,
|
||||
size=reconstructions.shape[-2:],
|
||||
mode="nearest",
|
||||
antialias=False,
|
||||
)
|
||||
|
||||
# alpha value of logits for overlay
|
||||
alpha_real = torch.abs(logits_real) / high
|
||||
alpha_fake = torch.abs(logits_fake) / high
|
||||
# -> (b, 1, h, w) in range [0, 0.5]
|
||||
# alpha values of the lines don't really matter, since the values are the same
|
||||
# for both images and logits anyway
|
||||
grid_alpha_real = torchvision.utils.make_grid(alpha_real, nrow=4)
|
||||
grid_alpha_fake = torchvision.utils.make_grid(alpha_fake, nrow=4)
|
||||
grid_alpha = 0.8 * torch.cat((grid_alpha_real, grid_alpha_fake), dim=1)
|
||||
# -> (1, h, w)
|
||||
# blend logits and images together
|
||||
|
||||
# prepare logits for plotting
|
||||
logits_real = to_colormap(logits_real)
|
||||
logits_fake = to_colormap(logits_fake)
|
||||
# resize logits
|
||||
# -> (b, 3, h, w)
|
||||
|
||||
# make some grids
|
||||
# add all logits to one plot
|
||||
logits_real = torchvision.utils.make_grid(logits_real, nrow=4)
|
||||
logits_fake = torchvision.utils.make_grid(logits_fake, nrow=4)
|
||||
# I just love how torchvision calls the number of columns `nrow`
|
||||
grid_logits = torch.cat((logits_real, logits_fake), dim=1)
|
||||
# -> (3, h, w)
|
||||
|
||||
grid_images_real = torchvision.utils.make_grid(0.5 * inputs + 0.5, nrow=4)
|
||||
grid_images_fake = torchvision.utils.make_grid(
|
||||
0.5 * reconstructions + 0.5, nrow=4
|
||||
)
|
||||
grid_images = torch.cat((grid_images_real, grid_images_fake), dim=1)
|
||||
# -> (3, h, w) in range [0, 1]
|
||||
|
||||
grid_blend = grid_alpha * grid_logits + (1 - grid_alpha) * grid_images
|
||||
|
||||
# Create labeled colorbar
|
||||
dpi = 100
|
||||
height = 128 / dpi
|
||||
width = grid_logits.shape[2] / dpi
|
||||
fig, ax = plt.subplots(figsize=(width, height), dpi=dpi)
|
||||
img = ax.imshow(np.array([[-high, high]]), cmap=cmap)
|
||||
plt.colorbar(
|
||||
img,
|
||||
cax=ax,
|
||||
orientation="horizontal",
|
||||
fraction=0.9,
|
||||
aspect=width / height,
|
||||
pad=0.0,
|
||||
)
|
||||
img.set_visible(False)
|
||||
fig.tight_layout()
|
||||
fig.canvas.draw()
|
||||
# manually convert figure to numpy
|
||||
cbar_np = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
|
||||
cbar_np = cbar_np.reshape(fig.canvas.get_width_height()[::-1] + (3,))
|
||||
cbar = torch.from_numpy(cbar_np.copy()).to(grid_logits.dtype) / 255.0
|
||||
cbar = rearrange(cbar, "h w c -> c h w").to(grid_logits.device)
|
||||
|
||||
# Add colorbar to plot
|
||||
annotated_grid = torch.cat((grid_logits, cbar), dim=1)
|
||||
blended_grid = torch.cat((grid_blend, cbar), dim=1)
|
||||
return {
|
||||
"vis_logits": 2 * annotated_grid[None, ...] - 1,
|
||||
"vis_logits_blended": 2 * blended_grid[None, ...] - 1,
|
||||
}
|
||||
|
||||
def calculate_adaptive_weight(
|
||||
self, nll_loss: torch.Tensor, g_loss: torch.Tensor, last_layer: torch.Tensor
|
||||
) -> torch.Tensor:
|
||||
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
|
||||
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
|
||||
|
||||
d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
|
||||
d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
|
||||
d_weight = d_weight * self.discriminator_weight
|
||||
return d_weight
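A standalone numeric sketch of the adaptive-weight rule above (the gradient norms are made-up values): the generator loss is rescaled so that its gradient norm at the last decoder layer matches the reconstruction gradient norm, clamped to [0, 1e4] and then multiplied by `disc_weight`.

import torch

nll_grad_norm = torch.tensor(4.0)   # hypothetical gradient norms at the last decoder layer
g_grad_norm = torch.tensor(2.0)
d_weight = torch.clamp(nll_grad_norm / (g_grad_norm + 1e-4), 0.0, 1e4)
# d_weight ~= 2.0: the GAN term is scaled up until its gradient matches the NLL gradient scale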
|
||||
|
||||
def forward(
|
||||
self,
|
||||
inputs: torch.Tensor,
|
||||
reconstructions: torch.Tensor,
|
||||
*, # added because I changed the order here
|
||||
regularization_log: Dict[str, torch.Tensor],
|
||||
optimizer_idx: int,
|
||||
global_step: int,
|
||||
last_layer: torch.Tensor,
|
||||
split: str = "train",
|
||||
weights: Union[None, float, torch.Tensor] = None,
|
||||
) -> Tuple[torch.Tensor, dict]:
|
||||
if self.scale_input_to_tgt_size:
|
||||
inputs = torch.nn.functional.interpolate(
|
||||
inputs, reconstructions.shape[2:], mode="bicubic", antialias=True
|
||||
)
|
||||
|
||||
if self.dims > 2:
|
||||
inputs, reconstructions = (
|
||||
rearrange(x, "b c t h w -> (b t) c h w")
|
||||
for x in (inputs, reconstructions)
|
||||
)
|
||||
|
||||
rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
|
||||
if self.perceptual_weight > 0:
|
||||
p_loss = self.perceptual_loss(
|
||||
inputs.contiguous(), reconstructions.contiguous()
|
||||
)
|
||||
rec_loss = rec_loss + self.perceptual_weight * p_loss
|
||||
|
||||
nll_loss, weighted_nll_loss = self.get_nll_loss(rec_loss, weights)
|
||||
|
||||
# now the GAN part
|
||||
if optimizer_idx == 0:
|
||||
# generator update
|
||||
if global_step >= self.discriminator_iter_start or not self.training:
|
||||
logits_fake = self.discriminator(reconstructions.contiguous())
|
||||
g_loss = -torch.mean(logits_fake)
|
||||
if self.training:
|
||||
d_weight = self.calculate_adaptive_weight(
|
||||
nll_loss, g_loss, last_layer=last_layer
|
||||
)
|
||||
else:
|
||||
d_weight = torch.tensor(1.0)
|
||||
else:
|
||||
d_weight = torch.tensor(0.0)
|
||||
g_loss = torch.tensor(0.0, requires_grad=True)
|
||||
|
||||
loss = weighted_nll_loss + d_weight * self.disc_factor * g_loss
|
||||
log = {}
|
||||
for k in regularization_log:
|
||||
if k in self.regularization_weights:
|
||||
loss = loss + self.regularization_weights[k] * regularization_log[k]
|
||||
if k in self.additional_log_keys:
|
||||
log[f"{split}/{k}"] = regularization_log[k].detach().float().mean()
|
||||
|
||||
log.update(
|
||||
{
|
||||
f"{split}/loss/total": loss.clone().detach().mean(),
|
||||
f"{split}/loss/nll": nll_loss.detach().mean(),
|
||||
f"{split}/loss/rec": rec_loss.detach().mean(),
|
||||
f"{split}/loss/g": g_loss.detach().mean(),
|
||||
f"{split}/scalars/logvar": self.logvar.detach(),
|
||||
f"{split}/scalars/d_weight": d_weight.detach(),
|
||||
}
|
||||
)
|
||||
|
||||
return loss, log
|
||||
elif optimizer_idx == 1:
|
||||
# second pass for discriminator update
|
||||
logits_real = self.discriminator(inputs.contiguous().detach())
|
||||
logits_fake = self.discriminator(reconstructions.contiguous().detach())
|
||||
|
||||
if global_step >= self.discriminator_iter_start or not self.training:
|
||||
d_loss = self.disc_factor * self.disc_loss(logits_real, logits_fake)
|
||||
else:
|
||||
d_loss = torch.tensor(0.0, requires_grad=True)
|
||||
|
||||
log = {
|
||||
f"{split}/loss/disc": d_loss.clone().detach().mean(),
|
||||
f"{split}/logits/real": logits_real.detach().mean(),
|
||||
f"{split}/logits/fake": logits_fake.detach().mean(),
|
||||
}
|
||||
return d_loss, log
|
||||
else:
|
||||
msg = f"Unknown optimizer_idx {optimizer_idx}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
def get_nll_loss(
|
||||
self,
|
||||
rec_loss: torch.Tensor,
|
||||
weights: Optional[Union[float, torch.Tensor]] = None,
|
||||
) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
|
||||
weighted_nll_loss = nll_loss
|
||||
if weights is not None:
|
||||
weighted_nll_loss = weights * nll_loss
|
||||
weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
|
||||
nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
|
||||
|
||||
return nll_loss, weighted_nll_loss
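A numeric sketch of the NLL above: the learned log-variance divides the reconstruction error and is added back as a penalty, so claiming high uncertainty only pays off when the reconstruction error is large; with `logvar_init = 0` and `learn_logvar = False` it reduces to the plain reconstruction loss.

import torch

rec_loss = torch.tensor([0.5, 2.0])
logvar = torch.tensor(0.0)
nll = rec_loss / torch.exp(logvar) + logvar   # equals rec_loss while logvar == 0
assert torch.allclose(nll, rec_loss)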
|
73
imaginairy/modules/sgm/autoencoding/losses/lpips.py
Normal file
73
imaginairy/modules/sgm/autoencoding/losses/lpips.py
Normal file
@ -0,0 +1,73 @@
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from imaginairy.modules.sgm.autoencoding.lpips.loss.lpips import LPIPS
|
||||
from imaginairy.modules.util import default, instantiate_from_config
|
||||
|
||||
|
||||
class LatentLPIPS(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
decoder_config,
|
||||
perceptual_weight=1.0,
|
||||
latent_weight=1.0,
|
||||
scale_input_to_tgt_size=False,
|
||||
scale_tgt_to_input_size=False,
|
||||
perceptual_weight_on_inputs=0.0,
|
||||
):
|
||||
super().__init__()
|
||||
self.scale_input_to_tgt_size = scale_input_to_tgt_size
|
||||
self.scale_tgt_to_input_size = scale_tgt_to_input_size
|
||||
self.init_decoder(decoder_config)
|
||||
self.perceptual_loss = LPIPS().eval()
|
||||
self.perceptual_weight = perceptual_weight
|
||||
self.latent_weight = latent_weight
|
||||
self.perceptual_weight_on_inputs = perceptual_weight_on_inputs
|
||||
|
||||
def init_decoder(self, config):
|
||||
self.decoder = instantiate_from_config(config)
|
||||
if hasattr(self.decoder, "encoder"):
|
||||
del self.decoder.encoder
|
||||
|
||||
def forward(self, latent_inputs, latent_predictions, image_inputs, split="train"):
|
||||
log = {}
|
||||
loss = (latent_inputs - latent_predictions) ** 2
|
||||
log[f"{split}/latent_l2_loss"] = loss.mean().detach()
|
||||
image_reconstructions = None
|
||||
if self.perceptual_weight > 0.0:
|
||||
image_reconstructions = self.decoder.decode(latent_predictions)
|
||||
image_targets = self.decoder.decode(latent_inputs)
|
||||
perceptual_loss = self.perceptual_loss(
|
||||
image_targets.contiguous(), image_reconstructions.contiguous()
|
||||
)
|
||||
loss = (
|
||||
self.latent_weight * loss.mean()
|
||||
+ self.perceptual_weight * perceptual_loss.mean()
|
||||
)
|
||||
log[f"{split}/perceptual_loss"] = perceptual_loss.mean().detach()
|
||||
|
||||
if self.perceptual_weight_on_inputs > 0.0:
|
||||
image_reconstructions = default(
|
||||
image_reconstructions, self.decoder.decode(latent_predictions)
|
||||
)
|
||||
if self.scale_input_to_tgt_size:
|
||||
image_inputs = torch.nn.functional.interpolate(
|
||||
image_inputs,
|
||||
image_reconstructions.shape[2:],
|
||||
mode="bicubic",
|
||||
antialias=True,
|
||||
)
|
||||
elif self.scale_tgt_to_input_size:
|
||||
image_reconstructions = torch.nn.functional.interpolate(
|
||||
image_reconstructions,
|
||||
image_inputs.shape[2:],
|
||||
mode="bicubic",
|
||||
antialias=True,
|
||||
)
|
||||
|
||||
perceptual_loss2 = self.perceptual_loss(
|
||||
image_inputs.contiguous(), image_reconstructions.contiguous()
|
||||
)
|
||||
loss = loss + self.perceptual_weight_on_inputs * perceptual_loss2.mean()
|
||||
log[f"{split}/perceptual_loss_on_inputs"] = perceptual_loss2.mean().detach()
|
||||
return loss, log
|
1
imaginairy/modules/sgm/autoencoding/lpips/loss/.gitignore
vendored
Normal file
1
imaginairy/modules/sgm/autoencoding/lpips/loss/.gitignore
vendored
Normal file
@ -0,0 +1 @@
vgg.pth
23
imaginairy/modules/sgm/autoencoding/lpips/loss/LICENSE
Normal file
23
imaginairy/modules/sgm/autoencoding/lpips/loss/LICENSE
Normal file
@ -0,0 +1,23 @@
|
||||
Copyright (c) 2018, Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, Oliver Wang
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
150
imaginairy/modules/sgm/autoencoding/lpips/loss/lpips.py
Normal file
150
imaginairy/modules/sgm/autoencoding/lpips/loss/lpips.py
Normal file
@ -0,0 +1,150 @@
|
||||
"""Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from torchvision import models
|
||||
|
||||
from imaginairy.modules.sgm.autoencoding.lpips.util import get_ckpt_path
|
||||
|
||||
|
||||
class LPIPS(nn.Module):
|
||||
# Learned perceptual metric
|
||||
def __init__(self, use_dropout=True):
|
||||
super().__init__()
|
||||
self.scaling_layer = ScalingLayer()
|
||||
self.chns = [64, 128, 256, 512, 512]  # vgg16 features
|
||||
self.net = vgg16(pretrained=True, requires_grad=False)
|
||||
self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
|
||||
self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
|
||||
self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
|
||||
self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
|
||||
self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
|
||||
self.load_from_pretrained()
|
||||
for param in self.parameters():
|
||||
param.requires_grad = False
|
||||
|
||||
def load_from_pretrained(self, name="vgg_lpips"):
|
||||
ckpt = get_ckpt_path(name, "sgm/modules/autoencoding/lpips/loss")
|
||||
self.load_state_dict(
|
||||
torch.load(ckpt, map_location=torch.device("cpu")), strict=False
|
||||
)
|
||||
print(f"loaded pretrained LPIPS loss from {ckpt}")
|
||||
|
||||
@classmethod
|
||||
def from_pretrained(cls, name="vgg_lpips"):
|
||||
if name != "vgg_lpips":
|
||||
raise NotImplementedError
|
||||
model = cls()
|
||||
ckpt = get_ckpt_path(name)
|
||||
model.load_state_dict(
|
||||
torch.load(ckpt, map_location=torch.device("cpu")), strict=False
|
||||
)
|
||||
return model
|
||||
|
||||
def forward(self, input_tensor, target):
|
||||
in0_input, in1_input = (
|
||||
self.scaling_layer(input_tensor),
|
||||
self.scaling_layer(target),
|
||||
)
|
||||
outs0, outs1 = self.net(in0_input), self.net(in1_input)
|
||||
feats0, feats1, diffs = {}, {}, {}
|
||||
lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
|
||||
for kk in range(len(self.chns)):
|
||||
feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(
|
||||
outs1[kk]
|
||||
)
|
||||
diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
|
||||
|
||||
res = [
|
||||
spatial_average(lins[kk].model(diffs[kk]), keepdim=True)
|
||||
for kk in range(len(self.chns))
|
||||
]
|
||||
val = res[0]
|
||||
for i in range(1, len(self.chns)):
|
||||
val += res[i]
|
||||
return val
|
||||
|
||||
|
||||
class ScalingLayer(nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.register_buffer(
|
||||
"shift", torch.Tensor([-0.030, -0.088, -0.188])[None, :, None, None]
|
||||
)
|
||||
self.register_buffer(
|
||||
"scale", torch.Tensor([0.458, 0.448, 0.450])[None, :, None, None]
|
||||
)
|
||||
|
||||
def forward(self, inp):
|
||||
return (inp - self.shift) / self.scale
|
||||
|
||||
|
||||
class NetLinLayer(nn.Module):
|
||||
"""A single linear layer which does a 1x1 conv"""
|
||||
|
||||
def __init__(self, chn_in, chn_out=1, use_dropout=False):
|
||||
super().__init__()
|
||||
layers = (
|
||||
[
|
||||
nn.Dropout(),
|
||||
]
|
||||
if use_dropout
|
||||
else []
|
||||
)
|
||||
layers += [
|
||||
nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),
|
||||
]
|
||||
self.model = nn.Sequential(*layers)
|
||||
|
||||
|
||||
class vgg16(torch.nn.Module):
|
||||
def __init__(self, requires_grad=False, pretrained=True):
|
||||
super().__init__()
|
||||
vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
|
||||
self.slice1 = torch.nn.Sequential()
|
||||
self.slice2 = torch.nn.Sequential()
|
||||
self.slice3 = torch.nn.Sequential()
|
||||
self.slice4 = torch.nn.Sequential()
|
||||
self.slice5 = torch.nn.Sequential()
|
||||
self.N_slices = 5
|
||||
for x in range(4):
|
||||
self.slice1.add_module(str(x), vgg_pretrained_features[x])
|
||||
for x in range(4, 9):
|
||||
self.slice2.add_module(str(x), vgg_pretrained_features[x])
|
||||
for x in range(9, 16):
|
||||
self.slice3.add_module(str(x), vgg_pretrained_features[x])
|
||||
for x in range(16, 23):
|
||||
self.slice4.add_module(str(x), vgg_pretrained_features[x])
|
||||
for x in range(23, 30):
|
||||
self.slice5.add_module(str(x), vgg_pretrained_features[x])
|
||||
if not requires_grad:
|
||||
for param in self.parameters():
|
||||
param.requires_grad = False
|
||||
|
||||
def forward(self, X):
|
||||
h = self.slice1(X)
|
||||
h_relu1_2 = h
|
||||
h = self.slice2(h)
|
||||
h_relu2_2 = h
|
||||
h = self.slice3(h)
|
||||
h_relu3_3 = h
|
||||
h = self.slice4(h)
|
||||
h_relu4_3 = h
|
||||
h = self.slice5(h)
|
||||
h_relu5_3 = h
|
||||
vgg_outputs = namedtuple(
|
||||
"VggOutputs", ["relu1_2", "relu2_2", "relu3_3", "relu4_3", "relu5_3"]
|
||||
)
|
||||
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
|
||||
return out
|
||||
|
||||
|
||||
def normalize_tensor(x, eps=1e-10):
|
||||
norm_factor = torch.sqrt(torch.sum(x**2, dim=1, keepdim=True))
|
||||
return x / (norm_factor + eps)
|
||||
|
||||
|
||||
def spatial_average(x, keepdim=True):
|
||||
return x.mean([2, 3], keepdim=keepdim)
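A quick shape check of the two helpers above (illustrative sizes): channel-wise unit normalisation keeps the feature-map shape, and the spatial average collapses it to one value per channel.

import torch

x = torch.randn(2, 64, 31, 31)
unit = x / (torch.sqrt(torch.sum(x**2, dim=1, keepdim=True)) + 1e-10)  # normalize_tensor
score = unit.mean([2, 3], keepdim=True)                                # spatial_average
assert score.shape == (2, 64, 1, 1)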
|
58
imaginairy/modules/sgm/autoencoding/lpips/model/LICENSE
Normal file
58
imaginairy/modules/sgm/autoencoding/lpips/model/LICENSE
Normal file
@ -0,0 +1,58 @@
|
||||
Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
--------------------------- LICENSE FOR pix2pix --------------------------------
|
||||
BSD License
|
||||
|
||||
For pix2pix software
|
||||
Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
----------------------------- LICENSE FOR DCGAN --------------------------------
|
||||
BSD License
|
||||
|
||||
For dcgan.torch software
|
||||
|
||||
Copyright (c) 2015, Facebook, Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
85
imaginairy/modules/sgm/autoencoding/lpips/model/model.py
Normal file
85
imaginairy/modules/sgm/autoencoding/lpips/model/model.py
Normal file
@ -0,0 +1,85 @@
|
||||
import functools
|
||||
|
||||
import torch.nn as nn
|
||||
|
||||
from imaginairy.modules.sgm.autoencoding.lpips.util import ActNorm
|
||||
|
||||
|
||||
def weights_init(m):
|
||||
classname = m.__class__.__name__
|
||||
if classname.find("Conv") != -1:
|
||||
nn.init.normal_(m.weight.data, 0.0, 0.02)
|
||||
elif classname.find("BatchNorm") != -1:
|
||||
nn.init.normal_(m.weight.data, 1.0, 0.02)
|
||||
nn.init.constant_(m.bias.data, 0)
|
||||
|
||||
|
||||
class NLayerDiscriminator(nn.Module):
|
||||
"""Defines a PatchGAN discriminator as in Pix2Pix
|
||||
--> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
|
||||
"""
|
||||
|
||||
def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
|
||||
"""Construct a PatchGAN discriminator
|
||||
Parameters:
|
||||
input_nc (int) -- the number of channels in input images
|
||||
ndf (int) -- the number of filters in the last conv layer
|
||||
n_layers (int) -- the number of conv layers in the discriminator
|
||||
norm_layer -- normalization layer
|
||||
"""
|
||||
super().__init__()
|
||||
norm_layer = nn.BatchNorm2d if not use_actnorm else ActNorm
|
||||
if (
|
||||
isinstance(norm_layer, functools.partial)
|
||||
): # no need to use bias as BatchNorm2d has affine parameters
|
||||
use_bias = norm_layer.func != nn.BatchNorm2d
|
||||
else:
|
||||
use_bias = norm_layer != nn.BatchNorm2d
|
||||
|
||||
kw = 4
|
||||
padw = 1
|
||||
sequence = [
|
||||
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
|
||||
nn.LeakyReLU(0.2, True),
|
||||
]
|
||||
nf_mult = 1
|
||||
nf_mult_prev = 1
|
||||
for n in range(1, n_layers): # gradually increase the number of filters
|
||||
nf_mult_prev = nf_mult
|
||||
nf_mult = min(2**n, 8)
|
||||
sequence += [
|
||||
nn.Conv2d(
|
||||
ndf * nf_mult_prev,
|
||||
ndf * nf_mult,
|
||||
kernel_size=kw,
|
||||
stride=2,
|
||||
padding=padw,
|
||||
bias=use_bias,
|
||||
),
|
||||
norm_layer(ndf * nf_mult),
|
||||
nn.LeakyReLU(0.2, True),
|
||||
]
|
||||
|
||||
nf_mult_prev = nf_mult
|
||||
nf_mult = min(2**n_layers, 8)
|
||||
sequence += [
|
||||
nn.Conv2d(
|
||||
ndf * nf_mult_prev,
|
||||
ndf * nf_mult,
|
||||
kernel_size=kw,
|
||||
stride=1,
|
||||
padding=padw,
|
||||
bias=use_bias,
|
||||
),
|
||||
norm_layer(ndf * nf_mult),
|
||||
nn.LeakyReLU(0.2, True),
|
||||
]
|
||||
|
||||
sequence += [
|
||||
nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
|
||||
] # output 1 channel prediction map
|
||||
self.main = nn.Sequential(*sequence)
|
||||
|
||||
def forward(self, input_tensor):
|
||||
"""Standard forward."""
|
||||
return self.main(input_tensor)
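A usage sketch of the discriminator defined above (input size chosen arbitrarily): because it is a PatchGAN, the output is a spatial map of logits, one per receptive-field patch, rather than a single score.

import torch

disc = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
logits = disc(torch.randn(1, 3, 256, 256))
print(logits.shape)   # roughly torch.Size([1, 1, 30, 30]): one logit per image patch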
|
129
imaginairy/modules/sgm/autoencoding/lpips/util.py
Normal file
129
imaginairy/modules/sgm/autoencoding/lpips/util.py
Normal file
@ -0,0 +1,129 @@
|
||||
import hashlib
|
||||
import os
|
||||
|
||||
import requests
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from tqdm import tqdm
|
||||
|
||||
URL_MAP = {"vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1"}
|
||||
|
||||
CKPT_MAP = {"vgg_lpips": "vgg.pth"}
|
||||
|
||||
MD5_MAP = {"vgg_lpips": "d507d7349b931f0638a25a48a722f98a"}
|
||||
|
||||
|
||||
def download(url, local_path, chunk_size=1024):
|
||||
os.makedirs(os.path.split(local_path)[0], exist_ok=True)
|
||||
with requests.get(url, stream=True) as r, tqdm(
|
||||
total=int(r.headers.get("content-length", 0)), unit="B", unit_scale=True
|
||||
) as pbar, open(local_path, "wb") as f:
|
||||
for data in r.iter_content(chunk_size=chunk_size):
|
||||
if data:
|
||||
f.write(data)
|
||||
pbar.update(chunk_size)
|
||||
|
||||
|
||||
def md5_hash(path):
|
||||
with open(path, "rb") as f:
|
||||
content = f.read()
|
||||
return hashlib.md5(content).hexdigest()
|
||||
|
||||
|
||||
def get_ckpt_path(name, root, check=False):
|
||||
assert name in URL_MAP
|
||||
path = os.path.join(root, CKPT_MAP[name])
|
||||
if not os.path.exists(path) or (check and md5_hash(path) != MD5_MAP[name]):
|
||||
print(f"Downloading {name} model from {URL_MAP[name]} to {path}")
|
||||
download(URL_MAP[name], path)
|
||||
md5 = md5_hash(path)
|
||||
assert md5 == MD5_MAP[name], md5
|
||||
return path
|
||||
|
||||
|
||||
class ActNorm(nn.Module):
|
||||
def __init__(
|
||||
self, num_features, logdet=False, affine=True, allow_reverse_init=False
|
||||
):
|
||||
assert affine
|
||||
super().__init__()
|
||||
self.logdet = logdet
|
||||
self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
|
||||
self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
|
||||
self.allow_reverse_init = allow_reverse_init
|
||||
|
||||
self.register_buffer("initialized", torch.tensor(0, dtype=torch.uint8))
|
||||
|
||||
def initialize(self, input_tensor):
|
||||
with torch.no_grad():
|
||||
flatten = (
|
||||
input_tensor.permute(1, 0, 2, 3)
|
||||
.contiguous()
|
||||
.view(input_tensor.shape[1], -1)
|
||||
)
|
||||
mean = (
|
||||
flatten.mean(1)
|
||||
.unsqueeze(1)
|
||||
.unsqueeze(2)
|
||||
.unsqueeze(3)
|
||||
.permute(1, 0, 2, 3)
|
||||
)
|
||||
std = (
|
||||
flatten.std(1)
|
||||
.unsqueeze(1)
|
||||
.unsqueeze(2)
|
||||
.unsqueeze(3)
|
||||
.permute(1, 0, 2, 3)
|
||||
)
|
||||
|
||||
self.loc.data.copy_(-mean)
|
||||
self.scale.data.copy_(1 / (std + 1e-6))
|
||||
|
||||
def forward(self, input_tensor, reverse=False):
|
||||
if reverse:
|
||||
return self.reverse(input_tensor)
|
||||
if len(input_tensor.shape) == 2:
|
||||
input_tensor = input_tensor[:, :, None, None]
|
||||
squeeze = True
|
||||
else:
|
||||
squeeze = False
|
||||
|
||||
_, _, height, width = input_tensor.shape
|
||||
|
||||
if self.training and self.initialized.item() == 0:
|
||||
self.initialize(input_tensor)
|
||||
self.initialized.fill_(1)
|
||||
|
||||
h = self.scale * (input_tensor + self.loc)
|
||||
|
||||
if squeeze:
|
||||
h = h.squeeze(-1).squeeze(-1)
|
||||
|
||||
if self.logdet:
|
||||
log_abs = torch.log(torch.abs(self.scale))
|
||||
logdet = height * width * torch.sum(log_abs)
|
||||
logdet = logdet * torch.ones(input_tensor.shape[0]).to(input_tensor)
|
||||
return h, logdet
|
||||
|
||||
return h
|
||||
|
||||
def reverse(self, output):
|
||||
if self.training and self.initialized.item() == 0:
|
||||
if not self.allow_reverse_init:
|
||||
msg = "Initializing ActNorm in reverse direction is disabled by default. Use allow_reverse_init=True to enable."
|
||||
raise RuntimeError(msg)
|
||||
else:
|
||||
self.initialize(output)
|
||||
self.initialized.fill_(1)
|
||||
|
||||
if len(output.shape) == 2:
|
||||
output = output[:, :, None, None]
|
||||
squeeze = True
|
||||
else:
|
||||
squeeze = False
|
||||
|
||||
h = output / self.scale - self.loc
|
||||
|
||||
if squeeze:
|
||||
h = h.squeeze(-1).squeeze(-1)
|
||||
return h
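A behaviour sketch of `ActNorm` (random data, illustrative only): the first forward pass in training mode initialises `loc` and `scale` from the batch statistics, so the normalised output is approximately zero-mean and unit-variance per channel.

import torch

act = ActNorm(num_features=8)
act.train()
out = act(torch.randn(4, 8, 16, 16) * 3 + 1)
print(out.mean().item(), out.std().item())   # roughly 0 and 1 after data-dependent init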
|
17
imaginairy/modules/sgm/autoencoding/lpips/vqperceptual.py
Normal file
17
imaginairy/modules/sgm/autoencoding/lpips/vqperceptual.py
Normal file
@ -0,0 +1,17 @@
import torch
import torch.nn.functional as F


def hinge_d_loss(logits_real, logits_fake):
    loss_real = torch.mean(F.relu(1.0 - logits_real))
    loss_fake = torch.mean(F.relu(1.0 + logits_fake))
    d_loss = 0.5 * (loss_real + loss_fake)
    return d_loss


def vanilla_d_loss(logits_real, logits_fake):
    d_loss = 0.5 * (
        torch.mean(torch.nn.functional.softplus(-logits_real))
        + torch.mean(torch.nn.functional.softplus(logits_fake))
    )
    return d_loss
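A tiny numeric check of the two discriminator losses above (logits chosen arbitrarily): both penalise low logits on real samples and high logits on fakes, the hinge version with a margin and the vanilla version with a softplus.

import torch

real = torch.tensor([2.0, 0.5])
fake = torch.tensor([-1.5, 0.2])
print(hinge_d_loss(real, fake).item())    # 0.5 * (0.25 + 0.6) = 0.425
print(vanilla_d_loss(real, fake).item())  # softplus variant, same ordering of real vs fake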
|
27
imaginairy/modules/sgm/autoencoding/regularizers/__init__.py
Normal file
27
imaginairy/modules/sgm/autoencoding/regularizers/__init__.py
Normal file
@ -0,0 +1,27 @@
from typing import Any, Tuple

import torch

from imaginairy.modules.sgm.distributions.distributions import (
    DiagonalGaussianDistribution,
)

from .base import AbstractRegularizer


class DiagonalGaussianRegularizer(AbstractRegularizer):
    def __init__(self, sample: bool = True):
        super().__init__()
        self.sample = sample

    def get_trainable_parameters(self) -> Any:
        yield from ()

    def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]:
        log = {}
        posterior = DiagonalGaussianDistribution(z)
        z = posterior.sample() if self.sample else posterior.mode()
        kl_loss = posterior.kl()
        kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
        log["kl_loss"] = kl_loss
        return z, log
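A conceptual sketch of what the regularizer above expects (shapes are illustrative): the encoder emits twice the latent channel count, which `DiagonalGaussianDistribution` splits into a mean and a log-variance before sampling with the reparameterisation trick; the KL term is then averaged over the batch.

import torch

z = torch.randn(1, 8, 32, 32)            # encoder output: 2 * 4 latent channels
mean, logvar = torch.chunk(z, 2, dim=1)  # conceptually what the distribution does internally
sample = mean + torch.exp(0.5 * logvar) * torch.randn_like(mean)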
|
40
imaginairy/modules/sgm/autoencoding/regularizers/base.py
Normal file
40
imaginairy/modules/sgm/autoencoding/regularizers/base.py
Normal file
@ -0,0 +1,40 @@
from abc import abstractmethod
from typing import Any, Tuple

import torch
import torch.nn.functional as F
from torch import nn


class AbstractRegularizer(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]:
        raise NotImplementedError()

    @abstractmethod
    def get_trainable_parameters(self) -> Any:
        raise NotImplementedError()


class IdentityRegularizer(AbstractRegularizer):
    def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]:
        return z, {}

    def get_trainable_parameters(self) -> Any:
        yield from ()


def measure_perplexity(
    predicted_indices: torch.Tensor, num_centroids: int
) -> Tuple[torch.Tensor, torch.Tensor]:
    # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py
    # eval cluster perplexity. when perplexity == num_embeddings then all clusters are used exactly equally
    encodings = (
        F.one_hot(predicted_indices, num_centroids).float().reshape(-1, num_centroids)
    )
    avg_probs = encodings.mean(0)
    perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp()
    cluster_use = torch.sum(avg_probs > 0)
    return perplexity, cluster_use
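A numeric example for `measure_perplexity` (made-up indices): when all four centroids are used equally, the perplexity equals the number of centroids.

import torch

idx = torch.tensor([0, 1, 2, 3, 0, 1, 2, 3])
perplexity, cluster_use = measure_perplexity(idx, num_centroids=4)
print(perplexity.item(), cluster_use.item())   # ~4.0 and 4: every centroid used equally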
|
488
imaginairy/modules/sgm/autoencoding/regularizers/quantize.py
Normal file
488
imaginairy/modules/sgm/autoencoding/regularizers/quantize.py
Normal file
@ -0,0 +1,488 @@
|
||||
import logging
|
||||
from abc import abstractmethod
|
||||
from typing import Dict, Iterator, Literal, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from einops import rearrange
|
||||
from torch import einsum
|
||||
|
||||
from .base import AbstractRegularizer, measure_perplexity
|
||||
|
||||
logpy = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AbstractQuantizer(AbstractRegularizer):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
# Define these in your init
|
||||
# shape (N,)
|
||||
self.used: Optional[torch.Tensor]
|
||||
self.re_embed: int
|
||||
self.unknown_index: Union[Literal["random"], int]
|
||||
|
||||
def remap_to_used(self, inds: torch.Tensor) -> torch.Tensor:
|
||||
assert self.used is not None, "You need to define used indices for remap"
|
||||
ishape = inds.shape
|
||||
assert len(ishape) > 1
|
||||
inds = inds.reshape(ishape[0], -1)
|
||||
used = self.used.to(inds)
|
||||
match = (inds[:, :, None] == used[None, None, ...]).long()
|
||||
new = match.argmax(-1)
|
||||
unknown = match.sum(2) < 1
|
||||
if self.unknown_index == "random":
|
||||
new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(
|
||||
device=new.device
|
||||
)
|
||||
else:
|
||||
new[unknown] = self.unknown_index
|
||||
return new.reshape(ishape)
|
||||
|
||||
def unmap_to_all(self, inds: torch.Tensor) -> torch.Tensor:
|
||||
assert self.used is not None, "You need to define used indices for remap"
|
||||
ishape = inds.shape
|
||||
assert len(ishape) > 1
|
||||
inds = inds.reshape(ishape[0], -1)
|
||||
used = self.used.to(inds)
|
||||
if self.re_embed > self.used.shape[0]: # extra token
|
||||
inds[inds >= self.used.shape[0]] = 0 # simply set to zero
|
||||
back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
|
||||
return back.reshape(ishape)
|
||||
|
||||
@abstractmethod
|
||||
def get_codebook_entry(
|
||||
self, indices: torch.Tensor, shape: Optional[Tuple[int, ...]] = None
|
||||
) -> torch.Tensor:
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_trainable_parameters(self) -> Iterator[torch.nn.Parameter]:
|
||||
yield from self.parameters()
|
||||
|
||||
|
||||
class GumbelQuantizer(AbstractQuantizer):
|
||||
"""
|
||||
credit to @karpathy:
|
||||
https://github.com/karpathy/deep-vector-quantization/blob/main/model.py (thanks!)
|
||||
Gumbel Softmax trick quantizer
|
||||
Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016
|
||||
https://arxiv.org/abs/1611.01144
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
num_hiddens: int,
|
||||
embedding_dim: int,
|
||||
n_embed: int,
|
||||
straight_through: bool = True,
|
||||
kl_weight: float = 5e-4,
|
||||
temp_init: float = 1.0,
|
||||
remap: Optional[str] = None,
|
||||
unknown_index: str = "random",
|
||||
loss_key: str = "loss/vq",
|
||||
) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.loss_key = loss_key
|
||||
self.embedding_dim = embedding_dim
|
||||
self.n_embed = n_embed
|
||||
|
||||
self.straight_through = straight_through
|
||||
self.temperature = temp_init
|
||||
self.kl_weight = kl_weight
|
||||
|
||||
self.proj = nn.Conv2d(num_hiddens, n_embed, 1)
|
||||
self.embed = nn.Embedding(n_embed, embedding_dim)
|
||||
|
||||
self.remap = remap
|
||||
if self.remap is not None:
|
||||
self.register_buffer("used", torch.tensor(np.load(self.remap)))
|
||||
self.re_embed = self.used.shape[0]
|
||||
else:
|
||||
self.used = None
|
||||
self.re_embed = n_embed
|
||||
if unknown_index == "extra":
|
||||
self.unknown_index = self.re_embed
|
||||
self.re_embed = self.re_embed + 1
|
||||
else:
|
||||
assert unknown_index == "random" or isinstance(
|
||||
unknown_index, int
|
||||
), "unknown index needs to be 'random', 'extra' or any integer"
|
||||
self.unknown_index = unknown_index # "random" or "extra" or integer
|
||||
if self.remap is not None:
|
||||
logpy.info(
|
||||
f"Remapping {self.n_embed} indices to {self.re_embed} indices. "
|
||||
f"Using {self.unknown_index} for unknown indices."
|
||||
)
|
||||
|
||||
def forward(
|
||||
self, z: torch.Tensor, temp: Optional[float] = None, return_logits: bool = False
|
||||
) -> Tuple[torch.Tensor, Dict]:
|
||||
# force hard = True when we are in eval mode, as we must quantize.
|
||||
# actually, always true seems to work
|
||||
hard = self.straight_through if self.training else True
|
||||
temp = self.temperature if temp is None else temp
|
||||
out_dict = {}
|
||||
logits = self.proj(z)
|
||||
if self.remap is not None:
|
||||
# continue only with used logits
|
||||
full_zeros = torch.zeros_like(logits)
|
||||
logits = logits[:, self.used, ...]
|
||||
|
||||
soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=hard)
|
||||
if self.remap is not None:
|
||||
# go back to all entries but unused set to zero
|
||||
full_zeros[:, self.used, ...] = soft_one_hot
|
||||
soft_one_hot = full_zeros
|
||||
z_q = einsum("b n h w, n d -> b d h w", soft_one_hot, self.embed.weight)
|
||||
|
||||
# + kl divergence to the prior loss
|
||||
qy = F.softmax(logits, dim=1)
|
||||
diff = (
|
||||
self.kl_weight
|
||||
* torch.sum(qy * torch.log(qy * self.n_embed + 1e-10), dim=1).mean()
|
||||
)
|
||||
out_dict[self.loss_key] = diff
|
||||
|
||||
ind = soft_one_hot.argmax(dim=1)
|
||||
out_dict["indices"] = ind
|
||||
if self.remap is not None:
|
||||
ind = self.remap_to_used(ind)
|
||||
|
||||
if return_logits:
|
||||
out_dict["logits"] = logits
|
||||
|
||||
return z_q, out_dict
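A minimal sketch of the Gumbel-softmax trick used above: with `hard=True` the forward pass yields one-hot codes (so quantisation really happens) while gradients flow through the soft relaxation.

import torch
import torch.nn.functional as F

logits = torch.randn(2, 16, 4, 4, requires_grad=True)
soft_one_hot = F.gumbel_softmax(logits, tau=1.0, dim=1, hard=True)
assert torch.allclose(soft_one_hot.sum(dim=1), torch.ones(2, 4, 4))   # one-hot forward, soft gradients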
|
||||
|
||||
def get_codebook_entry(self, indices, shape):
|
||||
# TODO: shape not yet optional
|
||||
b, h, w, c = shape
|
||||
assert b * h * w == indices.shape[0]
|
||||
indices = rearrange(indices, "(b h w) -> b h w", b=b, h=h, w=w)
|
||||
if self.remap is not None:
|
||||
indices = self.unmap_to_all(indices)
|
||||
one_hot = (
|
||||
F.one_hot(indices, num_classes=self.n_embed).permute(0, 3, 1, 2).float()
|
||||
)
|
||||
z_q = einsum("b n h w, n d -> b d h w", one_hot, self.embed.weight)
|
||||
return z_q
|
||||
|
||||
|
||||
class VectorQuantizer(AbstractQuantizer):
|
||||
"""
|
||||
____________________________________________
|
||||
Discretization bottleneck part of the VQ-VAE.
|
||||
Inputs:
|
||||
- n_e : number of embeddings
|
||||
- e_dim : dimension of embedding
|
||||
- beta : commitment cost used in loss term,
|
||||
beta * ||z_e(x)-sg[e]||^2
|
||||
_____________________________________________
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
n_e: int,
|
||||
e_dim: int,
|
||||
beta: float = 0.25,
|
||||
remap: Optional[str] = None,
|
||||
unknown_index: str = "random",
|
||||
sane_index_shape: bool = False,
|
||||
log_perplexity: bool = False,
|
||||
embedding_weight_norm: bool = False,
|
||||
loss_key: str = "loss/vq",
|
||||
):
|
||||
super().__init__()
|
||||
self.n_e = n_e
|
||||
self.e_dim = e_dim
|
||||
self.beta = beta
|
||||
self.loss_key = loss_key
|
||||
|
||||
if not embedding_weight_norm:
|
||||
self.embedding = nn.Embedding(self.n_e, self.e_dim)
|
||||
self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
|
||||
else:
|
||||
self.embedding = torch.nn.utils.weight_norm(
|
||||
nn.Embedding(self.n_e, self.e_dim), dim=1
|
||||
)
|
||||
|
||||
self.remap = remap
|
||||
if self.remap is not None:
|
||||
self.register_buffer("used", torch.tensor(np.load(self.remap)))
|
||||
self.re_embed = self.used.shape[0]
|
||||
else:
|
||||
self.used = None
|
||||
self.re_embed = n_e
|
||||
if unknown_index == "extra":
|
||||
self.unknown_index = self.re_embed
|
||||
self.re_embed = self.re_embed + 1
|
||||
else:
|
||||
assert unknown_index == "random" or isinstance(
|
||||
unknown_index, int
|
||||
), "unknown index needs to be 'random', 'extra' or any integer"
|
||||
self.unknown_index = unknown_index # "random" or "extra" or integer
|
||||
if self.remap is not None:
|
||||
logpy.info(
|
||||
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
|
||||
f"Using {self.unknown_index} for unknown indices."
|
||||
)
|
||||
|
||||
self.sane_index_shape = sane_index_shape
|
||||
self.log_perplexity = log_perplexity
|
||||
|
||||
def forward(
|
||||
self,
|
||||
z: torch.Tensor,
|
||||
) -> Tuple[torch.Tensor, Dict]:
|
||||
do_reshape = z.ndim == 4
|
||||
if do_reshape:
|
||||
# # reshape z -> (batch, height, width, channel) and flatten
|
||||
z = rearrange(z, "b c h w -> b h w c").contiguous()
|
||||
|
||||
else:
|
||||
assert z.ndim < 4, "No reshaping strategy for inputs > 4 dimensions defined"
|
||||
z = z.contiguous()
|
||||
|
||||
z_flattened = z.view(-1, self.e_dim)
|
||||
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
|
||||
|
||||
d = (
|
||||
torch.sum(z_flattened**2, dim=1, keepdim=True)
|
||||
+ torch.sum(self.embedding.weight**2, dim=1)
|
||||
- 2
|
||||
* torch.einsum(
|
||||
"bd,dn->bn", z_flattened, rearrange(self.embedding.weight, "n d -> d n")
|
||||
)
|
||||
)
|
||||
|
||||
min_encoding_indices = torch.argmin(d, dim=1)
|
||||
z_q = self.embedding(min_encoding_indices).view(z.shape)
|
||||
loss_dict = {}
|
||||
if self.log_perplexity:
|
||||
perplexity, cluster_usage = measure_perplexity(
|
||||
min_encoding_indices.detach(), self.n_e
|
||||
)
|
||||
loss_dict.update({"perplexity": perplexity, "cluster_usage": cluster_usage})
|
||||
|
||||
# compute loss for embedding
|
||||
loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean(
|
||||
(z_q - z.detach()) ** 2
|
||||
)
|
||||
loss_dict[self.loss_key] = loss
|
||||
|
||||
# preserve gradients
|
||||
z_q = z + (z_q - z).detach()
|
||||
|
||||
# reshape back to match original input shape
|
||||
if do_reshape:
|
||||
z_q = rearrange(z_q, "b h w c -> b c h w").contiguous()
|
||||
|
||||
if self.remap is not None:
|
||||
min_encoding_indices = min_encoding_indices.reshape(
|
||||
z.shape[0], -1
|
||||
) # add batch axis
|
||||
min_encoding_indices = self.remap_to_used(min_encoding_indices)
|
||||
min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
|
||||
|
||||
if self.sane_index_shape:
|
||||
if do_reshape:
|
||||
min_encoding_indices = min_encoding_indices.reshape(
|
||||
z_q.shape[0], z_q.shape[2], z_q.shape[3]
|
||||
)
|
||||
else:
|
||||
min_encoding_indices = rearrange(
|
||||
min_encoding_indices, "(b s) 1 -> b s", b=z_q.shape[0]
|
||||
)
|
||||
|
||||
loss_dict["min_encoding_indices"] = min_encoding_indices
|
||||
|
||||
return z_q, loss_dict
|
||||
|
||||
def get_codebook_entry(
|
||||
self, indices: torch.Tensor, shape: Optional[Tuple[int, ...]] = None
|
||||
) -> torch.Tensor:
|
||||
# shape specifying (batch, height, width, channel)
|
||||
if self.remap is not None:
|
||||
assert shape is not None, "Need to give shape for remap"
|
||||
indices = indices.reshape(shape[0], -1) # add batch axis
|
||||
indices = self.unmap_to_all(indices)
|
||||
indices = indices.reshape(-1) # flatten again
|
||||
|
||||
# get quantized latent vectors
|
||||
z_q = self.embedding(indices)
|
||||
|
||||
if shape is not None:
|
||||
z_q = z_q.view(shape)
|
||||
# reshape back to match original input shape
|
||||
z_q = z_q.permute(0, 3, 1, 2).contiguous()
|
||||
|
||||
return z_q
|
||||
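
# Minimal usage sketch (illustrative only; the shapes and hyperparameters below are
# assumptions for the example, not values used elsewhere in this repo).
def _vector_quantizer_usage_sketch():
    vq = VectorQuantizer(n_e=512, e_dim=64, beta=0.25)
    z = torch.randn(2, 64, 8, 8)  # (batch, e_dim, height, width) latent
    z_q, info = vq(z)  # z_q matches z's shape; gradients flow via the straight-through trick
    return z_q.shape, info["loss/vq"], info["min_encoding_indices"].shape
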
|
||||
|
||||
class EmbeddingEMA(nn.Module):
|
||||
def __init__(self, num_tokens, codebook_dim, decay=0.99, eps=1e-5):
|
||||
super().__init__()
|
||||
self.decay = decay
|
||||
self.eps = eps
|
||||
weight = torch.randn(num_tokens, codebook_dim)
|
||||
self.weight = nn.Parameter(weight, requires_grad=False)
|
||||
self.cluster_size = nn.Parameter(torch.zeros(num_tokens), requires_grad=False)
|
||||
self.embed_avg = nn.Parameter(weight.clone(), requires_grad=False)
|
||||
self.update = True
|
||||
|
||||
def forward(self, embed_id):
|
||||
return F.embedding(embed_id, self.weight)
|
||||
|
||||
def cluster_size_ema_update(self, new_cluster_size):
|
||||
self.cluster_size.data.mul_(self.decay).add_(
|
||||
new_cluster_size, alpha=1 - self.decay
|
||||
)
|
||||
|
||||
def embed_avg_ema_update(self, new_embed_avg):
|
||||
self.embed_avg.data.mul_(self.decay).add_(new_embed_avg, alpha=1 - self.decay)
|
||||
|
||||
def weight_update(self, num_tokens):
|
||||
n = self.cluster_size.sum()
|
||||
smoothed_cluster_size = (
|
||||
(self.cluster_size + self.eps) / (n + num_tokens * self.eps) * n
|
||||
)
|
||||
# normalize embedding average with smoothed cluster size
|
||||
embed_normalized = self.embed_avg / smoothed_cluster_size.unsqueeze(1)
|
||||
self.weight.data.copy_(embed_normalized)
|
||||
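# Note: EmbeddingEMA maintains the codebook with exponential moving averages rather
# than gradients. For code i, with decay d and n_i assignments in the batch:
#   cluster_size_i <- d * cluster_size_i + (1 - d) * n_i
#   embed_avg_i    <- d * embed_avg_i    + (1 - d) * sum(z assigned to code i)
#   weight_i        = embed_avg_i / cluster_size_i   (Laplace-smoothed in weight_update)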
|
||||
|
||||
class EMAVectorQuantizer(AbstractQuantizer):
|
||||
def __init__(
|
||||
self,
|
||||
n_embed: int,
|
||||
embedding_dim: int,
|
||||
beta: float,
|
||||
decay: float = 0.99,
|
||||
eps: float = 1e-5,
|
||||
remap: Optional[str] = None,
|
||||
unknown_index: str = "random",
|
||||
loss_key: str = "loss/vq",
|
||||
):
|
||||
super().__init__()
|
||||
self.codebook_dim = embedding_dim
|
||||
self.num_tokens = n_embed
|
||||
self.beta = beta
|
||||
self.loss_key = loss_key
|
||||
|
||||
self.embedding = EmbeddingEMA(self.num_tokens, self.codebook_dim, decay, eps)
|
||||
|
||||
self.remap = remap
|
||||
if self.remap is not None:
|
||||
self.register_buffer("used", torch.tensor(np.load(self.remap)))
|
||||
self.re_embed = self.used.shape[0]
|
||||
else:
|
||||
self.used = None
|
||||
self.re_embed = n_embed
|
||||
if unknown_index == "extra":
|
||||
self.unknown_index = self.re_embed
|
||||
self.re_embed = self.re_embed + 1
|
||||
else:
|
||||
assert unknown_index == "random" or isinstance(
|
||||
unknown_index, int
|
||||
), "unknown index needs to be 'random', 'extra' or any integer"
|
||||
self.unknown_index = unknown_index # "random" or "extra" or integer
|
||||
if self.remap is not None:
|
||||
logpy.info(
|
||||
f"Remapping {self.n_embed} indices to {self.re_embed} indices. "
|
||||
f"Using {self.unknown_index} for unknown indices."
|
||||
)
|
||||
|
||||
def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, Dict]:
|
||||
# reshape z -> (batch, height, width, channel) and flatten
|
||||
# z, 'b c h w -> b h w c'
|
||||
z = rearrange(z, "b c h w -> b h w c")
|
||||
z_flattened = z.reshape(-1, self.codebook_dim)
|
||||
|
||||
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
|
||||
d = (
|
||||
z_flattened.pow(2).sum(dim=1, keepdim=True)
|
||||
+ self.embedding.weight.pow(2).sum(dim=1)
|
||||
- 2 * torch.einsum("bd,nd->bn", z_flattened, self.embedding.weight)
|
||||
) # 'n d -> d n'
|
||||
|
||||
encoding_indices = torch.argmin(d, dim=1)
|
||||
|
||||
z_q = self.embedding(encoding_indices).view(z.shape)
|
||||
encodings = F.one_hot(encoding_indices, self.num_tokens).type(z.dtype)
|
||||
avg_probs = torch.mean(encodings, dim=0)
|
||||
perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
|
||||
|
||||
if self.training and self.embedding.update:
|
||||
# EMA cluster size
|
||||
encodings_sum = encodings.sum(0)
|
||||
self.embedding.cluster_size_ema_update(encodings_sum)
|
||||
# EMA embedding average
|
||||
embed_sum = encodings.transpose(0, 1) @ z_flattened
|
||||
self.embedding.embed_avg_ema_update(embed_sum)
|
||||
# normalize embed_avg and update weight
|
||||
self.embedding.weight_update(self.num_tokens)
|
||||
|
||||
# compute loss for embedding
|
||||
loss = self.beta * F.mse_loss(z_q.detach(), z)
|
||||
|
||||
# preserve gradients
|
||||
z_q = z + (z_q - z).detach()
|
||||
|
||||
# reshape back to match original input shape
|
||||
# z_q, 'b h w c -> b c h w'
|
||||
z_q = rearrange(z_q, "b h w c -> b c h w")
|
||||
|
||||
out_dict = {
|
||||
self.loss_key: loss,
|
||||
"encodings": encodings,
|
||||
"encoding_indices": encoding_indices,
|
||||
"perplexity": perplexity,
|
||||
}
|
||||
|
||||
return z_q, out_dict
|
||||
|
||||
|
||||
class VectorQuantizerWithInputProjection(VectorQuantizer):
|
||||
def __init__(
|
||||
self,
|
||||
input_dim: int,
|
||||
n_codes: int,
|
||||
codebook_dim: int,
|
||||
beta: float = 1.0,
|
||||
output_dim: Optional[int] = None,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(n_codes, codebook_dim, beta, **kwargs)
|
||||
self.proj_in = nn.Linear(input_dim, codebook_dim)
|
||||
self.output_dim = output_dim
|
||||
if output_dim is not None:
|
||||
self.proj_out = nn.Linear(codebook_dim, output_dim)
|
||||
else:
|
||||
self.proj_out = nn.Identity()
|
||||
|
||||
def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, Dict]:
|
||||
rearr = False
|
||||
in_shape = z.shape
|
||||
|
||||
if z.ndim > 3:
|
||||
rearr = self.output_dim is not None
|
||||
z = rearrange(z, "b c ... -> b (...) c")
|
||||
z = self.proj_in(z)
|
||||
z_q, loss_dict = super().forward(z)
|
||||
|
||||
z_q = self.proj_out(z_q)
|
||||
if rearr:
|
||||
if len(in_shape) == 4:
|
||||
z_q = rearrange(z_q, "b (h w) c -> b c h w ", w=in_shape[-1])
|
||||
elif len(in_shape) == 5:
|
||||
z_q = rearrange(
|
||||
z_q, "b (t h w) c -> b c t h w ", w=in_shape[-1], h=in_shape[-2]
|
||||
)
|
||||
else:
|
||||
msg = (
|
||||
f"rearranging not available for {len(in_shape)}-dimensional input."
|
||||
)
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
return z_q, loss_dict
|
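
# Minimal usage sketch for video-shaped latents (illustrative only; the dimensions
# below are assumptions for the example, not values used elsewhere in this repo).
def _projected_vq_usage_sketch():
    vq = VectorQuantizerWithInputProjection(
        input_dim=16, n_codes=256, codebook_dim=8, output_dim=16
    )
    z = torch.randn(2, 16, 4, 8, 8)  # (batch, channels, time, height, width)
    z_q, loss_dict = vq(z)  # projected to codebook_dim, quantized, projected back
    return z_q.shape  # (2, 16, 4, 8, 8)
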
356
imaginairy/modules/sgm/autoencoding/temporal_ae.py
Normal file
@ -0,0 +1,356 @@
|
||||
import logging
|
||||
from typing import Callable, Iterable, Union
|
||||
|
||||
import torch
|
||||
from einops import rearrange, repeat
|
||||
from torch.optim._multi_tensor import partialclass
|
||||
|
||||
from imaginairy.modules.sgm.diffusionmodules.model import (
|
||||
XFORMERS_IS_AVAILABLE,
|
||||
AttnBlock,
|
||||
Decoder,
|
||||
MemoryEfficientAttnBlock,
|
||||
ResnetBlock,
|
||||
)
|
||||
from imaginairy.modules.sgm.diffusionmodules.openaimodel import ResBlock
|
||||
from imaginairy.modules.sgm.diffusionmodules.util import timestep_embedding
|
||||
from imaginairy.modules.sgm.video_attention import VideoTransformerBlock
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VideoResBlock(ResnetBlock):
|
||||
def __init__(
|
||||
self,
|
||||
out_channels,
|
||||
*args,
|
||||
dropout=0.0,
|
||||
video_kernel_size=3,
|
||||
alpha=0.0,
|
||||
merge_strategy="learned",
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(out_channels=out_channels, dropout=dropout, *args, **kwargs)
|
||||
if video_kernel_size is None:
|
||||
video_kernel_size = [3, 1, 1]
|
||||
self.time_stack = ResBlock(
|
||||
channels=out_channels,
|
||||
emb_channels=0,
|
||||
dropout=dropout,
|
||||
dims=3,
|
||||
use_scale_shift_norm=False,
|
||||
use_conv=False,
|
||||
up=False,
|
||||
down=False,
|
||||
kernel_size=video_kernel_size,
|
||||
use_checkpoint=False,
|
||||
skip_t_emb=True,
|
||||
)
|
||||
|
||||
self.merge_strategy = merge_strategy
|
||||
if self.merge_strategy == "fixed":
|
||||
self.register_buffer("mix_factor", torch.Tensor([alpha]))
|
||||
elif self.merge_strategy == "learned":
|
||||
self.register_parameter(
|
||||
"mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))
|
||||
)
|
||||
else:
|
||||
msg = f"unknown merge strategy {self.merge_strategy}"
|
||||
raise ValueError(msg)
|
||||
|
||||
def get_alpha(self, bs):
|
||||
if self.merge_strategy == "fixed":
|
||||
return self.mix_factor
|
||||
elif self.merge_strategy == "learned":
|
||||
return torch.sigmoid(self.mix_factor)
|
||||
else:
|
||||
raise NotImplementedError()
|
||||
|
||||
def forward(self, x, temb, skip_video=False, timesteps=None):
|
||||
if timesteps is None:
|
||||
timesteps = self.timesteps
|
||||
|
||||
b, c, h, w = x.shape
|
||||
|
||||
x = super().forward(x, temb)
|
||||
|
||||
if not skip_video:
|
||||
x_mix = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps)
|
||||
|
||||
x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps)
|
||||
|
||||
x = self.time_stack(x, temb)
|
||||
|
||||
alpha = self.get_alpha(bs=b // timesteps)
|
||||
x = alpha * x + (1.0 - alpha) * x_mix
|
||||
|
||||
x = rearrange(x, "b c t h w -> (b t) c h w")
|
||||
return x
|
||||
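# Note: VideoResBlock blends the per-frame 2D path with the temporal 3D path through
# a scalar gate:
#   out = alpha * time_stack(x) + (1 - alpha) * x_spatial,
# where alpha = sigmoid(mix_factor) for merge_strategy="learned" and a fixed buffer
# for merge_strategy="fixed".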
|
||||
|
||||
class AE3DConv(torch.nn.Conv2d):
|
||||
def __init__(self, in_channels, out_channels, video_kernel_size=3, *args, **kwargs):
|
||||
super().__init__(in_channels, out_channels, *args, **kwargs)
|
||||
if isinstance(video_kernel_size, Iterable):
|
||||
padding = [int(k // 2) for k in video_kernel_size]
|
||||
else:
|
||||
padding = int(video_kernel_size // 2)
|
||||
|
||||
self.time_mix_conv = torch.nn.Conv3d(
|
||||
in_channels=out_channels,
|
||||
out_channels=out_channels,
|
||||
kernel_size=video_kernel_size,
|
||||
padding=padding,
|
||||
)
|
||||
|
||||
def forward(self, input_tensor, timesteps, skip_video=False):
|
||||
x = super().forward(input_tensor)
|
||||
if skip_video:
|
||||
return x
|
||||
x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps)
|
||||
x = self.time_mix_conv(x)
|
||||
return rearrange(x, "b c t h w -> (b t) c h w")
|
||||
|
||||
|
||||
class VideoBlock(AttnBlock):
|
||||
def __init__(
|
||||
self, in_channels: int, alpha: float = 0, merge_strategy: str = "learned"
|
||||
):
|
||||
super().__init__(in_channels)
|
||||
# no context, single headed, as in base class
|
||||
self.time_mix_block = VideoTransformerBlock(
|
||||
dim=in_channels,
|
||||
n_heads=1,
|
||||
d_head=in_channels,
|
||||
checkpoint=False,
|
||||
ff_in=True,
|
||||
attn_mode="softmax",
|
||||
)
|
||||
|
||||
time_embed_dim = self.in_channels * 4
|
||||
self.video_time_embed = torch.nn.Sequential(
|
||||
torch.nn.Linear(self.in_channels, time_embed_dim),
|
||||
torch.nn.SiLU(),
|
||||
torch.nn.Linear(time_embed_dim, self.in_channels),
|
||||
)
|
||||
|
||||
self.merge_strategy = merge_strategy
|
||||
if self.merge_strategy == "fixed":
|
||||
self.register_buffer("mix_factor", torch.Tensor([alpha]))
|
||||
elif self.merge_strategy == "learned":
|
||||
self.register_parameter(
|
||||
"mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))
|
||||
)
|
||||
else:
|
||||
msg = f"unknown merge strategy {self.merge_strategy}"
|
||||
raise ValueError(msg)
|
||||
|
||||
def forward(self, x, timesteps, skip_video=False):
|
||||
if skip_video:
|
||||
return super().forward(x)
|
||||
|
||||
x_in = x
|
||||
x = self.attention(x)
|
||||
h, w = x.shape[2:]
|
||||
x = rearrange(x, "b c h w -> b (h w) c")
|
||||
|
||||
x_mix = x
|
||||
num_frames = torch.arange(timesteps, device=x.device)
|
||||
num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps)
|
||||
num_frames = rearrange(num_frames, "b t -> (b t)")
|
||||
t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False)
|
||||
emb = self.video_time_embed(t_emb) # b, n_channels
|
||||
emb = emb[:, None, :]
|
||||
x_mix = x_mix + emb
|
||||
|
||||
alpha = self.get_alpha()
|
||||
x_mix = self.time_mix_block(x_mix, timesteps=timesteps)
|
||||
x = alpha * x + (1.0 - alpha) * x_mix # alpha merge
|
||||
|
||||
x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w)
|
||||
x = self.proj_out(x)
|
||||
|
||||
return x_in + x
|
||||
|
||||
def get_alpha(
|
||||
self,
|
||||
):
|
||||
if self.merge_strategy == "fixed":
|
||||
return self.mix_factor
|
||||
elif self.merge_strategy == "learned":
|
||||
return torch.sigmoid(self.mix_factor)
|
||||
else:
|
||||
msg = f"unknown merge strategy {self.merge_strategy}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
class MemoryEfficientVideoBlock(MemoryEfficientAttnBlock):
|
||||
def __init__(
|
||||
self, in_channels: int, alpha: float = 0, merge_strategy: str = "learned"
|
||||
):
|
||||
super().__init__(in_channels)
|
||||
# no context, single headed, as in base class
|
||||
self.time_mix_block = VideoTransformerBlock(
|
||||
dim=in_channels,
|
||||
n_heads=1,
|
||||
d_head=in_channels,
|
||||
checkpoint=False,
|
||||
ff_in=True,
|
||||
attn_mode="softmax-xformers",
|
||||
)
|
||||
|
||||
time_embed_dim = self.in_channels * 4
|
||||
self.video_time_embed = torch.nn.Sequential(
|
||||
torch.nn.Linear(self.in_channels, time_embed_dim),
|
||||
torch.nn.SiLU(),
|
||||
torch.nn.Linear(time_embed_dim, self.in_channels),
|
||||
)
|
||||
|
||||
self.merge_strategy = merge_strategy
|
||||
if self.merge_strategy == "fixed":
|
||||
self.register_buffer("mix_factor", torch.Tensor([alpha]))
|
||||
elif self.merge_strategy == "learned":
|
||||
self.register_parameter(
|
||||
"mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))
|
||||
)
|
||||
else:
|
||||
msg = f"unknown merge strategy {self.merge_strategy}"
|
||||
raise ValueError(msg)
|
||||
|
||||
def forward(self, x, timesteps, skip_time_block=False):
|
||||
if skip_time_block:
|
||||
return super().forward(x)
|
||||
|
||||
x_in = x
|
||||
x = self.attention(x)
|
||||
h, w = x.shape[2:]
|
||||
x = rearrange(x, "b c h w -> b (h w) c")
|
||||
|
||||
x_mix = x
|
||||
num_frames = torch.arange(timesteps, device=x.device)
|
||||
num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps)
|
||||
num_frames = rearrange(num_frames, "b t -> (b t)")
|
||||
t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False)
|
||||
emb = self.video_time_embed(t_emb) # b, n_channels
|
||||
emb = emb[:, None, :]
|
||||
x_mix = x_mix + emb
|
||||
|
||||
alpha = self.get_alpha()
|
||||
x_mix = self.time_mix_block(x_mix, timesteps=timesteps)
|
||||
x = alpha * x + (1.0 - alpha) * x_mix # alpha merge
|
||||
|
||||
x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w)
|
||||
x = self.proj_out(x)
|
||||
|
||||
return x_in + x
|
||||
|
||||
def get_alpha(
|
||||
self,
|
||||
):
|
||||
if self.merge_strategy == "fixed":
|
||||
return self.mix_factor
|
||||
elif self.merge_strategy == "learned":
|
||||
return torch.sigmoid(self.mix_factor)
|
||||
else:
|
||||
msg = f"unknown merge strategy {self.merge_strategy}"
|
||||
raise NotImplementedError(msg)
|
||||
|
||||
|
||||
def make_time_attn(
|
||||
in_channels,
|
||||
attn_type="vanilla",
|
||||
attn_kwargs=None,
|
||||
alpha: float = 0,
|
||||
merge_strategy: str = "learned",
|
||||
):
|
||||
assert attn_type in [
|
||||
"vanilla",
|
||||
"vanilla-xformers",
|
||||
], f"attn_type {attn_type} not supported for spatio-temporal attention"
|
||||
|
||||
if not XFORMERS_IS_AVAILABLE and attn_type == "vanilla-xformers":
|
||||
logger.debug(
|
||||
f"Attention mode '{attn_type}' is not available. Falling back to vanilla attention. "
|
||||
f"This is not a problem in Pytorch >= 2.0. FYI, you are running with PyTorch version {torch.__version__}"
|
||||
)
|
||||
attn_type = "vanilla"
|
||||
|
||||
if attn_type == "vanilla":
|
||||
assert attn_kwargs is None
|
||||
return partialclass(
|
||||
VideoBlock, in_channels, alpha=alpha, merge_strategy=merge_strategy
|
||||
)
|
||||
elif attn_type == "vanilla-xformers":
|
||||
print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...")
|
||||
return partialclass(
|
||||
MemoryEfficientVideoBlock,
|
||||
in_channels,
|
||||
alpha=alpha,
|
||||
merge_strategy=merge_strategy,
|
||||
)
|
||||
else:
|
||||
        raise NotImplementedError()
|
||||
|
||||
|
||||
class Conv2DWrapper(torch.nn.Conv2d):
|
||||
def forward(self, input_tensor: torch.Tensor, **kwargs) -> torch.Tensor:
|
||||
return super().forward(input_tensor)
|
||||
|
||||
|
||||
class VideoDecoder(Decoder):
|
||||
available_time_modes = ["all", "conv-only", "attn-only"]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*args,
|
||||
video_kernel_size: Union[int, list] = 3,
|
||||
alpha: float = 0.0,
|
||||
merge_strategy: str = "learned",
|
||||
time_mode: str = "conv-only",
|
||||
**kwargs,
|
||||
):
|
||||
self.video_kernel_size = video_kernel_size
|
||||
self.alpha = alpha
|
||||
self.merge_strategy = merge_strategy
|
||||
self.time_mode = time_mode
|
||||
assert (
|
||||
self.time_mode in self.available_time_modes
|
||||
), f"time_mode parameter has to be in {self.available_time_modes}"
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def get_last_layer(self, skip_time_mix=False, **kwargs):
|
||||
if self.time_mode == "attn-only":
|
||||
raise NotImplementedError("TODO")
|
||||
else:
|
||||
return (
|
||||
self.conv_out.time_mix_conv.weight
|
||||
if not skip_time_mix
|
||||
else self.conv_out.weight
|
||||
)
|
||||
|
||||
def _make_attn(self) -> Callable:
|
||||
if self.time_mode not in ["conv-only", "only-last-conv"]:
|
||||
return partialclass(
|
||||
make_time_attn,
|
||||
alpha=self.alpha,
|
||||
merge_strategy=self.merge_strategy,
|
||||
)
|
||||
else:
|
||||
return super()._make_attn()
|
||||
|
||||
def _make_conv(self) -> Callable:
|
||||
if self.time_mode != "attn-only":
|
||||
return partialclass(AE3DConv, video_kernel_size=self.video_kernel_size)
|
||||
else:
|
||||
return Conv2DWrapper
|
||||
|
||||
def _make_resblock(self) -> Callable:
|
||||
if self.time_mode not in ["attn-only", "only-last-conv"]:
|
||||
return partialclass(
|
||||
VideoResBlock,
|
||||
video_kernel_size=self.video_kernel_size,
|
||||
alpha=self.alpha,
|
||||
merge_strategy=self.merge_strategy,
|
||||
)
|
||||
else:
|
||||
return super()._make_resblock()
|
317
imaginairy/modules/sgm/diffusion.py
Normal file
@ -0,0 +1,317 @@
|
||||
import logging
|
||||
import math
|
||||
from contextlib import contextmanager
|
||||
from typing import Any, Dict, List, Optional, Tuple, Union
|
||||
|
||||
import pytorch_lightning as pl
|
||||
import torch
|
||||
from omegaconf import ListConfig, OmegaConf
|
||||
from safetensors.torch import load_file as load_safetensors
|
||||
from torch.optim.lr_scheduler import LambdaLR
|
||||
|
||||
from imaginairy.modules.ema import LitEma
|
||||
from imaginairy.modules.sgm.autoencoding.temporal_ae import VideoDecoder
|
||||
from imaginairy.utils import (
|
||||
default,
|
||||
disabled_train,
|
||||
get_obj_from_str,
|
||||
instantiate_from_config,
|
||||
platform_appropriate_autocast,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
UNCONDITIONAL_CONFIG = {
|
||||
"target": "imaginairy.modules.sgm.encoders.modules.GeneralConditioner",
|
||||
"params": {"emb_models": []},
|
||||
}
|
||||
OPENAIUNETWRAPPER = "imaginairy.modules.sgm.diffusionmodules.wrappers.OpenAIWrapper"
|
||||
|
||||
|
||||
class DiffusionEngine(pl.LightningModule):
|
||||
def __init__(
|
||||
self,
|
||||
network_config,
|
||||
denoiser_config,
|
||||
first_stage_config,
|
||||
conditioner_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
||||
sampler_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
||||
optimizer_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
||||
scheduler_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
||||
loss_fn_config: Union[None, Dict, ListConfig, OmegaConf] = None,
|
||||
network_wrapper: Union[None, str] = None,
|
||||
ckpt_path: Union[None, str] = None,
|
||||
use_ema: bool = False,
|
||||
ema_decay_rate: float = 0.9999,
|
||||
scale_factor: float = 1.0,
|
||||
disable_first_stage_autocast=False,
|
||||
input_key: str = "jpg",
|
||||
log_keys: Union[List, None] = None,
|
||||
no_cond_log: bool = False,
|
||||
compile_model: bool = False,
|
||||
en_and_decode_n_samples_a_time: Optional[int] = None,
|
||||
):
|
||||
super().__init__()
|
||||
self.log_keys = log_keys
|
||||
self.input_key = input_key
|
||||
self.optimizer_config = default(
|
||||
optimizer_config, {"target": "torch.optim.AdamW"}
|
||||
)
|
||||
model = instantiate_from_config(network_config)
|
||||
self.model = get_obj_from_str(default(network_wrapper, OPENAIUNETWRAPPER))(
|
||||
model, compile_model=compile_model
|
||||
)
|
||||
|
||||
self.denoiser = instantiate_from_config(denoiser_config)
|
||||
self.sampler = (
|
||||
instantiate_from_config(sampler_config)
|
||||
if sampler_config is not None
|
||||
else None
|
||||
)
|
||||
self.conditioner = instantiate_from_config(
|
||||
default(conditioner_config, UNCONDITIONAL_CONFIG)
|
||||
)
|
||||
self.scheduler_config = scheduler_config
|
||||
self._init_first_stage(first_stage_config)
|
||||
|
||||
self.loss_fn = (
|
||||
instantiate_from_config(loss_fn_config)
|
||||
if loss_fn_config is not None
|
||||
else None
|
||||
)
|
||||
|
||||
self.use_ema = use_ema
|
||||
if self.use_ema:
|
||||
self.model_ema = LitEma(self.model, decay=ema_decay_rate)
|
||||
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
|
||||
|
||||
self.scale_factor = scale_factor
|
||||
self.disable_first_stage_autocast = disable_first_stage_autocast
|
||||
self.no_cond_log = no_cond_log
|
||||
|
||||
if ckpt_path is not None:
|
||||
self.init_from_ckpt(ckpt_path)
|
||||
|
||||
self.en_and_decode_n_samples_a_time = en_and_decode_n_samples_a_time
|
||||
|
||||
def init_from_ckpt(
|
||||
self,
|
||||
path: str,
|
||||
) -> None:
|
||||
if path.endswith("ckpt"):
|
||||
sd = torch.load(path, map_location="cpu")["state_dict"]
|
||||
elif path.endswith("safetensors"):
|
||||
sd = load_safetensors(path)
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
||||
missing, unexpected = self.load_state_dict(sd, strict=False)
|
||||
logger.info(
|
||||
f"Loaded weights from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
|
||||
)
|
||||
if len(missing) > 0:
|
||||
print(f"Missing Keys: {missing}")
|
||||
if len(unexpected) > 0:
|
||||
print(f"Unexpected Keys: {unexpected}")
|
||||
|
||||
def _init_first_stage(self, config):
|
||||
model = instantiate_from_config(config).eval()
|
||||
model.train = disabled_train
|
||||
for param in model.parameters():
|
||||
param.requires_grad = False
|
||||
self.first_stage_model = model
|
||||
|
||||
def get_input(self, batch):
|
||||
# assuming unified data format, dataloader returns a dict.
|
||||
# image tensors should be scaled to -1 ... 1 and in bchw format
|
||||
return batch[self.input_key]
|
||||
|
||||
@torch.no_grad()
|
||||
def decode_first_stage(self, z):
|
||||
z = 1.0 / self.scale_factor * z
|
||||
n_samples = default(self.en_and_decode_n_samples_a_time, z.shape[0])
|
||||
|
||||
n_rounds = math.ceil(z.shape[0] / n_samples)
|
||||
all_out = []
|
||||
with platform_appropriate_autocast(
|
||||
enabled=not self.disable_first_stage_autocast
|
||||
):
|
||||
for n in range(n_rounds):
|
||||
if isinstance(self.first_stage_model.decoder, VideoDecoder):
|
||||
kwargs = {"timesteps": len(z[n * n_samples : (n + 1) * n_samples])}
|
||||
else:
|
||||
kwargs = {}
|
||||
out = self.first_stage_model.decode(
|
||||
z[n * n_samples : (n + 1) * n_samples], **kwargs
|
||||
)
|
||||
all_out.append(out)
|
||||
out = torch.cat(all_out, dim=0)
|
||||
return out
|
||||
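    # Note: decoding is chunked so at most `en_and_decode_n_samples_a_time` latents
    # hit the first stage at once. For example, with 14 video frames and
    # n_samples=2, n_rounds = ceil(14 / 2) = 7 decoder calls, which bounds peak
    # memory while decoding SVD frames.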
|
||||
@torch.no_grad()
|
||||
def encode_first_stage(self, x):
|
||||
n_samples = default(self.en_and_decode_n_samples_a_time, x.shape[0])
|
||||
n_rounds = math.ceil(x.shape[0] / n_samples)
|
||||
all_out = []
|
||||
with platform_appropriate_autocast(
|
||||
enabled=not self.disable_first_stage_autocast
|
||||
):
|
||||
for n in range(n_rounds):
|
||||
out = self.first_stage_model.encode(
|
||||
x[n * n_samples : (n + 1) * n_samples]
|
||||
)
|
||||
all_out.append(out)
|
||||
z = torch.cat(all_out, dim=0)
|
||||
z = self.scale_factor * z
|
||||
return z
|
||||
|
||||
def forward(self, x, batch):
|
||||
loss = self.loss_fn(self.model, self.denoiser, self.conditioner, x, batch)
|
||||
loss_mean = loss.mean()
|
||||
loss_dict = {"loss": loss_mean}
|
||||
return loss_mean, loss_dict
|
||||
|
||||
def shared_step(self, batch: Dict) -> Any:
|
||||
x = self.get_input(batch)
|
||||
x = self.encode_first_stage(x)
|
||||
batch["global_step"] = self.global_step
|
||||
loss, loss_dict = self(x, batch)
|
||||
return loss, loss_dict
|
||||
|
||||
def training_step(self, batch, batch_idx):
|
||||
loss, loss_dict = self.shared_step(batch)
|
||||
|
||||
self.log_dict(
|
||||
loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=False
|
||||
)
|
||||
|
||||
self.log(
|
||||
"global_step",
|
||||
self.global_step,
|
||||
prog_bar=True,
|
||||
logger=True,
|
||||
on_step=True,
|
||||
on_epoch=False,
|
||||
)
|
||||
|
||||
if self.scheduler_config is not None:
|
||||
lr = self.optimizers().param_groups[0]["lr"]
|
||||
self.log(
|
||||
"lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False
|
||||
)
|
||||
|
||||
return loss
|
||||
|
||||
def on_train_start(self, *args, **kwargs):
|
||||
if self.sampler is None or self.loss_fn is None:
|
||||
msg = "Sampler and loss function need to be set for training."
|
||||
raise ValueError(msg)
|
||||
|
||||
def on_train_batch_end(self, *args, **kwargs):
|
||||
if self.use_ema:
|
||||
self.model_ema(self.model)
|
||||
|
||||
@contextmanager
|
||||
def ema_scope(self, context=None):
|
||||
if self.use_ema:
|
||||
self.model_ema.store(self.model.parameters())
|
||||
self.model_ema.copy_to(self.model)
|
||||
if context is not None:
|
||||
print(f"{context}: Switched to EMA weights")
|
||||
try:
|
||||
yield None
|
||||
finally:
|
||||
if self.use_ema:
|
||||
self.model_ema.restore(self.model.parameters())
|
||||
if context is not None:
|
||||
print(f"{context}: Restored training weights")
|
||||
|
||||
def instantiate_optimizer_from_config(self, params, lr, cfg):
|
||||
return get_obj_from_str(cfg["target"])(params, lr=lr, **cfg.get("params", {}))
|
||||
|
||||
def configure_optimizers(self):
|
||||
lr = self.learning_rate
|
||||
params = list(self.model.parameters())
|
||||
for embedder in self.conditioner.embedders:
|
||||
if embedder.is_trainable:
|
||||
params = params + list(embedder.parameters())
|
||||
opt = self.instantiate_optimizer_from_config(params, lr, self.optimizer_config)
|
||||
if self.scheduler_config is not None:
|
||||
scheduler = instantiate_from_config(self.scheduler_config)
|
||||
print("Setting up LambdaLR scheduler...")
|
||||
scheduler = [
|
||||
{
|
||||
"scheduler": LambdaLR(opt, lr_lambda=scheduler.schedule),
|
||||
"interval": "step",
|
||||
"frequency": 1,
|
||||
}
|
||||
]
|
||||
return [opt], scheduler
|
||||
return opt
|
||||
|
||||
@torch.no_grad()
|
||||
def sample(
|
||||
self,
|
||||
cond: Dict,
|
||||
uc: Union[Dict, None] = None,
|
||||
batch_size: int = 16,
|
||||
shape: Union[None, Tuple, List] = None,
|
||||
**kwargs,
|
||||
):
|
||||
randn = torch.randn(batch_size, *shape).to(self.device)
|
||||
|
||||
def denoiser(input_tensor, sigma, c):
|
||||
return self.denoiser(self.model, input_tensor, sigma, c, **kwargs)
|
||||
|
||||
samples = self.sampler(denoiser, randn, cond, uc=uc)
|
||||
return samples
|
||||
|
||||
@torch.no_grad()
|
||||
def log_images(
|
||||
self,
|
||||
batch: Dict,
|
||||
N: int = 8,
|
||||
sample: bool = True,
|
||||
ucg_keys: Optional[List[str]] = None,
|
||||
**kwargs,
|
||||
) -> Dict:
|
||||
conditioner_input_keys = [e.input_key for e in self.conditioner.embedders]
|
||||
if ucg_keys:
|
||||
assert all(x in conditioner_input_keys for x in ucg_keys), (
|
||||
"Each defined ucg key for sampling must be in the provided conditioner input keys,"
|
||||
f"but we have {ucg_keys} vs. {conditioner_input_keys}"
|
||||
)
|
||||
else:
|
||||
ucg_keys = conditioner_input_keys
|
||||
log = {}
|
||||
|
||||
x = self.get_input(batch)
|
||||
|
||||
c, uc = self.conditioner.get_unconditional_conditioning(
|
||||
batch,
|
||||
force_uc_zero_embeddings=ucg_keys
|
||||
if len(self.conditioner.embedders) > 0
|
||||
else [],
|
||||
)
|
||||
|
||||
sampling_kwargs = {}
|
||||
|
||||
N = min(x.shape[0], N)
|
||||
x = x.to(self.device)[:N]
|
||||
log["inputs"] = x
|
||||
z = self.encode_first_stage(x)
|
||||
log["reconstructions"] = self.decode_first_stage(z)
|
||||
log.update(self.log_conditionings(batch, N))
|
||||
|
||||
for k in c:
|
||||
if isinstance(c[k], torch.Tensor):
|
||||
c[k], uc[k] = (y[k][:N].to(self.device) for y in (c, uc))
|
||||
|
||||
if sample:
|
||||
with self.ema_scope("Plotting"):
|
||||
samples = self.sample(
|
||||
c, shape=z.shape[1:], uc=uc, batch_size=N, **sampling_kwargs
|
||||
)
|
||||
samples = self.decode_first_stage(samples)
|
||||
log["samples"] = samples
|
||||
return log
|
0
imaginairy/modules/sgm/diffusionmodules/__init__.py
Normal file
79
imaginairy/modules/sgm/diffusionmodules/denoiser.py
Normal file
@ -0,0 +1,79 @@
|
||||
from typing import TYPE_CHECKING, Dict, Union
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from imaginairy.utils import instantiate_from_config
|
||||
from imaginairy.vendored.k_diffusion.utils import append_dims
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .denoiser_scaling import DenoiserScaling
|
||||
from .discretizer import Discretization
|
||||
|
||||
|
||||
class Denoiser(nn.Module):
|
||||
def __init__(self, scaling_config: Dict):
|
||||
super().__init__()
|
||||
|
||||
self.scaling: DenoiserScaling = instantiate_from_config(scaling_config)
|
||||
|
||||
def possibly_quantize_sigma(self, sigma: torch.Tensor) -> torch.Tensor:
|
||||
return sigma
|
||||
|
||||
def possibly_quantize_c_noise(self, c_noise: torch.Tensor) -> torch.Tensor:
|
||||
return c_noise
|
||||
|
||||
def forward(
|
||||
self,
|
||||
network: nn.Module,
|
||||
input_tensor: torch.Tensor,
|
||||
sigma: torch.Tensor,
|
||||
cond: Dict,
|
||||
**additional_model_inputs,
|
||||
) -> torch.Tensor:
|
||||
sigma = self.possibly_quantize_sigma(sigma)
|
||||
sigma_shape = sigma.shape
|
||||
sigma = append_dims(sigma, input_tensor.ndim)
|
||||
c_skip, c_out, c_in, c_noise = self.scaling(sigma)
|
||||
c_noise = self.possibly_quantize_c_noise(c_noise.reshape(sigma_shape))
|
||||
return (
|
||||
network(input_tensor * c_in, c_noise, cond, **additional_model_inputs)
|
||||
* c_out
|
||||
+ input_tensor * c_skip
|
||||
)
|
||||
|
||||
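# Note: Denoiser.forward applies the usual EDM-style preconditioning,
#   D(x, sigma) = c_skip * x + c_out * F(c_in * x, c_noise, cond),
# with (c_skip, c_out, c_in, c_noise) supplied by the configured DenoiserScaling.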
|
||||
class DiscreteDenoiser(Denoiser):
|
||||
def __init__(
|
||||
self,
|
||||
scaling_config: Dict,
|
||||
num_idx: int,
|
||||
discretization_config: Dict,
|
||||
do_append_zero: bool = False,
|
||||
quantize_c_noise: bool = True,
|
||||
flip: bool = True,
|
||||
):
|
||||
super().__init__(scaling_config)
|
||||
self.discretization: Discretization = instantiate_from_config(
|
||||
discretization_config
|
||||
)
|
||||
sigmas = self.discretization(num_idx, do_append_zero=do_append_zero, flip=flip)
|
||||
self.register_buffer("sigmas", sigmas)
|
||||
self.quantize_c_noise = quantize_c_noise
|
||||
self.num_idx = num_idx
|
||||
|
||||
def sigma_to_idx(self, sigma: torch.Tensor) -> torch.Tensor:
|
||||
dists = sigma - self.sigmas[:, None]
|
||||
return dists.abs().argmin(dim=0).view(sigma.shape)
|
||||
|
||||
def idx_to_sigma(self, idx: Union[torch.Tensor, int]) -> torch.Tensor:
|
||||
return self.sigmas[idx]
|
||||
|
||||
def possibly_quantize_sigma(self, sigma: torch.Tensor) -> torch.Tensor:
|
||||
return self.idx_to_sigma(self.sigma_to_idx(sigma))
|
||||
|
||||
def possibly_quantize_c_noise(self, c_noise: torch.Tensor) -> torch.Tensor:
|
||||
if self.quantize_c_noise:
|
||||
return self.sigma_to_idx(c_noise)
|
||||
else:
|
||||
return c_noise
|
59
imaginairy/modules/sgm/diffusionmodules/denoiser_scaling.py
Normal file
@ -0,0 +1,59 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Tuple
|
||||
|
||||
import torch
|
||||
|
||||
|
||||
class DenoiserScaling(ABC):
|
||||
@abstractmethod
|
||||
def __call__(
|
||||
self, sigma: torch.Tensor
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
pass
|
||||
|
||||
|
||||
class EDMScaling:
|
||||
def __init__(self, sigma_data: float = 0.5):
|
||||
self.sigma_data = sigma_data
|
||||
|
||||
def __call__(
|
||||
self, sigma: torch.Tensor
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
c_skip = self.sigma_data**2 / (sigma**2 + self.sigma_data**2)
|
||||
c_out = sigma * self.sigma_data / (sigma**2 + self.sigma_data**2) ** 0.5
|
||||
c_in = 1 / (sigma**2 + self.sigma_data**2) ** 0.5
|
||||
c_noise = 0.25 * sigma.log()
|
||||
return c_skip, c_out, c_in, c_noise
|
||||
|
||||
|
||||
class EpsScaling:
|
||||
def __call__(
|
||||
self, sigma: torch.Tensor
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
c_skip = torch.ones_like(sigma, device=sigma.device)
|
||||
c_out = -sigma
|
||||
c_in = 1 / (sigma**2 + 1.0) ** 0.5
|
||||
c_noise = sigma.clone()
|
||||
return c_skip, c_out, c_in, c_noise
|
||||
|
||||
|
||||
class VScaling:
|
||||
def __call__(
|
||||
self, sigma: torch.Tensor
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
c_skip = 1.0 / (sigma**2 + 1.0)
|
||||
c_out = -sigma / (sigma**2 + 1.0) ** 0.5
|
||||
c_in = 1.0 / (sigma**2 + 1.0) ** 0.5
|
||||
c_noise = sigma.clone()
|
||||
return c_skip, c_out, c_in, c_noise
|
||||
|
||||
|
||||
class VScalingWithEDMcNoise(DenoiserScaling):
|
||||
def __call__(
|
||||
self, sigma: torch.Tensor
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
c_skip = 1.0 / (sigma**2 + 1.0)
|
||||
c_out = -sigma / (sigma**2 + 1.0) ** 0.5
|
||||
c_in = 1.0 / (sigma**2 + 1.0) ** 0.5
|
||||
c_noise = 0.25 * sigma.log()
|
||||
return c_skip, c_out, c_in, c_noise
|
@ -0,0 +1,24 @@
|
||||
import torch
|
||||
|
||||
|
||||
class UnitWeighting:
|
||||
def __call__(self, sigma):
|
||||
return torch.ones_like(sigma, device=sigma.device)
|
||||
|
||||
|
||||
class EDMWeighting:
|
||||
def __init__(self, sigma_data=0.5):
|
||||
self.sigma_data = sigma_data
|
||||
|
||||
def __call__(self, sigma):
|
||||
return (sigma**2 + self.sigma_data**2) / (sigma * self.sigma_data) ** 2
|
||||
|
||||
|
||||
class VWeighting(EDMWeighting):
|
||||
def __init__(self):
|
||||
super().__init__(sigma_data=1.0)
|
||||
|
||||
|
||||
class EpsWeighting:
|
||||
def __call__(self, sigma):
|
||||
return sigma**-2.0
|
73
imaginairy/modules/sgm/diffusionmodules/discretizer.py
Normal file
@ -0,0 +1,73 @@
|
||||
from abc import abstractmethod
|
||||
from functools import partial
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
from imaginairy.modules.sgm.diffusionmodules.util import make_beta_schedule
|
||||
from imaginairy.vendored.k_diffusion.sampling import append_zero
|
||||
|
||||
#
|
||||
# from ...modules.diffusionmodules.util import make_beta_schedule
|
||||
# from ...util import append_zero
|
||||
|
||||
|
||||
def generate_roughly_equally_spaced_steps(
|
||||
num_substeps: int, max_step: int
|
||||
) -> np.ndarray:
|
||||
return np.linspace(max_step - 1, 0, num_substeps, endpoint=False).astype(int)[::-1]
|
||||
|
||||
|
||||
class Discretization:
|
||||
def __call__(self, n, do_append_zero=True, device="cpu", flip=False):
|
||||
sigmas = self.get_sigmas(n, device=device)
|
||||
sigmas = append_zero(sigmas) if do_append_zero else sigmas
|
||||
return sigmas if not flip else torch.flip(sigmas, (0,))
|
||||
|
||||
@abstractmethod
|
||||
def get_sigmas(self, n, device):
|
||||
pass
|
||||
|
||||
|
||||
class EDMDiscretization(Discretization):
|
||||
def __init__(self, sigma_min=0.002, sigma_max=80.0, rho=7.0):
|
||||
self.sigma_min = sigma_min
|
||||
self.sigma_max = sigma_max
|
||||
self.rho = rho
|
||||
|
||||
def get_sigmas(self, n, device="cpu"):
|
||||
ramp = torch.linspace(0, 1, n, device=device)
|
||||
min_inv_rho = self.sigma_min ** (1 / self.rho)
|
||||
max_inv_rho = self.sigma_max ** (1 / self.rho)
|
||||
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** self.rho
|
||||
return sigmas
|
||||
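# Note: this is the Karras et al. (2022) rho-schedule,
#   sigma_i = (sigma_max**(1/rho) + (i / (n - 1)) * (sigma_min**(1/rho) - sigma_max**(1/rho))) ** rho,
# so sigmas run from sigma_max down to sigma_min; with the defaults and n=3 the
# schedule is roughly [80.0, 2.5, 0.002].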
|
||||
|
||||
class LegacyDDPMDiscretization(Discretization):
|
||||
def __init__(
|
||||
self,
|
||||
linear_start=0.00085,
|
||||
linear_end=0.0120,
|
||||
num_timesteps=1000,
|
||||
):
|
||||
super().__init__()
|
||||
self.num_timesteps = num_timesteps
|
||||
betas = make_beta_schedule(
|
||||
"linear", num_timesteps, linear_start=linear_start, linear_end=linear_end
|
||||
)
|
||||
alphas = 1.0 - betas
|
||||
self.alphas_cumprod = np.cumprod(alphas, axis=0)
|
||||
self.to_torch = partial(torch.tensor, dtype=torch.float32)
|
||||
|
||||
def get_sigmas(self, n, device="cpu"):
|
||||
if n < self.num_timesteps:
|
||||
timesteps = generate_roughly_equally_spaced_steps(n, self.num_timesteps)
|
||||
alphas_cumprod = self.alphas_cumprod[timesteps]
|
||||
elif n == self.num_timesteps:
|
||||
alphas_cumprod = self.alphas_cumprod
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
to_torch = partial(torch.tensor, dtype=torch.float32, device=device)
|
||||
sigmas = to_torch((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
|
||||
return torch.flip(sigmas, (0,))
|
100
imaginairy/modules/sgm/diffusionmodules/guiders.py
Normal file
@ -0,0 +1,100 @@
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Dict, List, Optional, Tuple, Union
|
||||
|
||||
import torch
|
||||
from einops import rearrange, repeat
|
||||
|
||||
from imaginairy.utils import default
|
||||
from imaginairy.vendored.k_diffusion.utils import append_dims
|
||||
|
||||
logpy = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Guider(ABC):
|
||||
@abstractmethod
|
||||
def __call__(self, x: torch.Tensor, sigma: float) -> torch.Tensor:
|
||||
pass
|
||||
|
||||
def prepare_inputs(
|
||||
self, x: torch.Tensor, s: float, c: Dict, uc: Dict
|
||||
) -> Tuple[torch.Tensor, float, Dict]:
|
||||
pass
|
||||
|
||||
|
||||
class VanillaCFG(Guider):
|
||||
def __init__(self, scale: float):
|
||||
self.scale = scale
|
||||
|
||||
def __call__(self, x: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:
|
||||
x_u, x_c = x.chunk(2)
|
||||
x_pred = x_u + self.scale * (x_c - x_u)
|
||||
return x_pred
|
||||
|
||||
def prepare_inputs(self, x, s, c, uc):
|
||||
c_out = {}
|
||||
|
||||
for k in c:
|
||||
if k in ["vector", "crossattn", "concat"]:
|
||||
c_out[k] = torch.cat((uc[k], c[k]), 0)
|
||||
else:
|
||||
assert c[k] == uc[k]
|
||||
c_out[k] = c[k]
|
||||
return torch.cat([x] * 2), torch.cat([s] * 2), c_out
|
||||
|
||||
|
||||
class IdentityGuider(Guider):
|
||||
def __call__(self, x: torch.Tensor, sigma: float) -> torch.Tensor:
|
||||
return x
|
||||
|
||||
def prepare_inputs(
|
||||
self, x: torch.Tensor, s: float, c: Dict, uc: Dict
|
||||
) -> Tuple[torch.Tensor, float, Dict]:
|
||||
c_out = {}
|
||||
|
||||
for k in c:
|
||||
c_out[k] = c[k]
|
||||
|
||||
return x, s, c_out
|
||||
|
||||
|
||||
class LinearPredictionGuider(Guider):
|
||||
def __init__(
|
||||
self,
|
||||
max_scale: float,
|
||||
num_frames: int,
|
||||
min_scale: float = 1.0,
|
||||
additional_cond_keys: Optional[Union[List[str], str]] = None,
|
||||
):
|
||||
self.min_scale = min_scale
|
||||
self.max_scale = max_scale
|
||||
self.num_frames = num_frames
|
||||
self.scale = torch.linspace(min_scale, max_scale, num_frames).unsqueeze(0)
|
||||
|
||||
additional_cond_keys = default(additional_cond_keys, [])
|
||||
if isinstance(additional_cond_keys, str):
|
||||
additional_cond_keys = [additional_cond_keys]
|
||||
self.additional_cond_keys = additional_cond_keys
|
||||
|
||||
def __call__(self, x: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:
|
||||
x_u, x_c = x.chunk(2)
|
||||
|
||||
x_u = rearrange(x_u, "(b t) ... -> b t ...", t=self.num_frames)
|
||||
x_c = rearrange(x_c, "(b t) ... -> b t ...", t=self.num_frames)
|
||||
scale = repeat(self.scale, "1 t -> b t", b=x_u.shape[0])
|
||||
scale = append_dims(scale, x_u.ndim).to(x_u.device)
|
||||
|
||||
return rearrange(x_u + scale * (x_c - x_u), "b t ... -> (b t) ...")
|
||||
|
||||
def prepare_inputs(
|
||||
self, x: torch.Tensor, s: torch.Tensor, c: dict, uc: dict
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, dict]:
|
||||
c_out = {}
|
||||
|
||||
for k in c:
|
||||
if k in ["vector", "crossattn", "concat", *self.additional_cond_keys]:
|
||||
c_out[k] = torch.cat((uc[k], c[k]), 0)
|
||||
else:
|
||||
assert c[k] == uc[k]
|
||||
c_out[k] = c[k]
|
||||
return torch.cat([x] * 2), torch.cat([s] * 2), c_out
|
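# Note: LinearPredictionGuider is the classifier-free-guidance variant used for video.
# The guidance scale ramps linearly across frames,
#   scale_t = linspace(min_scale, max_scale, num_frames)[t]
#   x_pred_t = x_uncond_t + scale_t * (x_cond_t - x_uncond_t),
# so the first frames use essentially the plain conditional prediction while later
# frames receive progressively stronger guidance.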
108
imaginairy/modules/sgm/diffusionmodules/loss.py
Normal file
@ -0,0 +1,108 @@
|
||||
from typing import Dict, List, Optional, Tuple, Union
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from imaginairy.modules.sgm.autoencoding.lpips.loss.lpips import LPIPS
|
||||
from imaginairy.modules.sgm.encoders.modules import GeneralConditioner
|
||||
from imaginairy.utils import instantiate_from_config
|
||||
from imaginairy.vendored.k_diffusion.utils import append_dims
|
||||
|
||||
from .denoiser import Denoiser
|
||||
|
||||
|
||||
class StandardDiffusionLoss(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
sigma_sampler_config: dict,
|
||||
loss_weighting_config: dict,
|
||||
loss_type: str = "l2",
|
||||
offset_noise_level: float = 0.0,
|
||||
batch2model_keys: Optional[Union[str, List[str]]] = None,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
assert loss_type in ["l2", "l1", "lpips"]
|
||||
|
||||
self.sigma_sampler = instantiate_from_config(sigma_sampler_config)
|
||||
self.loss_weighting = instantiate_from_config(loss_weighting_config)
|
||||
|
||||
self.loss_type = loss_type
|
||||
self.offset_noise_level = offset_noise_level
|
||||
|
||||
if loss_type == "lpips":
|
||||
self.lpips = LPIPS().eval()
|
||||
|
||||
if not batch2model_keys:
|
||||
batch2model_keys = []
|
||||
|
||||
if isinstance(batch2model_keys, str):
|
||||
batch2model_keys = [batch2model_keys]
|
||||
|
||||
self.batch2model_keys = set(batch2model_keys)
|
||||
|
||||
def get_noised_input(
|
||||
self, sigmas_bc: torch.Tensor, noise: torch.Tensor, input_tensor: torch.Tensor
|
||||
) -> torch.Tensor:
|
||||
noised_input = input_tensor + noise * sigmas_bc
|
||||
return noised_input
|
||||
|
||||
def forward(
|
||||
self,
|
||||
network: nn.Module,
|
||||
denoiser: Denoiser,
|
||||
conditioner: GeneralConditioner,
|
||||
input_tensor: torch.Tensor,
|
||||
batch: Dict,
|
||||
) -> torch.Tensor:
|
||||
cond = conditioner(batch)
|
||||
return self._forward(network, denoiser, cond, input_tensor, batch)
|
||||
|
||||
def _forward(
|
||||
self,
|
||||
network: nn.Module,
|
||||
denoiser: Denoiser,
|
||||
cond: Dict,
|
||||
input_tensor: torch.Tensor,
|
||||
batch: Dict,
|
||||
) -> Tuple[torch.Tensor, Dict]:
|
||||
additional_model_inputs = {
|
||||
key: batch[key] for key in self.batch2model_keys.intersection(batch)
|
||||
}
|
||||
        sigmas = self.sigma_sampler(input_tensor.shape[0]).to(input_tensor)
|
||||
|
||||
noise = torch.randn_like(input_tensor)
|
||||
if self.offset_noise_level > 0.0:
|
||||
            # n_frames is not set in __init__, so fall back to per-sample offset
            # noise unless a subclass defines it.
            offset_shape = (
                (input_tensor.shape[0], 1, input_tensor.shape[2])
                if getattr(self, "n_frames", None) is not None
                else (input_tensor.shape[0], input_tensor.shape[1])
            )
|
||||
noise = noise + self.offset_noise_level * append_dims(
|
||||
torch.randn(offset_shape, device=input_tensor.device),
|
||||
input_tensor.ndim,
|
||||
)
|
||||
sigmas_bc = append_dims(sigmas, input_tensor.ndim)
|
||||
noised_input = self.get_noised_input(sigmas_bc, noise, input_tensor)
|
||||
|
||||
model_output = denoiser(
|
||||
network, noised_input, sigmas, cond, **additional_model_inputs
|
||||
)
|
||||
w = append_dims(self.loss_weighting(sigmas), input_tensor.ndim)
|
||||
return self.get_loss(model_output, input_tensor, w)
|
||||
|
||||
def get_loss(self, model_output, target, w):
|
||||
if self.loss_type == "l2":
|
||||
return torch.mean(
|
||||
(w * (model_output - target) ** 2).reshape(target.shape[0], -1), 1
|
||||
)
|
||||
elif self.loss_type == "l1":
|
||||
return torch.mean(
|
||||
(w * (model_output - target).abs()).reshape(target.shape[0], -1), 1
|
||||
)
|
||||
elif self.loss_type == "lpips":
|
||||
loss = self.lpips(model_output, target).reshape(-1)
|
||||
return loss
|
||||
else:
|
||||
msg = f"Unknown loss type {self.loss_type}"
|
||||
raise NotImplementedError(msg)
|
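# Note: taken together, the objective above is the sigma-weighted denoising loss
#   L = E[ w(sigma) * || D(x + sigma * eps, sigma) - x ||^2 ]   (loss_type="l2"),
# with sigma drawn from sigma_sampler and w(sigma) given by loss_weighting.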
32
imaginairy/modules/sgm/diffusionmodules/loss_weighting.py
Normal file
@ -0,0 +1,32 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
import torch
|
||||
|
||||
|
||||
class DiffusionLossWeighting(ABC):
|
||||
@abstractmethod
|
||||
def __call__(self, sigma: torch.Tensor) -> torch.Tensor:
|
||||
pass
|
||||
|
||||
|
||||
class UnitWeighting(DiffusionLossWeighting):
|
||||
def __call__(self, sigma: torch.Tensor) -> torch.Tensor:
|
||||
return torch.ones_like(sigma, device=sigma.device)
|
||||
|
||||
|
||||
class EDMWeighting(DiffusionLossWeighting):
|
||||
def __init__(self, sigma_data: float = 0.5):
|
||||
self.sigma_data = sigma_data
|
||||
|
||||
def __call__(self, sigma: torch.Tensor) -> torch.Tensor:
|
||||
return (sigma**2 + self.sigma_data**2) / (sigma * self.sigma_data) ** 2
|
||||
|
||||
|
||||
class VWeighting(EDMWeighting):
|
||||
def __init__(self):
|
||||
super().__init__(sigma_data=1.0)
|
||||
|
||||
|
||||
class EpsWeighting(DiffusionLossWeighting):
|
||||
def __call__(self, sigma: torch.Tensor) -> torch.Tensor:
|
||||
return sigma**-2.0
|
754
imaginairy/modules/sgm/diffusionmodules/model.py
Normal file
@ -0,0 +1,754 @@
|
||||
# pytorch_diffusion + derived encoder decoder
|
||||
import logging
|
||||
import math
|
||||
from typing import Any, Callable, Optional
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from einops import rearrange
|
||||
|
||||
from imaginairy.modules.attention import LinearAttention, MemoryEfficientCrossAttention
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
import xformers
|
||||
import xformers.ops
|
||||
|
||||
XFORMERS_IS_AVAILABLE = True
|
||||
except ImportError:
|
||||
XFORMERS_IS_AVAILABLE = False
|
||||
logger.debug("no module 'xformers'. Processing without...")
|
||||
|
||||
# from ...modules.attention import LinearAttention, MemoryEfficientCrossAttention
|
||||
|
||||
|
||||
def get_timestep_embedding(timesteps, embedding_dim):
|
||||
"""
|
||||
This matches the implementation in Denoising Diffusion Probabilistic Models:
|
||||
From Fairseq.
|
||||
Build sinusoidal embeddings.
|
||||
This matches the implementation in tensor2tensor, but differs slightly
|
||||
from the description in Section 3.5 of "Attention Is All You Need".
|
||||
"""
|
||||
assert len(timesteps.shape) == 1
|
||||
|
||||
half_dim = embedding_dim // 2
|
||||
emb = math.log(10000) / (half_dim - 1)
|
||||
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
|
||||
emb = emb.to(device=timesteps.device)
|
||||
emb = timesteps.float()[:, None] * emb[None, :]
|
||||
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
||||
if embedding_dim % 2 == 1: # zero pad
|
||||
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
|
||||
return emb
|
||||
|
||||
|
||||
def nonlinearity(x):
|
||||
# swish
|
||||
return x * torch.sigmoid(x)
|
||||
|
||||
|
||||
def Normalize(in_channels, num_groups=32):
|
||||
return torch.nn.GroupNorm(
|
||||
num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True
|
||||
)
|
||||
|
||||
|
||||
class Upsample(nn.Module):
|
||||
def __init__(self, in_channels, with_conv):
|
||||
super().__init__()
|
||||
self.with_conv = with_conv
|
||||
if self.with_conv:
|
||||
self.conv = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
|
||||
if self.with_conv:
|
||||
x = self.conv(x)
|
||||
return x
|
||||
|
||||
|
||||
class Downsample(nn.Module):
|
||||
def __init__(self, in_channels, with_conv):
|
||||
super().__init__()
|
||||
self.with_conv = with_conv
|
||||
if self.with_conv:
|
||||
# no asymmetric padding in torch conv, must do it ourselves
|
||||
self.conv = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=3, stride=2, padding=0
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
if self.with_conv:
|
||||
pad = (0, 1, 0, 1)
|
||||
x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
|
||||
x = self.conv(x)
|
||||
else:
|
||||
x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
|
||||
return x
|
||||
|
||||
|
||||
class ResnetBlock(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
in_channels,
|
||||
out_channels=None,
|
||||
conv_shortcut=False,
|
||||
dropout,
|
||||
temb_channels=512,
|
||||
):
|
||||
super().__init__()
|
||||
self.in_channels = in_channels
|
||||
out_channels = in_channels if out_channels is None else out_channels
|
||||
self.out_channels = out_channels
|
||||
self.use_conv_shortcut = conv_shortcut
|
||||
|
||||
self.norm1 = Normalize(in_channels)
|
||||
self.conv1 = torch.nn.Conv2d(
|
||||
in_channels, out_channels, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
if temb_channels > 0:
|
||||
self.temb_proj = torch.nn.Linear(temb_channels, out_channels)
|
||||
self.norm2 = Normalize(out_channels)
|
||||
self.dropout = torch.nn.Dropout(dropout)
|
||||
self.conv2 = torch.nn.Conv2d(
|
||||
out_channels, out_channels, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
if self.in_channels != self.out_channels:
|
||||
if self.use_conv_shortcut:
|
||||
self.conv_shortcut = torch.nn.Conv2d(
|
||||
in_channels, out_channels, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
else:
|
||||
self.nin_shortcut = torch.nn.Conv2d(
|
||||
in_channels, out_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
|
||||
def forward(self, x, temb):
|
||||
h = x
|
||||
h = self.norm1(h)
|
||||
h = nonlinearity(h)
|
||||
h = self.conv1(h)
|
||||
|
||||
if temb is not None:
|
||||
h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]
|
||||
|
||||
h = self.norm2(h)
|
||||
h = nonlinearity(h)
|
||||
h = self.dropout(h)
|
||||
h = self.conv2(h)
|
||||
|
||||
if self.in_channels != self.out_channels:
|
||||
if self.use_conv_shortcut:
|
||||
x = self.conv_shortcut(x)
|
||||
else:
|
||||
x = self.nin_shortcut(x)
|
||||
|
||||
return x + h
|
||||
|
||||
|
||||
class LinAttnBlock(LinearAttention):
|
||||
"""to match AttnBlock usage"""
|
||||
|
||||
def __init__(self, in_channels):
|
||||
super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
|
||||
|
||||
|
||||
class AttnBlock(nn.Module):
|
||||
def __init__(self, in_channels):
|
||||
super().__init__()
|
||||
self.in_channels = in_channels
|
||||
|
||||
self.norm = Normalize(in_channels)
|
||||
self.q = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
self.k = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
self.v = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
self.proj_out = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
|
||||
def attention(self, h_: torch.Tensor) -> torch.Tensor:
|
||||
h_ = self.norm(h_)
|
||||
q = self.q(h_)
|
||||
k = self.k(h_)
|
||||
v = self.v(h_)
|
||||
|
||||
b, c, h, w = q.shape
|
||||
q, k, v = (
|
||||
rearrange(x, "b c h w -> b 1 (h w) c").contiguous() for x in (q, k, v)
|
||||
)
|
||||
h_ = torch.nn.functional.scaled_dot_product_attention(
|
||||
q, k, v
|
||||
) # scale is dim ** -0.5 per default
|
||||
# compute attention
|
||||
|
||||
return rearrange(h_, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)
|
||||
|
||||
def forward(self, x, **kwargs):
|
||||
h_ = x
|
||||
h_ = self.attention(h_)
|
||||
h_ = self.proj_out(h_)
|
||||
return x + h_
|
||||
|
||||
|
||||
class MemoryEfficientAttnBlock(nn.Module):
|
||||
"""
|
||||
Uses xformers efficient implementation,
|
||||
see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
|
||||
Note: this is a single-head self-attention operation
|
||||
"""
|
||||
|
||||
#
|
||||
def __init__(self, in_channels):
|
||||
super().__init__()
|
||||
self.in_channels = in_channels
|
||||
|
||||
self.norm = Normalize(in_channels)
|
||||
self.q = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
self.k = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
self.v = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
self.proj_out = torch.nn.Conv2d(
|
||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
||||
)
|
||||
self.attention_op: Optional[Any] = None
|
||||
|
||||
def attention(self, h_: torch.Tensor) -> torch.Tensor:
|
||||
h_ = self.norm(h_)
|
||||
q = self.q(h_)
|
||||
k = self.k(h_)
|
||||
v = self.v(h_)
|
||||
|
||||
# compute attention
|
||||
B, C, H, W = q.shape
|
||||
q, k, v = (rearrange(x, "b c h w -> b (h w) c") for x in (q, k, v))
|
||||
|
||||
q, k, v = (
|
||||
t.unsqueeze(3)
|
||||
.reshape(B, t.shape[1], 1, C)
|
||||
.permute(0, 2, 1, 3)
|
||||
.reshape(B * 1, t.shape[1], C)
|
||||
.contiguous()
|
||||
for t in (q, k, v)
|
||||
)
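        # xformers memory_efficient_attention takes (batch * heads, tokens, dim); with a single head this is just (B, H*W, C)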
|
||||
out = xformers.ops.memory_efficient_attention(
|
||||
q, k, v, attn_bias=None, op=self.attention_op
|
||||
)
|
||||
|
||||
out = (
|
||||
out.unsqueeze(0)
|
||||
.reshape(B, 1, out.shape[1], C)
|
||||
.permute(0, 2, 1, 3)
|
||||
.reshape(B, out.shape[1], C)
|
||||
)
|
||||
return rearrange(out, "b (h w) c -> b c h w", b=B, h=H, w=W, c=C)
|
||||
|
||||
def forward(self, x, **kwargs):
|
||||
h_ = x
|
||||
h_ = self.attention(h_)
|
||||
h_ = self.proj_out(h_)
|
||||
return x + h_
|
||||
|
||||
|
||||
class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):
|
||||
def forward(self, x, context=None, mask=None, **unused_kwargs):
|
||||
b, c, h, w = x.shape
|
||||
x = rearrange(x, "b c h w -> b (h w) c")
|
||||
out = super().forward(x, context=context, mask=mask)
|
||||
out = rearrange(out, "b (h w) c -> b c h w", h=h, w=w, c=c)
|
||||
        # reshape x back to (b, c, h, w) so the residual addition matches `out`
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w, c=c)
        return x + out
|
||||
|
||||
|
||||
def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
|
||||
assert attn_type in [
|
||||
"vanilla",
|
||||
"vanilla-xformers",
|
||||
"memory-efficient-cross-attn",
|
||||
"linear",
|
||||
"none",
|
||||
], f"attn_type {attn_type} unknown"
|
||||
if not XFORMERS_IS_AVAILABLE and attn_type == "vanilla-xformers":
|
||||
logger.debug(
|
||||
f"Attention mode '{attn_type}' is not available. Falling back to vanilla attention. "
|
||||
f"This is not a problem in Pytorch >= 2.0. FYI, you are running with PyTorch version {torch.__version__}"
|
||||
)
|
||||
attn_type = "vanilla"
|
||||
# if (
|
||||
# version.parse(torch.__version__) < version.parse("2.0.0")
|
||||
# and attn_type != "none"
|
||||
# ):
|
||||
# assert XFORMERS_IS_AVAILABLE, (
|
||||
# f"We do not support vanilla attention in {torch.__version__} anymore, "
|
||||
# f"as it is too expensive. Please install xformers via e.g. 'pip install xformers==0.0.16'"
|
||||
# )
|
||||
# attn_type = "vanilla-xformers"
|
||||
# logger.info(f"making attention of type '{attn_type}' with {in_channels} in_channels")
|
||||
if attn_type == "vanilla":
|
||||
assert attn_kwargs is None
|
||||
return AttnBlock(in_channels)
|
||||
elif attn_type == "vanilla-xformers":
|
||||
# logger.info(
|
||||
# f"building MemoryEfficientAttnBlock with {in_channels} in_channels..."
|
||||
# )
|
||||
return MemoryEfficientAttnBlock(in_channels)
|
||||
elif type == "memory-efficient-cross-attn":
|
||||
attn_kwargs["query_dim"] = in_channels
|
||||
return MemoryEfficientCrossAttentionWrapper(**attn_kwargs)
|
||||
elif attn_type == "none":
|
||||
return nn.Identity(in_channels)
|
||||
else:
|
||||
return LinAttnBlock(in_channels)
|
||||
|
||||
|
||||
class Model(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
ch,
|
||||
out_ch,
|
||||
ch_mult=(1, 2, 4, 8),
|
||||
num_res_blocks,
|
||||
attn_resolutions,
|
||||
dropout=0.0,
|
||||
resamp_with_conv=True,
|
||||
in_channels,
|
||||
resolution,
|
||||
use_timestep=True,
|
||||
use_linear_attn=False,
|
||||
attn_type="vanilla",
|
||||
):
|
||||
super().__init__()
|
||||
if use_linear_attn:
|
||||
attn_type = "linear"
|
||||
self.ch = ch
|
||||
self.temb_ch = self.ch * 4
|
||||
self.num_resolutions = len(ch_mult)
|
||||
self.num_res_blocks = num_res_blocks
|
||||
self.resolution = resolution
|
||||
self.in_channels = in_channels
|
||||
|
||||
self.use_timestep = use_timestep
|
||||
if self.use_timestep:
|
||||
# timestep embedding
|
||||
self.temb = nn.Module()
|
||||
self.temb.dense = nn.ModuleList(
|
||||
[
|
||||
torch.nn.Linear(self.ch, self.temb_ch),
|
||||
torch.nn.Linear(self.temb_ch, self.temb_ch),
|
||||
]
|
||||
)
|
||||
|
||||
# downsampling
|
||||
self.conv_in = torch.nn.Conv2d(
|
||||
in_channels, self.ch, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
|
||||
curr_res = resolution
|
||||
in_ch_mult = (1, *tuple(ch_mult))
|
||||
self.down = nn.ModuleList()
|
||||
for i_level in range(self.num_resolutions):
|
||||
block = nn.ModuleList()
|
||||
attn = nn.ModuleList()
|
||||
block_in = ch * in_ch_mult[i_level]
|
||||
block_out = ch * ch_mult[i_level]
|
||||
for i_block in range(self.num_res_blocks):
|
||||
block.append(
|
||||
ResnetBlock(
|
||||
in_channels=block_in,
|
||||
out_channels=block_out,
|
||||
temb_channels=self.temb_ch,
|
||||
dropout=dropout,
|
||||
)
|
||||
)
|
||||
block_in = block_out
|
||||
if curr_res in attn_resolutions:
|
||||
attn.append(make_attn(block_in, attn_type=attn_type))
|
||||
down = nn.Module()
|
||||
down.block = block
|
||||
down.attn = attn
|
||||
if i_level != self.num_resolutions - 1:
|
||||
down.downsample = Downsample(block_in, resamp_with_conv)
|
||||
curr_res = curr_res // 2
|
||||
self.down.append(down)
|
||||
|
||||
# middle
|
||||
self.mid = nn.Module()
|
||||
self.mid.block_1 = ResnetBlock(
|
||||
in_channels=block_in,
|
||||
out_channels=block_in,
|
||||
temb_channels=self.temb_ch,
|
||||
dropout=dropout,
|
||||
)
|
||||
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
|
||||
self.mid.block_2 = ResnetBlock(
|
||||
in_channels=block_in,
|
||||
out_channels=block_in,
|
||||
temb_channels=self.temb_ch,
|
||||
dropout=dropout,
|
||||
)
|
||||
|
||||
# upsampling
|
||||
self.up = nn.ModuleList()
|
||||
for i_level in reversed(range(self.num_resolutions)):
|
||||
block = nn.ModuleList()
|
||||
attn = nn.ModuleList()
|
||||
block_out = ch * ch_mult[i_level]
|
||||
skip_in = ch * ch_mult[i_level]
|
||||
for i_block in range(self.num_res_blocks + 1):
|
||||
if i_block == self.num_res_blocks:
|
||||
skip_in = ch * in_ch_mult[i_level]
|
||||
block.append(
|
||||
ResnetBlock(
|
||||
in_channels=block_in + skip_in,
|
||||
out_channels=block_out,
|
||||
temb_channels=self.temb_ch,
|
||||
dropout=dropout,
|
||||
)
|
||||
)
|
||||
block_in = block_out
|
||||
if curr_res in attn_resolutions:
|
||||
attn.append(make_attn(block_in, attn_type=attn_type))
|
||||
up = nn.Module()
|
||||
up.block = block
|
||||
up.attn = attn
|
||||
if i_level != 0:
|
||||
up.upsample = Upsample(block_in, resamp_with_conv)
|
||||
curr_res = curr_res * 2
|
||||
self.up.insert(0, up) # prepend to get consistent order
|
||||
|
||||
# end
|
||||
self.norm_out = Normalize(block_in)
|
||||
self.conv_out = torch.nn.Conv2d(
|
||||
block_in, out_ch, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
|
||||
def forward(self, x, t=None, context=None):
|
||||
# assert x.shape[2] == x.shape[3] == self.resolution
|
||||
if context is not None:
|
||||
# assume aligned context, cat along channel axis
|
||||
x = torch.cat((x, context), dim=1)
|
||||
if self.use_timestep:
|
||||
# timestep embedding
|
||||
assert t is not None
|
||||
temb = get_timestep_embedding(t, self.ch)
|
||||
temb = self.temb.dense[0](temb)
|
||||
temb = nonlinearity(temb)
|
||||
temb = self.temb.dense[1](temb)
|
||||
else:
|
||||
temb = None
|
||||
|
||||
# downsampling
|
||||
hs = [self.conv_in(x)]
|
||||
for i_level in range(self.num_resolutions):
|
||||
for i_block in range(self.num_res_blocks):
|
||||
h = self.down[i_level].block[i_block](hs[-1], temb)
|
||||
if len(self.down[i_level].attn) > 0:
|
||||
h = self.down[i_level].attn[i_block](h)
|
||||
hs.append(h)
|
||||
if i_level != self.num_resolutions - 1:
|
||||
hs.append(self.down[i_level].downsample(hs[-1]))
|
||||
|
||||
# middle
|
||||
h = hs[-1]
|
||||
h = self.mid.block_1(h, temb)
|
||||
h = self.mid.attn_1(h)
|
||||
h = self.mid.block_2(h, temb)
|
||||
|
||||
# upsampling
|
||||
for i_level in reversed(range(self.num_resolutions)):
|
||||
for i_block in range(self.num_res_blocks + 1):
|
||||
h = self.up[i_level].block[i_block](
|
||||
torch.cat([h, hs.pop()], dim=1), temb
|
||||
)
|
||||
if len(self.up[i_level].attn) > 0:
|
||||
h = self.up[i_level].attn[i_block](h)
|
||||
if i_level != 0:
|
||||
h = self.up[i_level].upsample(h)
|
||||
|
||||
# end
|
||||
h = self.norm_out(h)
|
||||
h = nonlinearity(h)
|
||||
h = self.conv_out(h)
|
||||
return h
|
||||
|
||||
def get_last_layer(self):
|
||||
return self.conv_out.weight
|
||||
|
||||
|
||||
class Encoder(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
ch,
|
||||
out_ch,
|
||||
ch_mult=(1, 2, 4, 8),
|
||||
num_res_blocks,
|
||||
attn_resolutions,
|
||||
dropout=0.0,
|
||||
resamp_with_conv=True,
|
||||
in_channels,
|
||||
resolution,
|
||||
z_channels,
|
||||
double_z=True,
|
||||
use_linear_attn=False,
|
||||
attn_type="vanilla",
|
||||
**ignore_kwargs,
|
||||
):
|
||||
super().__init__()
|
||||
if use_linear_attn:
|
||||
attn_type = "linear"
|
||||
self.ch = ch
|
||||
self.temb_ch = 0
|
||||
self.num_resolutions = len(ch_mult)
|
||||
self.num_res_blocks = num_res_blocks
|
||||
self.resolution = resolution
|
||||
self.in_channels = in_channels
|
||||
|
||||
# downsampling
|
||||
self.conv_in = torch.nn.Conv2d(
|
||||
in_channels, self.ch, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
|
||||
curr_res = resolution
|
||||
in_ch_mult = (1, *tuple(ch_mult))
|
||||
self.in_ch_mult = in_ch_mult
|
||||
self.down = nn.ModuleList()
|
||||
for i_level in range(self.num_resolutions):
|
||||
block = nn.ModuleList()
|
||||
attn = nn.ModuleList()
|
||||
block_in = ch * in_ch_mult[i_level]
|
||||
block_out = ch * ch_mult[i_level]
|
||||
for i_block in range(self.num_res_blocks):
|
||||
block.append(
|
||||
ResnetBlock(
|
||||
in_channels=block_in,
|
||||
out_channels=block_out,
|
||||
temb_channels=self.temb_ch,
|
||||
dropout=dropout,
|
||||
)
|
||||
)
|
||||
block_in = block_out
|
||||
if curr_res in attn_resolutions:
|
||||
attn.append(make_attn(block_in, attn_type=attn_type))
|
||||
down = nn.Module()
|
||||
down.block = block
|
||||
down.attn = attn
|
||||
if i_level != self.num_resolutions - 1:
|
||||
down.downsample = Downsample(block_in, resamp_with_conv)
|
||||
curr_res = curr_res // 2
|
||||
self.down.append(down)
|
||||
|
||||
# middle
|
||||
self.mid = nn.Module()
|
||||
self.mid.block_1 = ResnetBlock(
|
||||
in_channels=block_in,
|
||||
out_channels=block_in,
|
||||
temb_channels=self.temb_ch,
|
||||
dropout=dropout,
|
||||
)
|
||||
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
|
||||
self.mid.block_2 = ResnetBlock(
|
||||
in_channels=block_in,
|
||||
out_channels=block_in,
|
||||
temb_channels=self.temb_ch,
|
||||
dropout=dropout,
|
||||
)
|
||||
|
||||
# end
|
||||
self.norm_out = Normalize(block_in)
|
||||
self.conv_out = torch.nn.Conv2d(
|
||||
block_in,
|
||||
2 * z_channels if double_z else z_channels,
|
||||
kernel_size=3,
|
||||
stride=1,
|
||||
padding=1,
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
# timestep embedding
|
||||
temb = None
|
||||
|
||||
# downsampling
|
||||
hs = [self.conv_in(x)]
|
||||
for i_level in range(self.num_resolutions):
|
||||
for i_block in range(self.num_res_blocks):
|
||||
h = self.down[i_level].block[i_block](hs[-1], temb)
|
||||
if len(self.down[i_level].attn) > 0:
|
||||
h = self.down[i_level].attn[i_block](h)
|
||||
hs.append(h)
|
||||
if i_level != self.num_resolutions - 1:
|
||||
hs.append(self.down[i_level].downsample(hs[-1]))
|
||||
|
||||
# middle
|
||||
h = hs[-1]
|
||||
h = self.mid.block_1(h, temb)
|
||||
h = self.mid.attn_1(h)
|
||||
h = self.mid.block_2(h, temb)
|
||||
|
||||
# end
|
||||
h = self.norm_out(h)
|
||||
h = nonlinearity(h)
|
||||
h = self.conv_out(h)
|
||||
return h
|
||||
|
||||
|
||||
class Decoder(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
ch,
|
||||
out_ch,
|
||||
ch_mult=(1, 2, 4, 8),
|
||||
num_res_blocks,
|
||||
attn_resolutions,
|
||||
dropout=0.0,
|
||||
resamp_with_conv=True,
|
||||
in_channels,
|
||||
resolution,
|
||||
z_channels,
|
||||
give_pre_end=False,
|
||||
tanh_out=False,
|
||||
use_linear_attn=False,
|
||||
attn_type="vanilla",
|
||||
**ignorekwargs,
|
||||
):
|
||||
super().__init__()
|
||||
if use_linear_attn:
|
||||
attn_type = "linear"
|
||||
self.ch = ch
|
||||
self.temb_ch = 0
|
||||
self.num_resolutions = len(ch_mult)
|
||||
self.num_res_blocks = num_res_blocks
|
||||
self.resolution = resolution
|
||||
self.in_channels = in_channels
|
||||
self.give_pre_end = give_pre_end
|
||||
self.tanh_out = tanh_out
|
||||
|
||||
        # compute block_in and curr_res at lowest resolution
|
||||
block_in = ch * ch_mult[self.num_resolutions - 1]
|
||||
curr_res = resolution // 2 ** (self.num_resolutions - 1)
|
||||
self.z_shape = (1, z_channels, curr_res, curr_res)
|
||||
# logger.debug(
|
||||
# "Working with z of shape {} = {} dimensions.".format(
|
||||
# self.z_shape, np.prod(self.z_shape)
|
||||
# )
|
||||
# )
|
||||
|
||||
make_attn_cls = self._make_attn()
|
||||
make_resblock_cls = self._make_resblock()
|
||||
make_conv_cls = self._make_conv()
|
||||
# z to block_in
|
||||
self.conv_in = torch.nn.Conv2d(
|
||||
z_channels, block_in, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
|
||||
# middle
|
||||
self.mid = nn.Module()
|
||||
self.mid.block_1 = make_resblock_cls(
|
||||
in_channels=block_in,
|
||||
out_channels=block_in,
|
||||
temb_channels=self.temb_ch,
|
||||
dropout=dropout,
|
||||
)
|
||||
self.mid.attn_1 = make_attn_cls(block_in, attn_type=attn_type)
|
||||
self.mid.block_2 = make_resblock_cls(
|
||||
in_channels=block_in,
|
||||
out_channels=block_in,
|
||||
temb_channels=self.temb_ch,
|
||||
dropout=dropout,
|
||||
)
|
||||
|
||||
# upsampling
|
||||
self.up = nn.ModuleList()
|
||||
for i_level in reversed(range(self.num_resolutions)):
|
||||
block = nn.ModuleList()
|
||||
attn = nn.ModuleList()
|
||||
block_out = ch * ch_mult[i_level]
|
||||
for i_block in range(self.num_res_blocks + 1):
|
||||
block.append(
|
||||
make_resblock_cls(
|
||||
in_channels=block_in,
|
||||
out_channels=block_out,
|
||||
temb_channels=self.temb_ch,
|
||||
dropout=dropout,
|
||||
)
|
||||
)
|
||||
block_in = block_out
|
||||
if curr_res in attn_resolutions:
|
||||
attn.append(make_attn_cls(block_in, attn_type=attn_type))
|
||||
up = nn.Module()
|
||||
up.block = block
|
||||
up.attn = attn
|
||||
if i_level != 0:
|
||||
up.upsample = Upsample(block_in, resamp_with_conv)
|
||||
curr_res = curr_res * 2
|
||||
self.up.insert(0, up) # prepend to get consistent order
|
||||
|
||||
# end
|
||||
self.norm_out = Normalize(block_in)
|
||||
self.conv_out = make_conv_cls(
|
||||
block_in, out_ch, kernel_size=3, stride=1, padding=1
|
||||
)
|
||||
|
||||
def _make_attn(self) -> Callable:
|
||||
return make_attn
|
||||
|
||||
def _make_resblock(self) -> Callable:
|
||||
return ResnetBlock
|
||||
|
||||
def _make_conv(self) -> Callable:
|
||||
return torch.nn.Conv2d
|
||||
|
||||
def get_last_layer(self, **kwargs):
|
||||
return self.conv_out.weight
|
||||
|
||||
def forward(self, z, **kwargs):
|
||||
# assert z.shape[1:] == self.z_shape[1:]
|
||||
self.last_z_shape = z.shape
|
||||
|
||||
# timestep embedding
|
||||
temb = None
|
||||
|
||||
# z to block_in
|
||||
h = self.conv_in(z)
|
||||
|
||||
# middle
|
||||
h = self.mid.block_1(h, temb, **kwargs)
|
||||
h = self.mid.attn_1(h, **kwargs)
|
||||
h = self.mid.block_2(h, temb, **kwargs)
|
||||
|
||||
# upsampling
|
||||
for i_level in reversed(range(self.num_resolutions)):
|
||||
for i_block in range(self.num_res_blocks + 1):
|
||||
h = self.up[i_level].block[i_block](h, temb, **kwargs)
|
||||
if len(self.up[i_level].attn) > 0:
|
||||
h = self.up[i_level].attn[i_block](h, **kwargs)
|
||||
if i_level != 0:
|
||||
h = self.up[i_level].upsample(h)
|
||||
|
||||
# end
|
||||
if self.give_pre_end:
|
||||
return h
|
||||
|
||||
h = self.norm_out(h)
|
||||
h = nonlinearity(h)
|
||||
h = self.conv_out(h, **kwargs)
|
||||
if self.tanh_out:
|
||||
h = torch.tanh(h)
|
||||
return h
|
857
imaginairy/modules/sgm/diffusionmodules/openaimodel.py
Normal file
@ -0,0 +1,857 @@
|
||||
import logging
|
||||
import math
|
||||
from abc import abstractmethod
|
||||
from typing import Iterable, List, Optional, Tuple, Union
|
||||
|
||||
import torch as th
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from einops import rearrange
|
||||
from torch.utils.checkpoint import checkpoint
|
||||
|
||||
from imaginairy.modules.attention import SpatialTransformer
|
||||
from imaginairy.modules.sgm.diffusionmodules.util import (
|
||||
avg_pool_nd,
|
||||
conv_nd,
|
||||
linear,
|
||||
normalization,
|
||||
timestep_embedding,
|
||||
zero_module,
|
||||
)
|
||||
from imaginairy.modules.sgm.video_attention import SpatialVideoTransformer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def exists(val):
|
||||
return val is not None
|
||||
|
||||
|
||||
class AttentionPool2d(nn.Module):
|
||||
"""
|
||||
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
spacial_dim: int,
|
||||
embed_dim: int,
|
||||
num_heads_channels: int,
|
||||
output_dim: Optional[int] = None,
|
||||
):
|
||||
super().__init__()
|
||||
self.positional_embedding = nn.Parameter(
|
||||
th.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5
|
||||
)
|
||||
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
|
||||
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
|
||||
self.num_heads = embed_dim // num_heads_channels
|
||||
self.attention = QKVAttention(self.num_heads)
|
||||
|
||||
def forward(self, x: th.Tensor) -> th.Tensor:
|
||||
b, c, _ = x.shape
|
||||
x = x.reshape(b, c, -1)
|
||||
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)
|
||||
x = x + self.positional_embedding[None, :, :].to(x.dtype)
|
||||
x = self.qkv_proj(x)
|
||||
x = self.attention(x)
|
||||
x = self.c_proj(x)
|
||||
return x[:, :, 0]
|
||||
|
||||
|
||||
class TimestepBlock(nn.Module):
|
||||
"""
|
||||
Any module where forward() takes timestep embeddings as a second argument.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def forward(self, x: th.Tensor, emb: th.Tensor):
|
||||
"""
|
||||
Apply the module to `x` given `emb` timestep embeddings.
|
||||
"""
|
||||
|
||||
|
||||
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
|
||||
"""
|
||||
A sequential module that passes timestep embeddings to the children that
|
||||
support it as an extra input.
|
||||
"""
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: th.Tensor,
|
||||
emb: th.Tensor,
|
||||
context: Optional[th.Tensor] = None,
|
||||
image_only_indicator: Optional[th.Tensor] = None,
|
||||
time_context: Optional[int] = None,
|
||||
num_video_frames: Optional[int] = None,
|
||||
):
|
||||
from imaginairy.modules.sgm.diffusionmodules.video_model import VideoResBlock
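        # VideoResBlock is imported locally here, presumably to avoid a circular import at module load time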
|
||||
|
||||
for layer in self:
|
||||
module = layer
|
||||
|
||||
if isinstance(module, TimestepBlock) and not isinstance(
|
||||
module, VideoResBlock
|
||||
):
|
||||
x = layer(x, emb)
|
||||
elif isinstance(module, VideoResBlock):
|
||||
x = layer(x, emb, num_video_frames, image_only_indicator)
|
||||
elif isinstance(module, SpatialVideoTransformer):
|
||||
x = layer(
|
||||
x,
|
||||
context,
|
||||
time_context,
|
||||
num_video_frames,
|
||||
image_only_indicator,
|
||||
)
|
||||
elif isinstance(module, SpatialTransformer):
|
||||
x = layer(x, context)
|
||||
else:
|
||||
x = layer(x)
|
||||
return x
|
||||
|
||||
|
||||
class Upsample(nn.Module):
|
||||
"""
|
||||
An upsampling layer with an optional convolution.
|
||||
:param channels: channels in the inputs and outputs.
|
||||
:param use_conv: a bool determining if a convolution is applied.
|
||||
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
|
||||
upsampling occurs in the inner-two dimensions.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
channels: int,
|
||||
use_conv: bool,
|
||||
dims: int = 2,
|
||||
out_channels: Optional[int] = None,
|
||||
padding: int = 1,
|
||||
third_up: bool = False,
|
||||
kernel_size: int = 3,
|
||||
scale_factor: int = 2,
|
||||
):
|
||||
super().__init__()
|
||||
self.channels = channels
|
||||
self.out_channels = out_channels or channels
|
||||
self.use_conv = use_conv
|
||||
self.dims = dims
|
||||
self.third_up = third_up
|
||||
self.scale_factor = scale_factor
|
||||
if use_conv:
|
||||
self.conv = conv_nd(
|
||||
dims, self.channels, self.out_channels, kernel_size, padding=padding
|
||||
)
|
||||
|
||||
def forward(self, x: th.Tensor) -> th.Tensor:
|
||||
assert x.shape[1] == self.channels
|
||||
|
||||
if self.dims == 3:
|
||||
t_factor = 1 if not self.third_up else self.scale_factor
|
||||
x = F.interpolate(
|
||||
x,
|
||||
(
|
||||
t_factor * x.shape[2],
|
||||
x.shape[3] * self.scale_factor,
|
||||
x.shape[4] * self.scale_factor,
|
||||
),
|
||||
mode="nearest",
|
||||
)
|
||||
else:
|
||||
x = F.interpolate(x, scale_factor=self.scale_factor, mode="nearest")
|
||||
if self.use_conv:
|
||||
x = self.conv(x)
|
||||
return x
|
||||
|
||||
|
||||
class Downsample(nn.Module):
|
||||
"""
|
||||
A downsampling layer with an optional convolution.
|
||||
:param channels: channels in the inputs and outputs.
|
||||
:param use_conv: a bool determining if a convolution is applied.
|
||||
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
|
||||
downsampling occurs in the inner-two dimensions.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
channels: int,
|
||||
use_conv: bool,
|
||||
dims: int = 2,
|
||||
out_channels: Optional[int] = None,
|
||||
padding: int = 1,
|
||||
third_down: bool = False,
|
||||
):
|
||||
super().__init__()
|
||||
self.channels = channels
|
||||
self.out_channels = out_channels or channels
|
||||
self.use_conv = use_conv
|
||||
self.dims = dims
|
||||
stride = 2 if dims != 3 else ((1, 2, 2) if not third_down else (2, 2, 2))
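        # for 3D (video) tensors the temporal axis keeps stride 1 unless third_down is requested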
|
||||
if use_conv:
|
||||
# logger.info(f"Building a Downsample layer with {dims} dims.")
|
||||
# logger.info(
|
||||
# f" --> settings are: \n in-chn: {self.channels}, out-chn: {self.out_channels}, "
|
||||
# f"kernel-size: 3, stride: {stride}, padding: {padding}"
|
||||
# )
|
||||
# if dims == 3:
|
||||
# logger.info(f" --> Downsampling third axis (time): {third_down}")
|
||||
self.op = conv_nd(
|
||||
dims,
|
||||
self.channels,
|
||||
self.out_channels,
|
||||
3,
|
||||
stride=stride,
|
||||
padding=padding,
|
||||
)
|
||||
else:
|
||||
assert self.channels == self.out_channels
|
||||
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
|
||||
|
||||
def forward(self, x: th.Tensor) -> th.Tensor:
|
||||
assert x.shape[1] == self.channels
|
||||
|
||||
return self.op(x)
|
||||
|
||||
|
||||
class ResBlock(TimestepBlock):
|
||||
"""
|
||||
A residual block that can optionally change the number of channels.
|
||||
:param channels: the number of input channels.
|
||||
:param emb_channels: the number of timestep embedding channels.
|
||||
:param dropout: the rate of dropout.
|
||||
:param out_channels: if specified, the number of out channels.
|
||||
:param use_conv: if True and out_channels is specified, use a spatial
|
||||
convolution instead of a smaller 1x1 convolution to change the
|
||||
channels in the skip connection.
|
||||
:param dims: determines if the signal is 1D, 2D, or 3D.
|
||||
:param use_checkpoint: if True, use gradient checkpointing on this module.
|
||||
:param up: if True, use this block for upsampling.
|
||||
:param down: if True, use this block for downsampling.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
channels: int,
|
||||
emb_channels: int,
|
||||
dropout: float,
|
||||
out_channels: Optional[int] = None,
|
||||
use_conv: bool = False,
|
||||
use_scale_shift_norm: bool = False,
|
||||
dims: int = 2,
|
||||
use_checkpoint: bool = False,
|
||||
up: bool = False,
|
||||
down: bool = False,
|
||||
kernel_size: int = 3,
|
||||
exchange_temb_dims: bool = False,
|
||||
skip_t_emb: bool = False,
|
||||
):
|
||||
super().__init__()
|
||||
self.channels = channels
|
||||
self.emb_channels = emb_channels
|
||||
self.dropout = dropout
|
||||
self.out_channels = out_channels or channels
|
||||
self.use_conv = use_conv
|
||||
self.use_checkpoint = use_checkpoint
|
||||
self.use_scale_shift_norm = use_scale_shift_norm
|
||||
self.exchange_temb_dims = exchange_temb_dims
|
||||
|
||||
if isinstance(kernel_size, Iterable):
|
||||
padding = [k // 2 for k in kernel_size]
|
||||
else:
|
||||
padding = kernel_size // 2
|
||||
|
||||
self.in_layers = nn.Sequential(
|
||||
normalization(channels),
|
||||
nn.SiLU(),
|
||||
conv_nd(dims, channels, self.out_channels, kernel_size, padding=padding),
|
||||
)
|
||||
|
||||
self.updown = up or down
|
||||
|
||||
if up:
|
||||
self.h_upd = Upsample(channels, False, dims)
|
||||
self.x_upd = Upsample(channels, False, dims)
|
||||
elif down:
|
||||
self.h_upd = Downsample(channels, False, dims)
|
||||
self.x_upd = Downsample(channels, False, dims)
|
||||
else:
|
||||
self.h_upd = self.x_upd = nn.Identity()
|
||||
|
||||
self.skip_t_emb = skip_t_emb
|
||||
self.emb_out_channels = (
|
||||
2 * self.out_channels if use_scale_shift_norm else self.out_channels
|
||||
)
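        # scale-shift norm needs twice the channels: one half for scale, one half for shift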
|
||||
if self.skip_t_emb:
|
||||
# logger.info(f"Skipping timestep embedding in {self.__class__.__name__}")
|
||||
assert not self.use_scale_shift_norm
|
||||
self.emb_layers = None
|
||||
self.exchange_temb_dims = False
|
||||
else:
|
||||
self.emb_layers = nn.Sequential(
|
||||
nn.SiLU(),
|
||||
linear(
|
||||
emb_channels,
|
||||
self.emb_out_channels,
|
||||
),
|
||||
)
|
||||
|
||||
self.out_layers = nn.Sequential(
|
||||
normalization(self.out_channels),
|
||||
nn.SiLU(),
|
||||
nn.Dropout(p=dropout),
|
||||
zero_module(
|
||||
conv_nd(
|
||||
dims,
|
||||
self.out_channels,
|
||||
self.out_channels,
|
||||
kernel_size,
|
||||
padding=padding,
|
||||
)
|
||||
),
|
||||
)
|
||||
|
||||
if self.out_channels == channels:
|
||||
self.skip_connection = nn.Identity()
|
||||
elif use_conv:
|
||||
self.skip_connection = conv_nd(
|
||||
dims, channels, self.out_channels, kernel_size, padding=padding
|
||||
)
|
||||
else:
|
||||
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
|
||||
|
||||
def forward(self, x: th.Tensor, emb: th.Tensor) -> th.Tensor:
|
||||
"""
|
||||
Apply the block to a Tensor, conditioned on a timestep embedding.
|
||||
:param x: an [N x C x ...] Tensor of features.
|
||||
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
|
||||
:return: an [N x C x ...] Tensor of outputs.
|
||||
"""
|
||||
if self.use_checkpoint:
|
||||
return checkpoint(self._forward, x, emb)
|
||||
else:
|
||||
return self._forward(x, emb)
|
||||
|
||||
def _forward(self, x: th.Tensor, emb: th.Tensor) -> th.Tensor:
|
||||
if self.updown:
|
||||
in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
|
||||
h = in_rest(x)
|
||||
h = self.h_upd(h)
|
||||
x = self.x_upd(x)
|
||||
h = in_conv(h)
|
||||
else:
|
||||
h = self.in_layers(x)
|
||||
|
||||
if self.skip_t_emb:
|
||||
emb_out = th.zeros_like(h)
|
||||
else:
|
||||
emb_out = self.emb_layers(emb).type(h.dtype)
|
||||
while len(emb_out.shape) < len(h.shape):
|
||||
emb_out = emb_out[..., None]
|
||||
if self.use_scale_shift_norm:
|
||||
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
|
||||
scale, shift = th.chunk(emb_out, 2, dim=1)
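            # FiLM-style conditioning: normalize h, then apply the embedding-derived scale and shift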
|
||||
h = out_norm(h) * (1 + scale) + shift
|
||||
h = out_rest(h)
|
||||
else:
|
||||
if self.exchange_temb_dims:
|
||||
emb_out = rearrange(emb_out, "b t c ... -> b c t ...")
|
||||
h = h + emb_out
|
||||
h = self.out_layers(h)
|
||||
return self.skip_connection(x) + h
|
||||
|
||||
|
||||
class AttentionBlock(nn.Module):
|
||||
"""
|
||||
An attention block that allows spatial positions to attend to each other.
|
||||
Originally ported from here, but adapted to the N-d case.
|
||||
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
channels: int,
|
||||
num_heads: int = 1,
|
||||
num_head_channels: int = -1,
|
||||
use_checkpoint: bool = False,
|
||||
use_new_attention_order: bool = False,
|
||||
):
|
||||
super().__init__()
|
||||
self.channels = channels
|
||||
if num_head_channels == -1:
|
||||
self.num_heads = num_heads
|
||||
else:
|
||||
assert (
|
||||
channels % num_head_channels == 0
|
||||
), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
|
||||
self.num_heads = channels // num_head_channels
|
||||
self.use_checkpoint = use_checkpoint
|
||||
self.norm = normalization(channels)
|
||||
self.qkv = conv_nd(1, channels, channels * 3, 1)
|
||||
if use_new_attention_order:
|
||||
# split qkv before split heads
|
||||
self.attention = QKVAttention(self.num_heads)
|
||||
else:
|
||||
# split heads before split qkv
|
||||
self.attention = QKVAttentionLegacy(self.num_heads)
|
||||
|
||||
self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
|
||||
|
||||
def forward(self, x: th.Tensor, **kwargs) -> th.Tensor:
|
||||
return checkpoint(self._forward, x)
|
||||
|
||||
def _forward(self, x: th.Tensor) -> th.Tensor:
|
||||
b, c, *spatial = x.shape
|
||||
x = x.reshape(b, c, -1)
|
||||
qkv = self.qkv(self.norm(x))
|
||||
h = self.attention(qkv)
|
||||
h = self.proj_out(h)
|
||||
return (x + h).reshape(b, c, *spatial)
|
||||
|
||||
|
||||
class QKVAttentionLegacy(nn.Module):
|
||||
"""
|
||||
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
|
||||
"""
|
||||
|
||||
def __init__(self, n_heads: int):
|
||||
super().__init__()
|
||||
self.n_heads = n_heads
|
||||
|
||||
def forward(self, qkv: th.Tensor) -> th.Tensor:
|
||||
"""
|
||||
Apply QKV attention.
|
||||
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
|
||||
:return: an [N x (H * C) x T] tensor after attention.
|
||||
"""
|
||||
bs, width, length = qkv.shape
|
||||
assert width % (3 * self.n_heads) == 0
|
||||
ch = width // (3 * self.n_heads)
|
||||
q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
|
||||
scale = 1 / math.sqrt(math.sqrt(ch))
|
||||
weight = th.einsum(
|
||||
"bct,bcs->bts", q * scale, k * scale
|
||||
) # More stable with f16 than dividing afterwards
|
||||
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
|
||||
a = th.einsum("bts,bcs->bct", weight, v)
|
||||
return a.reshape(bs, -1, length)
|
||||
|
||||
|
||||
class QKVAttention(nn.Module):
|
||||
"""
|
||||
A module which performs QKV attention and splits in a different order.
|
||||
"""
|
||||
|
||||
def __init__(self, n_heads: int):
|
||||
super().__init__()
|
||||
self.n_heads = n_heads
|
||||
|
||||
def forward(self, qkv: th.Tensor) -> th.Tensor:
|
||||
"""
|
||||
Apply QKV attention.
|
||||
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
|
||||
:return: an [N x (H * C) x T] tensor after attention.
|
||||
"""
|
||||
bs, width, length = qkv.shape
|
||||
assert width % (3 * self.n_heads) == 0
|
||||
ch = width // (3 * self.n_heads)
|
||||
q, k, v = qkv.chunk(3, dim=1)
|
||||
scale = 1 / math.sqrt(math.sqrt(ch))
|
||||
weight = th.einsum(
|
||||
"bct,bcs->bts",
|
||||
(q * scale).view(bs * self.n_heads, ch, length),
|
||||
(k * scale).view(bs * self.n_heads, ch, length),
|
||||
) # More stable with f16 than dividing afterwards
|
||||
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
|
||||
a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
|
||||
return a.reshape(bs, -1, length)
|
||||
|
||||
|
||||
class Timestep(nn.Module):
|
||||
def __init__(self, dim: int):
|
||||
super().__init__()
|
||||
self.dim = dim
|
||||
|
||||
def forward(self, t: th.Tensor) -> th.Tensor:
|
||||
return timestep_embedding(t, self.dim)
|
||||
|
||||
|
||||
class UNetModel(nn.Module):
|
||||
"""
|
||||
The full UNet model with attention and timestep embedding.
|
||||
:param in_channels: channels in the input Tensor.
|
||||
:param model_channels: base channel count for the model.
|
||||
:param out_channels: channels in the output Tensor.
|
||||
:param num_res_blocks: number of residual blocks per downsample.
|
||||
:param attention_resolutions: a collection of downsample rates at which
|
||||
attention will take place. May be a set, list, or tuple.
|
||||
For example, if this contains 4, then at 4x downsampling, attention
|
||||
will be used.
|
||||
:param dropout: the dropout probability.
|
||||
:param channel_mult: channel multiplier for each level of the UNet.
|
||||
:param conv_resample: if True, use learned convolutions for upsampling and
|
||||
downsampling.
|
||||
:param dims: determines if the signal is 1D, 2D, or 3D.
|
||||
:param num_classes: if specified (as an int), then this model will be
|
||||
class-conditional with `num_classes` classes.
|
||||
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
|
||||
:param num_heads: the number of attention heads in each attention layer.
|
||||
    :param num_head_channels: if specified, ignore num_heads and instead use
|
||||
a fixed channel width per attention head.
|
||||
:param num_heads_upsample: works with num_heads to set a different number
|
||||
of heads for upsampling. Deprecated.
|
||||
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
|
||||
:param resblock_updown: use residual blocks for up/downsampling.
|
||||
:param use_new_attention_order: use a different attention pattern for potentially
|
||||
increased efficiency.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
in_channels: int,
|
||||
model_channels: int,
|
||||
out_channels: int,
|
||||
num_res_blocks: int,
|
||||
attention_resolutions: int,
|
||||
dropout: float = 0.0,
|
||||
channel_mult: Union[List, Tuple] = (1, 2, 4, 8),
|
||||
conv_resample: bool = True,
|
||||
dims: int = 2,
|
||||
num_classes: Optional[Union[int, str]] = None,
|
||||
use_checkpoint: bool = False,
|
||||
num_heads: int = -1,
|
||||
num_head_channels: int = -1,
|
||||
num_heads_upsample: int = -1,
|
||||
use_scale_shift_norm: bool = False,
|
||||
resblock_updown: bool = False,
|
||||
transformer_depth: int = 1,
|
||||
context_dim: Optional[int] = None,
|
||||
disable_self_attentions: Optional[List[bool]] = None,
|
||||
num_attention_blocks: Optional[List[int]] = None,
|
||||
disable_middle_self_attn: bool = False,
|
||||
disable_middle_transformer: bool = False,
|
||||
use_linear_in_transformer: bool = False,
|
||||
spatial_transformer_attn_type: str = "softmax",
|
||||
adm_in_channels: Optional[int] = None,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
if num_heads_upsample == -1:
|
||||
num_heads_upsample = num_heads
|
||||
|
||||
if num_heads == -1:
|
||||
assert (
|
||||
num_head_channels != -1
|
||||
), "Either num_heads or num_head_channels has to be set"
|
||||
|
||||
if num_head_channels == -1:
|
||||
assert (
|
||||
num_heads != -1
|
||||
), "Either num_heads or num_head_channels has to be set"
|
||||
|
||||
self.in_channels = in_channels
|
||||
self.model_channels = model_channels
|
||||
self.out_channels = out_channels
|
||||
if isinstance(transformer_depth, int):
|
||||
transformer_depth = len(channel_mult) * [transformer_depth]
|
||||
transformer_depth_middle = transformer_depth[-1]
|
||||
|
||||
if isinstance(num_res_blocks, int):
|
||||
self.num_res_blocks = len(channel_mult) * [num_res_blocks]
|
||||
else:
|
||||
if len(num_res_blocks) != len(channel_mult):
|
||||
msg = "provide num_res_blocks either as an int (globally constant) or as a list/tuple (per-level) with the same length as channel_mult"
|
||||
raise ValueError(msg)
|
||||
self.num_res_blocks = num_res_blocks
|
||||
|
||||
if disable_self_attentions is not None:
|
||||
assert len(disable_self_attentions) == len(channel_mult)
|
||||
if num_attention_blocks is not None:
|
||||
assert len(num_attention_blocks) == len(self.num_res_blocks)
|
||||
assert all(
|
||||
self.num_res_blocks[i] >= num_attention_blocks[i]
|
||||
for i in range(len(num_attention_blocks))
|
||||
)
|
||||
# logger.info(
|
||||
# f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
|
||||
# f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
|
||||
# f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
|
||||
# f"attention will still not be set."
|
||||
# )
|
||||
|
||||
self.attention_resolutions = attention_resolutions
|
||||
self.dropout = dropout
|
||||
self.channel_mult = channel_mult
|
||||
self.conv_resample = conv_resample
|
||||
self.num_classes = num_classes
|
||||
self.use_checkpoint = use_checkpoint
|
||||
self.num_heads = num_heads
|
||||
self.num_head_channels = num_head_channels
|
||||
self.num_heads_upsample = num_heads_upsample
|
||||
|
||||
time_embed_dim = model_channels * 4
|
||||
self.time_embed = nn.Sequential(
|
||||
linear(model_channels, time_embed_dim),
|
||||
nn.SiLU(),
|
||||
linear(time_embed_dim, time_embed_dim),
|
||||
)
|
||||
|
||||
if self.num_classes is not None:
|
||||
if isinstance(self.num_classes, int):
|
||||
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
|
||||
elif self.num_classes == "continuous":
|
||||
# logger.debug("setting up linear c_adm embedding layer")
|
||||
self.label_emb = nn.Linear(1, time_embed_dim)
|
||||
elif self.num_classes == "timestep":
|
||||
self.label_emb = nn.Sequential(
|
||||
Timestep(model_channels),
|
||||
nn.Sequential(
|
||||
linear(model_channels, time_embed_dim),
|
||||
nn.SiLU(),
|
||||
linear(time_embed_dim, time_embed_dim),
|
||||
),
|
||||
)
|
||||
elif self.num_classes == "sequential":
|
||||
assert adm_in_channels is not None
|
||||
self.label_emb = nn.Sequential(
|
||||
nn.Sequential(
|
||||
linear(adm_in_channels, time_embed_dim),
|
||||
nn.SiLU(),
|
||||
linear(time_embed_dim, time_embed_dim),
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
self.input_blocks = nn.ModuleList(
|
||||
[
|
||||
TimestepEmbedSequential(
|
||||
conv_nd(dims, in_channels, model_channels, 3, padding=1)
|
||||
)
|
||||
]
|
||||
)
|
||||
self._feature_size = model_channels
|
||||
input_block_chans = [model_channels]
|
||||
ch = model_channels
|
||||
ds = 1
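        # ds tracks the cumulative downsampling factor and is checked against attention_resolutions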
|
||||
for level, mult in enumerate(channel_mult):
|
||||
for nr in range(self.num_res_blocks[level]):
|
||||
layers = [
|
||||
ResBlock(
|
||||
ch,
|
||||
time_embed_dim,
|
||||
dropout,
|
||||
out_channels=mult * model_channels,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
)
|
||||
]
|
||||
ch = mult * model_channels
|
||||
if ds in attention_resolutions:
|
||||
if num_head_channels == -1:
|
||||
dim_head = ch // num_heads
|
||||
else:
|
||||
num_heads = ch // num_head_channels
|
||||
dim_head = num_head_channels
|
||||
|
||||
if context_dim is not None and exists(disable_self_attentions):
|
||||
disabled_sa = disable_self_attentions[level]
|
||||
else:
|
||||
disabled_sa = False
|
||||
|
||||
if (
|
||||
not exists(num_attention_blocks)
|
||||
or nr < num_attention_blocks[level]
|
||||
):
|
||||
layers.append(
|
||||
SpatialTransformer(
|
||||
ch,
|
||||
num_heads,
|
||||
dim_head,
|
||||
depth=transformer_depth[level],
|
||||
context_dim=context_dim,
|
||||
disable_self_attn=disabled_sa,
|
||||
use_linear=use_linear_in_transformer,
|
||||
attn_type=spatial_transformer_attn_type,
|
||||
use_checkpoint=use_checkpoint,
|
||||
)
|
||||
)
|
||||
self.input_blocks.append(TimestepEmbedSequential(*layers))
|
||||
self._feature_size += ch
|
||||
input_block_chans.append(ch)
|
||||
if level != len(channel_mult) - 1:
|
||||
out_ch = ch
|
||||
self.input_blocks.append(
|
||||
TimestepEmbedSequential(
|
||||
ResBlock(
|
||||
ch,
|
||||
time_embed_dim,
|
||||
dropout,
|
||||
out_channels=out_ch,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
down=True,
|
||||
)
|
||||
if resblock_updown
|
||||
else Downsample(
|
||||
ch, conv_resample, dims=dims, out_channels=out_ch
|
||||
)
|
||||
)
|
||||
)
|
||||
ch = out_ch
|
||||
input_block_chans.append(ch)
|
||||
ds *= 2
|
||||
self._feature_size += ch
|
||||
|
||||
if num_head_channels == -1:
|
||||
dim_head = ch // num_heads
|
||||
else:
|
||||
num_heads = ch // num_head_channels
|
||||
dim_head = num_head_channels
|
||||
|
||||
self.middle_block = TimestepEmbedSequential(
|
||||
ResBlock(
|
||||
ch,
|
||||
time_embed_dim,
|
||||
dropout,
|
||||
out_channels=ch,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
),
|
||||
SpatialTransformer(
|
||||
ch,
|
||||
num_heads,
|
||||
dim_head,
|
||||
depth=transformer_depth_middle,
|
||||
context_dim=context_dim,
|
||||
disable_self_attn=disable_middle_self_attn,
|
||||
use_linear=use_linear_in_transformer,
|
||||
attn_type=spatial_transformer_attn_type,
|
||||
use_checkpoint=use_checkpoint,
|
||||
)
|
||||
if not disable_middle_transformer
|
||||
else th.nn.Identity(),
|
||||
ResBlock(
|
||||
ch,
|
||||
time_embed_dim,
|
||||
dropout,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
),
|
||||
)
|
||||
self._feature_size += ch
|
||||
|
||||
self.output_blocks = nn.ModuleList([])
|
||||
for level, mult in list(enumerate(channel_mult))[::-1]:
|
||||
for i in range(self.num_res_blocks[level] + 1):
|
||||
ich = input_block_chans.pop()
|
||||
layers = [
|
||||
ResBlock(
|
||||
ch + ich,
|
||||
time_embed_dim,
|
||||
dropout,
|
||||
out_channels=model_channels * mult,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
)
|
||||
]
|
||||
ch = model_channels * mult
|
||||
if ds in attention_resolutions:
|
||||
if num_head_channels == -1:
|
||||
dim_head = ch // num_heads
|
||||
else:
|
||||
num_heads = ch // num_head_channels
|
||||
dim_head = num_head_channels
|
||||
|
||||
if exists(disable_self_attentions):
|
||||
disabled_sa = disable_self_attentions[level]
|
||||
else:
|
||||
disabled_sa = False
|
||||
|
||||
if (
|
||||
not exists(num_attention_blocks)
|
||||
or i < num_attention_blocks[level]
|
||||
):
|
||||
layers.append(
|
||||
SpatialTransformer(
|
||||
ch,
|
||||
num_heads,
|
||||
dim_head,
|
||||
depth=transformer_depth[level],
|
||||
context_dim=context_dim,
|
||||
disable_self_attn=disabled_sa,
|
||||
use_linear=use_linear_in_transformer,
|
||||
attn_type=spatial_transformer_attn_type,
|
||||
use_checkpoint=use_checkpoint,
|
||||
)
|
||||
)
|
||||
if level and i == self.num_res_blocks[level]:
|
||||
out_ch = ch
|
||||
layers.append(
|
||||
ResBlock(
|
||||
ch,
|
||||
time_embed_dim,
|
||||
dropout,
|
||||
out_channels=out_ch,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
up=True,
|
||||
)
|
||||
if resblock_updown
|
||||
else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
|
||||
)
|
||||
ds //= 2
|
||||
self.output_blocks.append(TimestepEmbedSequential(*layers))
|
||||
self._feature_size += ch
|
||||
|
||||
self.out = nn.Sequential(
|
||||
normalization(ch),
|
||||
nn.SiLU(),
|
||||
zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
|
||||
)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: th.Tensor,
|
||||
timesteps: Optional[th.Tensor] = None,
|
||||
context: Optional[th.Tensor] = None,
|
||||
y: Optional[th.Tensor] = None,
|
||||
**kwargs,
|
||||
) -> th.Tensor:
|
||||
"""
|
||||
Apply the model to an input batch.
|
||||
:param x: an [N x C x ...] Tensor of inputs.
|
||||
:param timesteps: a 1-D batch of timesteps.
|
||||
:param context: conditioning plugged in via crossattn
|
||||
:param y: an [N] Tensor of labels, if class-conditional.
|
||||
:return: an [N x C x ...] Tensor of outputs.
|
||||
"""
|
||||
assert (y is not None) == (
|
||||
self.num_classes is not None
|
||||
), "must specify y if and only if the model is class-conditional"
|
||||
hs = []
|
||||
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
|
||||
emb = self.time_embed(t_emb)
|
||||
|
||||
if self.num_classes is not None:
|
||||
assert y.shape[0] == x.shape[0]
|
||||
emb = emb + self.label_emb(y)
|
||||
|
||||
h = x
|
||||
for module in self.input_blocks:
|
||||
h = module(h, emb, context)
|
||||
hs.append(h)
|
||||
h = self.middle_block(h, emb, context)
|
||||
for module in self.output_blocks:
|
||||
h = th.cat([h, hs.pop()], dim=1)
|
||||
h = module(h, emb, context)
|
||||
h = h.type(x.dtype)
|
||||
|
||||
return self.out(h)
|
369
imaginairy/modules/sgm/diffusionmodules/sampling.py
Normal file
@ -0,0 +1,369 @@
|
||||
"""
|
||||
Partially ported from https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py
|
||||
"""
|
||||
|
||||
|
||||
from typing import Dict, Optional, Union
|
||||
|
||||
import torch
|
||||
from omegaconf import ListConfig, OmegaConf
|
||||
from tqdm import tqdm
|
||||
|
||||
from imaginairy.modules.sgm.diffusionmodules.sampling_utils import (
|
||||
get_ancestral_step,
|
||||
linear_multistep_coeff,
|
||||
to_d,
|
||||
to_neg_log_sigma,
|
||||
to_sigma,
|
||||
)
|
||||
from imaginairy.utils import default, get_device, instantiate_from_config
|
||||
from imaginairy.vendored.k_diffusion.utils import append_dims
|
||||
|
||||
DEFAULT_GUIDER = {
|
||||
"target": "imaginairy.modules.sgm.diffusionmodules.guiders.IdentityGuider"
|
||||
}
|
||||
|
||||
|
||||
class BaseDiffusionSampler:
|
||||
def __init__(
|
||||
self,
|
||||
discretization_config: Union[Dict, ListConfig, OmegaConf],
|
||||
num_steps: Union[int, None] = None,
|
||||
guider_config: Union[Dict, ListConfig, OmegaConf, None] = None,
|
||||
verbose: bool = False,
|
||||
device: Optional[str] = None,
|
||||
):
|
||||
device = default(device, get_device)
|
||||
self.num_steps = num_steps
|
||||
self.discretization = instantiate_from_config(discretization_config)
|
||||
self.guider = instantiate_from_config(
|
||||
default(
|
||||
guider_config,
|
||||
DEFAULT_GUIDER,
|
||||
)
|
||||
)
|
||||
self.verbose = verbose
|
||||
self.device = device
|
||||
|
||||
def prepare_sampling_loop(self, x, cond, uc=None, num_steps=None):
|
||||
sigmas = self.discretization(
|
||||
self.num_steps if num_steps is None else num_steps, device=self.device
|
||||
)
|
||||
uc = default(uc, cond)
|
||||
|
||||
x *= torch.sqrt(1.0 + sigmas[0] ** 2.0)
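        # scale the starting noise to the variance expected at the first (largest) sigma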
|
||||
num_sigmas = len(sigmas)
|
||||
|
||||
s_in = x.new_ones([x.shape[0]])
|
||||
|
||||
return x, s_in, sigmas, num_sigmas, cond, uc
|
||||
|
||||
def denoise(self, x, denoiser, sigma, cond, uc):
|
||||
denoised = denoiser(*self.guider.prepare_inputs(x, sigma, cond, uc))
|
||||
denoised = self.guider(denoised, sigma)
|
||||
return denoised
|
||||
|
||||
def get_sigma_gen(self, num_sigmas):
|
||||
sigma_generator = range(num_sigmas - 1)
|
||||
if self.verbose:
|
||||
print("#" * 30, " Sampling setting ", "#" * 30)
|
||||
print(f"Sampler: {self.__class__.__name__}")
|
||||
print(f"Discretization: {self.discretization.__class__.__name__}")
|
||||
print(f"Guider: {self.guider.__class__.__name__}")
|
||||
sigma_generator = tqdm(
|
||||
sigma_generator,
|
||||
total=num_sigmas,
|
||||
desc=f"Sampling with {self.__class__.__name__} for {num_sigmas} steps",
|
||||
)
|
||||
return sigma_generator
|
||||
|
||||
|
||||
class SingleStepDiffusionSampler(BaseDiffusionSampler):
|
||||
def sampler_step(self, sigma, next_sigma, denoiser, x, cond, uc, *args, **kwargs):
|
||||
raise NotImplementedError
|
||||
|
||||
def euler_step(self, x, d, dt):
|
||||
return x + dt * d
|
||||
|
||||
|
||||
class EDMSampler(SingleStepDiffusionSampler):
|
||||
def __init__(
|
||||
self, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0, *args, **kwargs
|
||||
):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
self.s_churn = s_churn
|
||||
self.s_tmin = s_tmin
|
||||
self.s_tmax = s_tmax
|
||||
self.s_noise = s_noise
|
||||
|
||||
def sampler_step(self, sigma, next_sigma, denoiser, x, cond, uc=None, gamma=0.0):
|
||||
sigma_hat = sigma * (gamma + 1.0)
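        # EDM "churn": temporarily raise the noise level to sigma_hat, adding matching noise below when gamma > 0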
|
||||
if gamma > 0:
|
||||
eps = torch.randn_like(x) * self.s_noise
|
||||
x = x + eps * append_dims(sigma_hat**2 - sigma**2, x.ndim) ** 0.5
|
||||
|
||||
denoised = self.denoise(x, denoiser, sigma_hat, cond, uc)
|
||||
d = to_d(x, sigma_hat, denoised)
|
||||
dt = append_dims(next_sigma - sigma_hat, x.ndim)
|
||||
|
||||
euler_step = self.euler_step(x, d, dt)
|
||||
x = self.possible_correction_step(
|
||||
euler_step, x, d, dt, next_sigma, denoiser, cond, uc
|
||||
)
|
||||
return x
|
||||
|
||||
def __call__(self, denoiser, x, cond, uc=None, num_steps=None):
|
||||
x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop(
|
||||
x, cond, uc, num_steps
|
||||
)
|
||||
|
||||
for i in self.get_sigma_gen(num_sigmas):
|
||||
gamma = (
|
||||
min(self.s_churn / (num_sigmas - 1), 2**0.5 - 1)
|
||||
if self.s_tmin <= sigmas[i] <= self.s_tmax
|
||||
else 0.0
|
||||
)
|
||||
x = self.sampler_step(
|
||||
s_in * sigmas[i],
|
||||
s_in * sigmas[i + 1],
|
||||
denoiser,
|
||||
x,
|
||||
cond,
|
||||
uc,
|
||||
gamma,
|
||||
)
|
||||
|
||||
return x
|
||||
|
||||
|
||||
class AncestralSampler(SingleStepDiffusionSampler):
|
||||
def __init__(self, eta=1.0, s_noise=1.0, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
self.eta = eta
|
||||
self.s_noise = s_noise
|
||||
self.noise_sampler = lambda x: torch.randn_like(x)
|
||||
|
||||
def ancestral_euler_step(self, x, denoised, sigma, sigma_down):
|
||||
d = to_d(x, sigma, denoised)
|
||||
dt = append_dims(sigma_down - sigma, x.ndim)
|
||||
|
||||
return self.euler_step(x, d, dt)
|
||||
|
||||
def ancestral_step(self, x, sigma, next_sigma, sigma_up):
|
||||
x = torch.where(
|
||||
append_dims(next_sigma, x.ndim) > 0.0,
|
||||
x + self.noise_sampler(x) * self.s_noise * append_dims(sigma_up, x.ndim),
|
||||
x,
|
||||
)
|
||||
return x
|
||||
|
||||
def __call__(self, denoiser, x, cond, uc=None, num_steps=None):
|
||||
x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop(
|
||||
x, cond, uc, num_steps
|
||||
)
|
||||
|
||||
for i in self.get_sigma_gen(num_sigmas):
|
||||
x = self.sampler_step(
|
||||
s_in * sigmas[i],
|
||||
s_in * sigmas[i + 1],
|
||||
denoiser,
|
||||
x,
|
||||
cond,
|
||||
uc,
|
||||
)
|
||||
|
||||
return x
|
||||
|
||||
|
||||
class LinearMultistepSampler(BaseDiffusionSampler):
|
||||
def __init__(
|
||||
self,
|
||||
order=4,
|
||||
*args,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
self.order = order
|
||||
|
||||
def __call__(self, denoiser, x, cond, uc=None, num_steps=None, **kwargs):
|
||||
x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop(
|
||||
x, cond, uc, num_steps
|
||||
)
|
||||
|
||||
ds = []
|
||||
sigmas_cpu = sigmas.detach().cpu().numpy()
|
||||
for i in self.get_sigma_gen(num_sigmas):
|
||||
sigma = s_in * sigmas[i]
|
||||
denoised = denoiser(
|
||||
*self.guider.prepare_inputs(x, sigma, cond, uc), **kwargs
|
||||
)
|
||||
denoised = self.guider(denoised, sigma)
|
||||
d = to_d(x, sigma, denoised)
|
||||
ds.append(d)
|
||||
if len(ds) > self.order:
|
||||
ds.pop(0)
|
||||
cur_order = min(i + 1, self.order)
|
||||
coeffs = [
|
||||
linear_multistep_coeff(cur_order, sigmas_cpu, i, j)
|
||||
for j in range(cur_order)
|
||||
]
|
||||
x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
|
||||
|
||||
return x
|
||||
|
||||
|
||||
class EulerEDMSampler(EDMSampler):
|
||||
def possible_correction_step(
|
||||
self, euler_step, x, d, dt, next_sigma, denoiser, cond, uc
|
||||
):
|
||||
return euler_step
|
||||
|
||||
|
||||
class HeunEDMSampler(EDMSampler):
|
||||
def possible_correction_step(
|
||||
self, euler_step, x, d, dt, next_sigma, denoiser, cond, uc
|
||||
):
|
||||
if torch.sum(next_sigma) < 1e-14:
|
||||
# Save a network evaluation if all noise levels are 0
|
||||
return euler_step
|
||||
else:
|
||||
denoised = self.denoise(euler_step, denoiser, next_sigma, cond, uc)
|
||||
d_new = to_d(euler_step, next_sigma, denoised)
|
||||
d_prime = (d + d_new) / 2.0
|
||||
|
||||
# apply correction if noise level is not 0
|
||||
x = torch.where(
|
||||
append_dims(next_sigma, x.ndim) > 0.0, x + d_prime * dt, euler_step
|
||||
)
|
||||
return x
|
||||
|
||||
|
||||
class EulerAncestralSampler(AncestralSampler):
|
||||
def sampler_step(self, sigma, next_sigma, denoiser, x, cond, uc):
|
||||
sigma_down, sigma_up = get_ancestral_step(sigma, next_sigma, eta=self.eta)
|
||||
denoised = self.denoise(x, denoiser, sigma, cond, uc)
|
||||
x = self.ancestral_euler_step(x, denoised, sigma, sigma_down)
|
||||
x = self.ancestral_step(x, sigma, next_sigma, sigma_up)
|
||||
|
||||
return x
|
||||
|
||||
|
||||
class DPMPP2SAncestralSampler(AncestralSampler):
|
||||
def get_variables(self, sigma, sigma_down):
|
||||
t, t_next = (to_neg_log_sigma(s) for s in (sigma, sigma_down))
|
||||
h = t_next - t
|
||||
s = t + 0.5 * h
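        # h is the step size in negative-log-sigma space; s is the midpoint evaluated by the 2S update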
|
||||
return h, s, t, t_next
|
||||
|
||||
def get_mult(self, h, s, t, t_next):
|
||||
mult1 = to_sigma(s) / to_sigma(t)
|
||||
mult2 = (-0.5 * h).expm1()
|
||||
mult3 = to_sigma(t_next) / to_sigma(t)
|
||||
mult4 = (-h).expm1()
|
||||
|
||||
return mult1, mult2, mult3, mult4
|
||||
|
||||
def sampler_step(self, sigma, next_sigma, denoiser, x, cond, uc=None, **kwargs):
|
||||
sigma_down, sigma_up = get_ancestral_step(sigma, next_sigma, eta=self.eta)
|
||||
denoised = self.denoise(x, denoiser, sigma, cond, uc)
|
||||
x_euler = self.ancestral_euler_step(x, denoised, sigma, sigma_down)
|
||||
|
||||
if torch.sum(sigma_down) < 1e-14:
|
||||
# Save a network evaluation if all noise levels are 0
|
||||
x = x_euler
|
||||
else:
|
||||
h, s, t, t_next = self.get_variables(sigma, sigma_down)
|
||||
mult = [
|
||||
append_dims(mult, x.ndim) for mult in self.get_mult(h, s, t, t_next)
|
||||
]
|
||||
|
||||
x2 = mult[0] * x - mult[1] * denoised
|
||||
denoised2 = self.denoise(x2, denoiser, to_sigma(s), cond, uc)
|
||||
x_dpmpp2s = mult[2] * x - mult[3] * denoised2
|
||||
|
||||
# apply correction if noise level is not 0
|
||||
x = torch.where(append_dims(sigma_down, x.ndim) > 0.0, x_dpmpp2s, x_euler)
|
||||
|
||||
x = self.ancestral_step(x, sigma, next_sigma, sigma_up)
|
||||
return x
|
||||
|
||||
|
||||
class DPMPP2MSampler(BaseDiffusionSampler):
|
||||
def get_variables(self, sigma, next_sigma, previous_sigma=None):
|
||||
t, t_next = (to_neg_log_sigma(s) for s in (sigma, next_sigma))
|
||||
h = t_next - t
|
||||
|
||||
if previous_sigma is not None:
|
||||
h_last = t - to_neg_log_sigma(previous_sigma)
|
||||
r = h_last / h
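            # r weights the multistep correction by the ratio of the previous step size to the current one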
|
||||
return h, r, t, t_next
|
||||
else:
|
||||
return h, None, t, t_next
|
||||
|
||||
def get_mult(self, h, r, t, t_next, previous_sigma):
|
||||
mult1 = to_sigma(t_next) / to_sigma(t)
|
||||
mult2 = (-h).expm1()
|
||||
|
||||
if previous_sigma is not None:
|
||||
mult3 = 1 + 1 / (2 * r)
|
||||
mult4 = 1 / (2 * r)
|
||||
return mult1, mult2, mult3, mult4
|
||||
else:
|
||||
return mult1, mult2
|
||||
|
||||
def sampler_step(
|
||||
self,
|
||||
old_denoised,
|
||||
previous_sigma,
|
||||
sigma,
|
||||
next_sigma,
|
||||
denoiser,
|
||||
x,
|
||||
cond,
|
||||
uc=None,
|
||||
):
|
||||
denoised = self.denoise(x, denoiser, sigma, cond, uc)
|
||||
|
||||
h, r, t, t_next = self.get_variables(sigma, next_sigma, previous_sigma)
|
||||
mult = [
|
||||
append_dims(mult, x.ndim)
|
||||
for mult in self.get_mult(h, r, t, t_next, previous_sigma)
|
||||
]
|
||||
|
||||
x_standard = mult[0] * x - mult[1] * denoised
|
||||
if old_denoised is None or torch.sum(next_sigma) < 1e-14:
|
||||
# Save a network evaluation if all noise levels are 0 or on the first step
|
||||
return x_standard, denoised
|
||||
else:
|
||||
denoised_d = mult[2] * denoised - mult[3] * old_denoised
|
||||
x_advanced = mult[0] * x - mult[1] * denoised_d
|
||||
|
||||
# apply correction if noise level is not 0 and not first step
|
||||
x = torch.where(
|
||||
append_dims(next_sigma, x.ndim) > 0.0, x_advanced, x_standard
|
||||
)
|
||||
|
||||
return x, denoised
|
||||
|
||||
def __call__(self, denoiser, x, cond, uc=None, num_steps=None, **kwargs):
|
||||
x, s_in, sigmas, num_sigmas, cond, uc = self.prepare_sampling_loop(
|
||||
x, cond, uc, num_steps
|
||||
)
|
||||
|
||||
old_denoised = None
|
||||
for i in self.get_sigma_gen(num_sigmas):
|
||||
x, old_denoised = self.sampler_step(
|
||||
old_denoised,
|
||||
None if i == 0 else s_in * sigmas[i - 1],
|
||||
s_in * sigmas[i],
|
||||
s_in * sigmas[i + 1],
|
||||
denoiser,
|
||||
x,
|
||||
cond,
|
||||
uc=uc,
|
||||
)
|
||||
|
||||
return x
|
44
imaginairy/modules/sgm/diffusionmodules/sampling_utils.py
Normal file
@ -0,0 +1,44 @@
|
||||
import torch
|
||||
from scipy import integrate
|
||||
|
||||
from imaginairy.vendored.k_diffusion.utils import append_dims
|
||||
|
||||
|
||||
def linear_multistep_coeff(order, t, i, j, epsrel=1e-4):
|
||||
if order - 1 > i:
|
||||
msg = f"Order {order} too high for step {i}"
|
||||
raise ValueError(msg)
|
||||
|
||||
def fn(tau):
|
||||
prod = 1.0
|
||||
for k in range(order):
|
||||
if j == k:
|
||||
continue
|
||||
prod *= (tau - t[i - k]) / (t[i - j] - t[i - k])
|
||||
return prod
|
||||
|
||||
return integrate.quad(fn, t[i], t[i + 1], epsrel=epsrel)[0]
|
||||
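Illustrative usage (not part of the commit); the schedule values below are made up:

import numpy as np

sigmas = np.array([10.0, 7.0, 5.0, 3.0, 2.0, 1.0])   # toy decreasing noise schedule
order, i = 3, 3
coeffs = [linear_multistep_coeff(order, sigmas, i, j) for j in range(order)]
# the three coefficients weight the last three denoiser outputs when stepping from sigmas[3] to sigmas[4]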
|
||||
|
||||
def get_ancestral_step(sigma_from, sigma_to, eta=1.0):
|
||||
if not eta:
|
||||
return sigma_to, 0.0
|
||||
sigma_up = torch.minimum(
|
||||
sigma_to,
|
||||
eta
|
||||
* (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5,
|
||||
)
|
||||
sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
|
||||
return sigma_down, sigma_up
|
||||
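A small worked example (illustrative, not part of the commit) showing how the step is split into a deterministic part and re-injected noise:

import torch

sigma_from, sigma_to = torch.tensor(10.0), torch.tensor(5.0)
down, up = get_ancestral_step(sigma_from, sigma_to)   # down = 2.5, up ≈ 4.33
# the deterministic step targets `down`, then noise with std `up` is added back;
# down**2 + up**2 == sigma_to**2, so the trajectory still lands on sigma_to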
|
||||
|
||||
def to_d(x, sigma, denoised):
|
||||
return (x - denoised) / append_dims(sigma, x.ndim)
|
||||
|
||||
|
||||
def to_neg_log_sigma(sigma):
|
||||
return sigma.log().neg()
|
||||
|
||||
|
||||
def to_sigma(neg_log_sigma):
|
||||
return neg_log_sigma.neg().exp()
|
imaginairy/modules/sgm/diffusionmodules/sigma_sampling.py (new file, 31 lines)
@ -0,0 +1,31 @@
|
||||
import torch
|
||||
|
||||
from imaginairy.utils import default, instantiate_from_config
|
||||
|
||||
|
||||
class EDMSampling:
|
||||
def __init__(self, p_mean=-1.2, p_std=1.2):
|
||||
self.p_mean = p_mean
|
||||
self.p_std = p_std
|
||||
|
||||
def __call__(self, n_samples, rand=None):
|
||||
log_sigma = self.p_mean + self.p_std * default(rand, torch.randn((n_samples,)))
|
||||
return log_sigma.exp()
|
||||
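Illustrative usage (not part of the commit): this draws log-normally distributed training noise levels, as in the EDM formulation.

import torch

sigma_sampler = EDMSampling(p_mean=-1.2, p_std=1.2)
sigmas = sigma_sampler(4)   # four sigmas distributed as exp(Normal(-1.2, 1.2**2))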
|
||||
|
||||
class DiscreteSampling:
|
||||
def __init__(self, discretization_config, num_idx, do_append_zero=False, flip=True):
|
||||
self.num_idx = num_idx
|
||||
self.sigmas = instantiate_from_config(discretization_config)(
|
||||
num_idx, do_append_zero=do_append_zero, flip=flip
|
||||
)
|
||||
|
||||
def idx_to_sigma(self, idx):
|
||||
return self.sigmas[idx]
|
||||
|
||||
def __call__(self, n_samples, rand=None):
|
||||
idx = default(
|
||||
rand,
|
||||
torch.randint(0, self.num_idx, (n_samples,)),
|
||||
)
|
||||
return self.idx_to_sigma(idx)
|
imaginairy/modules/sgm/diffusionmodules/util.py (new file, 365 lines)
@ -0,0 +1,365 @@
|
||||
"""
|
||||
partially adopted from
|
||||
https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
|
||||
and
|
||||
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
|
||||
and
|
||||
https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
|
||||
|
||||
thanks!
|
||||
"""
|
||||
|
||||
import math
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from einops import rearrange, repeat
|
||||
|
||||
|
||||
def make_beta_schedule(
|
||||
schedule,
|
||||
n_timestep,
|
||||
linear_start=1e-4,
|
||||
linear_end=2e-2,
|
||||
):
|
||||
if schedule == "linear":
|
||||
betas = (
|
||||
torch.linspace(
|
||||
linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64
|
||||
)
|
||||
** 2
|
||||
)
|
||||
return betas.numpy()
|
||||
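Illustrative usage (not part of the commit): the standard 1000-step linear schedule and its cumulative alphas.

betas = make_beta_schedule("linear", 1000)       # numpy array ramping from ~1e-4 to 2e-2
alphas_cumprod = (1.0 - betas).cumprod()         # what extract_into_tensor below typically indexes into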
|
||||
|
||||
def extract_into_tensor(a, t, x_shape):
|
||||
b, *_ = t.shape
|
||||
out = a.gather(-1, t)
|
||||
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
|
||||
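Illustrative usage (not part of the commit):

import torch

a = torch.linspace(0, 1, 1000)                    # e.g. a per-timestep schedule
t = torch.tensor([0, 999])                        # one timestep index per batch element
vals = extract_into_tensor(a, t, (2, 4, 64, 64))  # shape (2, 1, 1, 1), broadcastable over x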
|
||||
|
||||
def mixed_checkpoint(func, inputs: dict, params, flag):
|
||||
"""
|
||||
Evaluate a function without caching intermediate activations, allowing for
|
||||
reduced memory at the expense of extra compute in the backward pass. This differs from the original checkpoint function
|
||||
borrowed from https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py in that
|
||||
it also works with non-tensor inputs
|
||||
:param func: the function to evaluate.
|
||||
:param inputs: the argument dictionary to pass to `func`.
|
||||
:param params: a sequence of parameters `func` depends on but does not
|
||||
explicitly take as arguments.
|
||||
:param flag: if False, disable gradient checkpointing.
|
||||
"""
|
||||
if flag:
|
||||
tensor_keys = [key for key in inputs if isinstance(inputs[key], torch.Tensor)]
|
||||
tensor_inputs = [
|
||||
inputs[key] for key in inputs if isinstance(inputs[key], torch.Tensor)
|
||||
]
|
||||
non_tensor_keys = [
|
||||
key for key in inputs if not isinstance(inputs[key], torch.Tensor)
|
||||
]
|
||||
non_tensor_inputs = [
|
||||
inputs[key] for key in inputs if not isinstance(inputs[key], torch.Tensor)
|
||||
]
|
||||
args = tuple(tensor_inputs) + tuple(non_tensor_inputs) + tuple(params)
|
||||
return MixedCheckpointFunction.apply(
|
||||
func,
|
||||
len(tensor_inputs),
|
||||
len(non_tensor_inputs),
|
||||
tensor_keys,
|
||||
non_tensor_keys,
|
||||
*args,
|
||||
)
|
||||
else:
|
||||
return func(**inputs)
|
||||
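A minimal sketch of how this is meant to be called (illustrative; ToyBlock and its arguments are invented for the example, not part of the commit):

import torch
import torch.nn as nn

class ToyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(16, 16)

    def forward(self, x, scale=1.0):
        return self.proj(x) * scale

block = ToyBlock()
x = torch.randn(4, 16, requires_grad=True)
out = mixed_checkpoint(
    block.forward,
    inputs={"x": x, "scale": 2.0},        # "scale" is a plain float; a regular checkpoint could not take it
    params=list(block.parameters()),
    flag=True,
)
out.sum().backward()                      # activations are recomputed here instead of being cached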
|
||||
|
||||
class MixedCheckpointFunction(torch.autograd.Function):
|
||||
@staticmethod
|
||||
def forward(
|
||||
ctx,
|
||||
run_function,
|
||||
length_tensors,
|
||||
length_non_tensors,
|
||||
tensor_keys,
|
||||
non_tensor_keys,
|
||||
*args,
|
||||
):
|
||||
ctx.end_tensors = length_tensors
|
||||
ctx.end_non_tensors = length_tensors + length_non_tensors
|
||||
ctx.gpu_autocast_kwargs = {
|
||||
"enabled": torch.is_autocast_enabled(),
|
||||
"dtype": torch.get_autocast_gpu_dtype(),
|
||||
"cache_enabled": torch.is_autocast_cache_enabled(),
|
||||
}
|
||||
assert len(tensor_keys) == length_tensors
|
||||
assert len(non_tensor_keys) == length_non_tensors
|
||||
|
||||
ctx.input_tensors = dict(zip(tensor_keys, list(args[: ctx.end_tensors])))
|
||||
ctx.input_non_tensors = dict(
|
||||
zip(non_tensor_keys, list(args[ctx.end_tensors : ctx.end_non_tensors]))
|
||||
)
|
||||
ctx.run_function = run_function
|
||||
ctx.input_params = list(args[ctx.end_non_tensors :])
|
||||
|
||||
with torch.no_grad():
|
||||
output_tensors = ctx.run_function(
|
||||
**ctx.input_tensors, **ctx.input_non_tensors
|
||||
)
|
||||
return output_tensors
|
||||
|
||||
@staticmethod
|
||||
def backward(ctx, *output_grads):
|
||||
# additional_args = {key: ctx.input_tensors[key] for key in ctx.input_tensors if not isinstance(ctx.input_tensors[key],torch.Tensor)}
|
||||
ctx.input_tensors = {
|
||||
key: ctx.input_tensors[key].detach().requires_grad_(True)
|
||||
for key in ctx.input_tensors
|
||||
}
|
||||
|
||||
with torch.enable_grad(), torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
|
||||
# Fixes a bug where the first op in run_function modifies the
|
||||
# Tensor storage in place, which is not allowed for detach()'d
|
||||
# Tensors.
|
||||
shallow_copies = {
|
||||
key: ctx.input_tensors[key].view_as(ctx.input_tensors[key])
|
||||
for key in ctx.input_tensors
|
||||
}
|
||||
# shallow_copies.update(additional_args)
|
||||
output_tensors = ctx.run_function(**shallow_copies, **ctx.input_non_tensors)
|
||||
input_grads = torch.autograd.grad(
|
||||
output_tensors,
|
||||
list(ctx.input_tensors.values()) + ctx.input_params,
|
||||
output_grads,
|
||||
allow_unused=True,
|
||||
)
|
||||
del ctx.input_tensors
|
||||
del ctx.input_params
|
||||
del output_tensors
|
||||
return (
|
||||
(None, None, None, None, None)
|
||||
+ input_grads[: ctx.end_tensors]
|
||||
+ (None,) * (ctx.end_non_tensors - ctx.end_tensors)
|
||||
+ input_grads[ctx.end_tensors :]
|
||||
)
|
||||
|
||||
|
||||
def checkpoint(func, inputs, params, flag):
|
||||
"""
|
||||
Evaluate a function without caching intermediate activations, allowing for
|
||||
reduced memory at the expense of extra compute in the backward pass.
|
||||
:param func: the function to evaluate.
|
||||
:param inputs: the argument sequence to pass to `func`.
|
||||
:param params: a sequence of parameters `func` depends on but does not
|
||||
explicitly take as arguments.
|
||||
:param flag: if False, disable gradient checkpointing.
|
||||
"""
|
||||
if flag:
|
||||
args = tuple(inputs) + tuple(params)
|
||||
return CheckpointFunction.apply(func, len(inputs), *args)
|
||||
else:
|
||||
return func(*inputs)
|
||||
|
||||
|
||||
class CheckpointFunction(torch.autograd.Function):
|
||||
@staticmethod
|
||||
def forward(ctx, run_function, length, *args):
|
||||
ctx.run_function = run_function
|
||||
ctx.input_tensors = list(args[:length])
|
||||
ctx.input_params = list(args[length:])
|
||||
ctx.gpu_autocast_kwargs = {
|
||||
"enabled": torch.is_autocast_enabled(),
|
||||
"dtype": torch.get_autocast_gpu_dtype(),
|
||||
"cache_enabled": torch.is_autocast_cache_enabled(),
|
||||
}
|
||||
with torch.no_grad():
|
||||
output_tensors = ctx.run_function(*ctx.input_tensors)
|
||||
return output_tensors
|
||||
|
||||
@staticmethod
|
||||
def backward(ctx, *output_grads):
|
||||
ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
|
||||
with torch.enable_grad(), torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
|
||||
# Fixes a bug where the first op in run_function modifies the
|
||||
# Tensor storage in place, which is not allowed for detach()'d
|
||||
# Tensors.
|
||||
shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
|
||||
output_tensors = ctx.run_function(*shallow_copies)
|
||||
input_grads = torch.autograd.grad(
|
||||
output_tensors,
|
||||
ctx.input_tensors + ctx.input_params,
|
||||
output_grads,
|
||||
allow_unused=True,
|
||||
)
|
||||
del ctx.input_tensors
|
||||
del ctx.input_params
|
||||
del output_tensors
|
||||
return (None, None, *input_grads)
|
||||
|
||||
|
||||
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
|
||||
"""
|
||||
Create sinusoidal timestep embeddings.
|
||||
:param timesteps: a 1-D Tensor of N indices, one per batch element.
|
||||
These may be fractional.
|
||||
:param dim: the dimension of the output.
|
||||
:param max_period: controls the minimum frequency of the embeddings.
|
||||
:return: an [N x dim] Tensor of positional embeddings.
|
||||
"""
|
||||
if not repeat_only:
|
||||
half = dim // 2
|
||||
freqs = torch.exp(
|
||||
-math.log(max_period)
|
||||
* torch.arange(start=0, end=half, dtype=torch.float32)
|
||||
/ half
|
||||
).to(device=timesteps.device)
|
||||
args = timesteps[:, None].float() * freqs[None]
|
||||
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
|
||||
if dim % 2:
|
||||
embedding = torch.cat(
|
||||
[embedding, torch.zeros_like(embedding[:, :1])], dim=-1
|
||||
)
|
||||
else:
|
||||
embedding = repeat(timesteps, "b -> b d", d=dim)
|
||||
return embedding
|
||||
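Illustrative usage (not part of the commit):

import torch

t = torch.tensor([0.0, 10.0, 999.0])
emb = timestep_embedding(t, dim=320)   # -> (3, 320); max_period controls the slowest frequency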
|
||||
|
||||
def zero_module(module):
|
||||
"""
|
||||
Zero out the parameters of a module and return it.
|
||||
"""
|
||||
for p in module.parameters():
|
||||
p.detach().zero_()
|
||||
return module
|
||||
|
||||
|
||||
def scale_module(module, scale):
|
||||
"""
|
||||
Scale the parameters of a module and return it.
|
||||
"""
|
||||
for p in module.parameters():
|
||||
p.detach().mul_(scale)
|
||||
return module
|
||||
|
||||
|
||||
def mean_flat(tensor):
|
||||
"""
|
||||
Take the mean over all non-batch dimensions.
|
||||
"""
|
||||
return tensor.mean(dim=list(range(1, len(tensor.shape))))
|
||||
|
||||
|
||||
def normalization(channels):
|
||||
"""
|
||||
Make a standard normalization layer.
|
||||
:param channels: number of input channels.
|
||||
:return: an nn.Module for normalization.
|
||||
"""
|
||||
return GroupNorm32(32, channels)
|
||||
|
||||
|
||||
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
|
||||
class SiLU(nn.Module):
|
||||
def forward(self, x):
|
||||
return x * torch.sigmoid(x)
|
||||
|
||||
|
||||
class GroupNorm32(nn.GroupNorm):
|
||||
def forward(self, x):
|
||||
return super().forward(x.float()).type(x.dtype)
|
||||
|
||||
|
||||
def conv_nd(dims, *args, **kwargs):
|
||||
"""
|
||||
Create a 1D, 2D, or 3D convolution module.
|
||||
"""
|
||||
if dims == 1:
|
||||
return nn.Conv1d(*args, **kwargs)
|
||||
elif dims == 2:
|
||||
return nn.Conv2d(*args, **kwargs)
|
||||
elif dims == 3:
|
||||
return nn.Conv3d(*args, **kwargs)
|
||||
msg = f"unsupported dimensions: {dims}"
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
def linear(*args, **kwargs):
|
||||
"""
|
||||
Create a linear module.
|
||||
"""
|
||||
return nn.Linear(*args, **kwargs)
|
||||
|
||||
|
||||
def avg_pool_nd(dims, *args, **kwargs):
|
||||
"""
|
||||
Create a 1D, 2D, or 3D average pooling module.
|
||||
"""
|
||||
if dims == 1:
|
||||
return nn.AvgPool1d(*args, **kwargs)
|
||||
elif dims == 2:
|
||||
return nn.AvgPool2d(*args, **kwargs)
|
||||
elif dims == 3:
|
||||
return nn.AvgPool3d(*args, **kwargs)
|
||||
msg = f"unsupported dimensions: {dims}"
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
class AlphaBlender(nn.Module):
|
||||
strategies = ["learned", "fixed", "learned_with_images"]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
alpha: float,
|
||||
merge_strategy: str = "learned_with_images",
|
||||
rearrange_pattern: str = "b t -> (b t) 1 1",
|
||||
):
|
||||
super().__init__()
|
||||
self.merge_strategy = merge_strategy
|
||||
self.rearrange_pattern = rearrange_pattern
|
||||
|
||||
assert (
|
||||
merge_strategy in self.strategies
|
||||
), f"merge_strategy needs to be in {self.strategies}"
|
||||
|
||||
if self.merge_strategy == "fixed":
|
||||
self.register_buffer("mix_factor", torch.Tensor([alpha]))
|
||||
elif (
|
||||
self.merge_strategy == "learned"
|
||||
or self.merge_strategy == "learned_with_images"
|
||||
):
|
||||
self.register_parameter(
|
||||
"mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))
|
||||
)
|
||||
else:
|
||||
msg = f"unknown merge strategy {self.merge_strategy}"
|
||||
raise ValueError(msg)
|
||||
|
||||
def get_alpha(self, image_only_indicator: torch.Tensor) -> torch.Tensor:
|
||||
if self.merge_strategy == "fixed":
|
||||
alpha = self.mix_factor
|
||||
elif self.merge_strategy == "learned":
|
||||
alpha = torch.sigmoid(self.mix_factor)
|
||||
elif self.merge_strategy == "learned_with_images":
|
||||
assert image_only_indicator is not None, "need image_only_indicator ..."
|
||||
alpha = torch.where(
|
||||
image_only_indicator.bool(),
|
||||
torch.ones(1, 1, device=image_only_indicator.device),
|
||||
rearrange(torch.sigmoid(self.mix_factor), "... -> ... 1"),
|
||||
)
|
||||
alpha = rearrange(alpha, self.rearrange_pattern)
|
||||
else:
|
||||
raise NotImplementedError
|
||||
return alpha
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x_spatial: torch.Tensor,
|
||||
x_temporal: torch.Tensor,
|
||||
image_only_indicator: Optional[torch.Tensor] = None,
|
||||
) -> torch.Tensor:
|
||||
alpha = self.get_alpha(image_only_indicator)
|
||||
x = (
|
||||
alpha.to(x_spatial.dtype) * x_spatial
|
||||
+ (1.0 - alpha).to(x_spatial.dtype) * x_temporal
|
||||
)
|
||||
return x
|
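A minimal sketch (illustrative, not part of the commit) using the "fixed" strategy, where alpha is a constant rather than a learned or per-frame value:

import torch

blender = AlphaBlender(alpha=0.7, merge_strategy="fixed")
x_spatial = torch.randn(8, 16, 320)      # (batch*frames, tokens, channels)
x_temporal = torch.randn(8, 16, 320)
mixed = blender(x_spatial, x_temporal)   # 0.7 * spatial + 0.3 * temporal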
imaginairy/modules/sgm/diffusionmodules/video_model.py (new file, 510 lines)
@ -0,0 +1,510 @@
|
||||
from typing import List, Optional, Union
|
||||
|
||||
import torch as th
|
||||
import torch.nn as nn
|
||||
from einops import rearrange
|
||||
|
||||
from imaginairy.modules.sgm.diffusionmodules.openaimodel import (
|
||||
Downsample,
|
||||
ResBlock,
|
||||
SpatialVideoTransformer,
|
||||
Timestep,
|
||||
TimestepEmbedSequential,
|
||||
Upsample,
|
||||
)
|
||||
from imaginairy.modules.sgm.diffusionmodules.util import (
|
||||
conv_nd,
|
||||
linear,
|
||||
normalization,
|
||||
timestep_embedding,
|
||||
zero_module,
|
||||
)
|
||||
from imaginairy.utils import default
|
||||
|
||||
from .util import AlphaBlender
|
||||
|
||||
# import torch.nn.functional as F
|
||||
|
||||
|
||||
class VideoResBlock(ResBlock):
|
||||
def __init__(
|
||||
self,
|
||||
channels: int,
|
||||
emb_channels: int,
|
||||
dropout: float,
|
||||
video_kernel_size: Union[int, List[int]] = 3,
|
||||
merge_strategy: str = "fixed",
|
||||
merge_factor: float = 0.5,
|
||||
out_channels: Optional[int] = None,
|
||||
use_conv: bool = False,
|
||||
use_scale_shift_norm: bool = False,
|
||||
dims: int = 2,
|
||||
use_checkpoint: bool = False,
|
||||
up: bool = False,
|
||||
down: bool = False,
|
||||
):
|
||||
super().__init__(
|
||||
channels,
|
||||
emb_channels,
|
||||
dropout,
|
||||
out_channels=out_channels,
|
||||
use_conv=use_conv,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
up=up,
|
||||
down=down,
|
||||
)
|
||||
|
||||
self.time_stack = ResBlock(
|
||||
default(out_channels, channels),
|
||||
emb_channels,
|
||||
dropout=dropout,
|
||||
dims=3,
|
||||
out_channels=default(out_channels, channels),
|
||||
use_scale_shift_norm=False,
|
||||
use_conv=False,
|
||||
up=False,
|
||||
down=False,
|
||||
kernel_size=video_kernel_size,
|
||||
use_checkpoint=use_checkpoint,
|
||||
exchange_temb_dims=True,
|
||||
)
|
||||
self.time_mixer = AlphaBlender(
|
||||
alpha=merge_factor,
|
||||
merge_strategy=merge_strategy,
|
||||
rearrange_pattern="b t -> b 1 t 1 1",
|
||||
)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: th.Tensor,
|
||||
emb: th.Tensor,
|
||||
num_video_frames: int,
|
||||
image_only_indicator: Optional[th.Tensor] = None,
|
||||
) -> th.Tensor:
|
||||
x = super().forward(x, emb)
|
||||
|
||||
x_mix = rearrange(x, "(b t) c h w -> b c t h w", t=num_video_frames)
|
||||
x = rearrange(x, "(b t) c h w -> b c t h w", t=num_video_frames)
|
||||
|
||||
x = self.time_stack(
|
||||
x, rearrange(emb, "(b t) ... -> b t ...", t=num_video_frames)
|
||||
)
|
||||
x = self.time_mixer(
|
||||
x_spatial=x_mix, x_temporal=x, image_only_indicator=image_only_indicator
|
||||
)
|
||||
x = rearrange(x, "b c t h w -> (b t) c h w")
|
||||
return x
|
||||
|
||||
|
||||
class VideoUNet(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
in_channels: int,
|
||||
model_channels: int,
|
||||
out_channels: int,
|
||||
num_res_blocks: int,
|
||||
attention_resolutions: int,
|
||||
dropout: float = 0.0,
|
||||
channel_mult: List[int] = (1, 2, 4, 8),
|
||||
conv_resample: bool = True,
|
||||
dims: int = 2,
|
||||
num_classes: Optional[int] = None,
|
||||
use_checkpoint: bool = False,
|
||||
num_heads: int = -1,
|
||||
num_head_channels: int = -1,
|
||||
num_heads_upsample: int = -1,
|
||||
use_scale_shift_norm: bool = False,
|
||||
resblock_updown: bool = False,
|
||||
transformer_depth: Union[List[int], int] = 1,
|
||||
transformer_depth_middle: Optional[int] = None,
|
||||
context_dim: Optional[int] = None,
|
||||
time_downup: bool = False,
|
||||
time_context_dim: Optional[int] = None,
|
||||
extra_ff_mix_layer: bool = False,
|
||||
use_spatial_context: bool = False,
|
||||
merge_strategy: str = "fixed",
|
||||
merge_factor: float = 0.5,
|
||||
spatial_transformer_attn_type: str = "softmax",
|
||||
video_kernel_size: Union[int, List[int]] = 3,
|
||||
use_linear_in_transformer: bool = False,
|
||||
adm_in_channels: Optional[int] = None,
|
||||
disable_temporal_crossattention: bool = False,
|
||||
max_ddpm_temb_period: int = 10000,
|
||||
):
|
||||
super().__init__()
|
||||
assert context_dim is not None
|
||||
|
||||
if num_heads_upsample == -1:
|
||||
num_heads_upsample = num_heads
|
||||
|
||||
if num_heads == -1:
|
||||
assert num_head_channels != -1
|
||||
|
||||
if num_head_channels == -1:
|
||||
assert num_heads != -1
|
||||
|
||||
self.in_channels = in_channels
|
||||
self.model_channels = model_channels
|
||||
self.out_channels = out_channels
|
||||
if isinstance(transformer_depth, int):
|
||||
transformer_depth = len(channel_mult) * [transformer_depth]
|
||||
transformer_depth_middle = default(
|
||||
transformer_depth_middle, transformer_depth[-1]
|
||||
)
|
||||
|
||||
self.num_res_blocks = num_res_blocks
|
||||
self.attention_resolutions = attention_resolutions
|
||||
self.dropout = dropout
|
||||
self.channel_mult = channel_mult
|
||||
self.conv_resample = conv_resample
|
||||
self.num_classes = num_classes
|
||||
self.use_checkpoint = use_checkpoint
|
||||
self.num_heads = num_heads
|
||||
self.num_head_channels = num_head_channels
|
||||
self.num_heads_upsample = num_heads_upsample
|
||||
|
||||
time_embed_dim = model_channels * 4
|
||||
self.time_embed = nn.Sequential(
|
||||
linear(model_channels, time_embed_dim),
|
||||
nn.SiLU(),
|
||||
linear(time_embed_dim, time_embed_dim),
|
||||
)
|
||||
|
||||
if self.num_classes is not None:
|
||||
if isinstance(self.num_classes, int):
|
||||
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
|
||||
elif self.num_classes == "continuous":
|
||||
print("setting up linear c_adm embedding layer")
|
||||
self.label_emb = nn.Linear(1, time_embed_dim)
|
||||
elif self.num_classes == "timestep":
|
||||
self.label_emb = nn.Sequential(
|
||||
Timestep(model_channels),
|
||||
nn.Sequential(
|
||||
linear(model_channels, time_embed_dim),
|
||||
nn.SiLU(),
|
||||
linear(time_embed_dim, time_embed_dim),
|
||||
),
|
||||
)
|
||||
|
||||
elif self.num_classes == "sequential":
|
||||
assert adm_in_channels is not None
|
||||
self.label_emb = nn.Sequential(
|
||||
nn.Sequential(
|
||||
linear(adm_in_channels, time_embed_dim),
|
||||
nn.SiLU(),
|
||||
linear(time_embed_dim, time_embed_dim),
|
||||
)
|
||||
)
|
||||
else:
|
||||
raise ValueError()
|
||||
|
||||
self.input_blocks = nn.ModuleList(
|
||||
[
|
||||
TimestepEmbedSequential(
|
||||
conv_nd(dims, in_channels, model_channels, 3, padding=1)
|
||||
)
|
||||
]
|
||||
)
|
||||
self._feature_size = model_channels
|
||||
input_block_chans = [model_channels]
|
||||
ch = model_channels
|
||||
ds = 1
|
||||
|
||||
def get_attention_layer(
|
||||
ch,
|
||||
num_heads,
|
||||
dim_head,
|
||||
depth=1,
|
||||
context_dim=None,
|
||||
use_checkpoint=False,
|
||||
disabled_sa=False,
|
||||
):
|
||||
return SpatialVideoTransformer(
|
||||
ch,
|
||||
num_heads,
|
||||
dim_head,
|
||||
depth=depth,
|
||||
context_dim=context_dim,
|
||||
time_context_dim=time_context_dim,
|
||||
dropout=dropout,
|
||||
ff_in=extra_ff_mix_layer,
|
||||
use_spatial_context=use_spatial_context,
|
||||
merge_strategy=merge_strategy,
|
||||
merge_factor=merge_factor,
|
||||
checkpoint=use_checkpoint,
|
||||
use_linear=use_linear_in_transformer,
|
||||
attn_mode=spatial_transformer_attn_type,
|
||||
disable_self_attn=disabled_sa,
|
||||
disable_temporal_crossattention=disable_temporal_crossattention,
|
||||
max_time_embed_period=max_ddpm_temb_period,
|
||||
)
|
||||
|
||||
def get_resblock(
|
||||
merge_factor,
|
||||
merge_strategy,
|
||||
video_kernel_size,
|
||||
ch,
|
||||
time_embed_dim,
|
||||
dropout,
|
||||
out_ch,
|
||||
dims,
|
||||
use_checkpoint,
|
||||
use_scale_shift_norm,
|
||||
down=False,
|
||||
up=False,
|
||||
):
|
||||
return VideoResBlock(
|
||||
merge_factor=merge_factor,
|
||||
merge_strategy=merge_strategy,
|
||||
video_kernel_size=video_kernel_size,
|
||||
channels=ch,
|
||||
emb_channels=time_embed_dim,
|
||||
dropout=dropout,
|
||||
out_channels=out_ch,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
down=down,
|
||||
up=up,
|
||||
)
|
||||
|
||||
for level, mult in enumerate(channel_mult):
|
||||
for _ in range(num_res_blocks):
|
||||
layers = [
|
||||
get_resblock(
|
||||
merge_factor=merge_factor,
|
||||
merge_strategy=merge_strategy,
|
||||
video_kernel_size=video_kernel_size,
|
||||
ch=ch,
|
||||
time_embed_dim=time_embed_dim,
|
||||
dropout=dropout,
|
||||
out_ch=mult * model_channels,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
)
|
||||
]
|
||||
ch = mult * model_channels
|
||||
if ds in attention_resolutions:
|
||||
if num_head_channels == -1:
|
||||
dim_head = ch // num_heads
|
||||
else:
|
||||
num_heads = ch // num_head_channels
|
||||
dim_head = num_head_channels
|
||||
|
||||
layers.append(
|
||||
get_attention_layer(
|
||||
ch,
|
||||
num_heads,
|
||||
dim_head,
|
||||
depth=transformer_depth[level],
|
||||
context_dim=context_dim,
|
||||
use_checkpoint=use_checkpoint,
|
||||
disabled_sa=False,
|
||||
)
|
||||
)
|
||||
self.input_blocks.append(TimestepEmbedSequential(*layers))
|
||||
self._feature_size += ch
|
||||
input_block_chans.append(ch)
|
||||
if level != len(channel_mult) - 1:
|
||||
ds *= 2
|
||||
out_ch = ch
|
||||
self.input_blocks.append(
|
||||
TimestepEmbedSequential(
|
||||
get_resblock(
|
||||
merge_factor=merge_factor,
|
||||
merge_strategy=merge_strategy,
|
||||
video_kernel_size=video_kernel_size,
|
||||
ch=ch,
|
||||
time_embed_dim=time_embed_dim,
|
||||
dropout=dropout,
|
||||
out_ch=out_ch,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
down=True,
|
||||
)
|
||||
if resblock_updown
|
||||
else Downsample(
|
||||
ch,
|
||||
conv_resample,
|
||||
dims=dims,
|
||||
out_channels=out_ch,
|
||||
third_down=time_downup,
|
||||
)
|
||||
)
|
||||
)
|
||||
ch = out_ch
|
||||
input_block_chans.append(ch)
|
||||
|
||||
self._feature_size += ch
|
||||
|
||||
if num_head_channels == -1:
|
||||
dim_head = ch // num_heads
|
||||
else:
|
||||
num_heads = ch // num_head_channels
|
||||
dim_head = num_head_channels
|
||||
|
||||
self.middle_block = TimestepEmbedSequential(
|
||||
get_resblock(
|
||||
merge_factor=merge_factor,
|
||||
merge_strategy=merge_strategy,
|
||||
video_kernel_size=video_kernel_size,
|
||||
ch=ch,
|
||||
time_embed_dim=time_embed_dim,
|
||||
out_ch=None,
|
||||
dropout=dropout,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
),
|
||||
get_attention_layer(
|
||||
ch,
|
||||
num_heads,
|
||||
dim_head,
|
||||
depth=transformer_depth_middle,
|
||||
context_dim=context_dim,
|
||||
use_checkpoint=use_checkpoint,
|
||||
),
|
||||
get_resblock(
|
||||
merge_factor=merge_factor,
|
||||
merge_strategy=merge_strategy,
|
||||
video_kernel_size=video_kernel_size,
|
||||
ch=ch,
|
||||
out_ch=None,
|
||||
time_embed_dim=time_embed_dim,
|
||||
dropout=dropout,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
),
|
||||
)
|
||||
self._feature_size += ch
|
||||
|
||||
self.output_blocks = nn.ModuleList([])
|
||||
for level, mult in list(enumerate(channel_mult))[::-1]:
|
||||
for i in range(num_res_blocks + 1):
|
||||
ich = input_block_chans.pop()
|
||||
layers = [
|
||||
get_resblock(
|
||||
merge_factor=merge_factor,
|
||||
merge_strategy=merge_strategy,
|
||||
video_kernel_size=video_kernel_size,
|
||||
ch=ch + ich,
|
||||
time_embed_dim=time_embed_dim,
|
||||
dropout=dropout,
|
||||
out_ch=model_channels * mult,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
)
|
||||
]
|
||||
ch = model_channels * mult
|
||||
if ds in attention_resolutions:
|
||||
if num_head_channels == -1:
|
||||
dim_head = ch // num_heads
|
||||
else:
|
||||
num_heads = ch // num_head_channels
|
||||
dim_head = num_head_channels
|
||||
|
||||
layers.append(
|
||||
get_attention_layer(
|
||||
ch,
|
||||
num_heads,
|
||||
dim_head,
|
||||
depth=transformer_depth[level],
|
||||
context_dim=context_dim,
|
||||
use_checkpoint=use_checkpoint,
|
||||
disabled_sa=False,
|
||||
)
|
||||
)
|
||||
if level and i == num_res_blocks:
|
||||
out_ch = ch
|
||||
ds //= 2
|
||||
layers.append(
|
||||
get_resblock(
|
||||
merge_factor=merge_factor,
|
||||
merge_strategy=merge_strategy,
|
||||
video_kernel_size=video_kernel_size,
|
||||
ch=ch,
|
||||
time_embed_dim=time_embed_dim,
|
||||
dropout=dropout,
|
||||
out_ch=out_ch,
|
||||
dims=dims,
|
||||
use_checkpoint=use_checkpoint,
|
||||
use_scale_shift_norm=use_scale_shift_norm,
|
||||
up=True,
|
||||
)
|
||||
if resblock_updown
|
||||
else Upsample(
|
||||
ch,
|
||||
conv_resample,
|
||||
dims=dims,
|
||||
out_channels=out_ch,
|
||||
third_up=time_downup,
|
||||
)
|
||||
)
|
||||
|
||||
self.output_blocks.append(TimestepEmbedSequential(*layers))
|
||||
self._feature_size += ch
|
||||
|
||||
self.out = nn.Sequential(
|
||||
normalization(ch),
|
||||
nn.SiLU(),
|
||||
zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
|
||||
)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: th.Tensor,
|
||||
timesteps: th.Tensor,
|
||||
context: Optional[th.Tensor] = None,
|
||||
y: Optional[th.Tensor] = None,
|
||||
time_context: Optional[th.Tensor] = None,
|
||||
num_video_frames: Optional[int] = None,
|
||||
image_only_indicator: Optional[th.Tensor] = None,
|
||||
):
|
||||
assert (y is not None) == (
|
||||
self.num_classes is not None
|
||||
), "must specify y if and only if the model is class-conditional -> no, relax this TODO"
|
||||
hs = []
|
||||
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
|
||||
emb = self.time_embed(t_emb)
|
||||
|
||||
if self.num_classes is not None:
|
||||
assert y.shape[0] == x.shape[0]
|
||||
emb = emb + self.label_emb(y)
|
||||
|
||||
h = x
|
||||
for module in self.input_blocks:
|
||||
h = module(
|
||||
h,
|
||||
emb,
|
||||
context=context,
|
||||
image_only_indicator=image_only_indicator,
|
||||
time_context=time_context,
|
||||
num_video_frames=num_video_frames,
|
||||
)
|
||||
hs.append(h)
|
||||
h = self.middle_block(
|
||||
h,
|
||||
emb,
|
||||
context=context,
|
||||
image_only_indicator=image_only_indicator,
|
||||
time_context=time_context,
|
||||
num_video_frames=num_video_frames,
|
||||
)
|
||||
for module in self.output_blocks:
|
||||
h = th.cat([h, hs.pop()], dim=1)
|
||||
h = module(
|
||||
h,
|
||||
emb,
|
||||
context=context,
|
||||
image_only_indicator=image_only_indicator,
|
||||
time_context=time_context,
|
||||
num_video_frames=num_video_frames,
|
||||
)
|
||||
h = h.type(x.dtype)
|
||||
return self.out(h)
|
imaginairy/modules/sgm/diffusionmodules/wrappers.py (new file, 34 lines)
@ -0,0 +1,34 @@
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from packaging import version
|
||||
|
||||
OPENAIUNETWRAPPER = "sgm.modules.diffusionmodules.wrappers.OpenAIWrapper"
|
||||
|
||||
|
||||
class IdentityWrapper(nn.Module):
|
||||
def __init__(self, diffusion_model, compile_model: bool = False):
|
||||
super().__init__()
|
||||
torch_compile = (
|
||||
torch.compile
|
||||
if (version.parse(torch.__version__) >= version.parse("2.0.0"))
|
||||
and compile_model
|
||||
else lambda x: x
|
||||
)
|
||||
self.diffusion_model = torch_compile(diffusion_model)
|
||||
|
||||
def forward(self, *args, **kwargs):
|
||||
return self.diffusion_model(*args, **kwargs)
|
||||
|
||||
|
||||
class OpenAIWrapper(IdentityWrapper):
|
||||
def forward(
|
||||
self, x: torch.Tensor, t: torch.Tensor, c: dict, **kwargs
|
||||
) -> torch.Tensor:
|
||||
x = torch.cat((x, c.get("concat", torch.Tensor([]).type_as(x))), dim=1)
|
||||
return self.diffusion_model(
|
||||
x,
|
||||
timesteps=t,
|
||||
context=c.get("crossattn", None),
|
||||
y=c.get("vector", None),
|
||||
**kwargs,
|
||||
)
|
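An illustrative sketch (not part of the commit) of how the conditioning dict is unpacked; ToyDiffusionModel just stands in for a VideoUNet-style module:

import torch
import torch.nn as nn

class ToyDiffusionModel(nn.Module):
    def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
        return x

wrapper = OpenAIWrapper(ToyDiffusionModel())
cond = {
    "concat": torch.randn(2, 4, 8, 8),       # stacked onto x along the channel dim
    "crossattn": torch.randn(2, 77, 1024),   # forwarded as `context`
    "vector": torch.randn(2, 768),           # forwarded as `y`
}
out = wrapper(torch.randn(2, 4, 8, 8), torch.tensor([1, 1]), cond)   # out has 4 + 4 channels here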
imaginairy/modules/sgm/distributions/__init__.py (new empty file)
imaginairy/modules/sgm/distributions/distributions.py (new file, 102 lines)
@ -0,0 +1,102 @@
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
|
||||
class AbstractDistribution:
|
||||
def sample(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def mode(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class DiracDistribution(AbstractDistribution):
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
|
||||
def sample(self):
|
||||
return self.value
|
||||
|
||||
def mode(self):
|
||||
return self.value
|
||||
|
||||
|
||||
class DiagonalGaussianDistribution:
|
||||
def __init__(self, parameters, deterministic=False):
|
||||
self.parameters = parameters
|
||||
self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
|
||||
self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
|
||||
self.deterministic = deterministic
|
||||
self.std = torch.exp(0.5 * self.logvar)
|
||||
self.var = torch.exp(self.logvar)
|
||||
if self.deterministic:
|
||||
self.var = self.std = torch.zeros_like(self.mean).to(
|
||||
device=self.parameters.device
|
||||
)
|
||||
|
||||
def sample(self):
|
||||
x = self.mean + self.std * torch.randn(self.mean.shape).to(
|
||||
device=self.parameters.device
|
||||
)
|
||||
return x
|
||||
|
||||
def kl(self, other=None):
|
||||
if self.deterministic:
|
||||
return torch.Tensor([0.0])
|
||||
else:
|
||||
if other is None:
|
||||
return 0.5 * torch.sum(
|
||||
torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
|
||||
dim=[1, 2, 3],
|
||||
)
|
||||
else:
|
||||
return 0.5 * torch.sum(
|
||||
torch.pow(self.mean - other.mean, 2) / other.var
|
||||
+ self.var / other.var
|
||||
- 1.0
|
||||
- self.logvar
|
||||
+ other.logvar,
|
||||
dim=[1, 2, 3],
|
||||
)
|
||||
|
||||
def nll(self, sample, dims=[1, 2, 3]):
|
||||
if self.deterministic:
|
||||
return torch.Tensor([0.0])
|
||||
logtwopi = np.log(2.0 * np.pi)
|
||||
return 0.5 * torch.sum(
|
||||
logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
|
||||
dim=dims,
|
||||
)
|
||||
|
||||
def mode(self):
|
||||
return self.mean
|
||||
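Illustrative usage (not part of the commit): the first half of the channels is read as the mean, the second half as the log-variance.

import torch

params = torch.randn(2, 8, 32, 32)     # channels 0-3 = mean, 4-7 = logvar
dist = DiagonalGaussianDistribution(params)
z = dist.sample()                      # (2, 4, 32, 32)
kl = dist.kl()                         # per-sample KL to N(0, I), shape (2,)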
|
||||
|
||||
def normal_kl(mean1, logvar1, mean2, logvar2):
|
||||
"""
|
||||
source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
|
||||
Compute the KL divergence between two gaussians.
|
||||
Shapes are automatically broadcasted, so batches can be compared to
|
||||
scalars, among other use cases.
|
||||
"""
|
||||
tensor = None
|
||||
for obj in (mean1, logvar1, mean2, logvar2):
|
||||
if isinstance(obj, torch.Tensor):
|
||||
tensor = obj
|
||||
break
|
||||
assert tensor is not None, "at least one argument must be a Tensor"
|
||||
|
||||
# Force variances to be Tensors. Broadcasting helps convert scalars to
|
||||
# Tensors, but it does not work for torch.exp().
|
||||
logvar1, logvar2 = (
|
||||
x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
|
||||
for x in (logvar1, logvar2)
|
||||
)
|
||||
|
||||
return 0.5 * (
|
||||
-1.0
|
||||
+ logvar2
|
||||
- logvar1
|
||||
+ torch.exp(logvar1 - logvar2)
|
||||
+ ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
|
||||
)
|
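Quick sanity check (illustrative, not part of the commit): identical Gaussians have zero KL.

import torch

assert normal_kl(torch.tensor(0.0), torch.tensor(0.0), 0.0, 0.0) == 0.0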
imaginairy/modules/sgm/encoders/__init__.py (new empty file)
imaginairy/modules/sgm/encoders/modules.py (new file, 1069 lines; diff suppressed because it is too large)
imaginairy/modules/sgm/video_attention.py (new file, 330 lines)
@ -0,0 +1,330 @@
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
import torch
|
||||
from einops import rearrange, repeat
|
||||
from torch import nn
|
||||
|
||||
from imaginairy.modules.attention import XFORMERS_IS_AVAILABLE
|
||||
from imaginairy.modules.sgm.attention import (
|
||||
CrossAttention,
|
||||
FeedForward,
|
||||
MemoryEfficientCrossAttention,
|
||||
SpatialTransformer,
|
||||
)
|
||||
from imaginairy.modules.sgm.diffusionmodules.util import (
|
||||
AlphaBlender,
|
||||
checkpoint,
|
||||
linear,
|
||||
timestep_embedding,
|
||||
)
|
||||
from imaginairy.utils import exists
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TimeMixSequential(nn.Sequential):
|
||||
def forward(self, x, context=None, timesteps=None):
|
||||
for layer in self:
|
||||
x = layer(x, context, timesteps)
|
||||
|
||||
return x
|
||||
|
||||
|
||||
class VideoTransformerBlock(nn.Module):
|
||||
ATTENTION_MODES = {
|
||||
"softmax": CrossAttention,
|
||||
"softmax-xformers": MemoryEfficientCrossAttention,
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim,
|
||||
n_heads,
|
||||
d_head,
|
||||
dropout=0.0,
|
||||
context_dim=None,
|
||||
gated_ff=True,
|
||||
checkpoint=False,
|
||||
timesteps=None,
|
||||
ff_in=False,
|
||||
inner_dim=None,
|
||||
attn_mode="softmax",
|
||||
disable_self_attn=False,
|
||||
disable_temporal_crossattention=False,
|
||||
switch_temporal_ca_to_sa=False,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
if not XFORMERS_IS_AVAILABLE and attn_mode == "softmax-xformers":
|
||||
logger.debug(
|
||||
f"Attention mode '{attn_mode}' is not available. Falling back to vanilla attention. "
|
||||
f"This is not a problem in Pytorch >= 2.0. FYI, you are running with PyTorch version {torch.__version__}"
|
||||
)
|
||||
attn_mode = "softmax"
|
||||
|
||||
attn_cls = self.ATTENTION_MODES[attn_mode]
|
||||
|
||||
self.ff_in = ff_in or inner_dim is not None
|
||||
if inner_dim is None:
|
||||
inner_dim = dim
|
||||
|
||||
assert int(n_heads * d_head) == inner_dim
|
||||
|
||||
self.is_res = inner_dim == dim
|
||||
|
||||
if self.ff_in:
|
||||
self.norm_in = nn.LayerNorm(dim)
|
||||
self.ff_in = FeedForward(
|
||||
dim, dim_out=inner_dim, dropout=dropout, glu=gated_ff
|
||||
)
|
||||
|
||||
self.timesteps = timesteps
|
||||
self.disable_self_attn = disable_self_attn
|
||||
if self.disable_self_attn:
|
||||
self.attn1 = attn_cls(
|
||||
query_dim=inner_dim,
|
||||
heads=n_heads,
|
||||
dim_head=d_head,
|
||||
context_dim=context_dim,
|
||||
dropout=dropout,
|
||||
) # is a cross-attention
|
||||
else:
|
||||
self.attn1 = attn_cls(
|
||||
query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout
|
||||
) # is a self-attention
|
||||
|
||||
self.ff = FeedForward(inner_dim, dim_out=dim, dropout=dropout, glu=gated_ff)
|
||||
|
||||
if disable_temporal_crossattention:
|
||||
if switch_temporal_ca_to_sa:
|
||||
raise ValueError
|
||||
else:
|
||||
self.attn2 = None
|
||||
else:
|
||||
self.norm2 = nn.LayerNorm(inner_dim)
|
||||
if switch_temporal_ca_to_sa:
|
||||
self.attn2 = attn_cls(
|
||||
query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout
|
||||
) # is a self-attention
|
||||
else:
|
||||
self.attn2 = attn_cls(
|
||||
query_dim=inner_dim,
|
||||
context_dim=context_dim,
|
||||
heads=n_heads,
|
||||
dim_head=d_head,
|
||||
dropout=dropout,
|
||||
) # is self-attn if context is none
|
||||
|
||||
self.norm1 = nn.LayerNorm(inner_dim)
|
||||
self.norm3 = nn.LayerNorm(inner_dim)
|
||||
self.switch_temporal_ca_to_sa = switch_temporal_ca_to_sa
|
||||
|
||||
self.checkpoint = checkpoint
|
||||
if self.checkpoint:
|
||||
print(f"{self.__class__.__name__} is using checkpointing")
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
context: torch.Tensor = None,
|
||||
timesteps: Optional[int] = None,
|
||||
) -> torch.Tensor:
|
||||
if self.checkpoint:
|
||||
return checkpoint(self._forward, x, context, timesteps)
|
||||
else:
|
||||
return self._forward(x, context, timesteps=timesteps)
|
||||
|
||||
def _forward(self, x, context=None, timesteps=None):
|
||||
assert self.timesteps or timesteps
|
||||
assert not (self.timesteps and timesteps) or self.timesteps == timesteps
|
||||
timesteps = self.timesteps or timesteps
|
||||
B, S, C = x.shape
|
||||
x = rearrange(x, "(b t) s c -> (b s) t c", t=timesteps)
|
||||
|
||||
if self.ff_in:
|
||||
x_skip = x
|
||||
x = self.ff_in(self.norm_in(x))
|
||||
if self.is_res:
|
||||
x += x_skip
|
||||
|
||||
if self.disable_self_attn:
|
||||
x = self.attn1(self.norm1(x), context=context) + x
|
||||
else:
|
||||
x = self.attn1(self.norm1(x)) + x
|
||||
|
||||
if self.attn2 is not None:
|
||||
if self.switch_temporal_ca_to_sa:
|
||||
x = self.attn2(self.norm2(x)) + x
|
||||
else:
|
||||
x = self.attn2(self.norm2(x), context=context) + x
|
||||
x_skip = x
|
||||
x = self.ff(self.norm3(x))
|
||||
if self.is_res:
|
||||
x += x_skip
|
||||
|
||||
x = rearrange(
|
||||
x, "(b s) t c -> (b t) s c", s=S, b=B // timesteps, c=C, t=timesteps
|
||||
)
|
||||
return x
|
||||
|
||||
def get_last_layer(self):
|
||||
return self.ff.net[-1].weight
|
||||
|
||||
|
||||
class SpatialVideoTransformer(SpatialTransformer):
|
||||
def __init__(
|
||||
self,
|
||||
in_channels,
|
||||
n_heads,
|
||||
d_head,
|
||||
depth=1,
|
||||
dropout=0.0,
|
||||
use_linear=False,
|
||||
context_dim=None,
|
||||
use_spatial_context=False,
|
||||
timesteps=None,
|
||||
merge_strategy: str = "fixed",
|
||||
merge_factor: float = 0.5,
|
||||
time_context_dim=None,
|
||||
ff_in=False,
|
||||
checkpoint=False,
|
||||
time_depth=1,
|
||||
attn_mode="softmax",
|
||||
disable_self_attn=False,
|
||||
disable_temporal_crossattention=False,
|
||||
max_time_embed_period: int = 10000,
|
||||
):
|
||||
super().__init__(
|
||||
in_channels,
|
||||
n_heads,
|
||||
d_head,
|
||||
depth=depth,
|
||||
dropout=dropout,
|
||||
attn_type=attn_mode,
|
||||
use_checkpoint=checkpoint,
|
||||
context_dim=context_dim,
|
||||
use_linear=use_linear,
|
||||
disable_self_attn=disable_self_attn,
|
||||
)
|
||||
self.time_depth = time_depth
|
||||
self.depth = depth
|
||||
self.max_time_embed_period = max_time_embed_period
|
||||
|
||||
time_mix_d_head = d_head
|
||||
n_time_mix_heads = n_heads
|
||||
|
||||
time_mix_inner_dim = int(time_mix_d_head * n_time_mix_heads)
|
||||
|
||||
inner_dim = n_heads * d_head
|
||||
if use_spatial_context:
|
||||
time_context_dim = context_dim
|
||||
|
||||
self.time_stack = nn.ModuleList(
|
||||
[
|
||||
VideoTransformerBlock(
|
||||
inner_dim,
|
||||
n_time_mix_heads,
|
||||
time_mix_d_head,
|
||||
dropout=dropout,
|
||||
context_dim=time_context_dim,
|
||||
timesteps=timesteps,
|
||||
checkpoint=checkpoint,
|
||||
ff_in=ff_in,
|
||||
inner_dim=time_mix_inner_dim,
|
||||
attn_mode=attn_mode,
|
||||
disable_self_attn=disable_self_attn,
|
||||
disable_temporal_crossattention=disable_temporal_crossattention,
|
||||
)
|
||||
for _ in range(self.depth)
|
||||
]
|
||||
)
|
||||
|
||||
assert len(self.time_stack) == len(self.transformer_blocks)
|
||||
|
||||
self.use_spatial_context = use_spatial_context
|
||||
self.in_channels = in_channels
|
||||
|
||||
time_embed_dim = self.in_channels * 4
|
||||
self.time_pos_embed = nn.Sequential(
|
||||
linear(self.in_channels, time_embed_dim),
|
||||
nn.SiLU(),
|
||||
linear(time_embed_dim, self.in_channels),
|
||||
)
|
||||
|
||||
self.time_mixer = AlphaBlender(
|
||||
alpha=merge_factor, merge_strategy=merge_strategy
|
||||
)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: torch.Tensor,
|
||||
context: Optional[torch.Tensor] = None,
|
||||
time_context: Optional[torch.Tensor] = None,
|
||||
timesteps: Optional[int] = None,
|
||||
image_only_indicator: Optional[torch.Tensor] = None,
|
||||
) -> torch.Tensor:
|
||||
_, _, h, w = x.shape
|
||||
x_in = x
|
||||
spatial_context = None
|
||||
if exists(context):
|
||||
spatial_context = context
|
||||
|
||||
if self.use_spatial_context:
|
||||
assert (
|
||||
context.ndim == 3
|
||||
), f"n dims of spatial context should be 3 but are {context.ndim}"
|
||||
|
||||
time_context = context
|
||||
time_context_first_timestep = time_context[::timesteps]
|
||||
time_context = repeat(
|
||||
time_context_first_timestep, "b ... -> (b n) ...", n=h * w
|
||||
)
|
||||
elif time_context is not None and not self.use_spatial_context:
|
||||
time_context = repeat(time_context, "b ... -> (b n) ...", n=h * w)
|
||||
if time_context.ndim == 2:
|
||||
time_context = rearrange(time_context, "b c -> b 1 c")
|
||||
|
||||
x = self.norm(x)
|
||||
if not self.use_linear:
|
||||
x = self.proj_in(x)
|
||||
x = rearrange(x, "b c h w -> b (h w) c")
|
||||
if self.use_linear:
|
||||
x = self.proj_in(x)
|
||||
|
||||
num_frames = torch.arange(timesteps, device=x.device)
|
||||
num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps)
|
||||
num_frames = rearrange(num_frames, "b t -> (b t)")
|
||||
t_emb = timestep_embedding(
|
||||
num_frames,
|
||||
self.in_channels,
|
||||
repeat_only=False,
|
||||
max_period=self.max_time_embed_period,
|
||||
)
|
||||
emb = self.time_pos_embed(t_emb)
|
||||
emb = emb[:, None, :]
|
||||
|
||||
for it_, (block, mix_block) in enumerate(
|
||||
zip(self.transformer_blocks, self.time_stack)
|
||||
):
|
||||
x = block(
|
||||
x,
|
||||
context=spatial_context,
|
||||
)
|
||||
|
||||
x_mix = x
|
||||
x_mix = x_mix + emb
|
||||
|
||||
x_mix = mix_block(x_mix, context=time_context, timesteps=timesteps)
|
||||
x = self.time_mixer(
|
||||
x_spatial=x,
|
||||
x_temporal=x_mix,
|
||||
image_only_indicator=image_only_indicator,
|
||||
)
|
||||
if self.use_linear:
|
||||
x = self.proj_out(x)
|
||||
x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w)
|
||||
if not self.use_linear:
|
||||
x = self.proj_out(x)
|
||||
out = x + x_in
|
||||
return out
|
@ -68,18 +68,20 @@ def instantiate_from_config(config: Union[dict, str]) -> Any:
|
||||
|
||||
|
||||
@contextmanager
|
||||
def platform_appropriate_autocast(precision="autocast"):
|
||||
def platform_appropriate_autocast(precision="autocast", enabled=True):
|
||||
"""
|
||||
Allow calculations to run in mixed precision, which can be faster.
|
||||
"""
|
||||
precision_scope = nullcontext
|
||||
# autocast not supported on CPU
|
||||
# https://github.com/pytorch/pytorch/issues/55374
|
||||
# https://github.com/invoke-ai/InvokeAI/pull/518
|
||||
|
||||
if precision == "autocast" and get_device() in ("cuda",):
|
||||
precision_scope = autocast
|
||||
with precision_scope(get_device()):
|
||||
yield
|
||||
with autocast(get_device(), enabled=enabled):
|
||||
yield
|
||||
else:
|
||||
with nullcontext(get_device()):
|
||||
yield
|
||||
|
||||
|
||||
def _fixed_layer_norm(
|
||||
@ -252,3 +254,52 @@ def check_torch_version():
|
||||
|
||||
if version.parse(torch.__version__) < version.parse("2.0.0"):
|
||||
raise RuntimeError("ImaginAIry is not compatible with torch<2.0.0")
|
||||
|
||||
|
||||
def exists(val):
|
||||
return val is not None
|
||||
|
||||
|
||||
def default(val, d):
|
||||
if val is not None:
|
||||
return val
|
||||
return d() if callable(d) else d
|
||||
|
||||
|
||||
def disabled_train(self, mode=True):
|
||||
"""Overwrite model.train with this function to make sure train/eval mode
|
||||
does not change anymore."""
|
||||
return self
|
||||
|
||||
|
||||
def expand_dims_like(x, y):
|
||||
while x.dim() != y.dim():
|
||||
x = x.unsqueeze(-1)
|
||||
return x
|
||||
|
||||
|
||||
def get_nested_attribute(obj, attribute_path, depth=None, return_key=False):
|
||||
"""
|
||||
Will return the result of a recursive get attribute call.
|
||||
E.g.:
|
||||
a.b.c
|
||||
= getattr(getattr(a, "b"), "c")
|
||||
= get_nested_attribute(a, "b.c")
|
||||
If any part of the attribute call is an integer x with current obj a, will
|
||||
try to call a[x] instead of a.x first.
|
||||
"""
|
||||
attributes = attribute_path.split(".")
|
||||
if depth is not None and depth > 0:
|
||||
attributes = attributes[:depth]
|
||||
assert len(attributes) > 0, "At least one attribute should be selected"
|
||||
current_attribute = obj
|
||||
current_key = None
|
||||
for level, attribute in enumerate(attributes):
|
||||
current_key = ".".join(attributes[: level + 1])
|
||||
try:
|
||||
id_ = int(attribute)
|
||||
current_attribute = current_attribute[id_]
|
||||
except ValueError:
|
||||
current_attribute = getattr(current_attribute, attribute)
|
||||
|
||||
return (current_attribute, current_key) if return_key else current_attribute
|
||||
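Illustrative usage (not part of the commit); note how an integer segment indexes into a sequence:

class Obj:
    pass

a = Obj()
a.layers = [Obj(), Obj()]
a.layers[1].scale = 3.0
scale = get_nested_attribute(a, "layers.1.scale")                  # -> 3.0
scale, key = get_nested_attribute(a, "layers.1.scale", return_key=True)  # -> (3.0, "layers.1.scale")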
|
imaginairy/video_sample.py (new file, 309 lines)
@ -0,0 +1,309 @@
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
from glob import glob
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
from einops import rearrange, repeat
|
||||
from omegaconf import OmegaConf
|
||||
from PIL import Image
|
||||
from torchvision.transforms import ToTensor
|
||||
|
||||
from imaginairy import config
|
||||
from imaginairy.model_manager import get_cached_url_path
|
||||
from imaginairy.paths import PKG_ROOT
|
||||
from imaginairy.utils import (
|
||||
default,
|
||||
get_device,
|
||||
instantiate_from_config,
|
||||
platform_appropriate_autocast,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def generate_video(
|
||||
input_path: str = "other/images/sound-music.jpg", # Can either be image file or folder with image files
|
||||
num_frames: Optional[int] = None,
|
||||
num_steps: Optional[int] = None,
|
||||
model_name: str = "svd_xt",
|
||||
fps_id: int = 6,
|
||||
output_fps: int = 6,
|
||||
motion_bucket_id: int = 127,
|
||||
cond_aug: float = 0.02,
|
||||
seed: Optional[int] = None,
|
||||
decoding_t: int = 1, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
|
||||
device: Optional[str] = None,
|
||||
output_folder: Optional[str] = None,
|
||||
):
|
||||
"""
|
||||
Simple script to generate a single sample conditioned on an image `input_path` or multiple images, one for each
|
||||
image file in folder `input_path`. If you run out of VRAM, try decreasing `decoding_t`.
|
||||
"""
|
||||
device = default(device, get_device)
|
||||
seed = default(seed, random.randint(0, 1000000))
|
||||
output_fps = default(output_fps, fps_id)
|
||||
|
||||
logger.info(f"Device: {device} seed: {seed}")
|
||||
|
||||
torch.cuda.reset_peak_memory_stats()
|
||||
video_model_config = config.video_models.get(model_name, None)
|
||||
if video_model_config is None:
|
||||
msg = f"Version {model_name} does not exist."
|
||||
raise ValueError(msg)
|
||||
|
||||
num_frames = default(num_frames, video_model_config["default_frames"])
|
||||
num_steps = default(num_steps, video_model_config["default_steps"])
|
||||
output_folder = default(output_folder, "outputs/video/")
|
||||
video_config_path = f"{PKG_ROOT}/{video_model_config['config_path']}"
|
||||
|
||||
model, safety_filter = load_model(
|
||||
config=video_config_path,
|
||||
device=device,
|
||||
num_frames=num_frames,
|
||||
num_steps=num_steps,
|
||||
weights_url=video_model_config["weights_url"],
|
||||
)
|
||||
torch.manual_seed(seed)
|
||||
|
||||
path = Path(input_path)
|
||||
all_img_paths = []
|
||||
if path.is_file():
|
||||
if any(input_path.endswith(x) for x in ["jpg", "jpeg", "png"]):
|
||||
all_img_paths = [input_path]
|
||||
else:
|
||||
raise ValueError("Path is not valid image file.")
|
||||
elif path.is_dir():
|
||||
all_img_paths = sorted(
|
||||
[
|
||||
f
|
||||
for f in path.iterdir()
|
||||
if f.is_file() and f.suffix.lower() in [".jpg", ".jpeg", ".png"]
|
||||
]
|
||||
)
|
||||
if len(all_img_paths) == 0:
|
||||
raise ValueError("Folder does not contain any images.")
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
for input_img_path in all_img_paths:
|
||||
with Image.open(input_img_path) as image:
|
||||
if image.mode == "RGBA":
|
||||
image = image.convert("RGB")
|
||||
w, h = image.size
|
||||
|
||||
if h % 64 != 0 or w % 64 != 0:
|
||||
width, height = (x - x % 64 for x in (w, h))
|
||||
image = image.resize((width, height))
|
||||
logger.info(
|
||||
f"Your image is of size {h}x{w} which is not divisible by 64. We are resizing to {height}x{width}!"
|
||||
)
|
||||
|
||||
image = ToTensor()(image)
|
||||
image = image * 2.0 - 1.0
|
||||
|
||||
image = image.unsqueeze(0).to(device)
|
||||
H, W = image.shape[2:]
|
||||
assert image.shape[1] == 3
|
||||
F = 8
|
||||
C = 4
|
||||
shape = (num_frames, C, H // F, W // F)
|
||||
if (H, W) != (576, 1024):
|
||||
logger.warning(
|
||||
"The image you provided is not 576x1024. This leads to suboptimal performance as model was only trained on 576x1024. Consider increasing `cond_aug`."
|
||||
)
|
||||
if motion_bucket_id > 255:
|
||||
logger.warning(
|
||||
"High motion bucket! This may lead to suboptimal performance."
|
||||
)
|
||||
|
||||
if fps_id < 5:
|
||||
logger.warning("Small fps value! This may lead to suboptimal performance.")
|
||||
|
||||
if fps_id > 30:
|
||||
logger.warning("Large fps value! This may lead to suboptimal performance.")
|
||||
|
||||
value_dict = {}
|
||||
value_dict["motion_bucket_id"] = motion_bucket_id
|
||||
value_dict["fps_id"] = fps_id
|
||||
value_dict["cond_aug"] = cond_aug
|
||||
value_dict["cond_frames_without_noise"] = image
|
||||
value_dict["cond_frames"] = image + cond_aug * torch.randn_like(image)
|
||||
value_dict["cond_aug"] = cond_aug
|
||||
|
||||
with torch.no_grad(), platform_appropriate_autocast():
|
||||
reload_model(model.conditioner)
|
||||
batch, batch_uc = get_batch(
|
||||
get_unique_embedder_keys_from_conditioner(model.conditioner),
|
||||
value_dict,
|
||||
[1, num_frames],
|
||||
T=num_frames,
|
||||
device=device,
|
||||
)
|
||||
c, uc = model.conditioner.get_unconditional_conditioning(
|
||||
batch,
|
||||
batch_uc=batch_uc,
|
||||
force_uc_zero_embeddings=[
|
||||
"cond_frames",
|
||||
"cond_frames_without_noise",
|
||||
],
|
||||
)
|
||||
unload_model(model.conditioner)
|
||||
|
||||
for k in ["crossattn", "concat"]:
|
||||
uc[k] = repeat(uc[k], "b ... -> b t ...", t=num_frames)
|
||||
uc[k] = rearrange(uc[k], "b t ... -> (b t) ...", t=num_frames)
|
||||
c[k] = repeat(c[k], "b ... -> b t ...", t=num_frames)
|
||||
c[k] = rearrange(c[k], "b t ... -> (b t) ...", t=num_frames)
|
||||
|
||||
randn = torch.randn(shape, device=device)
|
||||
|
||||
additional_model_inputs = {}
|
||||
additional_model_inputs["image_only_indicator"] = torch.zeros(
|
||||
2, num_frames
|
||||
).to(device)
|
||||
additional_model_inputs["num_video_frames"] = batch["num_video_frames"]
|
||||
|
||||
def denoiser(_input, sigma, c):
|
||||
_input = _input.half()
|
||||
return model.denoiser(
|
||||
model.model, _input, sigma, c, **additional_model_inputs
|
||||
)
|
||||
|
||||
reload_model(model.denoiser)
|
||||
reload_model(model.model)
|
||||
samples_z = model.sampler(denoiser, randn, cond=c, uc=uc)
|
||||
unload_model(model.model)
|
||||
unload_model(model.denoiser)
|
||||
model.en_and_decode_n_samples_a_time = decoding_t
|
||||
samples_x = model.decode_first_stage(samples_z)
|
||||
samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)
|
||||
|
||||
os.makedirs(output_folder, exist_ok=True)
|
||||
base_count = len(glob(os.path.join(output_folder, "*.mp4")))
|
||||
video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
|
||||
writer = cv2.VideoWriter(
|
||||
video_path,
|
||||
cv2.VideoWriter_fourcc(*"MP4V"),
|
||||
output_fps,
|
||||
(samples.shape[-1], samples.shape[-2]),
|
||||
)
|
||||
|
||||
samples = safety_filter(samples)
|
||||
vid = (
|
||||
(rearrange(samples, "t c h w -> t h w c") * 255)
|
||||
.cpu()
|
||||
.numpy()
|
||||
.astype(np.uint8)
|
||||
)
|
||||
for frame in vid:
|
||||
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
|
||||
writer.write(frame)
|
||||
writer.release()
|
||||
if torch.cuda.is_available():
|
||||
peak_memory_usage = torch.cuda.max_memory_allocated()
|
||||
msg = f"Peak memory usage: {peak_memory_usage / (1024 ** 2)} MB"
|
||||
logger.info(msg)
|
||||
logger.info(f"Video saved to {video_path}\n")
|
||||
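Minimal programmatic use (illustrative; the argument values are assumptions, not taken from the commit):

generate_video(
    input_path="assets/rocket-wide.png",
    model_name="svd_xt",
    output_folder="outputs/video/",
)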
|
||||
|
||||
def get_unique_embedder_keys_from_conditioner(conditioner):
|
||||
return list({x.input_key for x in conditioner.embedders})
|
||||
|
||||
|
||||
def get_batch(keys, value_dict, N, T, device):
|
||||
batch = {}
|
||||
batch_uc = {}
|
||||
|
||||
for key in keys:
|
||||
if key == "fps_id":
|
||||
batch[key] = (
|
||||
torch.tensor([value_dict["fps_id"]])
|
||||
.to(device)
|
||||
.repeat(int(math.prod(N)))
|
||||
)
|
||||
elif key == "motion_bucket_id":
|
||||
batch[key] = (
|
||||
torch.tensor([value_dict["motion_bucket_id"]])
|
||||
.to(device)
|
||||
.repeat(int(math.prod(N)))
|
||||
)
|
||||
elif key == "cond_aug":
|
||||
batch[key] = repeat(
|
||||
torch.tensor([value_dict["cond_aug"]]).to(device),
|
||||
"1 -> b",
|
||||
b=math.prod(N),
|
||||
)
|
||||
elif key == "cond_frames":
|
||||
batch[key] = repeat(value_dict["cond_frames"], "1 ... -> b ...", b=N[0])
|
||||
elif key == "cond_frames_without_noise":
|
||||
batch[key] = repeat(
|
||||
value_dict["cond_frames_without_noise"], "1 ... -> b ...", b=N[0]
|
||||
)
|
||||
else:
|
||||
batch[key] = value_dict[key]
|
||||
|
||||
if T is not None:
|
||||
batch["num_video_frames"] = T
|
||||
|
||||
for key in batch:
|
||||
if key not in batch_uc and isinstance(batch[key], torch.Tensor):
|
||||
batch_uc[key] = torch.clone(batch[key])
|
||||
return batch, batch_uc
|
||||
|
||||
|
||||
def load_model(
|
||||
config: str, device: str, num_frames: int, num_steps: int, weights_url: str
|
||||
):
|
||||
config = OmegaConf.load(config)
|
||||
ckpt_path = get_cached_url_path(weights_url)
|
||||
config["model"]["params"]["ckpt_path"] = ckpt_path
|
||||
if device == "cuda":
|
||||
config.model.params.conditioner_config.params.emb_models[
|
||||
0
|
||||
].params.open_clip_embedding_config.params.init_device = device
|
||||
|
||||
config.model.params.sampler_config.params.num_steps = num_steps
|
||||
config.model.params.sampler_config.params.guider_config.params.num_frames = (
|
||||
num_frames
|
||||
)
|
||||
|
||||
model = instantiate_from_config(config.model).to(device).half().eval()
|
||||
|
||||
# safety_filter = DeepFloydDataFiltering(verbose=False, device=device)
|
||||
def safety_filter(x):
|
||||
return x
|
||||
|
||||
# use less memory
|
||||
model.model.half()
|
||||
return model, safety_filter
|
||||
|
||||
|
||||
lowvram_mode = True
|
||||
|
||||
|
||||
def unload_model(model):
|
||||
global lowvram_mode
|
||||
if lowvram_mode:
|
||||
model.cpu()
|
||||
if get_device() == "cuda":
|
||||
torch.cuda.empty_cache()
|
||||
|
||||
|
||||
def reload_model(model):
|
||||
model.to(get_device())
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s %(levelname)s %(name)s: %(message)s",
|
||||
datefmt="%Y-%m-%d %H:%M:%S",
|
||||
)
|
||||
generate_video()
|
setup.py (1 line added)
@ -110,6 +110,7 @@ setup(
|
||||
# "triton>=2.0.0; sys_platform!='darwin' and platform_machine!='aarch64'",
|
||||
"kornia>=0.6",
|
||||
"uvicorn>=0.16.0",
|
||||
"xformers>=0.0.16; sys_platform!='darwin' and platform_machine!='aarch64'",
|
||||
],
|
||||
# don't specify maximum python versions as it can cause very long dependency resolution issues as the resolver
|
||||
# goes back to older versions of packages that didn't specify a maximum
|
||||
|