import math

import torch
from scipy import integrate
from torchdiffeq import odeint
from tqdm.auto import trange


def append_zero(x):
    """Appends a final zero to a 1D tensor of sigmas."""
    return torch.cat([x, x.new_zeros([1])])


def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device="cpu"):
    """Constructs the noise schedule of Karras et al. (2022)."""
    ramp = torch.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    return append_zero(sigmas).to(device)
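
# The schedule interpolates linearly in sigma**(1/rho) space, so steps cluster
# toward sigma_min. A quick sanity check (values computed by hand, approximate):
#
#     >>> get_sigmas_karras(5, 0.1, 10.0)
#     tensor([10.0000, 4.0701, 1.4507, 0.4325, 0.1000, 0.0000])  # approx.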


def get_sigmas_exponential(n, sigma_min, sigma_max, device="cpu"):
    """Constructs an exponential noise schedule."""
    sigmas = torch.linspace(
        math.log(sigma_max), math.log(sigma_min), n, device=device
    ).exp()
    return append_zero(sigmas)


def get_sigmas_vp(n, beta_d=19.9, beta_min=0.1, eps_s=1e-3, device="cpu"):
    """Constructs a continuous VP noise schedule."""
    t = torch.linspace(1, eps_s, n, device=device)
    sigmas = torch.sqrt(torch.exp(beta_d * t**2 / 2 + beta_min * t) - 1)
    return append_zero(sigmas)


def to_d(x, sigma, denoised):
    """Converts a denoiser output to a Karras ODE derivative."""
    return (x - denoised) / sigma
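
# For reference: with a denoiser D(x; sigma), the probability flow ODE of
# Karras et al. (2022) is dx/dsigma = (x - D(x; sigma)) / sigma, which is
# exactly the derivative `to_d` returns.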


def get_ancestral_step(sigma_from, sigma_to):
    """Calculates the noise level (sigma_down) to step down to and the amount
    of noise to add (sigma_up) when doing an ancestral sampling step."""
    sigma_up = (
        sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2
    ) ** 0.5
    sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
    return sigma_down, sigma_up
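
# The split is chosen so the variances add back up: sigma_down**2 +
# sigma_up**2 == sigma_to**2. That is, a deterministic step down to
# sigma_down followed by fresh Gaussian noise of scale sigma_up lands
# the marginal at exactly noise level sigma_to.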


@torch.no_grad()
def sample_euler(
    model,
    x,
    sigmas,
    extra_args=None,
    callback=None,
    disable=None,
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
):
    """Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        # "Churn": when s_churn > 0 and sigmas[i] lies in [s_tmin, s_tmax],
        # temporarily raise the noise level to sigma_hat by adding fresh
        # noise before taking the step.
        gamma = (
            min(s_churn / (len(sigmas) - 1), 2**0.5 - 1)
            if s_tmin <= sigmas[i] <= s_tmax
            else 0.0
        )
        eps = torch.randn_like(x) * s_noise
        sigma_hat = sigmas[i] * (gamma + 1)
        if gamma > 0:
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x, sigma_hat, denoised)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigma_hat,
                    "denoised": denoised,
                }
            )
        dt = sigmas[i + 1] - sigma_hat
        # Euler method
        x = x + d * dt
    return x
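
# A minimal usage sketch, assuming a hypothetical `denoiser` callable with the
# Karras-style interface denoiser(x, sigma, **extra_args) -> denoised:
#
#     sigmas = get_sigmas_karras(n=50, sigma_min=0.01, sigma_max=80.0)
#     x = torch.randn(4, 3, 64, 64) * sigmas[0]
#     samples = sample_euler(denoiser, x, sigmas)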


@torch.no_grad()
def sample_euler_ancestral(
    model, x, sigmas, extra_args=None, callback=None, disable=None
):
    """Ancestral sampling with Euler method steps."""
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigmas[i],
                    "denoised": denoised,
                }
            )
        d = to_d(x, sigmas[i], denoised)
        # Euler method
        dt = sigma_down - sigmas[i]
        x = x + d * dt
        # Noise is drawn on the CPU and moved over, presumably so that seeded
        # runs reproduce across devices.
        x = x + torch.randn_like(x, device="cpu").to(x.device) * sigma_up
    return x
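
# Each ancestral step is a deterministic ODE step down to sigma_down followed
# by re-noising up to sigmas[i + 1]; see get_ancestral_step for how the two
# noise levels are split.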


@torch.no_grad()
def sample_heun(
    model,
    x,
    sigmas,
    extra_args=None,
    callback=None,
    disable=None,
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
):
    """Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        gamma = (
            min(s_churn / (len(sigmas) - 1), 2**0.5 - 1)
            if s_tmin <= sigmas[i] <= s_tmax
            else 0.0
        )
        eps = torch.randn_like(x) * s_noise
        sigma_hat = sigmas[i] * (gamma + 1)
        if gamma > 0:
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x, sigma_hat, denoised)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigma_hat,
                    "denoised": denoised,
                }
            )
        dt = sigmas[i + 1] - sigma_hat
        if sigmas[i + 1] == 0:
            # Euler method
            x = x + d * dt
        else:
            # Heun's method
            x_2 = x + d * dt
            denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
            d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
            d_prime = (d + d_2) / 2
            x = x + d_prime * dt
    return x
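
# Heun's method averages the derivative at the start and (predicted) end of
# the step (the trapezoidal rule), which is second-order accurate at the cost
# of two model evaluations per step. The final step to sigma = 0 falls back
# to Euler, since to_d would divide by zero there.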


@torch.no_grad()
def sample_dpm_2(
    model,
    x,
    sigmas,
    extra_args=None,
    callback=None,
    disable=None,
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
):
    """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        gamma = (
            min(s_churn / (len(sigmas) - 1), 2**0.5 - 1)
            if s_tmin <= sigmas[i] <= s_tmax
            else 0.0
        )
        eps = torch.randn_like(x) * s_noise
        sigma_hat = sigmas[i] * (gamma + 1)
        if gamma > 0:
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x, sigma_hat, denoised)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigma_hat,
                    "denoised": denoised,
                }
            )
        # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
        sigma_mid = ((sigma_hat ** (1 / 3) + sigmas[i + 1] ** (1 / 3)) / 2) ** 3
        dt_1 = sigma_mid - sigma_hat
        dt_2 = sigmas[i + 1] - sigma_hat
        x_2 = x + d * dt_1
        denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
        d_2 = to_d(x_2, sigma_mid, denoised_2)
        x = x + d_2 * dt_2
    return x


@torch.no_grad()
def sample_dpm_2_ancestral(
    model, x, sigmas, extra_args=None, callback=None, disable=None
):
    """Ancestral sampling with DPM-Solver inspired second-order steps."""
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigmas[i],
                    "denoised": denoised,
                }
            )
        d = to_d(x, sigmas[i], denoised)
        # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
        sigma_mid = ((sigmas[i] ** (1 / 3) + sigma_down ** (1 / 3)) / 2) ** 3
        dt_1 = sigma_mid - sigmas[i]
        dt_2 = sigma_down - sigmas[i]
        x_2 = x + d * dt_1
        denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
        d_2 = to_d(x_2, sigma_mid, denoised_2)
        x = x + d_2 * dt_2
        x = x + torch.randn_like(x, device="cpu").to(x.device) * sigma_up
    return x


def linear_multistep_coeff(order, t, i, j):
    """Integrates the j-th Lagrange basis polynomial over the current step,
    yielding the Adams-Bashforth coefficient for the j-th stored derivative."""
    if order - 1 > i:
        raise ValueError(f"Order {order} too high for step {i}")

    def fn(tau):
        # The j-th Lagrange basis polynomial over nodes t[i], ..., t[i - order + 1]:
        # equal to 1 at t[i - j] and 0 at the other nodes.
        prod = 1.0
        for k in range(order):
            if j == k:
                continue
            prod *= (tau - t[i - k]) / (t[i - j] - t[i - k])
        return prod

    return integrate.quad(fn, t[i], t[i + 1], epsrel=1e-4)[0]
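
# In other words: the stored derivatives are interpolated by the polynomial
# passing through them, and that polynomial is integrated exactly from t[i]
# to t[i + 1]. For order=1 this reduces to plain Euler, since fn is the
# constant 1 and the coefficient is just t[i + 1] - t[i].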


@torch.no_grad()
def sample_lms(model, x, sigmas, extra_args=None, callback=None, disable=None, order=4):
    """Linear multistep sampling, reusing up to `order` past derivatives."""
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    sigmas_cpu = sigmas.detach().cpu().numpy()
    ds = []
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        d = to_d(x, sigmas[i], denoised)
        ds.append(d)
        if len(ds) > order:
            ds.pop(0)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigmas[i],
                    "denoised": denoised,
                }
            )
        # Ramp the order up over the first few steps while the history fills.
        cur_order = min(i + 1, order)
        coeffs = [
            linear_multistep_coeff(cur_order, sigmas_cpu, i, j)
            for j in range(cur_order)
        ]
        x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
    return x


@torch.no_grad()
def log_likelihood(
    model, x, sigma_min, sigma_max, extra_args=None, atol=1e-4, rtol=1e-4
):
    """Estimates the log likelihood of `x` by integrating the probability
    flow ODE from sigma_min to sigma_max, tracking the change in log
    density with a Hutchinson trace estimator."""
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    # Rademacher probe vector (entries +/-1) for the Hutchinson estimator.
    v = torch.randint_like(x, 2) * 2 - 1
    fevals = 0

    def ode_fn(sigma, x):
        nonlocal fevals
        with torch.enable_grad():
            x = x[0].detach().requires_grad_()
            denoised = model(x, sigma * s_in, **extra_args)
            d = to_d(x, sigma, denoised)
            fevals += 1
            # Estimate the divergence of d: E[v^T (dd/dx) v] equals tr(dd/dx).
            grad = torch.autograd.grad((d * v).sum(), x)[0]
            d_ll = (v * grad).flatten(1).sum(1)
        return d.detach(), d_ll

    x_min = x, x.new_zeros([x.shape[0]])
    t = x.new_tensor([sigma_min, sigma_max])
    sol = odeint(ode_fn, x_min, t, atol=atol, rtol=rtol, method="dopri5")
    latent, delta_ll = sol[0][-1], sol[1][-1]
    ll_prior = (
        torch.distributions.Normal(0, sigma_max).log_prob(latent).flatten(1).sum(1)
    )
    return ll_prior + delta_ll, {"fevals": fevals}
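
# Usage sketch with a hypothetical `denoiser`: `ll` is the per-sample log
# likelihood in nats and `info["fevals"]` counts model evaluations made by
# the adaptive dopri5 solver:
#
#     ll, info = log_likelihood(denoiser, x, sigma_min=1e-3, sigma_max=80.0)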