add interpolateframes

Kritik Soman 2020-11-27 22:22:57 +05:30
parent eada1afa3c
commit b431ca6dbc
11 changed files with 1032 additions and 0 deletions

1
.gitignore vendored

@@ -5,3 +5,4 @@
*.pyc
gimpenv/
weights/
output/

21
gimp-plugins/RIFE/LICENSE Executable file

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2020 hzwer

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

117
gimp-plugins/RIFE/model/IFNet.py Executable file

@@ -0,0 +1,117 @@
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from model.warplayer import warp
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv_wo_act(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes),
)
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes),
nn.PReLU(out_planes)
)
class ResBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(ResBlock, self).__init__()
if in_planes == out_planes and stride == 1:
self.conv0 = nn.Identity()
else:
self.conv0 = nn.Conv2d(in_planes, out_planes,
3, stride, 1, bias=False)
self.conv1 = conv(in_planes, out_planes, 3, stride, 1)
self.conv2 = conv_wo_act(out_planes, out_planes, 3, 1, 1)
self.relu1 = nn.PReLU(1)
self.relu2 = nn.PReLU(out_planes)
self.fc1 = nn.Conv2d(out_planes, 16, kernel_size=1, bias=False)
self.fc2 = nn.Conv2d(16, out_planes, kernel_size=1, bias=False)
    def forward(self, x):
        y = self.conv0(x)  # identity or strided projection shortcut
        x = self.conv1(x)
        x = self.conv2(x)
        # squeeze-and-excitation style gating: global average pool,
        # bottleneck, then sigmoid channel weights
        w = x.mean(3, True).mean(2, True)
        w = self.relu1(self.fc1(w))
        w = torch.sigmoid(self.fc2(w))
        x = self.relu2(x * w + y)
        return x
class IFBlock(nn.Module):
def __init__(self, in_planes, scale=1, c=64):
super(IFBlock, self).__init__()
self.scale = scale
self.conv0 = conv(in_planes, c, 3, 2, 1)
self.res0 = ResBlock(c, c)
self.res1 = ResBlock(c, c)
self.res2 = ResBlock(c, c)
self.res3 = ResBlock(c, c)
self.res4 = ResBlock(c, c)
self.res5 = ResBlock(c, c)
self.conv1 = nn.Conv2d(c, 8, 3, 1, 1)
self.up = nn.PixelShuffle(2)
def forward(self, x):
if self.scale != 1:
x = F.interpolate(x, scale_factor=1. / self.scale, mode="bilinear",
align_corners=False)
x = self.conv0(x)
x = self.res0(x)
x = self.res1(x)
x = self.res2(x)
x = self.res3(x)
x = self.res4(x)
x = self.res5(x)
x = self.conv1(x)
flow = self.up(x)
if self.scale != 1:
flow = F.interpolate(flow, scale_factor=self.scale, mode="bilinear",
align_corners=False)
return flow
class IFNet(nn.Module):
def __init__(self, cFlag):
super(IFNet, self).__init__()
self.block0 = IFBlock(6, scale=4, c=192)
self.block1 = IFBlock(8, scale=2, c=128)
self.block2 = IFBlock(8, scale=1, c=64)
self.cFlag = cFlag
    def forward(self, x):
        # x holds both frames concatenated channel-wise (N, 6, H, W);
        # flow is estimated coarse-to-fine on a 0.5x downsampled pair
        x = F.interpolate(x, scale_factor=0.5, mode="bilinear",
                          align_corners=False)
flow0 = self.block0(x)
F1 = flow0
warped_img0 = warp(x[:, :3], F1, self.cFlag)
warped_img1 = warp(x[:, 3:], -F1, self.cFlag)
flow1 = self.block1(torch.cat((warped_img0, warped_img1, F1), 1))
F2 = (flow0 + flow1)
warped_img0 = warp(x[:, :3], F2, self.cFlag)
warped_img1 = warp(x[:, 3:], -F2, self.cFlag)
flow2 = self.block2(torch.cat((warped_img0, warped_img1, F2), 1))
F3 = (flow0 + flow1 + flow2)
return F3, [F1, F2, F3]
if __name__ == '__main__':
    cFlag = False  # set True to force CPU
    device = torch.device("cuda" if torch.cuda.is_available() and not cFlag else "cpu")
    img0 = torch.zeros(3, 3, 256, 256).float().to(device)
    img1 = torch.tensor(np.random.normal(
        0, 1, (3, 3, 256, 256))).float().to(device)
    imgs = torch.cat((img0, img1), 1)
    flownet = IFNet(cFlag).to(device)
    flow, _ = flownet(imgs)
    print(flow.shape)

115
gimp-plugins/RIFE/model/IFNet2F.py Executable file

@@ -0,0 +1,115 @@
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from model.warplayer import warp
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv_wo_act(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes),
)
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes),
nn.PReLU(out_planes)
)
class ResBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(ResBlock, self).__init__()
if in_planes == out_planes and stride == 1:
self.conv0 = nn.Identity()
else:
self.conv0 = nn.Conv2d(in_planes, out_planes,
3, stride, 1, bias=False)
self.conv1 = conv(in_planes, out_planes, 3, stride, 1)
self.conv2 = conv_wo_act(out_planes, out_planes, 3, 1, 1)
self.relu1 = nn.PReLU(1)
self.relu2 = nn.PReLU(out_planes)
self.fc1 = nn.Conv2d(out_planes, 16, kernel_size=1, bias=False)
self.fc2 = nn.Conv2d(16, out_planes, kernel_size=1, bias=False)
def forward(self, x):
y = self.conv0(x)
x = self.conv1(x)
x = self.conv2(x)
w = x.mean(3, True).mean(2, True)
w = self.relu1(self.fc1(w))
w = torch.sigmoid(self.fc2(w))
x = self.relu2(x * w + y)
return x
class IFBlock(nn.Module):
def __init__(self, in_planes, scale=1, c=64):
super(IFBlock, self).__init__()
self.scale = scale
self.conv0 = conv(in_planes, c, 3, 1, 1)
self.res0 = ResBlock(c, c)
self.res1 = ResBlock(c, c)
self.res2 = ResBlock(c, c)
self.res3 = ResBlock(c, c)
self.res4 = ResBlock(c, c)
self.res5 = ResBlock(c, c)
self.conv1 = nn.Conv2d(c, 2, 3, 1, 1)
self.up = nn.PixelShuffle(2)
def forward(self, x):
if self.scale != 1:
x = F.interpolate(x, scale_factor=1. / self.scale, mode="bilinear",
align_corners=False)
x = self.conv0(x)
x = self.res0(x)
x = self.res1(x)
x = self.res2(x)
x = self.res3(x)
x = self.res4(x)
x = self.res5(x)
x = self.conv1(x)
flow = x # self.up(x)
if self.scale != 1:
flow = F.interpolate(flow, scale_factor=self.scale, mode="bilinear",
align_corners=False)
return flow
class IFNet(nn.Module):
def __init__(self):
super(IFNet, self).__init__()
self.block0 = IFBlock(6, scale=4, c=192)
self.block1 = IFBlock(8, scale=2, c=128)
self.block2 = IFBlock(8, scale=1, c=64)
def forward(self, x):
x = F.interpolate(x, scale_factor=0.5, mode="bilinear",
align_corners=False)
flow0 = self.block0(x)
F1 = flow0
warped_img0 = warp(x[:, :3], F1)
warped_img1 = warp(x[:, 3:], -F1)
flow1 = self.block1(torch.cat((warped_img0, warped_img1, F1), 1))
F2 = (flow0 + flow1)
warped_img0 = warp(x[:, :3], F2)
warped_img1 = warp(x[:, 3:], -F2)
flow2 = self.block2(torch.cat((warped_img0, warped_img1, F2), 1))
F3 = (flow0 + flow1 + flow2)
return F3, [F1, F2, F3]
if __name__ == '__main__':
img0 = torch.zeros(3, 3, 256, 256).float().to(device)
img1 = torch.tensor(np.random.normal(
0, 1, (3, 3, 256, 256))).float().to(device)
imgs = torch.cat((img0, img1), 1)
    flownet = IFNet().to(device)
flow, _ = flownet(imgs)
print(flow.shape)

262
gimp-plugins/RIFE/model/RIFE.py Executable file

@@ -0,0 +1,262 @@
import torch
import torch.nn as nn
import numpy as np
from torch.optim import AdamW
import torch.optim as optim
import itertools
from model.warplayer import warp
from torch.nn.parallel import DistributedDataParallel as DDP
from model.IFNet import *
import torch.nn.functional as F
from model.loss import *
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.PReLU(out_planes)
)
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
return nn.Sequential(
torch.nn.ConvTranspose2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=4, stride=2, padding=1, bias=True),
nn.PReLU(out_planes)
)
def conv_woact(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
)
class ResBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride=2):
super(ResBlock, self).__init__()
if in_planes == out_planes and stride == 1:
self.conv0 = nn.Identity()
else:
self.conv0 = nn.Conv2d(in_planes, out_planes,
3, stride, 1, bias=False)
self.conv1 = conv(in_planes, out_planes, 3, stride, 1)
self.conv2 = conv_woact(out_planes, out_planes, 3, 1, 1)
self.relu1 = nn.PReLU(1)
self.relu2 = nn.PReLU(out_planes)
self.fc1 = nn.Conv2d(out_planes, 16, kernel_size=1, bias=False)
self.fc2 = nn.Conv2d(16, out_planes, kernel_size=1, bias=False)
def forward(self, x):
y = self.conv0(x)
x = self.conv1(x)
x = self.conv2(x)
w = x.mean(3, True).mean(2, True)
w = self.relu1(self.fc1(w))
w = torch.sigmoid(self.fc2(w))
x = self.relu2(x * w + y)
return x
c = 16
class ContextNet(nn.Module):
def __init__(self, cFlag):
super(ContextNet, self).__init__()
self.conv1 = ResBlock(3, c)
self.conv2 = ResBlock(c, 2 * c)
self.conv3 = ResBlock(2 * c, 4 * c)
self.conv4 = ResBlock(4 * c, 8 * c)
self.cFlag = cFlag
def forward(self, x, flow):
x = self.conv1(x)
f1 = warp(x, flow, self.cFlag)
x = self.conv2(x)
flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
align_corners=False) * 0.5
f2 = warp(x, flow, self.cFlag)
x = self.conv3(x)
flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
align_corners=False) * 0.5
f3 = warp(x, flow, self.cFlag)
x = self.conv4(x)
flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
align_corners=False) * 0.5
f4 = warp(x, flow, self.cFlag)
return [f1, f2, f3, f4]
class FusionNet(nn.Module):
def __init__(self, cFlag):
super(FusionNet, self).__init__()
self.down0 = ResBlock(8, 2 * c)
self.down1 = ResBlock(4 * c, 4 * c)
self.down2 = ResBlock(8 * c, 8 * c)
self.down3 = ResBlock(16 * c, 16 * c)
self.up0 = deconv(32 * c, 8 * c)
self.up1 = deconv(16 * c, 4 * c)
self.up2 = deconv(8 * c, 2 * c)
self.up3 = deconv(4 * c, c)
self.conv = nn.Conv2d(c, 4, 3, 1, 1)
self.cFlag = cFlag
def forward(self, img0, img1, flow, c0, c1, flow_gt):
warped_img0 = warp(img0, flow, self.cFlag)
warped_img1 = warp(img1, -flow, self.cFlag)
        if flow_gt is None:
            warped_img0_gt, warped_img1_gt = None, None
        else:
            warped_img0_gt = warp(img0, flow_gt[:, :2], self.cFlag)
            warped_img1_gt = warp(img1, flow_gt[:, 2:4], self.cFlag)
s0 = self.down0(torch.cat((warped_img0, warped_img1, flow), 1))
s1 = self.down1(torch.cat((s0, c0[0], c1[0]), 1))
s2 = self.down2(torch.cat((s1, c0[1], c1[1]), 1))
s3 = self.down3(torch.cat((s2, c0[2], c1[2]), 1))
x = self.up0(torch.cat((s3, c0[3], c1[3]), 1))
x = self.up1(torch.cat((x, s2), 1))
x = self.up2(torch.cat((x, s1), 1))
x = self.up3(torch.cat((x, s0), 1))
x = self.conv(x)
return x, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt
class Model:
def __init__(self, c_flag, local_rank=-1):
self.flownet = IFNet(c_flag)
self.contextnet = ContextNet(c_flag)
self.fusionnet = FusionNet(c_flag)
self.device(c_flag)
self.optimG = AdamW(itertools.chain(
self.flownet.parameters(),
self.contextnet.parameters(),
self.fusionnet.parameters()), lr=1e-6, weight_decay=1e-5)
self.schedulerG = optim.lr_scheduler.CyclicLR(
self.optimG, base_lr=1e-6, max_lr=1e-3, step_size_up=8000, cycle_momentum=False)
self.epe = EPE()
self.ter = Ternary(c_flag)
self.sobel = SOBEL(c_flag)
if local_rank != -1:
self.flownet = DDP(self.flownet, device_ids=[
local_rank], output_device=local_rank)
self.contextnet = DDP(self.contextnet, device_ids=[
local_rank], output_device=local_rank)
self.fusionnet = DDP(self.fusionnet, device_ids=[
local_rank], output_device=local_rank)
def train(self):
self.flownet.train()
self.contextnet.train()
self.fusionnet.train()
def eval(self):
self.flownet.eval()
self.contextnet.eval()
self.fusionnet.eval()
def device(self, c_flag):
if torch.cuda.is_available() and not c_flag:
device = torch.device("cuda")
else:
device = torch.device("cpu")
self.flownet.to(device)
self.contextnet.to(device)
self.fusionnet.to(device)
def load_model(self, path, rank=0):
def convert(param):
return {
k.replace("module.", ""): v
for k, v in param.items()
if "module." in k
}
if rank == 0:
self.flownet.load_state_dict(
convert(torch.load('{}/flownet.pkl'.format(path), map_location=torch.device("cpu"))))
self.contextnet.load_state_dict(
convert(torch.load('{}/contextnet.pkl'.format(path), map_location=torch.device("cpu"))))
self.fusionnet.load_state_dict(
convert(torch.load('{}/unet.pkl'.format(path), map_location=torch.device("cpu"))))
def save_model(self, path, rank=0):
if rank == 0:
torch.save(self.flownet.state_dict(),
'{}/flownet.pkl'.format(path))
torch.save(self.contextnet.state_dict(),
'{}/contextnet.pkl'.format(path))
torch.save(self.fusionnet.state_dict(), '{}/unet.pkl'.format(path))
def predict(self, imgs, flow, training=True, flow_gt=None):
img0 = imgs[:, :3]
img1 = imgs[:, 3:]
c0 = self.contextnet(img0, flow)
c1 = self.contextnet(img1, -flow)
flow = F.interpolate(flow, scale_factor=2.0, mode="bilinear",
align_corners=False) * 2.0
refine_output, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt = self.fusionnet(
img0, img1, flow, c0, c1, flow_gt)
res = torch.sigmoid(refine_output[:, :3]) * 2 - 1
mask = torch.sigmoid(refine_output[:, 3:4])
merged_img = warped_img0 * mask + warped_img1 * (1 - mask)
pred = merged_img + res
pred = torch.clamp(pred, 0, 1)
if training:
return pred, mask, merged_img, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt
else:
return pred
def inference(self, img0, img1):
imgs = torch.cat((img0, img1), 1)
flow, _ = self.flownet(imgs)
return self.predict(imgs, flow, training=False).detach()
def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):
for param_group in self.optimG.param_groups:
param_group['lr'] = learning_rate
if training:
self.train()
else:
self.eval()
flow, flow_list = self.flownet(imgs)
pred, mask, merged_img, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt = self.predict(
imgs, flow, flow_gt=flow_gt)
loss_ter = self.ter(pred, gt).mean()
if training:
with torch.no_grad():
loss_flow = torch.abs(warped_img0_gt - gt).mean()
loss_mask = torch.abs(
merged_img - gt).sum(1, True).float().detach()
loss_mask = F.interpolate(loss_mask, scale_factor=0.5, mode="bilinear",
align_corners=False).detach()
flow_gt = (F.interpolate(flow_gt, scale_factor=0.5, mode="bilinear",
align_corners=False) * 0.5).detach()
loss_cons = 0
for i in range(3):
loss_cons += self.epe(flow_list[i], flow_gt[:, :2], 1)
loss_cons += self.epe(-flow_list[i], flow_gt[:, 2:4], 1)
loss_cons = loss_cons.mean() * 0.01
else:
loss_cons = torch.tensor([0])
loss_flow = torch.abs(warped_img0 - gt).mean()
loss_mask = 1
loss_l1 = (((pred - gt) ** 2 + 1e-6) ** 0.5).mean()
if training:
self.optimG.zero_grad()
loss_G = loss_l1 + loss_cons + loss_ter
loss_G.backward()
self.optimG.step()
return pred, merged_img, flow, loss_l1, loss_flow, loss_cons, loss_ter, loss_mask
if __name__ == '__main__':
    c_flag = False  # set True to force CPU
    device = torch.device("cuda" if torch.cuda.is_available() and not c_flag else "cpu")
    img0 = torch.zeros(3, 3, 256, 256).float().to(device)
    img1 = torch.tensor(np.random.normal(
        0, 1, (3, 3, 256, 256))).float().to(device)
    model = Model(c_flag)
    model.eval()
    print(model.inference(img0, img1).shape)  # inference concatenates internally
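
A quick smoke test for the Model class above, outside GIMP: a minimal sketch, assuming it runs from gimp-plugins/RIFE with the three weight files under a local weights/interpolateframes directory (the path is an assumption mirroring the plugin below).

import torch
from model.RIFE import Model

model = Model(c_flag=True)                     # True forces CPU
model.load_model('weights/interpolateframes')  # assumed weights directory
model.eval()
img0 = torch.rand(1, 3, 256, 256)              # frame at t=0, values in [0, 1]
img1 = torch.rand(1, 3, 256, 256)              # frame at t=1
mid = model.inference(img0, img1)              # predicted frame at t=0.5
print(mid.shape)                               # torch.Size([1, 3, 256, 256])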

250
gimp-plugins/RIFE/model/RIFE2F.py Executable file

@@ -0,0 +1,250 @@
import torch
import torch.nn as nn
import numpy as np
from torch.optim import AdamW
import torch.optim as optim
import itertools
from model.warplayer import warp
from torch.nn.parallel import DistributedDataParallel as DDP
from model.IFNet2F import *
import torch.nn.functional as F
from model.loss import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
nn.PReLU(out_planes)
)
def deconv(in_planes, out_planes, kernel_size=4, stride=2, padding=1):
return nn.Sequential(
torch.nn.ConvTranspose2d(in_channels=in_planes, out_channels=out_planes,
kernel_size=4, stride=2, padding=1, bias=True),
nn.PReLU(out_planes)
)
def conv_woact(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True),
)
class ResBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride=2):
super(ResBlock, self).__init__()
if in_planes == out_planes and stride == 1:
self.conv0 = nn.Identity()
else:
self.conv0 = nn.Conv2d(in_planes, out_planes,
3, stride, 1, bias=False)
self.conv1 = conv(in_planes, out_planes, 3, stride, 1)
self.conv2 = conv_woact(out_planes, out_planes, 3, 1, 1)
self.relu1 = nn.PReLU(1)
self.relu2 = nn.PReLU(out_planes)
self.fc1 = nn.Conv2d(out_planes, 16, kernel_size=1, bias=False)
self.fc2 = nn.Conv2d(16, out_planes, kernel_size=1, bias=False)
def forward(self, x):
y = self.conv0(x)
x = self.conv1(x)
x = self.conv2(x)
w = x.mean(3, True).mean(2, True)
w = self.relu1(self.fc1(w))
w = torch.sigmoid(self.fc2(w))
x = self.relu2(x * w + y)
return x
c = 16
class ContextNet(nn.Module):
def __init__(self):
super(ContextNet, self).__init__()
self.conv1 = ResBlock(3, c, 1)
self.conv2 = ResBlock(c, 2*c)
self.conv3 = ResBlock(2*c, 4*c)
self.conv4 = ResBlock(4*c, 8*c)
def forward(self, x, flow):
x = self.conv1(x)
f1 = warp(x, flow)
x = self.conv2(x)
flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
align_corners=False) * 0.5
f2 = warp(x, flow)
x = self.conv3(x)
flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
align_corners=False) * 0.5
f3 = warp(x, flow)
x = self.conv4(x)
flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear",
align_corners=False) * 0.5
f4 = warp(x, flow)
return [f1, f2, f3, f4]
class FusionNet(nn.Module):
def __init__(self):
super(FusionNet, self).__init__()
self.down0 = ResBlock(8, 2*c, 1)
self.down1 = ResBlock(4*c, 4*c)
self.down2 = ResBlock(8*c, 8*c)
self.down3 = ResBlock(16*c, 16*c)
self.up0 = deconv(32*c, 8*c)
self.up1 = deconv(16*c, 4*c)
self.up2 = deconv(8*c, 2*c)
self.up3 = deconv(4*c, c)
self.conv = nn.Conv2d(c, 4, 3, 2, 1)
def forward(self, img0, img1, flow, c0, c1, flow_gt):
warped_img0 = warp(img0, flow)
warped_img1 = warp(img1, -flow)
        if flow_gt is None:
warped_img0_gt, warped_img1_gt = None, None
else:
warped_img0_gt = warp(img0, flow_gt[:, :2])
warped_img1_gt = warp(img1, flow_gt[:, 2:4])
s0 = self.down0(torch.cat((warped_img0, warped_img1, flow), 1))
s1 = self.down1(torch.cat((s0, c0[0], c1[0]), 1))
s2 = self.down2(torch.cat((s1, c0[1], c1[1]), 1))
s3 = self.down3(torch.cat((s2, c0[2], c1[2]), 1))
x = self.up0(torch.cat((s3, c0[3], c1[3]), 1))
x = self.up1(torch.cat((x, s2), 1))
x = self.up2(torch.cat((x, s1), 1))
x = self.up3(torch.cat((x, s0), 1))
x = self.conv(x)
return x, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt
class Model:
def __init__(self, local_rank=-1):
self.flownet = IFNet()
self.contextnet = ContextNet()
self.fusionnet = FusionNet()
self.device()
self.optimG = AdamW(itertools.chain(
self.flownet.parameters(),
self.contextnet.parameters(),
self.fusionnet.parameters()), lr=1e-6, weight_decay=1e-5)
self.schedulerG = optim.lr_scheduler.CyclicLR(
self.optimG, base_lr=1e-6, max_lr=1e-3, step_size_up=8000, cycle_momentum=False)
self.epe = EPE()
self.ter = Ternary()
self.sobel = SOBEL()
if local_rank != -1:
self.flownet = DDP(self.flownet, device_ids=[
local_rank], output_device=local_rank)
self.contextnet = DDP(self.contextnet, device_ids=[
local_rank], output_device=local_rank)
self.fusionnet = DDP(self.fusionnet, device_ids=[
local_rank], output_device=local_rank)
def train(self):
self.flownet.train()
self.contextnet.train()
self.fusionnet.train()
def eval(self):
self.flownet.eval()
self.contextnet.eval()
self.fusionnet.eval()
def device(self):
self.flownet.to(device)
self.contextnet.to(device)
self.fusionnet.to(device)
def load_model(self, path, rank=0):
def convert(param):
return {
k.replace("module.", ""): v
for k, v in param.items()
if "module." in k
}
if rank == 0:
self.flownet.load_state_dict(
convert(torch.load('{}/flownet.pkl'.format(path), map_location=device)))
self.contextnet.load_state_dict(
convert(torch.load('{}/contextnet.pkl'.format(path), map_location=device)))
self.fusionnet.load_state_dict(
convert(torch.load('{}/unet.pkl'.format(path), map_location=device)))
def save_model(self, path, rank=0):
if rank == 0:
torch.save(self.flownet.state_dict(),
'{}/flownet.pkl'.format(path))
torch.save(self.contextnet.state_dict(),
'{}/contextnet.pkl'.format(path))
torch.save(self.fusionnet.state_dict(), '{}/unet.pkl'.format(path))
def predict(self, imgs, flow, training=True, flow_gt=None):
img0 = imgs[:, :3]
img1 = imgs[:, 3:]
flow = F.interpolate(flow, scale_factor=2.0, mode="bilinear",
align_corners=False) * 2.0
c0 = self.contextnet(img0, flow)
c1 = self.contextnet(img1, -flow)
refine_output, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt = self.fusionnet(
img0, img1, flow, c0, c1, flow_gt)
res = torch.sigmoid(refine_output[:, :3]) * 2 - 1
mask = torch.sigmoid(refine_output[:, 3:4])
merged_img = warped_img0 * mask + warped_img1 * (1 - mask)
pred = merged_img + res
pred = torch.clamp(pred, 0, 1)
if training:
return pred, mask, merged_img, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt
else:
return pred
def inference(self, img0, img1):
with torch.no_grad():
imgs = torch.cat((img0, img1), 1)
flow, _ = self.flownet(imgs)
return self.predict(imgs, flow, training=False).detach()
def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):
for param_group in self.optimG.param_groups:
param_group['lr'] = learning_rate
if training:
self.train()
else:
self.eval()
flow, flow_list = self.flownet(imgs)
pred, mask, merged_img, warped_img0, warped_img1, warped_img0_gt, warped_img1_gt = self.predict(
imgs, flow, flow_gt=flow_gt)
loss_ter = self.ter(pred, gt).mean()
if training:
with torch.no_grad():
loss_flow = torch.abs(warped_img0_gt - gt).mean()
loss_mask = torch.abs(
merged_img - gt).sum(1, True).float().detach()
loss_cons = 0
for i in range(3):
loss_cons += self.epe(flow_list[i], flow_gt[:, :2], 1)
loss_cons += self.epe(-flow_list[i], flow_gt[:, 2:4], 1)
loss_cons = loss_cons.mean() * 0.01
else:
loss_cons = torch.tensor([0])
loss_flow = torch.abs(warped_img0 - gt).mean()
loss_mask = 1
loss_l1 = (((pred - gt) ** 2 + 1e-6) ** 0.5).mean()
if training:
self.optimG.zero_grad()
loss_G = loss_l1 + loss_cons + loss_ter
loss_G.backward()
self.optimG.step()
return pred, merged_img, flow, loss_l1, loss_flow, loss_cons, loss_ter, loss_mask
if __name__ == '__main__':
    img0 = torch.zeros(3, 3, 256, 256).float().to(device)
    img1 = torch.tensor(np.random.normal(
        0, 1, (3, 3, 256, 256))).float().to(device)
    model = Model()
    model.eval()
    print(model.inference(img0, img1).shape)  # inference concatenates internally

90
gimp-plugins/RIFE/model/loss.py Executable file

@@ -0,0 +1,90 @@
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class EPE(nn.Module):
def __init__(self):
super(EPE, self).__init__()
def forward(self, flow, gt, loss_mask):
loss_map = (flow - gt.detach()) ** 2
loss_map = (loss_map.sum(1, True) + 1e-6) ** 0.5
return (loss_map * loss_mask)
class Ternary(nn.Module):
def __init__(self, cFlag):
super(Ternary, self).__init__()
patch_size = 7
out_channels = patch_size * patch_size
self.w = np.eye(out_channels).reshape(
(patch_size, patch_size, 1, out_channels))
self.w = np.transpose(self.w, (3, 2, 0, 1))
self.device = torch.device("cuda" if torch.cuda.is_available() and not cFlag else "cpu")
self.w = torch.tensor(self.w).float().to(self.device)
def transform(self, img):
patches = F.conv2d(img, self.w, padding=3, bias=None)
transf = patches - img
transf_norm = transf / torch.sqrt(0.81 + transf**2)
return transf_norm
def rgb2gray(self, rgb):
r, g, b = rgb[:, 0:1, :, :], rgb[:, 1:2, :, :], rgb[:, 2:3, :, :]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
def hamming(self, t1, t2):
dist = (t1 - t2) ** 2
dist_norm = torch.mean(dist / (0.1 + dist), 1, True)
return dist_norm
def valid_mask(self, t, padding):
n, _, h, w = t.size()
inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
mask = F.pad(inner, [padding] * 4)
return mask
def forward(self, img0, img1):
img0 = self.transform(self.rgb2gray(img0))
img1 = self.transform(self.rgb2gray(img1))
return self.hamming(img0, img1) * self.valid_mask(img0, 1)
class SOBEL(nn.Module):
def __init__(self, cFlag):
super(SOBEL, self).__init__()
self.kernelX = torch.tensor([
[1, 0, -1],
[2, 0, -2],
[1, 0, -1],
]).float()
self.kernelY = self.kernelX.clone().T
self.device = torch.device("cuda" if torch.cuda.is_available() and not cFlag else "cpu")
self.kernelX = self.kernelX.unsqueeze(0).unsqueeze(0).to(self.device)
self.kernelY = self.kernelY.unsqueeze(0).unsqueeze(0).to(self.device)
def forward(self, pred, gt):
N, C, H, W = pred.shape[0], pred.shape[1], pred.shape[2], pred.shape[3]
img_stack = torch.cat(
[pred.reshape(N*C, 1, H, W), gt.reshape(N*C, 1, H, W)], 0)
sobel_stack_x = F.conv2d(img_stack, self.kernelX, padding=1)
sobel_stack_y = F.conv2d(img_stack, self.kernelY, padding=1)
pred_X, gt_X = sobel_stack_x[:N*C], sobel_stack_x[N*C:]
pred_Y, gt_Y = sobel_stack_y[:N*C], sobel_stack_y[N*C:]
L1X, L1Y = torch.abs(pred_X-gt_X), torch.abs(pred_Y-gt_Y)
loss = (L1X+L1Y)
return loss
if __name__ == '__main__':
    cFlag = False  # set True to force CPU
    device = torch.device("cuda" if torch.cuda.is_available() and not cFlag else "cpu")
    img0 = torch.zeros(3, 3, 256, 256).float().to(device)
    img1 = torch.tensor(np.random.normal(
        0, 1, (3, 3, 256, 256))).float().to(device)
    ternary_loss = Ternary(cFlag)
    print(ternary_loss(img0, img1).shape)

23
gimp-plugins/RIFE/model/warplayer.py Executable file

@@ -0,0 +1,23 @@
import torch
import torch.nn as nn
backwarp_tenGrid = {}
def warp(tenInput, tenFlow, cFlag=False):
    # cFlag=True forces CPU; the default keeps two-argument call sites
    # (IFNet2F/RIFE2F above) working
    device = torch.device("cuda" if torch.cuda.is_available() and not cFlag else "cpu")
k = (str(tenFlow.device), str(tenFlow.size()))
if k not in backwarp_tenGrid:
tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3]).view(
1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1)
tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2]).view(
1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3])
backwarp_tenGrid[k] = torch.cat(
[tenHorizontal, tenVertical], 1).to(device)
tenFlow = torch.cat([tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0),
tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0)], 1)
g = (backwarp_tenGrid[k] + tenFlow).permute(0, 2, 3, 1)
return torch.nn.functional.grid_sample(input=tenInput, grid=torch.clamp(g, -1, 1), mode='bilinear',
padding_mode='zeros', align_corners=True)
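
A sanity check for warp (a hypothetical standalone test, not part of this commit): with an all-zero flow field and align_corners=True, grid_sample reads every pixel from its own location, so the output should match the input.

import torch
from model.warplayer import warp

img = torch.rand(1, 3, 64, 64)     # N, C, H, W
flow = torch.zeros(1, 2, 64, 64)   # per-pixel (dx, dy) displacement
out = warp(img, flow, cFlag=True)  # cFlag=True keeps everything on CPU
assert torch.allclose(out, img, atol=1e-5)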

126
gimp-plugins/interpolateframes.py Executable file

@@ -0,0 +1,126 @@
import os
baseLoc = os.path.dirname(os.path.realpath(__file__)) + '/'
savePath = '/'.join(baseLoc.split('/')[:-2]) + '/output/interpolateframes'
from gimpfu import *
import sys
sys.path.extend([baseLoc + 'gimpenv/lib/python2.7', baseLoc + 'gimpenv/lib/python2.7/site-packages',
baseLoc + 'gimpenv/lib/python2.7/site-packages/setuptools', baseLoc + 'RIFE'])
import cv2
import torch
from torch.nn import functional as F
from model import RIFE
import numpy as np
def channelData(layer): # convert gimp image to numpy
region = layer.get_pixel_rgn(0, 0, layer.width, layer.height)
pixChars = region[:, :] # Take whole layer
bpp = region.bpp
# return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp)
return np.frombuffer(pixChars, dtype=np.uint8).reshape(layer.height, layer.width, bpp)
def createResultLayer(image, name, result):
    rlBytes = np.uint8(result).tobytes()
rl = gimp.Layer(image, name, image.width, image.height, 0, 100, NORMAL_MODE)
region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True)
region[:, :] = rlBytes
image.add_layer(rl, 0)
gimp.displays_flush()
def getinter(img_s, img_e, c_flag, string_path):
exp = 4
out_path = string_path
model = RIFE.Model(c_flag)
model.load_model(baseLoc + 'weights' + '/interpolateframes')
model.eval()
model.device(c_flag)
img0 = img_s
img1 = img_e
    img0 = (torch.tensor(img0.transpose(2, 0, 1)) / 255.).unsqueeze(0)
    img1 = (torch.tensor(img1.transpose(2, 0, 1)) / 255.).unsqueeze(0)
if torch.cuda.is_available() and not c_flag:
device = torch.device("cuda")
else:
device = torch.device("cpu")
img0 = img0.to(device)
img1 = img1.to(device)
n, c, h, w = img0.shape
    # pad H and W up to the next multiple of 32 for the multi-scale pyramid
    ph = ((h - 1) // 32 + 1) * 32
    pw = ((w - 1) // 32 + 1) * 32
    padding = (0, pw - w, 0, ph - h)
    img0 = F.pad(img0, padding)
    img1 = F.pad(img1, padding)
    img_list = [img0, img1]
    idx = 0
    # each pass doubles the segment count, so exp passes make
    # (len(img_list) - 1) * (2 ** exp - 1) midpoint inferences in total
    t = (len(img_list) - 1) * (2 ** exp - 1)
    for i in range(exp):
        tmp = []
        for j in range(len(img_list) - 1):
            mid = model.inference(img_list[j], img_list[j + 1])
            tmp.append(img_list[j])
            tmp.append(mid)
            idx = idx + 1
            gimp.progress_update(float(idx) / float(t))
            gimp.displays_flush()
        tmp.append(img1)
        img_list = tmp
if not os.path.exists(out_path):
os.makedirs(out_path)
for i in range(len(img_list)):
cv2.imwrite(out_path + '/img{}.png'.format(i),
(img_list[i][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w, ::-1])
def interpolateframes(imggimp, curlayer, string_path, layer_s, layer_e, c_flag):
if torch.cuda.is_available() and not c_flag:
gimp.progress_init("(Using GPU) Running slomo and saving frames in "+string_path)
# device = torch.device("cuda")
else:
gimp.progress_init("(Using CPU) Running slomo and saving frames in "+string_path)
# device = torch.device("cpu")
layer_1 = channelData(layer_s)
layer_2 = channelData(layer_e)
if layer_1.shape[2] == 4: # get rid of alpha channel
layer_1 = layer_1[:, :, 0:3]
if layer_2.shape[2] == 4: # get rid of alpha channel
layer_2 = layer_2[:, :, 0:3]
getinter(layer_1, layer_2, c_flag, string_path)
# pdb.gimp_message("Saved")
register(
"interpolate-frames",
"interpolate-frames",
"Running slomo...",
"Kritik Soman",
"Your",
"2020",
"interpolate-frames...",
"*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
[(PF_IMAGE, "image", "Input image", None),
(PF_DRAWABLE, "drawable", "Input drawable", None),
(PF_STRING, "string", "Output Location", savePath),
(PF_LAYER, "drawinglayer", "Start Frame:", None),
(PF_LAYER, "drawinglayer", "End Frame:", None),
(PF_BOOL, "fcpu", "Force CPU", False)
],
[],
interpolateframes, menu="<Image>/Layer/GIML-ML")
main()
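
For reference, the doubling loop in getinter grows the frame list geometrically: each of the exp passes inserts a midpoint into every segment, so two input frames become 2**exp + 1 output frames. A small sketch of that arithmetic (hypothetical helper, not part of the plugin):

def frames_out(exp, n_inputs=2):
    # each pass doubles the number of segments between consecutive frames
    segments = (n_inputs - 1) * 2 ** exp
    return segments + 1

assert frames_out(4) == 17  # exp = 4 in getinter above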

@@ -215,4 +215,31 @@ def sync(path,flag):
        gimp.progress_init("Downloading " + model + "(~" + str(fileSize) + "MB)...")
        download_file_from_google_drive(file_id, destination, fileSize)

    # interpolateframes
    model = 'interpolateframes'
    file_id = '1bHmO9-_ENTYoN1-BNwSk3nLN9-NDUnRg'
    fileSize = 1.6  # in MB
    mFName = 'contextnet.pkl'
    if not os.path.isdir(path + '/' + model):
        os.mkdir(path + '/' + model)
    destination = path + '/' + model + '/' + mFName
    if not os.path.isfile(destination):
        gimp.progress_init("Downloading " + model + "(~" + str(fileSize) + "MB)...")
        download_file_from_google_drive(file_id, destination, fileSize)

    file_id = '1cQvDPBKsz3TAi0Q5bJXsu6A-Z7lpk_cE'
    fileSize = 25.4  # in MB
    mFName = 'flownet.pkl'
    destination = path + '/' + model + '/' + mFName
    if not os.path.isfile(destination):
        gimp.progress_init("Downloading " + model + "(~" + str(fileSize) + "MB)...")
        download_file_from_google_drive(file_id, destination, fileSize)

    file_id = '1mlA8VtxIcvJfz51OsQMvWX24oqxZ429r'
    fileSize = 15  # in MB
    mFName = 'unet.pkl'
    destination = path + '/' + model + '/' + mFName
    if not os.path.isfile(destination):
        gimp.progress_init("Downloading " + model + "(~" + str(fileSize) + "MB)...")
        download_file_from_google_drive(file_id, destination, fileSize)
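
A small post-download check (hypothetical helper; the file names come from the sync code above) to confirm all three RIFE weight files are in place before the plugin runs:

import os

def check_interpolate_weights(path):
    # returns the subset of expected weight files that are missing;
    # an empty list means the download completed
    names = ('contextnet.pkl', 'flownet.pkl', 'unet.pkl')
    return [n for n in names
            if not os.path.isfile(os.path.join(path, 'interpolateframes', n))]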