mirror of https://github.com/kritiksoman/GIMP-ML (synced 2024-10-31 09:20:18 +00:00)

Commit 001b95d59d (parent 68f6269355): CPUButton
gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/networks.py (6 changed lines, Normal file → Executable file)

@@ -38,9 +38,9 @@ def define_G(input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_glo
    else:
        raise('generator not implemented!')
    print(netG)
-   # if len(gpu_ids) > 0:
-   # assert(torch.cuda.is_available())
-   # netG.cuda(gpu_ids[0])
+   if len(gpu_ids) > 0:
+       assert(torch.cuda.is_available())
+       netG.cuda(gpu_ids[0])
    netG.apply(weights_init)
    return netG

gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/pix2pixHD_model.py (8 changed lines, Normal file → Executable file)

@@ -184,7 +184,7 @@ class Pix2PixHDModel(BaseModel):

        return inter_label_1, input_label, inter_label_2, real_image, input_label_ref, real_image_ref

-   def encode_input_test(self, label_map, label_map_ref, real_image_ref, infer=False):
+   def encode_input_test(self, label_map, label_map_ref, real_image_ref, infer=False,f=False):

        if self.opt.label_nc == 0:
            if torch.cuda.is_available():
@@ -198,7 +198,7 @@ class Pix2PixHDModel(BaseModel):
            # create one-hot vector for label map
            size = label_map.size()
            oneHot_size = (size[0], self.opt.label_nc, size[2], size[3])
-           if torch.cuda.is_available():
+           if torch.cuda.is_available() and not f:
                input_label = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_()
                input_label = input_label.scatter_(1, label_map.data.long().cuda(), 1.0)
                input_label_ref = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_()
@@ -280,11 +280,11 @@ class Pix2PixHDModel(BaseModel):
        # Only return the fake_B image if necessary to save BW
        return [ self.loss_filter( loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_GB_GAN, loss_GB_GAN_Feat, loss_GB_VGG, loss_D_real, loss_D_fake, loss_D_blend ), None if not infer else fake_inter_1, fake_image, fake_inter_2, blend_image, alpha, real_image, inter_label_1, input_label, inter_label_2 ]

-   def inference(self, label, label_ref, image_ref):
+   def inference(self, label, label_ref, image_ref,cFlag):

        # Encode Inputs
        image_ref = Variable(image_ref)
-       input_label, input_label_ref, real_image_ref = self.encode_input_test(Variable(label), Variable(label_ref), image_ref, infer=True)
+       input_label, input_label_ref, real_image_ref = self.encode_input_test(Variable(label), Variable(label_ref), image_ref, infer=True,f=cFlag)

        if torch.__version__.startswith('0.4'):
            with torch.no_grad():
gimp-plugins/DeblurGANv2/predictorClass.py (8 changed lines, Normal file → Executable file)

@@ -25,11 +25,11 @@ config = {'project': 'deblur_gan', 'warmup_num': 3, 'optimizer': {'lr': 0.0001,


class Predictor:
-   def __init__(self, weights_path, model_name=''):
+   def __init__(self, weights_path, model_name='',cf=False):
        # model = get_generator(model_name or config['model'])
        model = get_generator_new(weights_path[0:-11])
        model.load_state_dict(torch.load(weights_path, map_location=lambda storage, loc: storage)['model'])
-       if torch.cuda.is_available():
+       if torch.cuda.is_available() and not cf:
            self.model = model.cuda()
        else:
            self.model = model
@@ -73,10 +73,10 @@ class Predictor:
        x = (np.transpose(x, (1, 2, 0)) + 1) / 2.0 * 255.0
        return x.astype('uint8')

-   def __call__(self, img, mask, ignore_mask=True):
+   def __call__(self, img, mask, ignore_mask=True,cf=False):
        (img, mask), h, w = self._preprocess(img, mask)
        with torch.no_grad():
-           if torch.cuda.is_available():
+           if torch.cuda.is_available() and not cf:
                inputs = [img.cuda()]
            else:
                inputs = [img]
gimp-plugins/MiDaS/run.py (10 changed lines, Normal file → Executable file)

@@ -11,7 +11,7 @@ import cv2
# import imageio


-def run_depth(img, model_path, Net, utils, target_w=None):
+def run_depth(img, model_path, Net, utils, target_w=None,f=False):
    """Run MonoDepthNN to compute depth maps.

    Args:
@@ -22,12 +22,13 @@ def run_depth(img, model_path, Net, utils, target_w=None):
    # print("initialize")

    # select device
-   device = torch.device("cpu")
+   # device = torch.device("cpu")
    # print("device: %s" % device)

    # load network
    model = Net(model_path)
-   model.to(device)
+   if torch.cuda.is_available() and not f:
+       model.cuda()
    model.eval()

    # get input
@@ -50,7 +51,8 @@ def run_depth(img, model_path, Net, utils, target_w=None):
    target_height, target_width = int(round(img.shape[0] * scale)), int(round(img.shape[1] * scale))
    img_input = utils.resize_image(img)
    # print(img_input.shape)
-   img_input = img_input.to(device)
+   if torch.cuda.is_available() and not f:
+       img_input = img_input.cuda()
    # compute
    with torch.no_grad():
        out = model.forward(img_input)
[file path not captured in this view]

@@ -9,6 +9,7 @@ sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/
import cv2
from predictorClass import Predictor
import numpy as np
+import torch

def channelData(layer):#convert gimp image to numpy
    region=layer.get_pixel_rgn(0, 0, layer.width,layer.height)
@@ -25,17 +26,21 @@ def createResultLayer(image,name,result):
    image.add_layer(rl,0)
    gimp.displays_flush()

-def getdeblur(img):
-   predictor = Predictor(weights_path=baseLoc+'weights/deblur/'+'best_fpn.h5')
+def getdeblur(img,flag):
+   predictor = Predictor(weights_path=baseLoc+'weights/deblur/'+'best_fpn.h5',cf=flag)
    if img.shape[2] == 4: # get rid of alpha channel
        img = img[:,:,0:3]
-   pred = predictor(img, None)
+   pred = predictor(img, None,cf=flag)
    return pred

-def deblur(img, layer):
-   gimp.progress_init("Deblurring " + layer.name + "...")
+def deblur(img, layer,flag):
+   if torch.cuda.is_available() and not flag:
+       gimp.progress_init("(Using GPU) Deblurring " + layer.name + "...")
+   else:
+       gimp.progress_init("(Using CPU) Deblurring " + layer.name + "...")

    imgmat = channelData(layer)
-   pred = getdeblur(imgmat)
+   pred = getdeblur(imgmat,flag)
    createResultLayer(img,'deblur_'+layer.name,pred)

@@ -50,6 +55,9 @@ register(
    "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
    [ (PF_IMAGE, "image", "Input image", None),
      (PF_DRAWABLE, "drawable", "Input drawable", None),
+     # (PF_LAYER, "drawinglayer", "Original Image", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
    ],
    [],
    deblur, menu="<Image>/Layer/GIML-ML")
[file path not captured in this view]

@@ -24,7 +24,7 @@ def channelData(layer):#convert gimp image to numpy
    return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp)


-def deepcolor(tmp1, tmp2, ilayerimg,ilayerc) :
+def deepcolor(tmp1, tmp2, ilayerimg,ilayerc,cflag) :
    layerimg = channelData(ilayerimg)
    layerc = channelData(ilayerc)

@@ -47,7 +47,7 @@ def deepcolor(tmp1, tmp2, ilayerimg,ilayerc) :
    if layerimg.shape[2] == 4: #remove alpha channel in image if present
        layerimg = layerimg[:,:,0:3]

-   if torch.cuda.is_available():
+   if torch.cuda.is_available() and not cflag:
        gimp.progress_init("(Using GPU) Running deepcolor for " + ilayerimg.name + "...")
        gpu_id = 0
    else:
@@ -58,7 +58,7 @@ def deepcolor(tmp1, tmp2, ilayerimg,ilayerc) :
    colorModel.prep_net(gpu_id, baseLoc + 'weights/colorize/caffemodel.pth')
    colorModel.load_image(layerimg) # load an image

-   img_out = colorModel.net_forward(input_ab, mask) # run model, returns 256x256 image
+   img_out = colorModel.net_forward(input_ab, mask,f=cflag) # run model, returns 256x256 image
    img_out_fullres = colorModel.get_img_fullres() # get image at full resolution

    createResultLayer(tmp1, 'new_' + ilayerimg.name, img_out_fullres)
@@ -78,6 +78,7 @@ register(
      (PF_DRAWABLE, "drawable", "Input drawable", None),
      (PF_LAYER, "drawinglayer", "Original Image:", None),
      (PF_LAYER, "drawinglayer", "Color Mask:", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
    ],
    [],
    deepcolor, menu="<Image>/Layer/GIML-ML")
[file path not captured in this view]

@@ -14,13 +14,13 @@ import net
import numpy as np
import cv2

-def clrImg(data_hazy):
+def clrImg(data_hazy,cFlag):
    data_hazy = (data_hazy / 255.0)
    data_hazy = torch.from_numpy(data_hazy).float()
    data_hazy = data_hazy.permute(2, 0, 1)
    dehaze_net = net.dehaze_net()

-   if torch.cuda.is_available():
+   if torch.cuda.is_available() and not cFlag:
        dehaze_net = dehaze_net.cuda()
        dehaze_net.load_state_dict(torch.load(baseLoc+'weights/deepdehaze/dehazer.pth'))
        data_hazy = data_hazy.cuda()
@@ -31,7 +31,7 @@ def clrImg(data_hazy):
    gimp.displays_flush()
    data_hazy = data_hazy.unsqueeze(0)
    clean_image = dehaze_net(data_hazy)
-   out = clean_image.detach().numpy()[0,:,:,:]*255
+   out = clean_image.detach().cpu().numpy()[0,:,:,:]*255
    out = np.clip(np.transpose(out,(1,2,0)),0,255).astype(np.uint8)
    return out

@@ -53,15 +53,15 @@ def createResultLayer(image, name, result):
    gimp.displays_flush()


-def deepdehazing(img, layer):
-   if torch.cuda.is_available():
+def deepdehazing(img, layer, cFlag):
+   if torch.cuda.is_available() and not cFlag:
        gimp.progress_init("(Using GPU) Dehazing " + layer.name + "...")
    else:
        gimp.progress_init("(Using CPU) Dehazing " + layer.name + "...")
    imgmat = channelData(layer)
    if imgmat.shape[2] == 4: # get rid of alpha channel
        imgmat = imgmat[:,:,0:3]
-   cpy = clrImg(imgmat)
+   cpy = clrImg(imgmat,cFlag)
    createResultLayer(img, 'new_output', cpy)


@@ -76,6 +76,7 @@ register(
    "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
    [(PF_IMAGE, "image", "Input image", None),
     (PF_DRAWABLE, "drawable", "Input drawable", None),
+    (PF_BOOL, "fcpu", "Force CPU", False)
    ],
    [],
    deepdehazing, menu="<Image>/Layer/GIML-ML")
[file path not captured in this view]

@@ -12,7 +12,7 @@ sys.path.extend([baseLoc + 'gimpenv/lib/python2.7', baseLoc + 'gimpenv/lib/pytho
from denoiser import *
from argparse import Namespace

-def clrImg(Img):
+def clrImg(Img,cFlag):
    w, h, _ = Img.shape
    opt = Namespace(color=1, cond=1, delog='logsdc', ext_test_noise_level=None,
                    k=0, keep_ind=None, mode='MC', num_of_layers=20, out_dir='results_bc',
@@ -25,7 +25,7 @@ def clrImg(Img):
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids)
    model_est = nn.DataParallel(est_net, device_ids=device_ids)# Estimator Model
-   if torch.cuda.is_available():
+   if torch.cuda.is_available() and not cFlag:
        ckpt_est = torch.load(baseLoc+'weights/deepdenoise/est_net.pth')
        ckpt = torch.load(baseLoc+'weights/deepdenoise/net.pth')
        model = model.cuda()
@@ -57,6 +57,8 @@ def clrImg(Img):
    merge_out = np.zeros([w, h, 3])
    wbin = opt.wbin
    i = 0
+   idx=0
+   t=(w*h)/(wbin*wbin)
    while i < w:
        i_end = min(i + wbin, w)
        j = 0
@@ -66,7 +68,8 @@ def clrImg(Img):
            patch_merge_out_numpy = denoiser(patch, c, pss, model, model_est, opt)
            merge_out[i:i_end, j:j_end, :] = patch_merge_out_numpy
            j = j_end
-           gimp.progress_update(float(i+j)/float(w+h))
+           idx=idx+1
+           gimp.progress_update(float(idx)/float(t))
            gimp.displays_flush()
        i = i_end

@@ -91,15 +94,15 @@ def createResultLayer(image, name, result):
    gimp.displays_flush()


-def deepdenoise(img, layer):
-   if torch.cuda.is_available():
+def deepdenoise(img, layer,cFlag):
+   if torch.cuda.is_available() and not cFlag:
        gimp.progress_init("(Using GPU) Denoising " + layer.name + "...")
    else:
        gimp.progress_init("(Using CPU) Denoising " + layer.name + "...")
    imgmat = channelData(layer)
    if imgmat.shape[2] == 4: # get rid of alpha channel
        imgmat = imgmat[:,:,0:3]
-   cpy = clrImg(imgmat)
+   cpy = clrImg(imgmat,cFlag)
    createResultLayer(img, 'new_output', cpy)


@@ -114,6 +117,7 @@ register(
    "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
    [(PF_IMAGE, "image", "Input image", None),
     (PF_DRAWABLE, "drawable", "Input drawable", None),
+    (PF_BOOL, "fcpu", "Force CPU", False)
    ],
    [],
    deepdenoise, menu="<Image>/Layer/GIML-ML")
[file path not captured in this view]

@@ -12,7 +12,7 @@ import torch
from torchvision import transforms, datasets
import numpy as np

-def getSeg(input_image):
+def getSeg(input_image,f):
    model = torch.load(baseLoc+'weights/deeplabv3/deeplabv3+model.pt')
    model.eval()
    preprocess = transforms.Compose([
@@ -23,7 +23,7 @@ def getSeg(input_image):
    input_image = Image.fromarray(input_image)
    input_tensor = preprocess(input_image)
    input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
-   if torch.cuda.is_available():
+   if torch.cuda.is_available() and not f:
        input_batch = input_batch.to('cuda')
        model.to('cuda')

@@ -61,8 +61,8 @@ def createResultLayer(image,name,result):
    image.add_layer(rl,0)
    gimp.displays_flush()

-def deeplabv3(img, layer) :
-   if torch.cuda.is_available():
+def deeplabv3(img, layer,cFlag) :
+   if torch.cuda.is_available() and not cFlag:
        gimp.progress_init("(Using GPU) Generating semantic segmentation map for " + layer.name + "...")
    else:
        gimp.progress_init("(Using CPU) Generating semantic segmentation map for " + layer.name + "...")
@@ -70,7 +70,7 @@ def deeplabv3(img, layer) :
    imgmat = channelData(layer)
    if imgmat.shape[2] == 4: # get rid of alpha channel
        imgmat = imgmat[:,:,0:3]
-   cpy=getSeg(imgmat)
+   cpy=getSeg(imgmat,cFlag)
    createResultLayer(img,'new_output',cpy)


@@ -84,7 +84,8 @@ register(
    "deeplabv3...",
    "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
    [ (PF_IMAGE, "image", "Input image", None),
-     (PF_DRAWABLE, "drawable", "Input drawable", None)
+     (PF_DRAWABLE, "drawable", "Input drawable", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
    ],
    [],
    deeplabv3, menu="<Image>/Layer/GIML-ML")
[file path not captured in this view]

@@ -34,7 +34,7 @@ def createResultLayer(image,name,result):
    image.add_layer(rl,0)
    gimp.displays_flush()

-def getnewalpha(image,mask):
+def getnewalpha(image,mask,cFlag):
    if image.shape[2] == 4: # get rid of alpha channel
        image = image[:,:,0:3]
    if mask.shape[2] == 4: # get rid of alpha channel
@@ -44,7 +44,7 @@ def getnewalpha(image,mask):
    trimap = mask[:, :, 0]

    cudaFlag = False
-   if torch.cuda.is_available():
+   if torch.cuda.is_available() and not cFlag:
        cudaFlag = True

    args = Namespace(crop_or_resize='whole', cuda=cudaFlag, max_size=1600, resume=baseLoc+'weights/deepmatting/stage1_sad_57.1.pth', stage=1)
@@ -75,8 +75,8 @@ def getnewalpha(image,mask):
    return pred_mattes


-def deepmatting(imggimp, curlayer,layeri,layerm) :
-   if torch.cuda.is_available():
+def deepmatting(imggimp, curlayer,layeri,layerm,cFlag) :
+   if torch.cuda.is_available() and not cFlag:
        gimp.progress_init("(Using GPU) Running deep-matting for " + layeri.name + "...")
    else:
        gimp.progress_init("(Using CPU) Running deep-matting for " + layeri.name + "...")
@@ -84,7 +84,7 @@ def deepmatting(imggimp, curlayer,layeri,layerm) :
    img = channelData(layeri)
    mask = channelData(layerm)

-   cpy=getnewalpha(img,mask)
+   cpy=getnewalpha(img,mask,cFlag)
    createResultLayer(imggimp,'new_output',cpy)


@@ -101,7 +101,8 @@ register(
    [ (PF_IMAGE, "image", "Input image", None),
      (PF_DRAWABLE, "drawable", "Input drawable", None),
      (PF_LAYER, "drawinglayer", "Original Image:", None),
-     (PF_LAYER, "drawinglayer", "Trimap Mask:", None)
+     (PF_LAYER, "drawinglayer", "Trimap Mask:", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
    ],
    [],
    deepmatting, menu="<Image>/Layer/GIML-ML")
[file path not captured in this view]

@@ -56,7 +56,7 @@ def getOptions():
    'engine': None,
    'export_onnx': None,
    'fineSize': 512,
-   'gpu_ids': [0],
+   'gpu_ids': [],
    'how_many': 1000,
    'input_nc': 3,
    'isTrain': False,
@@ -105,16 +105,19 @@ def createResultLayer(image,name,result):
    image.add_layer(rl,0)
    gimp.displays_flush()

-def getnewface(img,mask,mask_m):
+def getnewface(img,mask,mask_m,cFlag):
    h,w,d = img.shape
    img = Image.fromarray(img)
    lmask = labelMask(mask)
    lmask_m = labelMask(mask_m)


-   os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
+   # os.environ["CUDA_VISIBLE_DEVICES"] = str(0)
    opt = getOptions()

+   if torch.cuda.is_available() and not cFlag:
+       opt.gpu_ids=[0]

    model = create_model(opt)

    params = get_params(opt, (512,512))
@@ -124,11 +127,11 @@ def getnewface(img,mask,mask_m):
    mask_m = transform_mask(Image.fromarray(np.uint8(lmask_m)))
    img = transform_image(img)

-   generated = model.inference(torch.FloatTensor([mask_m.numpy()]), torch.FloatTensor([mask.numpy()]), torch.FloatTensor([img.numpy()]))
+   generated = model.inference(torch.FloatTensor([mask_m.numpy()]), torch.FloatTensor([mask.numpy()]), torch.FloatTensor([img.numpy()]), cFlag)

    result = generated.permute(0, 2, 3, 1)
    if torch.cuda.is_available():
-       result = result.cpu().numpy()
+       result = result.detach().cpu().numpy()
    else:
        result = result.detach().numpy()

@@ -141,8 +144,8 @@ def getnewface(img,mask,mask_m):
    return result


-def facegen(imggimp, curlayer,layeri,layerm,layermm) :
-   if torch.cuda.is_available():
+def facegen(imggimp, curlayer,layeri,layerm,layermm,cFlag):
+   if torch.cuda.is_available() and not cFlag:
        gimp.progress_init("(Using GPU) Running face gen for " + layeri.name + "...")
    else:
        gimp.progress_init("(Using CPU) Running face gen for " + layeri.name + "...")
@@ -151,7 +154,7 @@ def facegen(imggimp, curlayer,layeri,layerm,layermm) :
    mask = channelData(layerm)
    mask_m = channelData(layermm)

-   cpy=getnewface(img,mask,mask_m)
+   cpy=getnewface(img,mask,mask_m,cFlag)
    createResultLayer(imggimp,'new_output',cpy)


@@ -168,8 +171,9 @@ register(
    [ (PF_IMAGE, "image", "Input image", None),
      (PF_DRAWABLE, "drawable", "Input drawable", None),
      (PF_LAYER, "drawinglayer", "Original Image:", None),
-     (PF_LAYER, "drawinglayer", "Original Mask:", None),
-     (PF_LAYER, "drawinglayer", "Modified Mask:", None),
+     (PF_LAYER, "drawinglayer2", "Original Mask:", None),
+     (PF_LAYER, "drawinglayer3", "Modified Mask:", None),
+     (PF_BOOL, "fcpu", "Force CPU", False),
    ],
    [],
    facegen, menu="<Image>/Layer/GIML-ML")
[file path not captured in this view]

@@ -48,13 +48,13 @@ def colorMask(mask):
        x=x+getlabelmat(mask,idx)
    return np.uint8(x)

-def getface(input_image):
+def getface(input_image,cFlag):
    save_pth = baseLoc+'weights/faceparse/79999_iter.pth'
    input_image = Image.fromarray(input_image)

    n_classes = 19
    net = BiSeNet(n_classes=n_classes)
-   if torch.cuda.is_available():
+   if torch.cuda.is_available() and not cFlag:
        net.cuda()
        net.load_state_dict(torch.load(save_pth))
    else:
@@ -74,7 +74,7 @@ def getface(input_image):
    img = input_image.resize((512, 512), Image.BILINEAR)
    img = to_tensor(img)
    img = torch.unsqueeze(img, 0)
-   if torch.cuda.is_available():
+   if torch.cuda.is_available() and not cFlag:
        img = img.cuda()
    out = net(img)[0]
    if torch.cuda.is_available():
@@ -137,8 +137,8 @@ def createResultLayer(image,name,result):
    image.add_layer(rl,0)
    gimp.displays_flush()

-def faceparse(img, layer) :
-   if torch.cuda.is_available():
+def faceparse(img, layer,cFlag) :
+   if torch.cuda.is_available() and not cFlag:
        gimp.progress_init("(Using GPU) Running face parse for " + layer.name + "...")
    else:
        gimp.progress_init("(Using CPU) Running face parse for " + layer.name + "...")
@@ -146,7 +146,7 @@ def faceparse(img, layer) :
    imgmat = channelData(layer)
    if imgmat.shape[2] == 4: # get rid of alpha channel
        imgmat = imgmat[:,:,0:3]
-   cpy=getface(imgmat)
+   cpy=getface(imgmat,cFlag)
    cpy = colorMask(cpy)
    createResultLayer(img,'new_output',cpy)

@@ -164,6 +164,7 @@ register(
    "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
    [ (PF_IMAGE, "image", "Input image", None),
      (PF_DRAWABLE, "drawable", "Input drawable", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
    ],
    [],
    faceparse, menu="<Image>/Layer/GIML-ML")
gimp-plugins/ideepcolor/data/colorize_image.py (4 changed lines, Normal file → Executable file)

@@ -250,7 +250,7 @@ class ColorizeImageTorch(ColorizeImageBase):
            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)

    # ***** Call forward *****
-   def net_forward(self, input_ab, input_mask):
+   def net_forward(self, input_ab, input_mask, f):
        # INPUTS
        # ab   2xXxX   input color patches (non-normalized)
        # mask 1xXxX   input mask, indicating which points have been provided
@@ -264,7 +264,7 @@ class ColorizeImageTorch(ColorizeImageBase):
        # return prediction
        # self.net.blobs['data_l_ab_mask'].data[...] = net_input_prepped
        # embed()
-       output_ab = self.net.forward(self.img_l_mc, self.input_ab_mc, self.input_mask_mult, self.mask_cent)[0, :, :, :].cpu().data.numpy()
+       output_ab = self.net.forward(self.img_l_mc, self.input_ab_mc, self.input_mask_mult, self.mask_cent,f)[0, :, :, :].cpu().data.numpy()
        self.output_rgb = lab2rgb_transpose(self.img_l, output_ab)
        # self.output_rgb = lab2rgb_transpose(self.img_l, self.net.blobs[self.pred_ab_layer].data[0, :, :, :])
gimp-plugins/ideepcolor/models/pytorch/model.py (12 changed lines, Normal file → Executable file)

@@ -131,7 +131,7 @@ class SIGGRAPHGenerator(nn.Module):
        self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='nearest'), ])
        self.softmax = nn.Sequential(*[nn.Softmax(dim=1), ])

-   def forward(self, input_A, input_B, mask_B, maskcent=0):
+   def forward(self, input_A, input_B, mask_B, maskcent=0,f=False):
        # input_A \in [-50,+50]
        # input_B \in [-110, +110]
        # mask_B \in [0, +1.0]
@@ -139,12 +139,14 @@ class SIGGRAPHGenerator(nn.Module):
        input_A = torch.Tensor(input_A)[None, :, :, :]
        input_B = torch.Tensor(input_B)[None, :, :, :]
        mask_B = torch.Tensor(mask_B)[None, :, :, :]

+       if torch.cuda.is_available() and not f:
+           input_A = input_A.cuda()
+           input_B = input_B.cuda()
+           mask_B = mask_B.cuda()
+
        mask_B = mask_B - maskcent

-       # input_A = torch.Tensor(input_A).cuda()[None, :, :, :]
-       # input_B = torch.Tensor(input_B).cuda()[None, :, :, :]
-       # mask_B = torch.Tensor(mask_B).cuda()[None, :, :, :]

        conv1_2 = self.model1(torch.cat((input_A / 100., input_B / 110., mask_B), dim=1))
        conv2_2 = self.model2(conv1_2[:, :, ::2, ::2])
        conv3_3 = self.model3(conv2_2[:, :, ::2, ::2])
[file path not captured in this view]

@@ -13,10 +13,11 @@ from monodepth_net import MonoDepthNet
import MiDaS_utils as MiDaS_utils
import numpy as np
import cv2
+import torch

-def getMonoDepth(input_image):
+def getMonoDepth(input_image,cFlag):
    image = input_image / 255.0
-   out = run_depth(image, baseLoc+'weights/MiDaS/model.pt', MonoDepthNet, MiDaS_utils, target_w=640)
+   out = run_depth(image, baseLoc+'weights/MiDaS/model.pt', MonoDepthNet, MiDaS_utils, target_w=640,f=cFlag)
    out = np.repeat(out[:, :, np.newaxis], 3, axis=2)
    d1,d2 = input_image.shape[:2]
    out = cv2.resize(out,(d2,d1))
@@ -34,17 +35,22 @@ def channelData(layer): # convert gimp image to numpy

def createResultLayer(image, name, result):
    rlBytes = np.uint8(result).tobytes();
-   rl = gimp.Layer(image, name, image.width, image.height, image.active_layer.type, 100, NORMAL_MODE)
+   rl = gimp.Layer(image, name, image.width, image.height, 0, 100, NORMAL_MODE)
    region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True)
    region[:, :] = rlBytes
    image.add_layer(rl, 0)
    gimp.displays_flush()


-def MonoDepth(img, layer):
-   gimp.progress_init("Generating disparity map for " + layer.name + "...")
+def MonoDepth(img, layer,cFlag):
+   if torch.cuda.is_available() and not cFlag:
+       gimp.progress_init("(Using GPU) Generating disparity map for " + layer.name + "...")
+   else:
+       gimp.progress_init("(Using CPU) Generating disparity map for " + layer.name + "...")
    imgmat = channelData(layer)
-   cpy = getMonoDepth(imgmat)
+   if imgmat.shape[2] == 4: # get rid of alpha channel
+       imgmat = imgmat[:,:,0:3]
+   cpy = getMonoDepth(imgmat,cFlag)
    createResultLayer(img, 'new_output', cpy)


@@ -59,6 +65,7 @@ register(
    "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
    [(PF_IMAGE, "image", "Input image", None),
     (PF_DRAWABLE, "drawable", "Input drawable", None),
+    (PF_BOOL, "fcpu", "Force CPU", False)
    ],
    [],
    MonoDepth, menu="<Image>/Layer/GIML-ML")
[file path not captured in this view]

@@ -28,8 +28,8 @@ def colorMask(mask):
    return np.uint8(x)


-def getnewimg(input_image,s):
-   opt=Namespace(cuda=torch.cuda.is_available(),
+def getnewimg(input_image,s,cFlag):
+   opt=Namespace(cuda=torch.cuda.is_available() and not cFlag,
                  model=baseLoc+'weights/super_resolution/model_srresnet.pth',
                  dataset='Set5',scale=s,gpus=0)

@@ -88,8 +88,8 @@ def createResultLayer(name,layer_np):
    gimp.displays_flush()


-def super_resolution(img, layer,scale) :
-   if torch.cuda.is_available():
+def super_resolution(img, layer,scale,cFlag) :
+   if torch.cuda.is_available() and not cFlag:
        gimp.progress_init("(Using GPU) Running super-resolution for " + layer.name + "...")
    else:
        gimp.progress_init("(Using CPU) Running super-resolution for " + layer.name + "...")
@@ -97,7 +97,7 @@ def super_resolution(img, layer,scale) :
    imgmat = channelData(layer)
    if imgmat.shape[2] == 4: # get rid of alpha channel
        imgmat = imgmat[:,:,0:3]
-   cpy = getnewimg(imgmat,scale)
+   cpy = getnewimg(imgmat,scale,cFlag)
    cpy = cv2.resize(cpy, (0,0), fx=scale/4, fy=scale/4)
    createResultLayer(layer.name+'_upscaled',cpy)

@@ -113,7 +113,8 @@ register(
    "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
    [ (PF_IMAGE, "image", "Input image", None),
      (PF_DRAWABLE, "drawable", "Input drawable", None),
-     (PF_SLIDER, "Scale", "Scale", 4, (1.1, 4, 0.5))
+     (PF_SLIDER, "Scale", "Scale", 4, (1.1, 4, 0.5)),
+     (PF_BOOL, "fcpu", "Force CPU", False)
    ],
    [],
    super_resolution, menu="<Image>/Layer/GIML-ML")