bug fixes

Kritik Soman 2020-11-28 17:31:37 +05:30
parent b431ca6dbc
commit 7b2e9112b3
54 changed files with 294 additions and 244 deletions

The following files changed mode only (Executable file → Normal file, 0 lines changed):

gimp-plugins/DeblurGANv2/predictorClass.py
gimp-plugins/EnlightenGAN/.idea/.gitignore (vendored)
gimp-plugins/EnlightenGAN/.idea/EnlightenGAN.iml
gimp-plugins/EnlightenGAN/.idea/misc.xml
gimp-plugins/EnlightenGAN/.idea/modules.xml
gimp-plugins/EnlightenGAN/.idea/vcs.xml
gimp-plugins/EnlightenGAN/License
gimp-plugins/EnlightenGAN/data/__init__.py
gimp-plugins/EnlightenGAN/data/base_dataset.py
gimp-plugins/EnlightenGAN/lib/__init__.py
gimp-plugins/EnlightenGAN/lib/nn/__init__.py
gimp-plugins/EnlightenGAN/lib/nn/modules/__init__.py
gimp-plugins/EnlightenGAN/lib/nn/modules/batchnorm.py
gimp-plugins/EnlightenGAN/lib/nn/modules/comm.py
gimp-plugins/EnlightenGAN/lib/nn/modules/replicate.py
gimp-plugins/EnlightenGAN/lib/nn/modules/unittest.py
gimp-plugins/EnlightenGAN/lib/nn/parallel/__init__.py
gimp-plugins/EnlightenGAN/models/__init__.py
gimp-plugins/EnlightenGAN/models/base_model.py
gimp-plugins/EnlightenGAN/models/models.py
gimp-plugins/EnlightenGAN/models/networks.py
gimp-plugins/EnlightenGAN/models/single_model.py
gimp-plugins/EnlightenGAN/util/__init__.py
gimp-plugins/EnlightenGAN/util/image_pool.py
gimp-plugins/EnlightenGAN/util/util.py
gimp-plugins/MiDaS/run.py
gimp-plugins/RIFE/LICENSE
gimp-plugins/RIFE/model/IFNet.py
gimp-plugins/RIFE/model/IFNet2F.py
gimp-plugins/RIFE/model/RIFE.py
gimp-plugins/RIFE/model/RIFE2F.py
gimp-plugins/RIFE/model/__init__.py
gimp-plugins/RIFE/model/loss.py
gimp-plugins/RIFE/model/warplayer.py


@@ -1,47 +1,57 @@
 import os
-baseLoc = os.path.dirname(os.path.realpath(__file__))+'/'
+baseLoc = os.path.dirname(os.path.realpath(__file__)) + '/'
 from gimpfu import *
 import sys
-sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'DeblurGANv2'])
+sys.path.extend([baseLoc + 'gimpenv/lib/python2.7', baseLoc + 'gimpenv/lib/python2.7/site-packages',
+                 baseLoc + 'gimpenv/lib/python2.7/site-packages/setuptools', baseLoc + 'DeblurGANv2'])
 import cv2
 from predictorClass import Predictor
 import numpy as np
 import torch

-def channelData(layer):#convert gimp image to numpy
-    region=layer.get_pixel_rgn(0, 0, layer.width,layer.height)
-    pixChars=region[:,:] # Take whole layer
-    bpp=region.bpp
-    # return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp)
-    return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp)
-def createResultLayer(image,name,result):
-    rlBytes=np.uint8(result).tobytes();
-    rl=gimp.Layer(image,name,image.width,image.height,0,100,NORMAL_MODE)
-    region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True)
-    region[:,:]=rlBytes
-    image.add_layer(rl,0)
+def channelData(layer):  # convert gimp image to numpy
+    region = layer.get_pixel_rgn(0, 0, layer.width, layer.height)
+    pixChars = region[:, :]  # Take whole layer
+    bpp = region.bpp
+    # return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp)
+    return np.frombuffer(pixChars, dtype=np.uint8).reshape(layer.height, layer.width, bpp)
+def createResultLayer(image, name, result):
+    rlBytes = np.uint8(result).tobytes();
+    rl = gimp.Layer(image, name, image.width, image.height, 0, 100, NORMAL_MODE)
+    region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True)
+    region[:, :] = rlBytes
+    image.add_layer(rl, 0)
     gimp.displays_flush()

-def getdeblur(img,flag):
-    predictor = Predictor(weights_path=baseLoc+'weights/deblur/'+'best_fpn.h5',cf=flag)
+def getdeblur(img, flag):
+    predictor = Predictor(weights_path=baseLoc + 'weights/deblur/' + 'best_fpn.h5', cf=flag)
     if img.shape[2] == 4:  # get rid of alpha channel
-        img = img[:,:,0:3]
-    pred = predictor(img, None,cf=flag)
+        img = img[:, :, 0:3]
+    pred = predictor(img, None, cf=flag)
     return pred

-def deblur(img, layer,flag):
-    if torch.cuda.is_available() and not flag:
-        gimp.progress_init("(Using GPU) Deblurring " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Deblurring " + layer.name + "...")
+def deblur(img, layer, flag):
     imgmat = channelData(layer)
-    pred = getdeblur(imgmat,flag)
-    createResultLayer(img,'deblur_'+layer.name,pred)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not flag:
+            gimp.progress_init("(Using GPU) Deblurring " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Deblurring " + layer.name + "...")
+        imgmat = channelData(layer)
+        pred = getdeblur(imgmat, flag)
+        createResultLayer(img, 'deblur_' + layer.name, pred)

 register(
@@ -52,13 +62,13 @@ register(
     "Your",
     "2020",
     "deblur...",
-    "*",  # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
-    [ (PF_IMAGE, "image", "Input image", None),
-      (PF_DRAWABLE, "drawable", "Input drawable", None),
-      # (PF_LAYER, "drawinglayer", "Original Image", None),
-      (PF_BOOL, "fcpu", "Force CPU", False)
-    ],
+    "*",  # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
+    [(PF_IMAGE, "image", "Input image", None),
+     (PF_DRAWABLE, "drawable", "Input drawable", None),
+     # (PF_LAYER, "drawinglayer", "Original Image", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
+     ],
     [],
     deblur, menu="<Image>/Layer/GIML-ML")

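The fix repeated across the plug-ins in this commit is the same guard: convert the selected layer to a NumPy array with channelData(), compare its shape against the image canvas, and only run the model when the two match, otherwise report the mismatch with pdb.gimp_message. A minimal sketch of that guard, meant to run inside GIMP's Python-Fu; run_model is a hypothetical stand-in for the per-plugin model call (getdeblur, clrImg, etc.), not a function from the repository:

from gimpfu import *  # brings gimp, pdb and the PF_* constants into scope
import numpy as np


def layer_matches_canvas(image, layer_array):
    # channelData() returns an array shaped (height, width, channels),
    # so the first two dimensions are compared against the canvas size
    return layer_array.shape[0] == image.height and layer_array.shape[1] == image.width


def guarded_run(image, layer, layer_array, run_model):
    # run_model is a hypothetical callback standing in for the model inference
    if not layer_matches_canvas(image, layer_array):
        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
        return None
    gimp.progress_init("Running model on " + layer.name + "...")
    return run_model(layer_array)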

@@ -54,15 +54,18 @@ def createResultLayer(image, name, result):

 def deepdehazing(img, layer, cFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Dehazing " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Dehazing " + layer.name + "...")
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy = clrImg(imgmat,cFlag)
-    createResultLayer(img, 'new_output', cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Dehazing " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Dehazing " + layer.name + "...")
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:,:,0:3]
+        cpy = clrImg(imgmat,cFlag)
+        createResultLayer(img, 'new_output', cpy)

 register(


@@ -99,15 +99,18 @@ def createResultLayer(image, name, result):

 def deepdenoise(img, layer,cFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Denoising " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Denoising " + layer.name + "...")
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy = clrImg(imgmat,cFlag)
-    createResultLayer(img, 'new_output', cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Denoising " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Denoising " + layer.name + "...")
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:,:,0:3]
+        cpy = clrImg(imgmat,cFlag)
+        createResultLayer(img, 'new_output', cpy)

 register(


@@ -1,12 +1,12 @@
 import os
-baseLoc = os.path.dirname(os.path.realpath(__file__))+'/'
+baseLoc = os.path.dirname(os.path.realpath(__file__)) + '/'
 from gimpfu import *
 import sys
-sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'pytorch-deep-image-matting'])
+sys.path.extend([baseLoc + 'gimpenv/lib/python2.7', baseLoc + 'gimpenv/lib/python2.7/site-packages',
+                 baseLoc + 'gimpenv/lib/python2.7/site-packages/setuptools', baseLoc + 'pytorch-deep-image-matting'])
 import torch
 from argparse import Namespace
@@ -17,43 +17,45 @@ import numpy as np
 from deploy import inference_img_whole

-def channelData(layer):#convert gimp image to numpy
-    region=layer.get_pixel_rgn(0, 0, layer.width,layer.height)
-    pixChars=region[:,:] # Take whole layer
-    bpp=region.bpp
+def channelData(layer):  # convert gimp image to numpy
+    region = layer.get_pixel_rgn(0, 0, layer.width, layer.height)
+    pixChars = region[:, :]  # Take whole layer
+    bpp = region.bpp
     # return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp)
-    return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp)
+    return np.frombuffer(pixChars, dtype=np.uint8).reshape(layer.height, layer.width, bpp)

-def createResultLayer(image,name,result):
-    rlBytes=np.uint8(result).tobytes();
-    rl=gimp.Layer(image,name,image.width,image.height,1,100,NORMAL_MODE)#image.active_layer.type or RGB_IMAGE
-    region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True)
-    region[:,:]=rlBytes
-    image.add_layer(rl,0)
+def createResultLayer(image, name, result):
+    rlBytes = np.uint8(result).tobytes();
+    rl = gimp.Layer(image, name, image.width, image.height, 1, 100,
+                    NORMAL_MODE)  # image.active_layer.type or RGB_IMAGE
+    region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True)
+    region[:, :] = rlBytes
+    image.add_layer(rl, 0)
     gimp.displays_flush()

-def getnewalpha(image,mask,cFlag):
-    if image.shape[2] == 4:  # get rid of alpha channel
-        image = image[:,:,0:3]
-    if mask.shape[2] == 4:  # get rid of alpha channel
-        mask = mask[:,:,0:3]
-    image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)
+def getnewalpha(image, mask, cFlag):
+    if image.shape[2] == 4:  # get rid of alpha channel
+        image = image[:, :, 0:3]
+    if mask.shape[2] == 4:  # get rid of alpha channel
+        mask = mask[:, :, 0:3]
+    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
     trimap = mask[:, :, 0]
     cudaFlag = False
     if torch.cuda.is_available() and not cFlag:
-        cudaFlag = True
-    args = Namespace(crop_or_resize='whole', cuda=cudaFlag, max_size=1600, resume=baseLoc+'weights/deepmatting/stage1_sad_57.1.pth', stage=1)
+        cudaFlag = True
+    args = Namespace(crop_or_resize='whole', cuda=cudaFlag, max_size=1600,
+                     resume=baseLoc + 'weights/deepmatting/stage1_sad_57.1.pth', stage=1)
     model = net.VGG16(args)
     if cudaFlag:
         ckpt = torch.load(args.resume)
     else:
-        ckpt = torch.load(args.resume,map_location=torch.device("cpu"))
+        ckpt = torch.load(args.resume, map_location=torch.device("cpu"))
     model.load_state_dict(ckpt['state_dict'], strict=True)
     if cudaFlag:
         model = model.cuda()
@@ -70,23 +72,23 @@ def getnewalpha(image,mask,cFlag):
     pred_mattes[trimap == 0] = 0
     # pred_mattes = np.repeat(pred_mattes[:, :, np.newaxis], 3, axis=2)
-    image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
-    pred_mattes = np.dstack((image,pred_mattes))
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    pred_mattes = np.dstack((image, pred_mattes))
     return pred_mattes

-def deepmatting(imggimp, curlayer,layeri,layerm,cFlag) :
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Running deep-matting for " + layeri.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Running deep-matting for " + layeri.name + "...")
+def deepmatting(imggimp, curlayer, layeri, layerm, cFlag):
     img = channelData(layeri)
     mask = channelData(layerm)
-    cpy=getnewalpha(img,mask,cFlag)
-    createResultLayer(imggimp,'new_output',cpy)
+    if img.shape[0] != imggimp.height or img.shape[1] != imggimp.width or mask.shape[0] != imggimp.height or mask.shape[1] != imggimp.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) for both layers and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Running deep-matting for " + layeri.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Running deep-matting for " + layeri.name + "...")
+        cpy = getnewalpha(img, mask, cFlag)
+        createResultLayer(imggimp, 'new_output', cpy)

 register(
@@ -97,13 +99,13 @@ register(
     "Your",
     "2020",
     "deepmatting...",
-    "*",  # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
-    [ (PF_IMAGE, "image", "Input image", None),
-      (PF_DRAWABLE, "drawable", "Input drawable", None),
-      (PF_LAYER, "drawinglayer", "Original Image:", None),
-      (PF_LAYER, "drawinglayer", "Trimap Mask:", None),
-      (PF_BOOL, "fcpu", "Force CPU", False)
-    ],
+    "*",  # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
+    [(PF_IMAGE, "image", "Input image", None),
+     (PF_DRAWABLE, "drawable", "Input drawable", None),
+     (PF_LAYER, "drawinglayer", "Original Image:", None),
+     (PF_LAYER, "drawinglayer", "Trimap Mask:", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
+     ],
     [],
     deepmatting, menu="<Image>/Layer/GIML-ML")

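In the matting plug-in the network's single-channel matte is attached to the RGB image with np.dstack, and the result layer is created as RGBA (the 1 passed to gimp.Layer). A NumPy-only sketch of that step, using random arrays in place of the real image and prediction:

import numpy as np

# stand-ins for the plug-in's data: an RGB image and a single-channel matte in [0, 255]
rgb = np.random.randint(0, 256, (64, 48, 3), dtype=np.uint8)
matte = np.random.randint(0, 256, (64, 48), dtype=np.uint8)

# np.dstack appends the matte as a fourth channel, giving an (H, W, 4) RGBA array
rgba = np.dstack((rgb, matte))
assert rgba.shape == (64, 48, 4)

# flattening to bytes in this channel order is what createResultLayer() writes
# into the pixel region of the new RGBA layer
raw = np.uint8(rgba).tobytes()
assert len(raw) == 64 * 48 * 4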

@@ -77,15 +77,19 @@ def createResultLayer(image, name, result):

 def Enlighten(img, layer,cFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Enlighten " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Enlighten " + layer.name + "...")
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy = getEnlighten(imgmat,cFlag)
-    createResultLayer(img, 'new_output', cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Enlighten " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Enlighten " + layer.name + "...")
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:,:,0:3]
+        cpy = getEnlighten(imgmat,cFlag)
+        createResultLayer(img, 'new_output', cpy)

 register(


@@ -145,17 +145,24 @@ def getnewface(img,mask,mask_m,cFlag):

 def facegen(imggimp, curlayer,layeri,layerm,layermm,cFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Running face gen for " + layeri.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Running face gen for " + layeri.name + "...")
     img = channelData(layeri)
     mask = channelData(layerm)
     mask_m = channelData(layermm)
-    cpy=getnewface(img,mask,mask_m,cFlag)
-    createResultLayer(imggimp,'new_output',cpy)
+    if img.shape[0] != imggimp.height or img.shape[1] != imggimp.width or mask.shape[0] != imggimp.height or mask.shape[1] != imggimp.width or mask_m.shape[0] != imggimp.height or mask_m.shape[1] != imggimp.width:
+        pdb.gimp_message("Do (Layer -> Layer to Image Size) for all layers and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Running face gen for " + layeri.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Running face gen for " + layeri.name + "...")
+        if img.shape[2] == 4:  # get rid of alpha channel
+            img = img[:, :, 0:3]
+        if mask.shape[2] == 4:  # get rid of alpha channel
+            mask = mask[:, :, 0:3]
+        if mask_m.shape[2] == 4:  # get rid of alpha channel
+            mask_m = mask_m[:, :, 0:3]
+        cpy = getnewface(img,mask,mask_m,cFlag)
+        createResultLayer(imggimp,'new_output',cpy)

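The face-gen fix above repeats the same 4-channel check for all three input layers; the check could be factored into a small helper. A sketch of that idea (strip_alpha is illustrative, not a function in the repository):

import numpy as np


def strip_alpha(arr):
    # drop the alpha channel if the layer was RGBA; leave RGB arrays untouched
    if arr.ndim == 3 and arr.shape[2] == 4:
        return arr[:, :, 0:3]
    return arr


# usage with stand-in arrays shaped like channelData() output
rgba = np.zeros((8, 8, 4), dtype=np.uint8)
rgb = np.zeros((8, 8, 3), dtype=np.uint8)
assert strip_alpha(rgba).shape[2] == 3
assert strip_alpha(rgb).shape[2] == 3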

@@ -1,55 +1,59 @@
 import os
-baseLoc = os.path.dirname(os.path.realpath(__file__))+'/'
+baseLoc = os.path.dirname(os.path.realpath(__file__)) + '/'
 from gimpfu import *
 import sys
-sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'face-parsing-PyTorch'])
+sys.path.extend([baseLoc + 'gimpenv/lib/python2.7', baseLoc + 'gimpenv/lib/python2.7/site-packages',
+                 baseLoc + 'gimpenv/lib/python2.7/site-packages/setuptools', baseLoc + 'face-parsing-PyTorch'])
 from model import BiSeNet
 from PIL import Image
 import torch
 from torchvision import transforms, datasets
 import numpy as np
 import cv2

-colors = np.array([[0,0,0],
-                   [204,0,0],
-                   [0,255,255],
-                   [51,255,255],
-                   [51,51,255],
-                   [204,0,204],
-                   [204,204,0],
-                   [102,51,0],
-                   [255,0,0],
-                   [0,204,204],
-                   [76,153,0],
-                   [102,204,0],
-                   [255,255,0],
-                   [0,0,153],
-                   [255,153,51],
-                   [0,51,0],
-                   [0,204,0],
-                   [0,0,204],
-                   [255,51,153]])
+colors = np.array([[0, 0, 0],
+                   [204, 0, 0],
+                   [0, 255, 255],
+                   [51, 255, 255],
+                   [51, 51, 255],
+                   [204, 0, 204],
+                   [204, 204, 0],
+                   [102, 51, 0],
+                   [255, 0, 0],
+                   [0, 204, 204],
+                   [76, 153, 0],
+                   [102, 204, 0],
+                   [255, 255, 0],
+                   [0, 0, 153],
+                   [255, 153, 51],
+                   [0, 51, 0],
+                   [0, 204, 0],
+                   [0, 0, 204],
+                   [255, 51, 153]])
 colors = colors.astype(np.uint8)

-def getlabelmat(mask,idx):
-    x=np.zeros((mask.shape[0],mask.shape[1],3))
-    x[mask==idx,0]=colors[idx][0]
-    x[mask==idx,1]=colors[idx][1]
-    x[mask==idx,2]=colors[idx][2]
+def getlabelmat(mask, idx):
+    x = np.zeros((mask.shape[0], mask.shape[1], 3))
+    x[mask == idx, 0] = colors[idx][0]
+    x[mask == idx, 1] = colors[idx][1]
+    x[mask == idx, 2] = colors[idx][2]
     return x

 def colorMask(mask):
-    x=np.zeros((mask.shape[0],mask.shape[1],3))
+    x = np.zeros((mask.shape[0], mask.shape[1], 3))
     for idx in range(19):
-        x=x+getlabelmat(mask,idx)
+        x = x + getlabelmat(mask, idx)
     return np.uint8(x)

-def getface(input_image,cFlag):
-    save_pth = baseLoc+'weights/faceparse/79999_iter.pth'
+def getface(input_image, cFlag):
+    save_pth = baseLoc + 'weights/faceparse/79999_iter.pth'
     input_image = Image.fromarray(input_image)
     n_classes = 19
@@ -60,16 +64,13 @@ def getface(input_image,cFlag):
     else:
         net.load_state_dict(torch.load(save_pth, map_location=lambda storage, loc: storage))
     net.eval()
     to_tensor = transforms.Compose([
         transforms.ToTensor(),
         transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
     ])
     with torch.no_grad():
         img = input_image.resize((512, 512), Image.BILINEAR)
         img = to_tensor(img)
@@ -88,8 +89,9 @@ def getface(input_image,cFlag):
     return parsing

 def getSeg(input_image):
-    model = torch.load(baseLoc+'deeplabv3+model.pt')
+    model = torch.load(baseLoc + 'deeplabv3+model.pt')
     model.eval()
     preprocess = transforms.Compose([
         transforms.ToTensor(),
@@ -99,7 +101,7 @@ def getSeg(input_image):
     input_image = Image.fromarray(input_image)
     input_tensor = preprocess(input_image)
-    input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
+    input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
     # move the input and model to GPU for speed if available
     if torch.cuda.is_available():
@@ -110,7 +112,6 @@ def getSeg(input_image):
         output = model(input_batch)['out'][0]
     output_predictions = output.argmax(0)
     # create a color pallette, selecting a color for each class
     palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
     colors = torch.as_tensor([i for i in range(21)])[:, None] * palette
@@ -119,38 +120,41 @@ def getSeg(input_image):
     r = Image.fromarray(output_predictions.byte().cpu().numpy()).resize(input_image.size)
     tmp = np.array(r)
-    tmp2 = 10*np.repeat(tmp[:, :, np.newaxis], 3, axis=2)
-    return tmp2
+    tmp2 = 10 * np.repeat(tmp[:, :, np.newaxis], 3, axis=2)
+    return tmp2

-def channelData(layer):#convert gimp image to numpy
-    region=layer.get_pixel_rgn(0, 0, layer.width,layer.height)
-    pixChars=region[:,:] # Take whole layer
-    bpp=region.bpp
-    return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp)
-def createResultLayer(image,name,result):
-    rlBytes=np.uint8(result).tobytes();
-    rl=gimp.Layer(image,name,image.width,image.height,0,100,NORMAL_MODE)
-    region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True)
-    region[:,:]=rlBytes
-    image.add_layer(rl,0)
+def channelData(layer):  # convert gimp image to numpy
+    region = layer.get_pixel_rgn(0, 0, layer.width, layer.height)
+    pixChars = region[:, :]  # Take whole layer
+    bpp = region.bpp
+    return np.frombuffer(pixChars, dtype=np.uint8).reshape(layer.height, layer.width, bpp)
+def createResultLayer(image, name, result):
+    rlBytes = np.uint8(result).tobytes()
+    rl = gimp.Layer(image, name, image.width, image.height, 0, 100, NORMAL_MODE)
+    region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True)
+    region[:, :] = rlBytes
+    image.add_layer(rl, 0)
     gimp.displays_flush()

-def faceparse(img, layer,cFlag) :
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Running face parse for " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Running face parse for " + layer.name + "...")
+def faceparse(img, layer, cFlag):
     imgmat = channelData(layer)
     if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy=getface(imgmat,cFlag)
-    cpy = colorMask(cpy)
-    createResultLayer(img,'new_output',cpy)
+        imgmat = imgmat[:, :, 0:3]
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Running face parse for " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Running face parse for " + layer.name + "...")
+        cpy = getface(imgmat, cFlag)
+        cpy = colorMask(cpy)
+        createResultLayer(img, 'new_output', cpy)

 register(
@@ -161,11 +165,11 @@ register(
     "Your",
     "2020",
     "faceparse...",
-    "*",  # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
-    [ (PF_IMAGE, "image", "Input image", None),
-      (PF_DRAWABLE, "drawable", "Input drawable", None),
-      (PF_BOOL, "fcpu", "Force CPU", False)
-    ],
+    "*",  # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
+    [(PF_IMAGE, "image", "Input image", None),
+     (PF_DRAWABLE, "drawable", "Input drawable", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
+     ],
     [],
     faceparse, menu="<Image>/Layer/GIML-ML")

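colorMask() above paints the 19-class parsing map by looping over the labels and summing one per-label image at a time. The same result can be produced in one step by indexing the palette with the label map; a small self-contained check of that equivalence (the three-entry palette here is only a stand-in for the full 19-colour table):

import numpy as np

# truncated palette, same idea as the plug-in's 19-entry colors array
colors = np.array([[0, 0, 0],
                   [204, 0, 0],
                   [0, 255, 255]], dtype=np.uint8)

mask = np.array([[0, 1],
                 [2, 1]])  # stand-in label map from the parsing network

# looping over labels (as colorMask does) and direct fancy indexing agree
looped = np.zeros(mask.shape + (3,), dtype=np.uint8)
for idx in range(len(colors)):
    looped[mask == idx] = colors[idx]

vectorized = colors[mask]
assert np.array_equal(looped, vectorized)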
The following files changed mode only (Executable file → Normal file, 0 lines changed):

gimp-plugins/ideepcolor/data/colorize_image.py
gimp-plugins/ideepcolor/models/pytorch/model.py

@@ -87,21 +87,25 @@ def getinter(img_s, img_e, c_flag, string_path):

 def interpolateframes(imggimp, curlayer, string_path, layer_s, layer_e, c_flag):
-    if torch.cuda.is_available() and not c_flag:
-        gimp.progress_init("(Using GPU) Running slomo and saving frames in "+string_path)
-        # device = torch.device("cuda")
-    else:
-        gimp.progress_init("(Using CPU) Running slomo and saving frames in "+string_path)
-        # device = torch.device("cpu")
     layer_1 = channelData(layer_s)
     layer_2 = channelData(layer_e)
-    if layer_1.shape[2] == 4:  # get rid of alpha channel
-        layer_1 = layer_1[:, :, 0:3]
-    if layer_2.shape[2] == 4:  # get rid of alpha channel
-        layer_2 = layer_2[:, :, 0:3]
-    getinter(layer_1, layer_2, c_flag, string_path)
-    # pdb.gimp_message("Saved")
+    if layer_1.shape[0] != imggimp.height or layer_1.shape[1] != imggimp.width or layer_2.shape[0] != imggimp.height or layer_2.shape[1] != imggimp.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) for both layers and try again.")
+    else:
+        if torch.cuda.is_available() and not c_flag:
+            gimp.progress_init("(Using GPU) Running slomo and saving frames in "+string_path)
+            # device = torch.device("cuda")
+        else:
+            gimp.progress_init("(Using CPU) Running slomo and saving frames in "+string_path)
+            # device = torch.device("cpu")
+        if layer_1.shape[2] == 4:  # get rid of alpha channel
+            layer_1 = layer_1[:, :, 0:3]
+        if layer_2.shape[2] == 4:  # get rid of alpha channel
+            layer_2 = layer_2[:, :, 0:3]
+        getinter(layer_1, layer_2, c_flag, string_path)
+        # pdb.gimp_message("Saved")

 register(


@@ -29,29 +29,32 @@ def createResultLayer(image,name,result):

 def kmeans(imggimp, curlayer,layeri,n_clusters,locflag) :
     image = channelData(layeri)
-    if image.shape[2] == 4:  # get rid of alpha channel
-        image = image[:,:,0:3]
-    h,w,d = image.shape
-    # reshape the image to a 2D array of pixels and 3 color values (RGB)
-    pixel_values = image.reshape((-1, 3))
-    if locflag:
-        xx,yy = np.meshgrid(range(w),range(h))
-        x = xx.reshape(-1,1)
-        y = yy.reshape(-1,1)
-        pixel_values = np.concatenate((pixel_values,x,y),axis=1)
-    pixel_values = np.float32(pixel_values)
-    c,out = kmeans2(pixel_values,n_clusters)
-    if locflag:
-        c = np.uint8(c[:,0:3])
-    else:
-        c = np.uint8(c)
-    segmented_image = c[out.flatten()]
-    segmented_image = segmented_image.reshape((h,w,d))
-    createResultLayer(imggimp,'new_output',segmented_image)
+    if image.shape[0] != imggimp.height or image.shape[1] != imggimp.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if image.shape[2] == 4:  # get rid of alpha channel
+            image = image[:,:,0:3]
+        h,w,d = image.shape
+        # reshape the image to a 2D array of pixels and 3 color values (RGB)
+        pixel_values = image.reshape((-1, 3))
+        if locflag:
+            xx,yy = np.meshgrid(range(w),range(h))
+            x = xx.reshape(-1,1)
+            y = yy.reshape(-1,1)
+            pixel_values = np.concatenate((pixel_values,x,y),axis=1)
+        pixel_values = np.float32(pixel_values)
+        c,out = kmeans2(pixel_values,n_clusters)
+        if locflag:
+            c = np.uint8(c[:,0:3])
+        else:
+            c = np.uint8(c)
+        segmented_image = c[out.flatten()]
+        segmented_image = segmented_image.reshape((h,w,d))
+        createResultLayer(imggimp,'new_output',segmented_image)

 register(

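The kmeans plug-in clusters one row of colour values per pixel and, when locflag is set, appends the pixel coordinates so that clusters stay spatially compact. A standalone sketch of the same steps; it assumes kmeans2 comes from scipy.cluster.vq, which matches the call used here, although the actual import sits outside this hunk:

import numpy as np
from scipy.cluster.vq import kmeans2  # assumed source of kmeans2

h, w = 40, 30
image = np.random.randint(0, 256, (h, w, 3)).astype(np.float32)

# colour-only features: one (R, G, B) row per pixel
pixel_values = image.reshape((-1, 3))

# optional spatial features, as the locflag branch does: append pixel coordinates
xx, yy = np.meshgrid(range(w), range(h))
pixel_values = np.concatenate((pixel_values, xx.reshape(-1, 1), yy.reshape(-1, 1)), axis=1)

centroids, labels = kmeans2(np.float32(pixel_values), 3)

# paint each pixel with the colour part of its centroid, as the plug-in does
segmented = np.uint8(centroids[:, 0:3])[labels].reshape((h, w, 3))
assert segmented.shape == (h, w, 3)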

@@ -43,15 +43,19 @@ def createResultLayer(image, name, result):

 def MonoDepth(img, layer,cFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Generating disparity map for " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Generating disparity map for " + layer.name + "...")
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy = getMonoDepth(imgmat,cFlag)
-    createResultLayer(img, 'new_output', cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Generating disparity map for " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Generating disparity map for " + layer.name + "...")
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:,:,0:3]
+        cpy = getMonoDepth(imgmat,cFlag)
+        createResultLayer(img, 'new_output', cpy)

 register(


@@ -62,16 +62,19 @@ def createResultLayer(image,name,result):
     gimp.displays_flush()

 def deeplabv3(img, layer,cFlag) :
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Generating semantic segmentation map for " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Generating semantic segmentation map for " + layer.name + "...")
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy=getSeg(imgmat,cFlag)
-    createResultLayer(img,'new_output',cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Generating semantic segmentation map for " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Generating semantic segmentation map for " + layer.name + "...")
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:,:,0:3]
+        cpy=getSeg(imgmat,cFlag)
+        createResultLayer(img,'new_output',cpy)

 register(


@@ -117,20 +117,23 @@ def createResultLayer(image, name, result):
     gimp.displays_flush()

 def super_resolution(img, layer, scale, cFlag, fFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Running super-resolution for " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Running super-resolution for " + layer.name + "...")
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:, :, 0:3]
-    cpy = getnewimg(imgmat, scale, cFlag, fFlag)
-    cpy = cv2.resize(cpy, (0, 0), fx=scale / 4, fy=scale / 4)
-    if scale==1:
-        createResultLayer(img, layer.name + '_super', cpy)
-    else:
-        createResultFile(layer.name + '_super', cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Running super-resolution for " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Running super-resolution for " + layer.name + "...")
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:, :, 0:3]
+        cpy = getnewimg(imgmat, scale, cFlag, fFlag)
+        cpy = cv2.resize(cpy, (0, 0), fx=scale / 4, fy=scale / 4)
+        if scale==1:
+            createResultLayer(img, layer.name + '_super', cpy)
+        else:
+            createResultFile(layer.name + '_super', cpy)

 register(
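
All of these plug-ins are wired into GIMP through the same gimpfu register() call, with PF_IMAGE/PF_DRAWABLE first, optional PF_LAYER and PF_BOOL parameters, and the '<Image>/Layer/GIML-ML' menu path, as the register blocks above show. A minimal, hypothetical plug-in using that layout (hello_guard is not part of the repository):

from gimpfu import *


def hello_guard(img, layer, fcpu):
    # hypothetical do-nothing body, standing in for the real model calls
    gimp.progress_init("Checking " + layer.name + "...")
    pdb.gimp_message("Layer size: %d x %d" % (layer.width, layer.height))


register(
    "hello-guard",  # hypothetical procedure name
    "Example of the parameter layout used by these plug-ins",
    "Example of the parameter layout used by these plug-ins",
    "Your", "Your", "2020",
    "hello guard...",
    "*",  # image types, as in the diffs above
    [(PF_IMAGE, "image", "Input image", None),
     (PF_DRAWABLE, "drawable", "Input drawable", None),
     (PF_BOOL, "fcpu", "Force CPU", False)],
    [],
    hello_guard, menu="<Image>/Layer/GIML-ML")

main()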