diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/networks.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/networks.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/pix2pixHD_model.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/pix2pixHD_model.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/DeblurGANv2/predictorClass.py b/gimp-plugins/DeblurGANv2/predictorClass.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/.idea/.gitignore b/gimp-plugins/EnlightenGAN/.idea/.gitignore
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/.idea/EnlightenGAN.iml b/gimp-plugins/EnlightenGAN/.idea/EnlightenGAN.iml
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/.idea/inspectionProfiles/profiles_settings.xml b/gimp-plugins/EnlightenGAN/.idea/inspectionProfiles/profiles_settings.xml
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/.idea/misc.xml b/gimp-plugins/EnlightenGAN/.idea/misc.xml
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/.idea/modules.xml b/gimp-plugins/EnlightenGAN/.idea/modules.xml
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/.idea/vcs.xml b/gimp-plugins/EnlightenGAN/.idea/vcs.xml
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/License b/gimp-plugins/EnlightenGAN/License
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/data/__init__.py b/gimp-plugins/EnlightenGAN/data/__init__.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/data/base_dataset.py b/gimp-plugins/EnlightenGAN/data/base_dataset.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/__init__.py b/gimp-plugins/EnlightenGAN/lib/__init__.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/nn/__init__.py b/gimp-plugins/EnlightenGAN/lib/nn/__init__.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/nn/modules/__init__.py b/gimp-plugins/EnlightenGAN/lib/nn/modules/__init__.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/nn/modules/batchnorm.py b/gimp-plugins/EnlightenGAN/lib/nn/modules/batchnorm.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/nn/modules/comm.py b/gimp-plugins/EnlightenGAN/lib/nn/modules/comm.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/nn/modules/replicate.py b/gimp-plugins/EnlightenGAN/lib/nn/modules/replicate.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/nn/modules/tests/test_numeric_batchnorm.py b/gimp-plugins/EnlightenGAN/lib/nn/modules/tests/test_numeric_batchnorm.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/nn/modules/tests/test_sync_batchnorm.py b/gimp-plugins/EnlightenGAN/lib/nn/modules/tests/test_sync_batchnorm.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/nn/modules/unittest.py b/gimp-plugins/EnlightenGAN/lib/nn/modules/unittest.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/nn/parallel/__init__.py b/gimp-plugins/EnlightenGAN/lib/nn/parallel/__init__.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/lib/nn/parallel/data_parallel.py b/gimp-plugins/EnlightenGAN/lib/nn/parallel/data_parallel.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/models/__init__.py b/gimp-plugins/EnlightenGAN/models/__init__.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/models/base_model.py b/gimp-plugins/EnlightenGAN/models/base_model.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/models/models.py b/gimp-plugins/EnlightenGAN/models/models.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/models/networks.py b/gimp-plugins/EnlightenGAN/models/networks.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/models/single_model.py b/gimp-plugins/EnlightenGAN/models/single_model.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/util/__init__.py b/gimp-plugins/EnlightenGAN/util/__init__.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/util/image_pool.py b/gimp-plugins/EnlightenGAN/util/image_pool.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/EnlightenGAN/util/util.py b/gimp-plugins/EnlightenGAN/util/util.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/MiDaS/run.py b/gimp-plugins/MiDaS/run.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/RIFE/LICENSE b/gimp-plugins/RIFE/LICENSE
old mode 100755
new mode 100644
diff --git a/gimp-plugins/RIFE/model/IFNet.py b/gimp-plugins/RIFE/model/IFNet.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/RIFE/model/IFNet2F.py b/gimp-plugins/RIFE/model/IFNet2F.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/RIFE/model/RIFE.py b/gimp-plugins/RIFE/model/RIFE.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/RIFE/model/RIFE2F.py b/gimp-plugins/RIFE/model/RIFE2F.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/RIFE/model/__init__.py b/gimp-plugins/RIFE/model/__init__.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/RIFE/model/loss.py b/gimp-plugins/RIFE/model/loss.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/RIFE/model/warplayer.py b/gimp-plugins/RIFE/model/warplayer.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/deblur.py b/gimp-plugins/deblur.py
index e44fe60..b367764 100755
--- a/gimp-plugins/deblur.py
+++ b/gimp-plugins/deblur.py
@@ -1,47 +1,57 @@
-import os
-baseLoc = os.path.dirname(os.path.realpath(__file__))+'/'
+import os
+baseLoc = os.path.dirname(os.path.realpath(__file__)) + '/'
 
 from gimpfu import *
 import sys
-sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'DeblurGANv2'])
+
+sys.path.extend([baseLoc + 'gimpenv/lib/python2.7', baseLoc + 'gimpenv/lib/python2.7/site-packages',
+                 baseLoc + 'gimpenv/lib/python2.7/site-packages/setuptools', baseLoc + 'DeblurGANv2'])
 
 import cv2
 from predictorClass import Predictor
 import numpy as np
-import torch
+import torch
+
 
-def channelData(layer):#convert gimp image to numpy
-    region=layer.get_pixel_rgn(0, 0, layer.width,layer.height)
-    pixChars=region[:,:] # Take whole layer
-    bpp=region.bpp
+def channelData(layer):  # convert gimp image to numpy
+    region = layer.get_pixel_rgn(0, 0, layer.width, layer.height)
+    pixChars = region[:, :]  # Take whole layer
+    bpp = region.bpp
     # return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp)
-    return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp)
-
-def createResultLayer(image,name,result):
-    rlBytes=np.uint8(result).tobytes();
-    rl=gimp.Layer(image,name,image.width,image.height,0,100,NORMAL_MODE)
-    region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True)
-    region[:,:]=rlBytes
-    image.add_layer(rl,0)
+    return np.frombuffer(pixChars, dtype=np.uint8).reshape(layer.height, layer.width, bpp)
+
+
+def createResultLayer(image, name, result):
+    rlBytes = np.uint8(result).tobytes();
+    rl = gimp.Layer(image, name, image.width, image.height, 0, 100, NORMAL_MODE)
+    region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True)
+    region[:, :] = rlBytes
+    image.add_layer(rl, 0)
     gimp.displays_flush()
 
-def getdeblur(img,flag):
-    predictor = Predictor(weights_path=baseLoc+'weights/deblur/'+'best_fpn.h5',cf=flag)
+
+def getdeblur(img, flag):
+    predictor = Predictor(weights_path=baseLoc + 'weights/deblur/' + 'best_fpn.h5', cf=flag)
     if img.shape[2] == 4:  # get rid of alpha channel
-        img = img[:,:,0:3]
-    pred = predictor(img, None,cf=flag)
+        img = img[:, :, 0:3]
+    pred = predictor(img, None, cf=flag)
     return pred
 
-def deblur(img, layer,flag):
-    if torch.cuda.is_available() and not flag:
-        gimp.progress_init("(Using GPU) Deblurring " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Deblurring " + layer.name + "...")
+def deblur(img, layer, flag):
     imgmat = channelData(layer)
-    pred = getdeblur(imgmat,flag)
-    createResultLayer(img,'deblur_'+layer.name,pred)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not flag:
+            gimp.progress_init("(Using GPU) Deblurring " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Deblurring " + layer.name + "...")
+
+        imgmat = channelData(layer)
+        pred = getdeblur(imgmat, flag)
+        createResultLayer(img, 'deblur_' + layer.name, pred)
 
 
 register(
@@ -52,13 +62,13 @@ register(
     "Your",
     "2020",
     "deblur...",
-    "*",      # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
-    [ (PF_IMAGE, "image", "Input image", None),
-      (PF_DRAWABLE, "drawable", "Input drawable", None),
-      # (PF_LAYER, "drawinglayer", "Original Image", None),
-      (PF_BOOL, "fcpu", "Force CPU", False)
-
-    ],
+    "*",  # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
+    [(PF_IMAGE, "image", "Input image", None),
+     (PF_DRAWABLE, "drawable", "Input drawable", None),
+     # (PF_LAYER, "drawinglayer", "Original Image", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
+
+     ],
     [],
     deblur,
     menu="/Layer/GIML-ML")
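Every per-plugin hunk in this patch adds the same guard: the buffer returned by channelData() must match the image canvas before the model runs. A minimal standalone sketch of that check in plain numpy, where covers_canvas is a hypothetical helper name rather than anything in the patch:

```python
import numpy as np

def covers_canvas(layer_arr, canvas_width, canvas_height):
    # Mirrors the size check added in each plugin: only run the model when
    # the layer buffer spans the full image canvas.
    return layer_arr.shape[0] == canvas_height and layer_arr.shape[1] == canvas_width

layer_arr = np.zeros((200, 300, 3), dtype=np.uint8)   # a 300x200 layer
print(covers_canvas(layer_arr, 300, 200))             # True
print(covers_canvas(layer_arr, 400, 300))             # False: ask for Layer to Image Size
```

When the check fails, the plugins report the problem with pdb.gimp_message() and skip processing instead of writing a result layer whose size does not match the canvas.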
diff --git a/gimp-plugins/deepdehaze.py b/gimp-plugins/deepdehaze.py
index 3157100..b45b1e3 100755
--- a/gimp-plugins/deepdehaze.py
+++ b/gimp-plugins/deepdehaze.py
@@ -54,15 +54,18 @@ def createResultLayer(image, name, result):
 
 
 def deepdehazing(img, layer, cFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Dehazing " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Dehazing " + layer.name + "...")
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy = clrImg(imgmat,cFlag)
-    createResultLayer(img, 'new_output', cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Dehazing " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Dehazing " + layer.name + "...")
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:,:,0:3]
+        cpy = clrImg(imgmat,cFlag)
+        createResultLayer(img, 'new_output', cpy)
 
 
 register(
diff --git a/gimp-plugins/deepdenoise.py b/gimp-plugins/deepdenoise.py
index 80893c3..000c55d 100755
--- a/gimp-plugins/deepdenoise.py
+++ b/gimp-plugins/deepdenoise.py
@@ -99,15 +99,18 @@ def createResultLayer(image, name, result):
 
 
 def deepdenoise(img, layer,cFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Denoising " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Denoising " + layer.name + "...")
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy = clrImg(imgmat,cFlag)
-    createResultLayer(img, 'new_output', cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Denoising " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Denoising " + layer.name + "...")
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:,:,0:3]
+        cpy = clrImg(imgmat,cFlag)
+        createResultLayer(img, 'new_output', cpy)
 
 
 register(
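The dehaze and denoise hunks keep the existing alpha-channel handling, now nested under the size check: the layer is sliced down to RGB before being handed to the model. A small sketch of that slicing, where strip_alpha is a hypothetical helper name used only for illustration:

```python
import numpy as np

def strip_alpha(img):
    # The plugins keep only the first three channels when the layer carries
    # an alpha channel, since the models expect plain RGB input.
    if img.ndim == 3 and img.shape[2] == 4:
        return img[:, :, 0:3]
    return img

rgba = np.zeros((8, 8, 4), dtype=np.uint8)
print(strip_alpha(rgba).shape)  # (8, 8, 3)
```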
diff --git a/gimp-plugins/deepmatting.py b/gimp-plugins/deepmatting.py
index 97b6264..c8ad676 100755
--- a/gimp-plugins/deepmatting.py
+++ b/gimp-plugins/deepmatting.py
@@ -1,12 +1,12 @@
-import os
-baseLoc = os.path.dirname(os.path.realpath(__file__))+'/'
+import os
+baseLoc = os.path.dirname(os.path.realpath(__file__)) + '/'
 
 from gimpfu import *
 import sys
-sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'pytorch-deep-image-matting'])
-
+sys.path.extend([baseLoc + 'gimpenv/lib/python2.7', baseLoc + 'gimpenv/lib/python2.7/site-packages',
+                 baseLoc + 'gimpenv/lib/python2.7/site-packages/setuptools', baseLoc + 'pytorch-deep-image-matting'])
 
 import torch
 from argparse import Namespace
@@ -17,43 +17,45 @@ import numpy as np
 from deploy import inference_img_whole
 
-
-def channelData(layer):#convert gimp image to numpy
-    region=layer.get_pixel_rgn(0, 0, layer.width,layer.height)
-    pixChars=region[:,:] # Take whole layer
-    bpp=region.bpp
+def channelData(layer):  # convert gimp image to numpy
+    region = layer.get_pixel_rgn(0, 0, layer.width, layer.height)
+    pixChars = region[:, :]  # Take whole layer
+    bpp = region.bpp
     # return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp)
-    return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp)
+    return np.frombuffer(pixChars, dtype=np.uint8).reshape(layer.height, layer.width, bpp)
 
 
-def createResultLayer(image,name,result):
-    rlBytes=np.uint8(result).tobytes();
-    rl=gimp.Layer(image,name,image.width,image.height,1,100,NORMAL_MODE)#image.active_layer.type or RGB_IMAGE
-    region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True)
-    region[:,:]=rlBytes
-    image.add_layer(rl,0)
+def createResultLayer(image, name, result):
+    rlBytes = np.uint8(result).tobytes();
+    rl = gimp.Layer(image, name, image.width, image.height, 1, 100,
+                    NORMAL_MODE)  # image.active_layer.type or RGB_IMAGE
+    region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True)
+    region[:, :] = rlBytes
+    image.add_layer(rl, 0)
     gimp.displays_flush()
 
-def getnewalpha(image,mask,cFlag):
+
+def getnewalpha(image, mask, cFlag):
     if image.shape[2] == 4:  # get rid of alpha channel
-        image = image[:,:,0:3]
+        image = image[:, :, 0:3]
     if mask.shape[2] == 4:  # get rid of alpha channel
-        mask = mask[:,:,0:3]
+        mask = mask[:, :, 0:3]
 
-    image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)
+    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
     trimap = mask[:, :, 0]
 
     cudaFlag = False
     if torch.cuda.is_available() and not cFlag:
-        cudaFlag = True
+        cudaFlag = True
 
-    args = Namespace(crop_or_resize='whole', cuda=cudaFlag, max_size=1600, resume=baseLoc+'weights/deepmatting/stage1_sad_57.1.pth', stage=1)
+    args = Namespace(crop_or_resize='whole', cuda=cudaFlag, max_size=1600,
+                     resume=baseLoc + 'weights/deepmatting/stage1_sad_57.1.pth', stage=1)
     model = net.VGG16(args)
     if cudaFlag:
         ckpt = torch.load(args.resume)
     else:
-        ckpt = torch.load(args.resume,map_location=torch.device("cpu"))
+        ckpt = torch.load(args.resume, map_location=torch.device("cpu"))
     model.load_state_dict(ckpt['state_dict'], strict=True)
     if cudaFlag:
         model = model.cuda()
@@ -69,25 +71,25 @@ def getnewalpha(image,mask,cFlag):
     pred_mattes[trimap == 255] = 255
     pred_mattes[trimap == 0] = 0
     # pred_mattes = np.repeat(pred_mattes[:, :, np.newaxis], 3, axis=2)
-
-    image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
-    pred_mattes = np.dstack((image,pred_mattes))
-    return pred_mattes
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+    pred_mattes = np.dstack((image, pred_mattes))
+    return pred_mattes
 
 
-def deepmatting(imggimp, curlayer,layeri,layerm,cFlag) :
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Running deep-matting for " + layeri.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Running deep-matting for " + layeri.name + "...")
+def deepmatting(imggimp, curlayer, layeri, layerm, cFlag):
     img = channelData(layeri)
     mask = channelData(layerm)
+    if img.shape[0] != imggimp.height or img.shape[1] != imggimp.width or mask.shape[0] != imggimp.height or mask.shape[1] != imggimp.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) for both layers and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Running deep-matting for " + layeri.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Running deep-matting for " + layeri.name + "...")
+        cpy = getnewalpha(img, mask, cFlag)
+        createResultLayer(imggimp, 'new_output', cpy)
-
-    cpy=getnewalpha(img,mask,cFlag)
-    createResultLayer(imggimp,'new_output',cpy)
-
-
 
 register(
     "deep-matting",
@@ -97,13 +99,13 @@ register(
     "Your",
     "2020",
     "deepmatting...",
-    "*",      # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
-    [ (PF_IMAGE, "image", "Input image", None),
-      (PF_DRAWABLE, "drawable", "Input drawable", None),
-      (PF_LAYER, "drawinglayer", "Original Image:", None),
-      (PF_LAYER, "drawinglayer", "Trimap Mask:", None),
-      (PF_BOOL, "fcpu", "Force CPU", False)
-    ],
+    "*",  # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
+    [(PF_IMAGE, "image", "Input image", None),
+     (PF_DRAWABLE, "drawable", "Input drawable", None),
+     (PF_LAYER, "drawinglayer", "Original Image:", None),
+     (PF_LAYER, "drawinglayer", "Trimap Mask:", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
+     ],
    [],
    deepmatting,
    menu="/Layer/GIML-ML")
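getnewalpha() attaches the predicted matte to the RGB image with np.dstack, so the layer written back by createResultLayer() carries the matte as its alpha channel. A toy illustration of that stacking with stand-in arrays in place of the model output:

```python
import numpy as np

rgb = np.zeros((4, 4, 3), dtype=np.uint8)       # stand-in for the input image
matte = np.full((4, 4), 128, dtype=np.uint8)    # stand-in for pred_mattes
rgba = np.dstack((rgb, matte))                  # RGB plus alpha, as in getnewalpha()
print(rgba.shape)  # (4, 4, 4)
```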
diff --git a/gimp-plugins/enlighten.py b/gimp-plugins/enlighten.py
index a13d918..89d17a6 100755
--- a/gimp-plugins/enlighten.py
+++ b/gimp-plugins/enlighten.py
@@ -77,15 +77,19 @@ def createResultLayer(image, name, result):
 
 
 def Enlighten(img, layer,cFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Enlighten " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Enlighten " + layer.name + "...")
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy = getEnlighten(imgmat,cFlag)
-    createResultLayer(img, 'new_output', cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Enlighten " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Enlighten " + layer.name + "...")
+
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:,:,0:3]
+        cpy = getEnlighten(imgmat,cFlag)
+        createResultLayer(img, 'new_output', cpy)
 
 
 register(
diff --git a/gimp-plugins/facegen.py b/gimp-plugins/facegen.py
index bc46544..1051cf3 100755
--- a/gimp-plugins/facegen.py
+++ b/gimp-plugins/facegen.py
@@ -145,17 +145,24 @@ def getnewface(img,mask,mask_m,cFlag):
 
 
 def facegen(imggimp, curlayer,layeri,layerm,layermm,cFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Running face gen for " + layeri.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Running face gen for " + layeri.name + "...")
-
     img = channelData(layeri)
     mask = channelData(layerm)
     mask_m = channelData(layermm)
-
-    cpy=getnewface(img,mask,mask_m,cFlag)
-    createResultLayer(imggimp,'new_output',cpy)
+    if img.shape[0] != imggimp.height or img.shape[1] != imggimp.width or mask.shape[0] != imggimp.height or mask.shape[1] != imggimp.width or mask_m.shape[0] != imggimp.height or mask_m.shape[1] != imggimp.width:
+        pdb.gimp_message("Do (Layer -> Layer to Image Size) for all layers and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Running face gen for " + layeri.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Running face gen for " + layeri.name + "...")
+        if img.shape[2] == 4:  # get rid of alpha channel
+            img = img[:, :, 0:3]
+        if mask.shape[2] == 4:  # get rid of alpha channel
+            mask = mask[:, :, 0:3]
+        if mask_m.shape[2] == 4:  # get rid of alpha channel
+            mask_m = mask_m[:, :, 0:3]
+        cpy = getnewface(img,mask,mask_m,cFlag)
+        createResultLayer(imggimp,'new_output',cpy)
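channelData() and createResultLayer() are the same numpy round-trip in every plugin: the raw pixel-region bytes are reshaped to (height, width, bpp), and the result array is converted back with tobytes(). A self-contained sketch of that round-trip using stand-in bytes, since the gimpfu pixel region object is only available inside GIMP:

```python
import numpy as np

height, width, bpp = 4, 5, 3
pix_chars = bytes(bytearray(range(height * width * bpp)))  # stand-in for region[:, :]

# channelData() path: bytes -> (height, width, bpp) array
arr = np.frombuffer(pix_chars, dtype=np.uint8).reshape(height, width, bpp)

# createResultLayer() path: array -> bytes written back into the new layer
assert np.uint8(arr).tobytes() == pix_chars
print(arr.shape)  # (4, 5, 3)
```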
diff --git a/gimp-plugins/faceparse.py b/gimp-plugins/faceparse.py
index 2751d0a..731b591 100755
--- a/gimp-plugins/faceparse.py
+++ b/gimp-plugins/faceparse.py
@@ -1,55 +1,59 @@
-import os
-baseLoc = os.path.dirname(os.path.realpath(__file__))+'/'
+import os
+baseLoc = os.path.dirname(os.path.realpath(__file__)) + '/'
 
 from gimpfu import *
 import sys
-sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'face-parsing-PyTorch'])
-
+sys.path.extend([baseLoc + 'gimpenv/lib/python2.7', baseLoc + 'gimpenv/lib/python2.7/site-packages',
+                 baseLoc + 'gimpenv/lib/python2.7/site-packages/setuptools', baseLoc + 'face-parsing-PyTorch'])
 
 from model import BiSeNet
 from PIL import Image
 import torch
 from torchvision import transforms, datasets
 import numpy as np
-
-colors = np.array([[0,0,0],
-[204,0,0],
-[0,255,255],
-[51,255,255],
-[51,51,255],
-[204,0,204],
-[204,204,0],
-[102,51,0],
-[255,0,0],
-[0,204,204],
-[76,153,0],
-[102,204,0],
-[255,255,0],
-[0,0,153],
-[255,153,51],
-[0,51,0],
-[0,204,0],
-[0,0,204],
-[255,51,153]])
+import cv2
+
+colors = np.array([[0, 0, 0],
+                   [204, 0, 0],
+                   [0, 255, 255],
+                   [51, 255, 255],
+                   [51, 51, 255],
+                   [204, 0, 204],
+                   [204, 204, 0],
+                   [102, 51, 0],
+                   [255, 0, 0],
+                   [0, 204, 204],
+                   [76, 153, 0],
+                   [102, 204, 0],
+                   [255, 255, 0],
+                   [0, 0, 153],
+                   [255, 153, 51],
+                   [0, 51, 0],
+                   [0, 204, 0],
+                   [0, 0, 204],
+                   [255, 51, 153]])
 colors = colors.astype(np.uint8)
 
-def getlabelmat(mask,idx):
-    x=np.zeros((mask.shape[0],mask.shape[1],3))
-    x[mask==idx,0]=colors[idx][0]
-    x[mask==idx,1]=colors[idx][1]
-    x[mask==idx,2]=colors[idx][2]
-    return x
+
+def getlabelmat(mask, idx):
+    x = np.zeros((mask.shape[0], mask.shape[1], 3))
+    x[mask == idx, 0] = colors[idx][0]
+    x[mask == idx, 1] = colors[idx][1]
+    x[mask == idx, 2] = colors[idx][2]
+    return x
+
 
 def colorMask(mask):
-    x=np.zeros((mask.shape[0],mask.shape[1],3))
+    x = np.zeros((mask.shape[0], mask.shape[1], 3))
     for idx in range(19):
-        x=x+getlabelmat(mask,idx)
+        x = x + getlabelmat(mask, idx)
     return np.uint8(x)
 
-def getface(input_image,cFlag):
-    save_pth = baseLoc+'weights/faceparse/79999_iter.pth'
+
+def getface(input_image, cFlag):
+    save_pth = baseLoc + 'weights/faceparse/79999_iter.pth'
     input_image = Image.fromarray(input_image)
     n_classes = 19
@@ -60,16 +64,13 @@ def getface(input_image,cFlag):
     else:
         net.load_state_dict(torch.load(save_pth, map_location=lambda storage, loc: storage))
 
-
     net.eval()
-
     to_tensor = transforms.Compose([
         transforms.ToTensor(),
         transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
     ])
-
     with torch.no_grad():
         img = input_image.resize((512, 512), Image.BILINEAR)
         img = to_tensor(img)
@@ -81,15 +82,16 @@ def getface(input_image,cFlag):
         parsing = out.squeeze(0).cpu().numpy().argmax(0)
     else:
         parsing = out.squeeze(0).numpy().argmax(0)
-    
+
     parsing = Image.fromarray(np.uint8(parsing))
-    parsing = parsing.resize(input_image.size)
+    parsing = parsing.resize(input_image.size)
     parsing = np.array(parsing)
     return parsing
 
+
 def getSeg(input_image):
-    model = torch.load(baseLoc+'deeplabv3+model.pt')
+    model = torch.load(baseLoc + 'deeplabv3+model.pt')
     model.eval()
     preprocess = transforms.Compose([
         transforms.ToTensor(),
@@ -99,7 +101,7 @@ def getSeg(input_image):
 
     input_image = Image.fromarray(input_image)
     input_tensor = preprocess(input_image)
-    input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
+    input_batch = input_tensor.unsqueeze(0)  # create a mini-batch as expected by the model
 
     # move the input and model to GPU for speed if available
     if torch.cuda.is_available():
@@ -110,7 +112,6 @@
         output = model(input_batch)['out'][0]
     output_predictions = output.argmax(0)
 
-
     # create a color pallette, selecting a color for each class
     palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
     colors = torch.as_tensor([i for i in range(21)])[:, None] * palette
@@ -119,39 +120,42 @@ def getSeg(input_image):
     r = Image.fromarray(output_predictions.byte().cpu().numpy()).resize(input_image.size)
 
     tmp = np.array(r)
-    tmp2 = 10*np.repeat(tmp[:, :, np.newaxis], 3, axis=2)
-
-    return tmp2
-
-def channelData(layer):#convert gimp image to numpy
-    region=layer.get_pixel_rgn(0, 0, layer.width,layer.height)
-    pixChars=region[:,:] # Take whole layer
-    bpp=region.bpp
-    return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp)
-
-def createResultLayer(image,name,result):
-    rlBytes=np.uint8(result).tobytes();
-    rl=gimp.Layer(image,name,image.width,image.height,0,100,NORMAL_MODE)
-    region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True)
-    region[:,:]=rlBytes
-    image.add_layer(rl,0)
+    tmp2 = 10 * np.repeat(tmp[:, :, np.newaxis], 3, axis=2)
+
+    return tmp2
+
+
+def channelData(layer):  # convert gimp image to numpy
+    region = layer.get_pixel_rgn(0, 0, layer.width, layer.height)
+    pixChars = region[:, :]  # Take whole layer
+    bpp = region.bpp
+    return np.frombuffer(pixChars, dtype=np.uint8).reshape(layer.height, layer.width, bpp)
+
+
+def createResultLayer(image, name, result):
+    rlBytes = np.uint8(result).tobytes()
+    rl = gimp.Layer(image, name, image.width, image.height, 0, 100, NORMAL_MODE)
+    region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True)
+    region[:, :] = rlBytes
+    image.add_layer(rl, 0)
     gimp.displays_flush()
 
-def faceparse(img, layer,cFlag) :
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Running face parse for " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Running face parse for " + layer.name + "...")
+def faceparse(img, layer, cFlag):
     imgmat = channelData(layer)
     if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy=getface(imgmat,cFlag)
-    cpy = colorMask(cpy)
-    createResultLayer(img,'new_output',cpy)
-
+        imgmat = imgmat[:, :, 0:3]
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Running face parse for " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Running face parse for " + layer.name + "...")
+        cpy = getface(imgmat, cFlag)
+        cpy = colorMask(cpy)
+        createResultLayer(img, 'new_output', cpy)
-
 
 register(
     "faceparse",
@@ -161,11 +165,11 @@ register(
     "Your",
     "2020",
     "faceparse...",
-    "*",      # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
-    [ (PF_IMAGE, "image", "Input image", None),
-      (PF_DRAWABLE, "drawable", "Input drawable", None),
-      (PF_BOOL, "fcpu", "Force CPU", False)
-    ],
+    "*",  # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
+    [(PF_IMAGE, "image", "Input image", None),
+     (PF_DRAWABLE, "drawable", "Input drawable", None),
+     (PF_BOOL, "fcpu", "Force CPU", False)
+     ],
     [],
     faceparse,
     menu="/Layer/GIML-ML")
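colorMask() builds the colored parsing map by summing one per-label RGB mask for each of the 19 classes. An equivalent way to get the same result, shown here with a truncated palette purely for illustration and not as something the plugin does, is to index the palette array directly with the integer label map:

```python
import numpy as np

palette = np.array([[0, 0, 0], [204, 0, 0], [0, 255, 255]], dtype=np.uint8)  # first 3 of the 19 colors
labels = np.array([[0, 1], [2, 1]])   # toy 2x2 parsing result
colored = palette[labels]             # shape (2, 2, 3), same values as the summed per-label masks
print(colored[0, 1])                  # [204   0   0]
```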
diff --git a/gimp-plugins/ideepcolor/data/colorize_image.py b/gimp-plugins/ideepcolor/data/colorize_image.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/ideepcolor/models/pytorch/model.py b/gimp-plugins/ideepcolor/models/pytorch/model.py
old mode 100755
new mode 100644
diff --git a/gimp-plugins/interpolateframes.py b/gimp-plugins/interpolateframes.py
index 96cc8f0..b608fe5 100755
--- a/gimp-plugins/interpolateframes.py
+++ b/gimp-plugins/interpolateframes.py
@@ -87,21 +87,25 @@ def getinter(img_s, img_e, c_flag, string_path):
 
 
 def interpolateframes(imggimp, curlayer, string_path, layer_s, layer_e, c_flag):
-    if torch.cuda.is_available() and not c_flag:
-        gimp.progress_init("(Using GPU) Running slomo and saving frames in "+string_path)
-        # device = torch.device("cuda")
-    else:
-        gimp.progress_init("(Using CPU) Running slomo and saving frames in "+string_path)
-        # device = torch.device("cpu")
-
     layer_1 = channelData(layer_s)
     layer_2 = channelData(layer_e)
-    if layer_1.shape[2] == 4:  # get rid of alpha channel
-        layer_1 = layer_1[:, :, 0:3]
-    if layer_2.shape[2] == 4:  # get rid of alpha channel
-        layer_2 = layer_2[:, :, 0:3]
-    getinter(layer_1, layer_2, c_flag, string_path)
-    # pdb.gimp_message("Saved")
+
+    if layer_1.shape[0] != imggimp.height or layer_1.shape[1] != imggimp.width or layer_2.shape[0] != imggimp.height or layer_2.shape[1] != imggimp.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) for both layers and try again.")
+    else:
+        if torch.cuda.is_available() and not c_flag:
+            gimp.progress_init("(Using GPU) Running slomo and saving frames in "+string_path)
+            # device = torch.device("cuda")
+        else:
+            gimp.progress_init("(Using CPU) Running slomo and saving frames in "+string_path)
+            # device = torch.device("cpu")
+
+        if layer_1.shape[2] == 4:  # get rid of alpha channel
+            layer_1 = layer_1[:, :, 0:3]
+        if layer_2.shape[2] == 4:  # get rid of alpha channel
+            layer_2 = layer_2[:, :, 0:3]
+        getinter(layer_1, layer_2, c_flag, string_path)
+        # pdb.gimp_message("Saved")
 
 
 register(
diff --git a/gimp-plugins/kmeans.py b/gimp-plugins/kmeans.py
index bf59692..28dfcc0 100755
--- a/gimp-plugins/kmeans.py
+++ b/gimp-plugins/kmeans.py
@@ -29,29 +29,32 @@ def createResultLayer(image,name,result):
 
 def kmeans(imggimp, curlayer,layeri,n_clusters,locflag) :
     image = channelData(layeri)
-    if image.shape[2] == 4:  # get rid of alpha channel
-        image = image[:,:,0:3]
-    h,w,d = image.shape
-    # reshape the image to a 2D array of pixels and 3 color values (RGB)
-    pixel_values = image.reshape((-1, 3))
+    if image.shape[0] != imggimp.height or image.shape[1] != imggimp.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if image.shape[2] == 4:  # get rid of alpha channel
+            image = image[:,:,0:3]
+        h,w,d = image.shape
+        # reshape the image to a 2D array of pixels and 3 color values (RGB)
+        pixel_values = image.reshape((-1, 3))
 
-    if locflag:
-        xx,yy = np.meshgrid(range(w),range(h))
-        x = xx.reshape(-1,1)
-        y = yy.reshape(-1,1)
-        pixel_values = np.concatenate((pixel_values,x,y),axis=1)
+        if locflag:
+            xx,yy = np.meshgrid(range(w),range(h))
+            x = xx.reshape(-1,1)
+            y = yy.reshape(-1,1)
+            pixel_values = np.concatenate((pixel_values,x,y),axis=1)
 
-    pixel_values = np.float32(pixel_values)
-    c,out = kmeans2(pixel_values,n_clusters)
-
-    if locflag:
-        c = np.uint8(c[:,0:3])
-    else:
-        c = np.uint8(c)
-
-    segmented_image = c[out.flatten()]
-    segmented_image = segmented_image.reshape((h,w,d))
-    createResultLayer(imggimp,'new_output',segmented_image)
+        pixel_values = np.float32(pixel_values)
+        c,out = kmeans2(pixel_values,n_clusters)
+
+        if locflag:
+            c = np.uint8(c[:,0:3])
+        else:
+            c = np.uint8(c)
+
+        segmented_image = c[out.flatten()]
+        segmented_image = segmented_image.reshape((h,w,d))
+        createResultLayer(imggimp,'new_output',segmented_image)
 
 
 register(
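kmeans.py clusters the flattened RGB values with scipy's kmeans2 and, when locflag is set, appends the (x, y) pixel coordinates as extra features so the segments stay spatially coherent. A standalone sketch of the same pipeline on a random image, with illustrative array names:

```python
import numpy as np
from scipy.cluster.vq import kmeans2

img = np.random.randint(0, 255, (32, 32, 3)).astype(np.float32)
h, w, d = img.shape
pixel_values = img.reshape(-1, 3)

# Optional spatial features, matching the locflag branch of the plugin.
xx, yy = np.meshgrid(range(w), range(h))
features = np.concatenate((pixel_values,
                           xx.reshape(-1, 1).astype(np.float32),
                           yy.reshape(-1, 1).astype(np.float32)), axis=1)

centroids, labels = kmeans2(features, 4)                # same call as the plugin
segmented = np.uint8(centroids[:, 0:3])[labels].reshape(h, w, d)
print(segmented.shape)  # (32, 32, 3)
```

Each pixel is replaced by the RGB part of its cluster centroid, which is exactly how the plugin builds the segmented result layer.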
diff --git a/gimp-plugins/monodepth.py b/gimp-plugins/monodepth.py
index 575477c..231e9ca 100755
--- a/gimp-plugins/monodepth.py
+++ b/gimp-plugins/monodepth.py
@@ -43,15 +43,19 @@ def createResultLayer(image, name, result):
 
 
 def MonoDepth(img, layer,cFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Generating disparity map for " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Generating disparity map for " + layer.name + "...")
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy = getMonoDepth(imgmat,cFlag)
-    createResultLayer(img, 'new_output', cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Generating disparity map for " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Generating disparity map for " + layer.name + "...")
+
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:,:,0:3]
+        cpy = getMonoDepth(imgmat,cFlag)
+        createResultLayer(img, 'new_output', cpy)
 
 
 register(
diff --git a/gimp-plugins/semseg.py b/gimp-plugins/semseg.py
index 2abb23b..4d036d0 100755
--- a/gimp-plugins/semseg.py
+++ b/gimp-plugins/semseg.py
@@ -62,16 +62,19 @@ def createResultLayer(image,name,result):
     gimp.displays_flush()
 
 def deeplabv3(img, layer,cFlag) :
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Generating semantic segmentation map for " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Generating semantic segmentation map for " + layer.name + "...")
-
     imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:,:,0:3]
-    cpy=getSeg(imgmat,cFlag)
-    createResultLayer(img,'new_output',cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
+    else:
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Generating semantic segmentation map for " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Generating semantic segmentation map for " + layer.name + "...")
+
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:,:,0:3]
+        cpy=getSeg(imgmat,cFlag)
+        createResultLayer(img,'new_output',cpy)
 
 
 register(
diff --git a/gimp-plugins/super_resolution.py b/gimp-plugins/super_resolution.py
index 2b49ef9..1d36027 100755
--- a/gimp-plugins/super_resolution.py
+++ b/gimp-plugins/super_resolution.py
@@ -117,20 +117,23 @@ def createResultLayer(image, name, result):
     gimp.displays_flush()
 
 def super_resolution(img, layer, scale, cFlag, fFlag):
-    if torch.cuda.is_available() and not cFlag:
-        gimp.progress_init("(Using GPU) Running super-resolution for " + layer.name + "...")
-    else:
-        gimp.progress_init("(Using CPU) Running super-resolution for " + layer.name + "...")
-
    imgmat = channelData(layer)
-    if imgmat.shape[2] == 4:  # get rid of alpha channel
-        imgmat = imgmat[:, :, 0:3]
-    cpy = getnewimg(imgmat, scale, cFlag, fFlag)
-    cpy = cv2.resize(cpy, (0, 0), fx=scale / 4, fy=scale / 4)
-    if scale==1:
-        createResultLayer(img, layer.name + '_super', cpy)
+    if imgmat.shape[0] != img.height or imgmat.shape[1] != img.width:
+        pdb.gimp_message(" Do (Layer -> Layer to Image Size) first and try again.")
     else:
-        createResultFile(layer.name + '_super', cpy)
+        if torch.cuda.is_available() and not cFlag:
+            gimp.progress_init("(Using GPU) Running super-resolution for " + layer.name + "...")
+        else:
+            gimp.progress_init("(Using CPU) Running super-resolution for " + layer.name + "...")
+
+        if imgmat.shape[2] == 4:  # get rid of alpha channel
+            imgmat = imgmat[:, :, 0:3]
+        cpy = getnewimg(imgmat, scale, cFlag, fFlag)
+        cpy = cv2.resize(cpy, (0, 0), fx=scale / 4, fy=scale / 4)
+        if scale==1:
+            createResultLayer(img, layer.name + '_super', cpy)
+        else:
+            createResultFile(layer.name + '_super', cpy)
 
 
 register(