From fdf3b77842d848de811ebdb841b94444c70fb6e7 Mon Sep 17 00:00:00 2001
From: kritiksoman
Date: Fri, 17 Jul 2020 09:30:16 +0530
Subject: [PATCH] julyUpdate

---
 gimp-plugins/MiDaS/LICENSE | 50 ++
 gimp-plugins/MiDaS/MiDaS_utils.py | 192 ++++
 gimp-plugins/MiDaS/MiDaS_utils.pyc | Bin 0 -> 5298 bytes
 gimp-plugins/MiDaS/__init__.py | 0
 gimp-plugins/MiDaS/__init__.pyc | Bin 0 -> 164 bytes
 gimp-plugins/MiDaS/monodepth_net.py | 186 ++++
 gimp-plugins/MiDaS/monodepth_net.pyc | Bin 0 -> 6822 bytes
 gimp-plugins/MiDaS/run.py | 78 +++
 gimp-plugins/MiDaS/run.pyc | Bin 0 -> 1594 bytes
 gimp-plugins/color_palette.png | Bin 0 -> 34519 bytes
 gimp-plugins/colorize.py | 116 ----
 gimp-plugins/colorpalette.py | 60 ++
 gimp-plugins/deepcolor.py | 85 +++
 .../__pycache__/model.cpython-38.pyc | Bin 9174 -> 0 bytes
 .../__pycache__/resnet.cpython-38.pyc | Bin 3614 -> 0 bytes
 gimp-plugins/ideepcolor/LICENSE | 21 +
 gimp-plugins/ideepcolor/data/__init__.py | 0
 gimp-plugins/ideepcolor/data/__init__.pyc | Bin 0 -> 159 bytes
 .../ideepcolor/data/color_bins/in_hull.npy | Bin 0 -> 609 bytes
 .../ideepcolor/data/color_bins/pts_grid.npy | Bin 0 -> 8544 bytes
 .../data/color_bins/pts_in_hull.npy | Bin 0 -> 5088 bytes
 .../ideepcolor/data/colorize_image.py | 565 ++++++++++++++++
 .../ideepcolor/data/colorize_image.pyc | Bin 0 -> 21215 bytes
 gimp-plugins/ideepcolor/data/lab_gamut.py | 90 +++
 gimp-plugins/ideepcolor/models/__init__.py | 0
 gimp-plugins/ideepcolor/models/__init__.pyc | Bin 0 -> 161 bytes
 .../ideepcolor/models/pytorch/__init__.py | 0
 .../ideepcolor/models/pytorch/__init__.pyc | Bin 0 -> 169 bytes
 .../ideepcolor/models/pytorch/model.py | 175 +++++
 .../ideepcolor/models/pytorch/model.pyc | Bin 0 -> 5694 bytes
 gimp-plugins/monodepth.py | 129 +---
 gimp-plugins/monodepth2/LICENSE.txt | 181 -----
 gimp-plugins/monodepth2/evaluate_depth.py | 230 -------
 gimp-plugins/monodepth2/evaluate_pose.py | 134 ----
 gimp-plugins/monodepth2/export_gt_depth.py | 65 --
 gimp-plugins/monodepth2/layers.py | 269 --------
 gimp-plugins/monodepth2/layers.pyc | Bin 10823 -> 0 bytes
 gimp-plugins/monodepth2/networks/__init__.py | 4 -
 gimp-plugins/monodepth2/networks/__init__.pyc | Bin 397 -> 0 bytes
 .../monodepth2/networks/depth_decoder.py | 65 --
 .../monodepth2/networks/depth_decoder.pyc | Bin 2358 -> 0 bytes
 gimp-plugins/monodepth2/networks/pose_cnn.py | 50 --
 gimp-plugins/monodepth2/networks/pose_cnn.pyc | Bin 1873 -> 0 bytes
 .../monodepth2/networks/pose_decoder.py | 54 --
 .../monodepth2/networks/pose_decoder.pyc | Bin 2111 -> 0 bytes
 .../monodepth2/networks/resnet_encoder.py | 98 ---
 .../monodepth2/networks/resnet_encoder.pyc | Bin 4657 -> 0 bytes
 gimp-plugins/monodepth2/options.py | 208 ------
 gimp-plugins/monodepth2/test_simple.py | 160 -----
 gimp-plugins/monodepth2/train.py | 18 -
 gimp-plugins/monodepth2/trainer.py | 630 ------------------
 gimp-plugins/monodepth2/utils.py | 114 ----
 gimp-plugins/monodepth2/utils.pyc | Bin 5685 -> 0 bytes
 gimp-plugins/moveWeights.sh | 5 +-
 gimp-plugins/neural-colorization/LICENSE.txt | 23 -
 .../__pycache__/model.cpython-38.pyc | Bin 4119 -> 0 bytes
 .../build_dataset_directory.py | 42 --
 gimp-plugins/neural-colorization/colorize.py | 73 --
 gimp-plugins/neural-colorization/model.py | 123 ----
 gimp-plugins/neural-colorization/model.pyc | Bin 6371 -> 0 bytes
 .../neural-colorization/resize_all_imgs.py | 35 -
 gimp-plugins/neural-colorization/train.py | 186 ------
 .../__pycache__/srresnet.cpython-38.pyc | Bin 3791 -> 0 bytes
 63 files changed, 1541 insertions(+), 2973 deletions(-)
 create mode 100644
gimp-plugins/MiDaS/LICENSE create mode 100644 gimp-plugins/MiDaS/MiDaS_utils.py create mode 100644 gimp-plugins/MiDaS/MiDaS_utils.pyc create mode 100644 gimp-plugins/MiDaS/__init__.py create mode 100644 gimp-plugins/MiDaS/__init__.pyc create mode 100644 gimp-plugins/MiDaS/monodepth_net.py create mode 100644 gimp-plugins/MiDaS/monodepth_net.pyc create mode 100644 gimp-plugins/MiDaS/run.py create mode 100644 gimp-plugins/MiDaS/run.pyc create mode 100644 gimp-plugins/color_palette.png delete mode 100755 gimp-plugins/colorize.py create mode 100755 gimp-plugins/colorpalette.py create mode 100755 gimp-plugins/deepcolor.py delete mode 100755 gimp-plugins/face-parsing.PyTorch/__pycache__/model.cpython-38.pyc delete mode 100755 gimp-plugins/face-parsing.PyTorch/__pycache__/resnet.cpython-38.pyc create mode 100644 gimp-plugins/ideepcolor/LICENSE create mode 100644 gimp-plugins/ideepcolor/data/__init__.py create mode 100644 gimp-plugins/ideepcolor/data/__init__.pyc create mode 100644 gimp-plugins/ideepcolor/data/color_bins/in_hull.npy create mode 100644 gimp-plugins/ideepcolor/data/color_bins/pts_grid.npy create mode 100644 gimp-plugins/ideepcolor/data/color_bins/pts_in_hull.npy create mode 100644 gimp-plugins/ideepcolor/data/colorize_image.py create mode 100644 gimp-plugins/ideepcolor/data/colorize_image.pyc create mode 100644 gimp-plugins/ideepcolor/data/lab_gamut.py create mode 100644 gimp-plugins/ideepcolor/models/__init__.py create mode 100644 gimp-plugins/ideepcolor/models/__init__.pyc create mode 100644 gimp-plugins/ideepcolor/models/pytorch/__init__.py create mode 100644 gimp-plugins/ideepcolor/models/pytorch/__init__.pyc create mode 100644 gimp-plugins/ideepcolor/models/pytorch/model.py create mode 100644 gimp-plugins/ideepcolor/models/pytorch/model.pyc delete mode 100644 gimp-plugins/monodepth2/LICENSE.txt delete mode 100755 gimp-plugins/monodepth2/evaluate_depth.py delete mode 100755 gimp-plugins/monodepth2/evaluate_pose.py delete mode 100755 gimp-plugins/monodepth2/export_gt_depth.py delete mode 100755 gimp-plugins/monodepth2/layers.py delete mode 100644 gimp-plugins/monodepth2/layers.pyc delete mode 100755 gimp-plugins/monodepth2/networks/__init__.py delete mode 100644 gimp-plugins/monodepth2/networks/__init__.pyc delete mode 100755 gimp-plugins/monodepth2/networks/depth_decoder.py delete mode 100644 gimp-plugins/monodepth2/networks/depth_decoder.pyc delete mode 100755 gimp-plugins/monodepth2/networks/pose_cnn.py delete mode 100644 gimp-plugins/monodepth2/networks/pose_cnn.pyc delete mode 100755 gimp-plugins/monodepth2/networks/pose_decoder.py delete mode 100644 gimp-plugins/monodepth2/networks/pose_decoder.pyc delete mode 100755 gimp-plugins/monodepth2/networks/resnet_encoder.py delete mode 100644 gimp-plugins/monodepth2/networks/resnet_encoder.pyc delete mode 100755 gimp-plugins/monodepth2/options.py delete mode 100755 gimp-plugins/monodepth2/test_simple.py delete mode 100755 gimp-plugins/monodepth2/train.py delete mode 100755 gimp-plugins/monodepth2/trainer.py delete mode 100755 gimp-plugins/monodepth2/utils.py delete mode 100755 gimp-plugins/monodepth2/utils.pyc delete mode 100644 gimp-plugins/neural-colorization/LICENSE.txt delete mode 100755 gimp-plugins/neural-colorization/__pycache__/model.cpython-38.pyc delete mode 100755 gimp-plugins/neural-colorization/build_dataset_directory.py delete mode 100755 gimp-plugins/neural-colorization/colorize.py delete mode 100755 gimp-plugins/neural-colorization/model.py delete mode 100755 gimp-plugins/neural-colorization/model.pyc delete mode 
100755 gimp-plugins/neural-colorization/resize_all_imgs.py delete mode 100755 gimp-plugins/neural-colorization/train.py delete mode 100755 gimp-plugins/pytorch-SRResNet/__pycache__/srresnet.cpython-38.pyc diff --git a/gimp-plugins/MiDaS/LICENSE b/gimp-plugins/MiDaS/LICENSE new file mode 100644 index 0000000..2f4969f --- /dev/null +++ b/gimp-plugins/MiDaS/LICENSE @@ -0,0 +1,50 @@ + +MIT License + +Copyright (c) 2020 Virginia Tech Vision and Learning Lab + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ LICENSE FOR MiDaS -------------------- + +MIT License + +Copyright (c) 2019 Intel ISL (Intel Intelligent Systems Lab) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------------- LICENSE FOR EdgeConnect -------------------------------- + +Attribution-NonCommercial 4.0 International \ No newline at end of file diff --git a/gimp-plugins/MiDaS/MiDaS_utils.py b/gimp-plugins/MiDaS/MiDaS_utils.py new file mode 100644 index 0000000..61ce942 --- /dev/null +++ b/gimp-plugins/MiDaS/MiDaS_utils.py @@ -0,0 +1,192 @@ +"""Utils for monoDepth. +""" +import sys +import re +import numpy as np +import cv2 +import torch +# import imageio + + +def read_pfm(path): + """Read pfm file. 
+ + Args: + path (str): path to file + + Returns: + tuple: (data, scale) + """ + with open(path, "rb") as file: + + color = None + width = None + height = None + scale = None + endian = None + + header = file.readline().rstrip() + if header.decode("ascii") == "PF": + color = True + elif header.decode("ascii") == "Pf": + color = False + else: + raise Exception("Not a PFM file: " + path) + + dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii")) + if dim_match: + width, height = list(map(int, dim_match.groups())) + else: + raise Exception("Malformed PFM header.") + + scale = float(file.readline().decode("ascii").rstrip()) + if scale < 0: + # little-endian + endian = "<" + scale = -scale + else: + # big-endian + endian = ">" + + data = np.fromfile(file, endian + "f") + shape = (height, width, 3) if color else (height, width) + + data = np.reshape(data, shape) + data = np.flipud(data) + + return data, scale + + +def write_pfm(path, image, scale=1): + """Write pfm file. + + Args: + path (str): pathto file + image (array): data + scale (int, optional): Scale. Defaults to 1. + """ + + with open(path, "wb") as file: + color = None + + if image.dtype.name != "float32": + raise Exception("Image dtype must be float32.") + + image = np.flipud(image) + + if len(image.shape) == 3 and image.shape[2] == 3: # color image + color = True + elif ( + len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1 + ): # greyscale + color = False + else: + raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.") + + file.write("PF\n" if color else "Pf\n".encode()) + file.write("%d %d\n".encode() % (image.shape[1], image.shape[0])) + + endian = image.dtype.byteorder + + if endian == "<" or endian == "=" and sys.byteorder == "little": + scale = -scale + + file.write("%f\n".encode() % scale) + + image.tofile(file) + + +def read_image(path): + """Read image and output RGB image (0-1). + + Args: + path (str): path to file + + Returns: + array: RGB image (0-1) + """ + img = cv2.imread(path) + + if img.ndim == 2: + img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0 + + return img + + +def resize_image(img): + """Resize image and make it fit for network. + + Args: + img (array): image + + Returns: + tensor: data ready for network + """ + height_orig = img.shape[0] + width_orig = img.shape[1] + unit_scale = 384. + + if width_orig > height_orig: + scale = width_orig / unit_scale + else: + scale = height_orig / unit_scale + + height = (np.ceil(height_orig / scale / 32) * 32).astype(int) + width = (np.ceil(width_orig / scale / 32) * 32).astype(int) + + img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA) + + img_resized = ( + torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float() + ) + img_resized = img_resized.unsqueeze(0) + + return img_resized + + +def resize_depth(depth, width, height): + """Resize depth map and bring to CPU (numpy). + + Args: + depth (tensor): depth + width (int): image width + height (int): image height + + Returns: + array: processed depth + """ + depth = torch.squeeze(depth[0, :, :, :]).to("cpu") + depth = cv2.blur(depth.numpy(), (3, 3)) + depth_resized = cv2.resize( + depth, (width, height), interpolation=cv2.INTER_AREA + ) + + return depth_resized + +def write_depth(path, depth, bits=1): + """Write depth map to pfm and png file. 
+ + Args: + path (str): filepath without extension + depth (array): depth + """ + # write_pfm(path + ".pfm", depth.astype(np.float32)) + + depth_min = depth.min() + depth_max = depth.max() + + max_val = (2**(8*bits))-1 + + if depth_max - depth_min > np.finfo("float").eps: + out = max_val * (depth - depth_min) / (depth_max - depth_min) + else: + out = 0 + + if bits == 1: + cv2.imwrite(path + ".png", out.astype("uint8")) + elif bits == 2: + cv2.imwrite(path + ".png", out.astype("uint16")) + + return \ No newline at end of file diff --git a/gimp-plugins/MiDaS/MiDaS_utils.pyc b/gimp-plugins/MiDaS/MiDaS_utils.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daa807f0a3a6d74e0339d91388bb4cb5bd0a8484 GIT binary patch literal 5298 zcmc&&&uOn@vQ-X}Ek55mA+0}GwkEA}%3{e? zI&d7FKl7BGQhhA=*=hENhJR0{)ZUaT|Eg?5*=c2)toCQ<2U;=0L)cW*lL?hIW#~yN zIqacl{N5kXEdT~s+a8uTrM8tS-}d6Vth1`7(U9SA8m!w{)t}=id(ce7Urbe3dsytc zw>P2ouvbmht*8u2sdixiH_`14wFhN`KB5aXhpFMr(Br?Vg7NwTZ19hM46)ui#M9|R zggT0YqB{t>NvancjR1c)$}O`jCDw|__JWqN<+){7wz(KP+fdtKnLT4}hefKFgH{~b z=zL&0QL4|$SeB43H*s!CMW&M^qFB4_e!6B3`qkV9QLuLVuFS9;i1Ops!}y(Z56$T3 zw7~FglmdeT9gB4jtI*|Qf+3V83a^U1CbFAUICSd>8z&(0LT6G~%`*)^z%BGo6I5!f zV690Z#aMUpSc~mPuvNoHl# ztuh}LMh5Gqd1R$~R){CvG9U0LfLpp{dQsuL>ZVCCj9UyZ8J))>1+nI)cJeeYWz?M{ zMkCkK)5%uPic*e3lsb!(C>ynN<772)GH4$-&*O95=s^oZDG_E3rKS5umu6|ZOl-1k z@t?hPtQ8`#E^S!t`Gq7aBCI~iwwCTDH=}hY+e326VzE0u zgEo-J6`F5z!unyq=8gXOhrM~P&d;CoPeFUyZ{oKJi$IJM6<;DY`%S)lwISp|#$O6fO+ zraa}jX0VD=Id4IE3!~DsS^ym!ga6)W@j}0(sHV90p~v!!xDa~aHc()EXjBCwO_O;+ z>*7WXx{;~TFdlQ3jhx^CHW{2V2hMqeC-EDI2f&T~??`8#b4W=1O9s)F4q8!JM!Vo8 zqIFDI0`#B-yqynniO(p7%{r-QF}SI_(J-|pz>pUg5146ZQ$kJx^719~7X19aOcLAO zLI;DPvB9PeM(vBjDG4b}AZMCAvV%%UM+ z*KRl5Z?|Cx-ZODvRA=F7YMK-?RFg$^csL`P5b|RXZSGo~mt+d|G`mI&Y)qn-tv0Fo1a796~?Ed6LLgRLt8DAQ7)7)G+I}KpyY=br8o%?;XgqUc*1`cSspL zNG@#!pIL`M*3!pSjC)^#-o**BE{6Mi(pq7-(riNRU5^pL>R4Z;9;7l4(tzoDOZE>aym?p)RuaVkL%zmJQp4wWz3*#Po|axdcDPUn_oeM+9aRo5!8sk zL9~skMb3o}{DeGcCoi{O3P2p=0qx2@zbX#|x#gv!=76WN`|Q{bf^f#sr^BIod_4)E z5-fqfby?(T!~{#gD)Hkecid4DSw~}JH-T$MC#gUOTXHAnQTd4Nz4wa!z{6Fxvm^xehXc~Z|---R=1tE@#~HpUIA_@7dwwo zHzy!eea4#sfi}Gvzu})o)^)0K)}Qqy#p12RTSp>k2Zb?d0(l@l{P#O0s;cD|B(2 zM;n8u%)x{S7Nb@jAs0hQHo~h=38}i=qrxhKI=lfP$;$|Y@}97{!#odZGab5zfh<1! z9y?u!Xt7bSFEb1DLV{2tP;~zX1j@X>gBOmyr6MTZptN8?WhsnOsGB8J8ZZSXEMLG2$ns@Q z3>QG~fAm79k-XS<65B&!5$MOnGdOgNQ#Zju?mR}h<5ogSa$2D`0ixlKFTw}G$`I82 z9pO@zF21)aC={#?pdc;>(PPOr#lD+l-CWeVFb2e literal 0 HcmV?d00001 diff --git a/gimp-plugins/MiDaS/monodepth_net.py b/gimp-plugins/MiDaS/monodepth_net.py new file mode 100644 index 0000000..a1a9bfc --- /dev/null +++ b/gimp-plugins/MiDaS/monodepth_net.py @@ -0,0 +1,186 @@ +"""MonoDepthNet: Network for monocular depth estimation trained by mixing several datasets. +This file contains code that is adapted from +https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py +""" +import torch +import torch.nn as nn +from torchvision import models + + +class MonoDepthNet(nn.Module): + """Network for monocular depth estimation. + """ + + def __init__(self, path=None, features=256): + """Init. + + Args: + path (str, optional): Path to saved model. Defaults to None. + features (int, optional): Number of features. Defaults to 256. 
+ """ + super(MonoDepthNet,self).__init__() + + resnet = models.resnet50(pretrained=False) + + self.pretrained = nn.Module() + self.scratch = nn.Module() + self.pretrained.layer1 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, + resnet.maxpool, resnet.layer1) + + self.pretrained.layer2 = resnet.layer2 + self.pretrained.layer3 = resnet.layer3 + self.pretrained.layer4 = resnet.layer4 + + # adjust channel number of feature maps + self.scratch.layer1_rn = nn.Conv2d(256, features, kernel_size=3, stride=1, padding=1, bias=False) + self.scratch.layer2_rn = nn.Conv2d(512, features, kernel_size=3, stride=1, padding=1, bias=False) + self.scratch.layer3_rn = nn.Conv2d(1024, features, kernel_size=3, stride=1, padding=1, bias=False) + self.scratch.layer4_rn = nn.Conv2d(2048, features, kernel_size=3, stride=1, padding=1, bias=False) + + self.scratch.refinenet4 = FeatureFusionBlock(features) + self.scratch.refinenet3 = FeatureFusionBlock(features) + self.scratch.refinenet2 = FeatureFusionBlock(features) + self.scratch.refinenet1 = FeatureFusionBlock(features) + + # adaptive output module: 2 convolutions and upsampling + self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), + nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1), + Interpolate(scale_factor=2, mode='bilinear')) + + # load model + if path: + self.load(path) + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input data (image) + + Returns: + tensor: depth + """ + layer_1 = self.pretrained.layer1(x) + layer_2 = self.pretrained.layer2(layer_1) + layer_3 = self.pretrained.layer3(layer_2) + layer_4 = self.pretrained.layer4(layer_3) + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + path_4 = self.scratch.refinenet4(layer_4_rn) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv(path_1) + + return out + + def load(self, path): + """Load model from file. + + Args: + path (str): file path + """ + parameters = torch.load(path) + + self.load_state_dict(parameters) + + +class Interpolate(nn.Module): + """Interpolation module. + """ + + def __init__(self, scale_factor, mode): + """Init. + + Args: + scale_factor (float): scaling + mode (str): interpolation mode + """ + super(Interpolate, self).__init__() + + self.interp = nn.functional.interpolate + self.scale_factor = scale_factor + self.mode = mode + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: interpolated data + """ + x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=False) + + return x + + +class ResidualConvUnit(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features): + """Init. + + Args: + features (int): number of features + """ + super(ResidualConvUnit,self).__init__() + + self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True) + self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=False) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """Forward pass. 
+ + Args: + x (tensor): input + + Returns: + tensor: output + """ + out = self.relu(x) + out = self.conv1(out) + out = self.relu(out) + out = self.conv2(out) + + return out + x + + +class FeatureFusionBlock(nn.Module): + """Feature fusion block. + """ + + def __init__(self, features): + """Init. + + Args: + features (int): number of features + """ + super(FeatureFusionBlock,self).__init__() + + self.resConfUnit = ResidualConvUnit(features) + + def forward(self, *xs): + """Forward pass. + + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + output += self.resConfUnit(xs[1]) + + output = self.resConfUnit(output) + output = nn.functional.interpolate(output, scale_factor=2, + mode='bilinear', align_corners=True) + + return output diff --git a/gimp-plugins/MiDaS/monodepth_net.pyc b/gimp-plugins/MiDaS/monodepth_net.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdbdd6a004beacf6818e906c341f6833f6f06e95 GIT binary patch literal 6822 zcmd5=-*XgM6}~-_Ofmx@Y*-e;ve4)%L%C#@ghfs1>H?x`L8Lm%gQ?Q4PWK(sNl*9K z_YM#xRa=!UpZo`Wuq>Z_@_+F^u`D0;`_7%2o*@fMQAW2XeRA&4zW4n2&bhb#kA+t2 z&wu}QtcFh=|9_0v{1GHkY75j;Nlj&*+5+SFn%b?at-2KJDw|QgS+zB*ykth{y4r21 zF8a-?FO)h|>X%z{DrrnK%#9o7RWd)(&>S~3RnnShI5BQ$aV?welU(9IK^_!&aaZ^4 z&W5(@KFHIe-1ECd>G#kQ53;EA6Sn)>*t8efw8(v1Mrp1Se|z8WrO(p*v2XMfT}GLo zL^d+onw8dPJE`%zX{P1Rn4?=3z1EJkeY4)_JWlP-V0$Gl zdL6q{^dhs{??!p2zi*2&-U&a5-%9_D zXQS<5|F!|r!@@FSDU?ZhOeys?8chd8L#faXXpK5@8vUZCwq{gPr<|e4WJZ{V3O3vP zfcXIA^@O|9^6{h!i;HPYGk8zny@S`ZYgjSfF}u z9NW+@oS!Z>rPxx>G3tcSlR_7So)WsK4re)bcV2~yQgeD#1cQNZjf!AUTpAUZWb!k# zvgeQwOdJb?KpH9$D~7#tk+m64-c;e+qyEj;>wk9CAM1ae`QI7!$4+0T|2bvPv*)3R zm@8a9Qgh)*&AUfxF3NXX!>{wxC%;4*hqXF21#zJjB03^2c6d)E3o1FKl0}uAhE8jT zb&)pKc1b<=RQZ05!n{l#%l2|W&Rg*DC0fkiKt9b=yV4T7qHTRrJ~r#)5^MSq{JU*z z`GH^b>ETg!ZQXy!T3h%gdICQcA6fD5>TWd1tYO1Ok!xqJoPTd%DbZ7Mk?8I;C@n_hQ{?qEqqaIS!1H z1ej#$IooMuQU$L`=}4)X2l<=>1pwnH(_uG?fi5ZfC>g?yTJ*!%3}_RT8*QI$vmj>B z*JU7IVvjIPA=)quz$+Ppy#t;;`cQh`i!xv#C(d(O>4PE}WIB*faiu0MBOC9?b~p8J z20FKCl*w3t=93%nYb+zbA?qr2Hn1Gki=Oq1B9j@jXkV8%Dt5JE*DCg=uy-)!YGRw1 z!VL?{yeh3umDZ+8H(8=N-E;cAH!v`g+gTCs$=b&#dvm(DHeFnuF5VE8ewqWr`$ZO6 zE#+ch`vV(t|FZ3@h>|u{DJ7$`t}rxNk@(2uWQ$JxZS+<`3^mhvWOQjddu3|VJ%gx{ zcOLG?J5kwtSQfiFwx*Nor)!?;~DW*ih#|dNUVbn$ssS@2H4d<6nLaoGI{<@uP%}3g1;*9Pk4AxUH2`1$ zGiCe5I5gda0t^%Jrkc5hYN2^ z@T(L2+63=hXxN{AM=W$janY(MK3WyUNttZWHA3g|!a5)j*1?0Y4qV`E$L=k%Ib@X7 z@GY#*P>`xQbd8aV;Qv|v;7Z~obPzg=IEfDidC!X;GI9?Osm~^JBV~R8avzZ4;+15V zl7+s~swfbNf3PY^Sy?eQkc~FiBB(5BXz*j*Px5V~Pc>m?n0Qlb<^o6+ zVt9CQTX)HZWZ3M-_YV7i59Fm8HlP$}6#`ZzI}CxQ0@cW{D+??!!&hcpIEsu@55L`o zAz9cSYY}zEmdBKe>Bz`75tN3pa<~ya?z+)tTn{Kfk(tvWW0YvTJIG^~*h!GBCeji& zT{)mJ1Qh>-C0BK*KmMp1;Ui)JjH-)X(?v|WHrFL8`VJVVVHZ>m)a2z@O-6Ejt zX)=g1=CzNIhsS7^=M7-u3I+zl`FLURq(HKL1TAQWd?^7j#ovQ@DOwOD#6ST`)yWRW z;YM`?29~(P`>Q%Lwc>f12YH;eakn!kGboRhxY3zM^rQQm`ygM5J)?|S4rxH0{K(O) zGTS;Pl#^CKBb75&AT~i=Nbo=wmPN7NXp310{`>@O75)T-V2LfCl>;3tv60q2KIW`S zW1#Op3f@NjgwE&L4J1kw{@0>;1N7de6E4|omjMKyVL z!td(=H?&^pc>-=u2A5He@yhcC@In_Do}K-!9G-pLV8(mHEM$Z$v^TLe$uQv}l4jss zRo(N!gkXF0d%5r%X1)Jyzu$1cFW$_(J8!0BBz?d!8=%`I=>A{4^)Fb+Fv&eri>VJv zZXWMX+0p}X$h!=`13o*U0fzT#s=PH!+T39Rz`P8Beit1P`RLyh4vo7T+|%P~6~4=R zOwiF)CeVv1hPd@+o1Zq++)G^gmB-N+yfno$O0J_E+>R5lKr%eSklZQD!2w03?YM!LL0pnpy1#*8tBXo(kL_f}6$ym5!O4?+VKX8Ydj~S#z T;0^C25cPC@d2{*l@|FJpH)GNM literal 0 HcmV?d00001 diff --git a/gimp-plugins/MiDaS/run.py b/gimp-plugins/MiDaS/run.py new file mode 100644 index 0000000..d008ea6 --- /dev/null +++ b/gimp-plugins/MiDaS/run.py @@ -0,0 +1,78 @@ +"""Compute depth maps for images in the input folder. 
+""" +# import os +# import glob +import torch +# from monodepth_net import MonoDepthNet +# import utils +# import matplotlib.pyplot as plt +import numpy as np +import cv2 +# import imageio + + +def run_depth(img, model_path, Net, utils, target_w=None): + """Run MonoDepthNN to compute depth maps. + + Args: + input_path (str): path to input folder + output_path (str): path to output folder + model_path (str): path to saved model + """ + # print("initialize") + + # select device + device = torch.device("cpu") + # print("device: %s" % device) + + # load network + model = Net(model_path) + model.to(device) + model.eval() + + # get input + # img_names = glob.glob(os.path.join(input_path, "*")) + # num_images = len(img_names) + + # create output folder + # os.makedirs(output_path, exist_ok=True) + + # print("start processing") + + # for ind, img_name in enumerate(img_names): + + # print(" processing {} ({}/{})".format(img_name, ind + 1, num_images)) + + # input + # img = utils.read_image(img_name) + w = img.shape[1] + scale = 640. / max(img.shape[0], img.shape[1]) + target_height, target_width = int(round(img.shape[0] * scale)), int(round(img.shape[1] * scale)) + img_input = utils.resize_image(img) + # print(img_input.shape) + img_input = img_input.to(device) + # compute + with torch.no_grad(): + out = model.forward(img_input) + + depth = utils.resize_depth(out, target_width, target_height) + img = cv2.resize((img * 255).astype(np.uint8), (target_width, target_height), interpolation=cv2.INTER_AREA) + + + # np.save(filename + '.npy', depth) + # utils.write_depth(filename, depth, bits=2) + depth_min = depth.min() + depth_max = depth.max() + bits = 1 + max_val = (2 ** (8 * bits)) - 1 + + if depth_max - depth_min > np.finfo("float").eps: + out = max_val * (depth - depth_min) / (depth_max - depth_min) + else: + out = 0 + out = out.astype("uint8") + # cv2.imwrite("out.png", out) + return out + # print("finished") + + diff --git a/gimp-plugins/MiDaS/run.pyc b/gimp-plugins/MiDaS/run.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ddbfea29622b5a94820c1e678f070a800b47924 GIT binary patch literal 1594 zcmbtU&2HO95S}F^T9zrxKN1iFJ+LV`Zy$Tg49m+33?N%{ck%uxObnp+p*?zi)|-^|X+{b$gB>;C;~0-NFB`W!!Vf>R1` zjSV_c!bX;9g6V{PA_y`uB;4FS=vRA$E67_bQP#P zo=X#`BCrdI3)-MOOJ#N3S08cAk}i55;O9{k{LG&Mzyd6p#*Kic3$+8a0Ky*B9xQR& zrLCZi3paoWJ(%E8vqdB=dbOes9NjB#92hq}upZd0hXy?uVLbeOqH)mmPUAG&(Db2p zVc8>BqO$|_&OII667g*!^kLMpLx&wU_2K8V3qSlq{^;gIjVd&Cp&5YnVL70nsRyIw zF4!R~_sFd^y1=?`vC%%R~DP+v-YiO z(^q#6xlU#FS$h*-$uwX+TU!?sNy?SN#)CP2INm*1w9M2;ob3Q7RajY-I*YB+g`x+f zdZeTNy~uQI8LiOl+sXJ=VRe-(u%+-~<&{dLB|obzy&HKIXY6AZaVeQZ9{V!{*V#BC+u8+LPzr|P8s7%TgTNWhWrlO#IC+(0&N6_v9Z^9B5^6okg~UZ zoW7s^_$rJ}!&j#*8Dzo9sp3MjM3!bkPqpQk&)Ms~&McnEO(CqxjO7Z&)m++W9a1ok z$S#(uy>1JT#F^wG+tzj=)qG(&@r`7yQoFF*=%Vs@#2v!N;oWnHHeb>+igG0XV25?3 zlK-Zx;fi6QBP{;}Wxx~viA{epvNF@lN?CPjbRHMeH`mD`uJSjPUdqIpX(89s*Xm__ zHmz30ae2*$AdL9fT?}WwqM$p&jxPqHzdsOPitohG89DAf&0B=u0@1(__lnfDX10TS~YO*E@3Qy>PYs4q8PYIvEJyro;!cU3*cP;gl_Q|t9 z%7JzQ&7Z*jRYwtcfBe$`UXQ=|^Zi-I)4ys1Wf?Gk*M=?2c=q=-O!ni?z`+`?fHx!? 
z!)&jQ(-+w%@Qfvk9UM+TYO72Q3Lr$$4(~tPX5fn;BFDtYtY6EK-tbH!50iBmK>@xT zz_EHom2#eEWNzu4=qGbGa44Ab){|Mlt_SIQz5sVVh;iPLXu)*)uQmxL3BDEMGAhCk z8{WTH@SVxr*?myF;r;Ppzp469eiUG)2x!~*7RV@AlV)(^-EbZgr;05gvVguP!a$hjt)r>E!jj1Ia{5%jwp}=#d$+!yN!?2u&sUkd3p<}^ zf%lHCe@|j9#Z{l`;q7QFCdi-+;|5Y9K`*~cd-EWeiT9jNxkmw%FHV39Z(4lacpn`< z7OR!7t9UARL$4k0BVFwyo>IVHl`tCR1>7-V$9a8YLIF3oG%>lYP|%DW#WSXoxCNvn zp}KrXyGr33-5$N93bUr^X9sp@x+Hv1?wUe;0CmV$;HXR)y6!1=Z9~rIC;thcv3*m7 zaABZD6X$1Bl8+V-^@7so@gBWJHQY2!Kw(0uLledN)i}q3@CN~Yz{Mn+hpj`&t(M;7 z^LW;kcBU?$ZQlVO>|4!6fgbIJfQ@rg*u}e@z2Vs^6ntmr+yplmrEwjx1)eg?!YUEe zJQD5fT?sSc>QC%t@8ZeM#Rr1j6g$FMNBJ z5QHt%C7(Bk;_`&rxw^!CbR*y0#EKA(w(V987Q0W0hBl@fChBG$0k_` z`uDg9@u@`&q|uCy7nRQ~8c^Ka8)R_TAuw(-fEMXN-aivz;(FX<;l;)G)CuD1-%FsM zzWv-kjHj~s4cYf1@##?CI*hRy2$?~#hx<#VJGaWjt9~t1A;d`92LoSa&j%F#4iN^P zsN+KnK;)&BU}M798KPN%D19Uv6G!j@N+GH9fHdsdC(g1z#29MWv_s40@3Ig#%2-sb zyBkx>Yi)2H&I^3=cFc2#Ic%EYquKKnvLMq?1oFtwhoUUK`yTPALGax=JzRho@#0k5 zxT*KU+;s}wcE6-`e)J><1$*OWCx^@86+6)PSBIJdS9|3sNheKrqq&|6OJQ-yGkB0i}xrV3H+HYfGq%YBl$7I?&i->xgUztrj_CrBYMk6phX~_&&%4CC+klu-}My7?$KZl#k% zMmp!eCQ&TOwja)sE;&S%0r>{9D!Ixp&wld0G|3Dwe|#9>&3SJLjdPrx_G&P@t*iX% z8Z$97r@!XEiPo-rf$xB8im3H67?0v87TSi(W7!(4SthJjMu!gTu(21kFz|_?Jx$ks z3$qHudoRUuAPxFB7#6JNnMVJT0u_W@yFcgzkz!P%%* zAt`8|{VV2WJhDS_sdjyHcC#4o6u=!-_ zZ?yUoIkDb;| zd(2%JI3MDZFySu77ZX7m!+wH^n`DMWjBXJPvIrp+Ya((=9Lp1%CMh?iiP1G*0r4*y_;+9)I`K2AX;un$O+&flnNBy z!B(z9AOg*ikaT9nx)X{>m}x;_il)%t6+yIp6rgY=)>%RkNP?S|%&mIk*Xx0YL(2r6lU|AuM^kiv$LJ^vn*cL-3nt<>2zom$Xm4K>(cd`gY9i$|k z=u%8{E!yo95P1n$;SLIg)zO(hCkhY)+G5 z%sCM0E*HQCm_4`T1Fw`|q%z5BG2+ALvPx8=*Zh{aK?vkBY*T-r5H&>A^nIHZ5$^1Pj$a+pgLe z;DnrO%9&poq#Qlx_<5_iBnkStrBCIdl1r0sMpKs57ny|8iZsb8YXjb;Pc0)@89g25 z%`B`(o9M8CXyY?+4Ek;1XLHlwO>Fa2p`kgtHf`PIu!$EYS&c@7l^U$|Cr@s^#=?TT$W+0Hx4OPWq6VO@ z@3!|!BlPND^xyYiGWdK$jVu$dvP$QZz|?N336UIe(_M(1B>iYU<#BzL4n2TKRSON7 zI<;KrcJs&>zCYmoAI#v2Wk&Fm{{kcC_|ll|H5GcoIerTS3KxI6a*+^4P6P08I&&uJ zCxYOiL zQ9xs&^wzHuR=&r>Opn_5tlQH$7EpyMn$Hq9G7(DHa1 zql!zG&BNhNROaoih>tau@oms!GUDV~fLOP;-34 z{03W`=bOcjrTGz!c){T;!6C^}^vGacpIlCmIM*Fc-GG z^MX&>C&yc^50K)ct)rHs;d{B8gLP=n^`%Yb!L&*mi3fZA9cgvkFkY&11DtK|H}NqX zR^Y}_&7KQ(J*RU7kDM5OSX|YJ^#&#sz}r?(QG&KVlpFcqlF`k8q3~?U`R5TK7Ha^w zOP8!g2)mDfkKf+qvd4sQYRJb;%Onl|WBu?2s{Vc5U)}vR6vD5n|A%LV`ziJ06DLT~ zT9+?g4>EaFS}<7du}Ug;(HmEycPDyuFUZsQRyqcgHIoPs)>k**GUxGJiDE~ zRiD}0hK_xV62p1;)4h<;h!cANoxg6fdTfW^7UVS{{KU(zYfd?H8dg?*Uw-H{W5O2j z#jKK4-09PkFSV<+wZW^fvM0g%tg`>{o|l6zfqc(RtL7D9sy9Wnb~PZ$Y7!&#{6+>`;+^>D)my=wS&L#05>=yd%FLe@b$K_d<`S=ztjnFZEcIWE z!EOZaR0SQ>S|1+2JN`&NCf0n-DS7(SmXdKMFS<7F{zy=tWt>7grY8k?dGi8!fWzT7 z5p(VPAXrS=QQy}fp0!B^m{$k+o2IquwAsPsChRirsP9=+(^W~gyE?L?JsYXn9eeX0 z51_3r?$GSa&Cl!E9l);Vs-$(rF(zqkE%nWf%EMxr8>ZrZj6`ZtV9I7aQHtGVMUjqM z+mDC^1xcr_aINn->j7iSLmJ@7`C{d<0KsQ6;yyp^1yVw!XSogm0teOy2up=PzETrz z32Y3BU=QB(ie?h78pQds1TQ`Ty9n$tuElN|zJBPfJ7pa;3UW=>GthnzO9{ literal 0 HcmV?d00001 diff --git a/gimp-plugins/colorize.py b/gimp-plugins/colorize.py deleted file mode 100755 index b7c600f..0000000 --- a/gimp-plugins/colorize.py +++ /dev/null @@ -1,116 +0,0 @@ -import os -baseLoc = os.path.dirname(os.path.realpath(__file__))+'/' - - -from gimpfu import * -import sys - -sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'neural-colorization']) - - -import torch -from model import generator -from torch.autograd import Variable -from scipy.ndimage import zoom -from PIL import Image -from argparse import Namespace -import numpy as np -# from skimage.color import rgb2yuv,yuv2rgb -import cv2 - -def 
getcolor(input_image): - p = np.repeat(input_image, 3, axis=2) - - if torch.cuda.is_available(): - g_available=1 - else: - g_available=-1 - - args=Namespace(model=baseLoc+'neural-colorization/model.pth',gpu=g_available) - - G = generator() - - if torch.cuda.is_available(): - G=G.cuda() - G.load_state_dict(torch.load(args.model)) - else: - G.load_state_dict(torch.load(args.model,map_location=torch.device('cpu'))) - - p = p.astype(np.float32) - p = p / 255 - img_yuv = cv2.cvtColor(p, cv2.COLOR_RGB2YUV) - # img_yuv = rgb2yuv(p) - H,W,_ = img_yuv.shape - infimg = np.expand_dims(np.expand_dims(img_yuv[...,0], axis=0), axis=0) - img_variable = Variable(torch.Tensor(infimg-0.5)) - if args.gpu>=0: - img_variable=img_variable.cuda(args.gpu) - res = G(img_variable) - uv=res.cpu().detach().numpy() - uv[:,0,:,:] *= 0.436 - uv[:,1,:,:] *= 0.615 - (_,_,H1,W1) = uv.shape - uv = zoom(uv,(1,1,float(H)/H1,float(W)/W1)) - yuv = np.concatenate([infimg,uv],axis=1)[0] - # rgb=yuv2rgb(yuv.transpose(1,2,0)) - # out=(rgb.clip(min=0,max=1)*255)[:,:,[0,1,2]] - rgb = cv2.cvtColor(yuv.transpose(1, 2, 0)*255, cv2.COLOR_YUV2RGB) - rgb = rgb.clip(min=0,max=255) - out = rgb.astype(np.uint8) - - return out - -def channelData(layer):#convert gimp image to numpy - region=layer.get_pixel_rgn(0, 0, layer.width,layer.height) - pixChars=region[:,:] # Take whole layer - bpp=region.bpp - # return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp) - return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp) - -def createResultLayer(image,name,result): - rlBytes=np.uint8(result).tobytes(); - rl=gimp.Layer(image,name,image.width,image.height,image.active_layer.type,100,NORMAL_MODE) - region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) - region[:,:]=rlBytes - image.add_layer(rl,0) - gimp.displays_flush() - -def genNewImg(name,layer_np): - h,w,d=layer_np.shape - img=pdb.gimp_image_new(w, h, RGB) - display=pdb.gimp_display_new(img) - - rlBytes=np.uint8(layer_np).tobytes(); - rl=gimp.Layer(img,name,img.width,img.height,RGB,100,NORMAL_MODE) - region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) - region[:,:]=rlBytes - - pdb.gimp_image_insert_layer(img, rl, None, 0) - gimp.displays_flush() - -def colorize(img, layer) : - gimp.progress_init("Coloring " + layer.name + "...") - - imgmat = channelData(layer) - cpy=getcolor(imgmat) - - genNewImg(layer.name+'_colored',cpy) - - - -register( - "colorize", - "colorize", - "Generate monocular disparity map based on deep learning.", - "Kritik Soman", - "Your", - "2020", - "colorize...", - "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. 
-    [ (PF_IMAGE, "image", "Input image", None),
-      (PF_DRAWABLE, "drawable", "Input drawable", None),
-    ],
-    [],
-    colorize, menu="/Layer/GIML-ML")
-
-main()
diff --git a/gimp-plugins/colorpalette.py b/gimp-plugins/colorpalette.py
new file mode 100755
index 0000000..411058f
--- /dev/null
+++ b/gimp-plugins/colorpalette.py
@@ -0,0 +1,60 @@
+import os
+baseLoc = os.path.dirname(os.path.realpath(__file__)) + '/'
+from gimpfu import *
+import sys
+sys.path.extend([baseLoc + 'gimpenv/lib/python2.7', baseLoc + 'gimpenv/lib/python2.7/site-packages',
+                 baseLoc + 'gimpenv/lib/python2.7/site-packages/setuptools'])
+
+import cv2
+import numpy as np
+
+def channelData(layer):  # convert gimp image to numpy
+    region = layer.get_pixel_rgn(0, 0, layer.width, layer.height)
+    pixChars = region[:, :]  # Take whole layer
+    bpp = region.bpp
+    return np.frombuffer(pixChars, dtype=np.uint8).reshape(layer.height, layer.width, bpp)
+
+def createResultLayer(image, name, result):
+    rlBytes = np.uint8(result).tobytes()
+    rl = gimp.Layer(image, name, image.width, image.height, image.active_layer.type, 100, NORMAL_MODE)
+    region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True)
+    region[:, :] = rlBytes
+    image.add_layer(rl, 0)
+    gimp.displays_flush()
+
+def genNewImg(name, layer_np):
+    h, w, d = layer_np.shape
+    img = pdb.gimp_image_new(w, h, RGB)
+    display = pdb.gimp_display_new(img)
+
+    rlBytes = np.uint8(layer_np).tobytes()
+    rl = gimp.Layer(img, name, img.width, img.height, RGB, 100, NORMAL_MODE)
+    region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True)
+    region[:, :] = rlBytes
+
+    pdb.gimp_image_insert_layer(img, rl, None, 0)
+
+    gimp.displays_flush()
+
+
+def colorpalette(img, layer):
+    cpy = cv2.cvtColor(cv2.imread(baseLoc + 'color_palette.png'), cv2.COLOR_BGR2RGB)
+    genNewImg('palette', cpy)
+
+
+register(
+    "colorpalette",
+    "colorpalette",
+    "colorpalette.",
+    "Kritik Soman",
+    "Your",
+    "2020",
+    "colorpalette...",
+    "*",  # Alternately use RGB, RGB*, GRAY*, INDEXED etc.
+ [(PF_IMAGE, "image", "Input image", None), + (PF_DRAWABLE, "drawable", "Input drawable", None), + ], + [], + colorpalette, menu="/Layer/GIML-ML") + +main() diff --git a/gimp-plugins/deepcolor.py b/gimp-plugins/deepcolor.py new file mode 100755 index 0000000..10ffcdf --- /dev/null +++ b/gimp-plugins/deepcolor.py @@ -0,0 +1,85 @@ +import os +baseLoc = os.path.dirname(os.path.realpath(__file__))+'/' +from gimpfu import * +import sys +sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'ideepcolor']) +import numpy as np +import torch +import cv2 +from data import colorize_image as CI + +def createResultLayer(image,name,result): + rlBytes=np.uint8(result).tobytes(); + rl=gimp.Layer(image,name,image.width,image.height) + # ,image.active_layer.type,100,NORMAL_MODE) + region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) + region[:,:]=rlBytes + image.add_layer(rl,0) + gimp.displays_flush() + +def channelData(layer):#convert gimp image to numpy + region=layer.get_pixel_rgn(0, 0, layer.width,layer.height) + pixChars=region[:,:] # Take whole layer + bpp=region.bpp + return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp) + + +def deepcolor(tmp1, tmp2, ilayerimg,ilayerc) : + layerimg = channelData(ilayerimg) + layerc = channelData(ilayerc) + + if ilayerimg.name == ilayerc.name: # if local color hints are not provided by user + mask = np.zeros((1, 256, 256)) # giving no user points, so mask is all 0's + input_ab = np.zeros((2, 256, 256)) # ab values of user points, default to 0 for no input + else: + if layerc.shape[2] == 3: # error + pdb.gimp_message("Alpha channel missing in " + ilayerc.name + " !") + return + else: + input_ab = cv2.cvtColor(layerc[:,:,0:3].astype(np.float32)/255, cv2.COLOR_RGB2LAB) + mask = layerc[:,:,3]>0 + mask = mask.astype(np.uint8) + input_ab = cv2.resize(input_ab,(256,256)) + mask = cv2.resize(mask, (256, 256)) + mask = mask[np.newaxis, :, :] + input_ab = input_ab[:,:, 1:3].transpose((2, 0, 1)) + + if layerimg.shape[2] == 4: #remove alpha channel in image if present + layerimg = layerimg[:,:,0:3] + + if torch.cuda.is_available(): + gimp.progress_init("(Using GPU) Running deepcolor for " + ilayerimg.name + "...") + gpu_id = 0 + else: + gimp.progress_init("(Using CPU) Running deepcolor for " + ilayerimg.name + "...") + gpu_id = None + + colorModel = CI.ColorizeImageTorch(Xd=256) + colorModel.prep_net(gpu_id, baseLoc + 'ideepcolor/models/pytorch/caffemodel.pth') + colorModel.load_image(layerimg) # load an image + + img_out = colorModel.net_forward(input_ab, mask) # run model, returns 256x256 image + img_out_fullres = colorModel.get_img_fullres() # get image at full resolution + + createResultLayer(tmp1, 'new_' + ilayerimg.name, img_out_fullres) + + + +register( + "deepcolor", + "deepcolor", + "Running deepcolor.", + "Kritik Soman", + "Your", + "2020", + "deepcolor...", + "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. 
+ [ (PF_IMAGE, "image", "Input image", None), + (PF_DRAWABLE, "drawable", "Input drawable", None), + (PF_LAYER, "drawinglayer", "Original Image:", None), + (PF_LAYER, "drawinglayer", "Color Mask:", None), + ], + [], + deepcolor, menu="/Layer/GIML-ML") + +main() diff --git a/gimp-plugins/face-parsing.PyTorch/__pycache__/model.cpython-38.pyc b/gimp-plugins/face-parsing.PyTorch/__pycache__/model.cpython-38.pyc deleted file mode 100755 index c7216a39472602cda52c07f5fb0f6251868e502e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9174 zcmds7TaO$^74G}=%m6+IHIZauoE%V$pWnNYu>RacKFt@Wy& zx$i`AJ4()7tdTSNv+_!O21KNJdZ>9uSo}h(>ryI3&s4JTEF}TY{z9`f&q4pCtaQ24 zzI$o?o#@T?j!blLgo)5jommx172D z$-5m<+dSLqgi-TM_Y*OLS4m+mgV6Lj-O-Ky@o(3e(RY@HlX9m2)ir~y^B-v&=VhGn z<0v3uE!7Y7T|LqF4b)85SWlrxhx&Te5`1T-U(GBLHG5(awL?QJN#@7mQCwAVnw&zt zBOrzF5maKS4SGpm(03{$Bg>oAs4@ND*udTZmjgS{u4o^;#G$2nVr-dvhH!DUw(L~j za-_<@)Ik?QZM&%M?YpvW9^lR$H-zFGTI+(}3}es(!IvHuJ-4>mXoezcXQc+D9VbD% z7KvHh6muv%i+bSAZwHMQ=E847LxQHCIZ87G(a9@9h4^6N=koB3<|h*Z5f!w20%t4% zO`pM0(M#IS%7{QlbLv6@h%=BF! z#xJE?b)@zSnxW^m8li%OB0z#hAH`yoeVk+=^Jx~} zo-$ynHNR%kj!umadc>5*@Pz=tQ;IJIv4O+TcJ*C?okF4|X8BPpiVe(6Bl(_hebjXM=;a^1M0$tgN4 zd$Tm&FgNK~P%)&^dCr@Sb`%H+MVS+HyHPt7-{Nheh*&_8S{7}M0JR`h00qJP;32e**g_w07*YZ<$)oIAT@xb#KvNzJeeYBC<1`040W67 z`!wP#Is0n839tkeXC zk+KJrqo|}nNrE3?m4dq)ez((U(z~O3DCl&GAF%i#8`uf` zsmyHknpI^h;z{zYdK4s?t%PY3<%cRNQJd@Pn>b^xm$ZtLIC;S3dh&SLJbVE99zSBj z5qbLf4;TScfhcUc=~8XwM03Vrx~^LkN^Li!2*E%Of(LZr{X_fqnCHu{2@{?b|b! zQDxE6E4hUwb=Tf?uoRBBDeStbJ;5vPmQrVeSKggTfETGVeg{}ED^X(>s4#mx1QAx;K*3NG z%XYWZ3~(Vzil4A}o5edQswdPm5~Doh+Vh@q^LfuER$cTQUd~-m*Yj(*eG#`Wdge7S z&6_N)foPfit8#Ch~8UV9{?ODchae5!fO2_6MAIRh@8ro*N>3WFrV>GnhY+U#;7Gw$ z%PBLb`tb`ALmnO$KV!k^m!qxbYY#z7Tw^hzeT0bL;>-<9`BCsOmqv&D@L|I5heIBu za8^f${6LtHqL(R{@bw0s_eX|5@I#M@{#Hgm_(70?I{s^Rm?MMh7Jv-o)^LvfsEqu5 z6=Ybtip8xbqN}|aA>J{89f!i8A^ru5NrcFAP0Tip=b8}KJwgYEcjzH9Rb5bdDBh77 zs?mEc?iHB`yT?R!3E^K^hJQ1#so8$`R|RTFa3C&7tT4Fr>BT~h&cU8Wq*|Wz-dfSNwh6Eh^et5vwQcH%g7*4M-IkBIEZ84 zhFfV4!bD{M;L_1WBUKns%55XBsCbFCU*fg`oJ6>QsRd`QkTMlH$OR*9*=R|pty&gW zF&xjV1&Q!MjW}wru_|Fs3Q}U-hvdDEqUuTq%`@w97e%e>S#|btjSYT;LU~wulU&7w_-;}l zg66g2Sgiyr1r}nXlAC(4M`l)(d&D6P2T3q&h|B4X(Xftdnh;95?ctpdVTprQ&uL+hTkTa?_=1h*u)5p<+!-pFwPi!&uRI?9BJ4?cu{nW=ke9a;ZPu<8YCC%rwW#IAboI^dWiF zty>niFt}|2rBT}$Zy>^CzQx(s4>Zuovm{1}5N6MauSiO5XP9XL+Y;P;cX74uoW^2I z4whueF(e17ms)k?T-Cmk?air8i~jzpO6)&_K}q|RrilKWBUPR$7G=-0}t3TL{ z@G3cHWQRh0pG|Gqvo|j_Oh)Fb=diH<6kl+GasD4s=;nfTWS z{^igMgUpKV;zhOy+F{#sFmi4pyn#lZt?Z2#isTDGxNNlEIYX<*o?Qxu6{*l`a;+QoWjC9Skv zQnO3TCMckQ45Wvu=N`$U|D7Ism{WnC^e+@B(%+k1QnHEyU1HzNy!qIf-+S-(hR<3p zpW(UJ9{=I~5@Y|Q#^uLF<38T}?+}tnp0Zw9^1kQ^Ue>L?5R9|F-E%69+pigo*Q-M# zdcItdwsc&2K;y?0`?$giAgVQb*S4b}GZB+R=)i!9UbLoB!@ zyOypeSujk)EY7t*7!_4rkzL;~)k5*F-)ua~W0h|ls-#E`^Fcq%Ha@E$$T#+q{_yTF z9qlDqzOfrd@!esla_nvW@vE;0oeC0wrE7E!oClu7aymW}O)i-DR<%)&NOAeGt497V zDlzME$$CP%->_Xy5WTMV027>s_3Uq$&3X<1t7-p1m?zPLbPydHWTN>pv>MgxcnP=s z&kzMGfcvSK3bJ=$O@RNzI<+P&unTAEPQ(oTK@B}(B2ESB-X(p#a1MNl?VQo5hGdXU zTI0~l<;0TKp{4$W@hyy7BE7Vr#iq}k`6@G7vnd%i@9uon<+_$+ut*e_cI{fq!9I;e z6k*nFYbPHKW2KhS(q0fGSyBXn!B9H{@us3P>c)exi1v2|s=p<*^*q}wd$*{*rR^v2 z7tge<;&h~aa_Kzm57SutPvfIeoE1r!DhmFtt?fKccWGZiv>#^LBS)cH=_Xv)gCI)7 zJjW);V-(p}twLPKo3BAIzJy=X{Fb=Om$}V7GChoM3-90K)eEbw&rcMQW6vJ~fnF0a zU!yW%Q^d=RpNN7_f#DPunwd80Hq}o^>r^#<0-GMVk{#C7Bj{Z-7uJ}Fo9x7fMoetX zu_yLZ_J;3tYw8-lqWL3r8>ONkRX0i8BJo{_uA{z(Li3l}L)<5F6sA2d-yf(V8Wq|C zOeG!*P;_l~ppHW&e}s8CtJChr>9N#*5V*4C=b-`!x0)2oPB|>YKURO~Y59Ce4$pKzAY)heaHi0l{;k zgl$YNSqF$O^BY`kLi1m&L)1V|{yxN^6)>ByS&P=c4hK{cGM 
zuDvH!8i#)rn1rKSlD(&i;i3z~*(UeYWf0NN(yk#$--iZ*$EpG1v>$uWvNq%_!N zR-yr2q1LIBOnUxD5};F-Ai!C7S#_vuiv+PkX;^^kx%i{E%< zZnyt~gMNp#09wgGe8E9rAg~Cs0XS%0a~8Fdb%>X0hL>Q%sW(A*fOn90CibrNl&K$J z>2AFQ=ZAEP3kLd#nje#(v#3u<{0gGtB1Ocaon83;J?&Dl`C(aZZ(Ruy!w;WhcuoXR z@sI!$ncsMM0Y$|?Z@KbYZ2VhHFW8h9#JO~GYsSF6d}_}a=wR(@ablgor6<;*tKK`h zJrQ7KoHYSHpoQC*Ve%~94dHxZM~mGEncOpXs_|f;J-TiMBbDkVh}K-NOv}#;3UZl5 zg?5r6?&sPa4JCBCZKhoG&jEUb&Gy=q(1MFj%?~wyaTTCXG4u)Eobnj~r}HmjPFcDQ3vPxGcfQk0RB;jE*raI?o18cqSPR(gn`Rmf;$=j$Z*dvmj(BWB#T33 zQog#2?P|9SCv)jAF{H_JD@m){-1cOzV#7{{WUZISq$ZCp$Q~ivDApj%R zsA;Yy#6%QBCL4IX+4LHoXZVS-Yd^$ACpf35C|U3yRQmv8o`kDhd`mm#{;$3AZ$Orn zP_E9K>${^YqDy<2YX73HD5mxGauhE0X})eeJs?+ij6&BJ8ugGq|7#L-OV;j}Cg+=k zX~?l8R>DJD2n|h1RAz<=)Vrj0hAPPl+~V-RphV^Z6jLKmwD(DArca6F^EC({L)2*7 UmhX1n@oSy;JacuUFreRm0DXTm+W-In diff --git a/gimp-plugins/ideepcolor/LICENSE b/gimp-plugins/ideepcolor/LICENSE new file mode 100644 index 0000000..44b190d --- /dev/null +++ b/gimp-plugins/ideepcolor/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Jun-Yan Zhu and Richard Zhang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/gimp-plugins/ideepcolor/data/__init__.py b/gimp-plugins/ideepcolor/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gimp-plugins/ideepcolor/data/__init__.pyc b/gimp-plugins/ideepcolor/data/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4eea9ea794d5b44607bd27b513e6e29d874c420 GIT binary patch literal 159 zcmZSn%*(|$@qSz~0~9aDWy57b|5QDfS3UQ6Rsvk literal 0 HcmV?d00001 diff --git a/gimp-plugins/ideepcolor/data/color_bins/in_hull.npy b/gimp-plugins/ideepcolor/data/color_bins/in_hull.npy new file mode 100644 index 0000000000000000000000000000000000000000..e8cf2b4000b9a5c7270398a66272f320fb383880 GIT binary patch literal 609 zcmbR27wQ`j$;jZwP_3SlTAW;@Zl$1JlVqr_qoAIaUsO_*m=~X4l#&V(cT3DEP6dh= zXCxM+0{I%IMwU97ItsN4aKObt5nu!XG!wvL=)y2T6x9epWMO1cggB}w%uX~>{KDvp oA*wM%!NS-@8L$guih|6*B8o$pB*nPRz-A*PZV*mEiz_T#0A!{X$p8QV literal 0 HcmV?d00001 diff --git a/gimp-plugins/ideepcolor/data/color_bins/pts_grid.npy b/gimp-plugins/ideepcolor/data/color_bins/pts_grid.npy new file mode 100644 index 0000000000000000000000000000000000000000..10a480c0dbad176478233c43089c715c65006099 GIT binary patch literal 8544 zcmZ9~p-TgC7{}qWE}P}rO}I1=RF>D26@wNz+=;^2I2jh;o%qtBuj(W{};Rp;bw&s+1>yftslTl3buHE%r}ztFrjZ_Qit*1R=u&0F)< zyftslo1_1U_C1}Gw>@voTl3buHE+#Z^VYmIZ_Qit*1R=u&0F)yftsn;jMXV-kP`Ot$AzS znz!byd28O9x8|*RYu=i-=B+vDoV@LMYu=i--|^PGHE+#Z^VYmIZ_Qit*1R=u&0F)< zyftslTl3buH8-7;w>@vo+xK{D-kP`Ot$AzSnz!byd28O9x8|*RYu=i-=B;^a-kP`O zsB`kR=WWkh^VYmIZ_Qit*1R=u&0F)D6vr_rRYBdK9CgiO~#W)NUVLrWkbjI=nymjH`KqtR$I8jZ%n$eznQ zm^W7sXMKNn&L5tRjt`e7TSZgcS6x4z57j|gEw1*fx~wi%!`;x{oUMkg|9{@J*JHna zK3=xB{o2#{-n=enyLI_c#@FQKck?wXHeb9*ZjzVDr{UN5yzS(j_b@ylYXZL6KXZL6KXZL6K zXZQbzdw#{q_OLx{58K1`usv)K+r##-J!}u>JzZSzGj`?EUeE1id)Z#Lm+fVH*%n@k9;^rJ!FsSB?&;rc z)`#_BeOMpXhrO5csgLLSus*C0>%;o6KCBPx!}_?F|94q0){FIGy;v{Si}hmn;(Y4m zxn8Un>&1GpUaS}E<-Pi_KCBPx!}_p3tPktM`mlR)KK1cjAJ&KUVSQL1*2jDNoXL8z zUaS}E#d@(`tQYIWda+(?2j^2S&-G%xSTEL#_42vT@pB35$NI5;tRL&g`muhjAM3~Z zv3{%{+sXOV&vX4)Kh}@+^Evvleykts$NI5;tRL&g`muhjAM3~Zv3_hf=Tkq=^<({f z-(&oFWBph^){pgL{a8QNkM(2ySU=W}^<({5Kh}?R;e6`nxqhDO$NI5;tRL&g`muhj zAM3~Zv3{%{>&N=Beykts$2xI7^>gaS`muhjAM3~Zv3{%{>&N=Beykts$NI5;tRL&g z`muigozsi;V!c=|){FIGy;v{Si}hl?STEL#^6oV!iwvs|V}Bdaxd>2kXIl KupX=j>+uJ self.Xfullres_max or Yfullres > self.Xfullres_max: + if Xfullres > Yfullres: + zoom_factor = 1. * self.Xfullres_max / Xfullres + else: + zoom_factor = 1. 
* self.Xfullres_max / Yfullres + self.img_rgb_fullres = zoom(self.img_rgb_fullres, (zoom_factor, zoom_factor, 1), order=1) + + self.img_lab_fullres = cv2.cvtColor(self.img_rgb_fullres.astype(np.float32) / 255, cv2.COLOR_RGB2LAB).transpose((2, 0, 1)) + # self.img_lab_fullres = color.rgb2lab(self.img_rgb_fullres).transpose((2, 0, 1)) + self.img_l_fullres = self.img_lab_fullres[[0], :, :] + self.img_ab_fullres = self.img_lab_fullres[1:, :, :] + + def _set_img_lab_(self): + # set self.img_lab from self.im_rgb + self.img_lab = cv2.cvtColor(self.img_rgb.astype(np.float32) / 255, cv2.COLOR_RGB2LAB).transpose((2, 0, 1)) + # self.img_lab = color.rgb2lab(self.img_rgb).transpose((2, 0, 1)) + self.img_l = self.img_lab[[0], :, :] + self.img_ab = self.img_lab[1:, :, :] + + def _set_img_lab_mc_(self): + # set self.img_lab_mc from self.img_lab + # lab image, mean centered [XxYxX] + self.img_lab_mc = self.img_lab / np.array((self.l_norm, self.ab_norm, self.ab_norm))[:, np.newaxis, np.newaxis] - np.array( + (self.l_mean / self.l_norm, self.ab_mean / self.ab_norm, self.ab_mean / self.ab_norm))[:, np.newaxis, np.newaxis] + self._set_img_l_() + + def _set_img_l_(self): + self.img_l_mc = self.img_lab_mc[[0], :, :] + self.img_l_set = True + + def _set_img_ab_(self): + self.img_ab_mc = self.img_lab_mc[[1, 2], :, :] + + def _set_out_ab_(self): + self.output_lab = rgb2lab_transpose(self.output_rgb) + self.output_ab = self.output_lab[1:, :, :] + + +class ColorizeImageTorch(ColorizeImageBase): + def __init__(self, Xd=256, maskcent=False): + print('ColorizeImageTorch instantiated') + ColorizeImageBase.__init__(self, Xd) + self.l_norm = 1. + self.ab_norm = 1. + self.l_mean = 50. + self.ab_mean = 0. + self.mask_mult = 1. + self.mask_cent = .5 if maskcent else 0 + + # Load grid properties + self.pts_in_hull = np.array(np.meshgrid(np.arange(-110, 120, 10), np.arange(-110, 120, 10))).reshape((2, 529)).T + + # ***** Net preparation ***** + def prep_net(self, gpu_id=None, path='', dist=False): + import torch + import models.pytorch.model as model + print('path = %s' % path) + print('Model set! dist mode? 
', dist) + self.net = model.SIGGRAPHGenerator(dist=dist) + state_dict = torch.load(path) + if hasattr(state_dict, '_metadata'): + del state_dict._metadata + + # patch InstanceNorm checkpoints prior to 0.4 + for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop + self.__patch_instance_norm_state_dict(state_dict, self.net, key.split('.')) + self.net.load_state_dict(state_dict) + if gpu_id != None: + self.net.cuda() + self.net.eval() + self.net_set = True + + def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): + key = keys[i] + if i + 1 == len(keys): # at the end, pointing to a parameter/buffer + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'running_mean' or key == 'running_var'): + if getattr(module, key) is None: + state_dict.pop('.'.join(keys)) + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'num_batches_tracked'): + state_dict.pop('.'.join(keys)) + else: + self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) + + # ***** Call forward ***** + def net_forward(self, input_ab, input_mask): + # INPUTS + # ab 2xXxX input color patches (non-normalized) + # mask 1xXxX input mask, indicating which points have been provided + # assumes self.img_l_mc has been set + + if ColorizeImageBase.net_forward(self, input_ab, input_mask) == -1: + return -1 + + # net_input_prepped = np.concatenate((self.img_l_mc, self.input_ab_mc, self.input_mask_mult), axis=0) + + # return prediction + # self.net.blobs['data_l_ab_mask'].data[...] = net_input_prepped + # embed() + output_ab = self.net.forward(self.img_l_mc, self.input_ab_mc, self.input_mask_mult, self.mask_cent)[0, :, :, :].cpu().data.numpy() + self.output_rgb = lab2rgb_transpose(self.img_l, output_ab) + # self.output_rgb = lab2rgb_transpose(self.img_l, self.net.blobs[self.pred_ab_layer].data[0, :, :, :]) + + self._set_out_ab_() + return self.output_rgb + + def get_img_forward(self): + # get image with point estimate + return self.output_rgb + + def get_img_gray(self): + # Get black and white image + return lab2rgb_transpose(self.img_l, np.zeros((2, self.Xd, self.Xd))) + + +class ColorizeImageTorchDist(ColorizeImageTorch): + def __init__(self, Xd=256, maskcent=False): + ColorizeImageTorch.__init__(self, Xd) + self.dist_ab_set = False + self.pts_grid = np.array(np.meshgrid(np.arange(-110, 120, 10), np.arange(-110, 120, 10))).reshape((2, 529)).T + self.in_hull = np.ones(529, dtype=bool) + self.AB = self.pts_grid.shape[0] # 529 + self.A = int(np.sqrt(self.AB)) # 23 + self.B = int(np.sqrt(self.AB)) # 23 + self.dist_ab_full = np.zeros((self.AB, self.Xd, self.Xd)) + self.dist_ab_grid = np.zeros((self.A, self.B, self.Xd, self.Xd)) + self.dist_entropy = np.zeros((self.Xd, self.Xd)) + self.mask_cent = .5 if maskcent else 0 + + def prep_net(self, gpu_id=None, path='', dist=True, S=.2): + ColorizeImageTorch.prep_net(self, gpu_id=gpu_id, path=path, dist=dist) + # set S somehow + + def net_forward(self, input_ab, input_mask): + # INPUTS + # ab 2xXxX input color patches (non-normalized) + # mask 1xXxX input mask, indicating which points have been provided + # assumes self.img_l_mc has been set + + # embed() + if ColorizeImageBase.net_forward(self, input_ab, input_mask) == -1: + return -1 + + # set distribution + (function_return, self.dist_ab) = self.net.forward(self.img_l_mc, self.input_ab_mc, self.input_mask_mult, self.mask_cent) + function_return = function_return[0, :, :, :].cpu().data.numpy() + self.dist_ab = self.dist_ab[0, :, :, 
:].cpu().data.numpy() + self.dist_ab_set = True + + # full grid, ABxXxX, AB = 529 + self.dist_ab_full[self.in_hull, :, :] = self.dist_ab + + # gridded, AxBxXxX, A = 23 + self.dist_ab_grid = self.dist_ab_full.reshape((self.A, self.B, self.Xd, self.Xd)) + + # return + return function_return + + # def get_ab_reccs(self, h, w, K=5, N=25000, return_conf=False): + # ''' Recommended colors at point (h,w) + # Call this after calling net_forward + # ''' + # if not self.dist_ab_set: + # print('Need to set prediction first') + # return 0 + # + # # randomly sample from pdf + # cmf = np.cumsum(self.dist_ab[:, h, w]) # CMF + # cmf = cmf / cmf[-1] + # cmf_bins = cmf + # + # # randomly sample N points + # rnd_pts = np.random.uniform(low=0, high=1.0, size=N) + # inds = np.digitize(rnd_pts, bins=cmf_bins) + # rnd_pts_ab = self.pts_in_hull[inds, :] + # + # # run k-means + # kmeans = KMeans(n_clusters=K).fit(rnd_pts_ab) + # + # # sort by cluster occupancy + # k_label_cnt = np.histogram(kmeans.labels_, np.arange(0, K + 1))[0] + # k_inds = np.argsort(k_label_cnt, axis=0)[::-1] + # + # cluster_per = 1. * k_label_cnt[k_inds] / N # percentage of points within cluster + # cluster_centers = kmeans.cluster_centers_[k_inds, :] # cluster centers + # + # # cluster_centers = np.random.uniform(low=-100,high=100,size=(N,2)) + # if return_conf: + # return cluster_centers, cluster_per + # else: + # return cluster_centers + + def compute_entropy(self): + # compute the distribution entropy (really slow right now) + self.dist_entropy = np.sum(self.dist_ab * np.log(self.dist_ab), axis=0) + + # def plot_dist_grid(self, h, w): + # # Plots distribution at a given point + # plt.figure() + # plt.imshow(self.dist_ab_grid[:, :, h, w], extent=[-110, 110, 110, -110], interpolation='nearest') + # plt.colorbar() + # plt.ylabel('a') + # plt.xlabel('b') + + # def plot_dist_entropy(self): + # # Plots distribution at a given point + # plt.figure() + # plt.imshow(-self.dist_entropy, interpolation='nearest') + # plt.colorbar() + + +class ColorizeImageCaffe(ColorizeImageBase): + def __init__(self, Xd=256): + print('ColorizeImageCaffe instantiated') + ColorizeImageBase.__init__(self, Xd) + self.l_norm = 1. + self.ab_norm = 1. + self.l_mean = 50. + self.ab_mean = 0. + self.mask_mult = 110. 
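+ # Note: unlike ColorizeImageTorch above (mask_mult = 1.), the Caffe network expects the user-point
+ # mask scaled by 110 before it is concatenated with the mean-centered L channel and the ab hints.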
+ + self.pred_ab_layer = 'pred_ab' # predicted ab layer + + # Load grid properties + self.pts_in_hull_path = './data/color_bins/pts_in_hull.npy' + self.pts_in_hull = np.load(self.pts_in_hull_path) # 313x2, in-gamut + + # ***** Net preparation ***** + def prep_net(self, gpu_id, prototxt_path='', caffemodel_path=''): + import caffe + print('gpu_id = %d, net_path = %s, model_path = %s' % (gpu_id, prototxt_path, caffemodel_path)) + if gpu_id == -1: + caffe.set_mode_cpu() + else: + caffe.set_device(gpu_id) + caffe.set_mode_gpu() + self.gpu_id = gpu_id + self.net = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST) + self.net_set = True + + # automatically set cluster centers + if len(self.net.params[self.pred_ab_layer][0].data[...].shape) == 4 and self.net.params[self.pred_ab_layer][0].data[...].shape[1] == 313: + print('Setting ab cluster centers in layer: %s' % self.pred_ab_layer) + self.net.params[self.pred_ab_layer][0].data[:, :, 0, 0] = self.pts_in_hull.T + + # automatically set upsampling kernel + for layer in self.net._layer_names: + if layer[-3:] == '_us': + print('Setting upsampling layer kernel: %s' % layer) + self.net.params[layer][0].data[:, 0, :, :] = np.array(((.25, .5, .25, 0), (.5, 1., .5, 0), (.25, .5, .25, 0), (0, 0, 0, 0)))[np.newaxis, :, :] + + # ***** Call forward ***** + def net_forward(self, input_ab, input_mask): + # INPUTS + # ab 2xXxX input color patches (non-normalized) + # mask 1xXxX input mask, indicating which points have been provided + # assumes self.img_l_mc has been set + + if ColorizeImageBase.net_forward(self, input_ab, input_mask) == -1: + return -1 + + net_input_prepped = np.concatenate((self.img_l_mc, self.input_ab_mc, self.input_mask_mult), axis=0) + + self.net.blobs['data_l_ab_mask'].data[...] = net_input_prepped + self.net.forward() + + # return prediction + self.output_rgb = lab2rgb_transpose(self.img_l, self.net.blobs[self.pred_ab_layer].data[0, :, :, :]) + + self._set_out_ab_() + return self.output_rgb + + def get_img_forward(self): + # get image with point estimate + return self.output_rgb + + def get_img_gray(self): + # Get black and white image + return lab2rgb_transpose(self.img_l, np.zeros((2, self.Xd, self.Xd))) + + +class ColorizeImageCaffeGlobDist(ColorizeImageCaffe): + # Caffe colorization, with additional global histogram as input + def __init__(self, Xd=256): + ColorizeImageCaffe.__init__(self, Xd) + self.glob_mask_mult = 1. + self.glob_layer = 'glob_ab_313_mask' + + def net_forward(self, input_ab, input_mask, glob_dist=-1): + # glob_dist is 313 array, or -1 + if np.array(glob_dist).flatten()[0] == -1: # run without this, zero it out + self.net.blobs[self.glob_layer].data[0, :-1, 0, 0] = 0. + self.net.blobs[self.glob_layer].data[0, -1, 0, 0] = 0. 
+ else: # run conditioned on global histogram + self.net.blobs[self.glob_layer].data[0, :-1, 0, 0] = glob_dist + self.net.blobs[self.glob_layer].data[0, -1, 0, 0] = self.glob_mask_mult + + self.output_rgb = ColorizeImageCaffe.net_forward(self, input_ab, input_mask) + self._set_out_ab_() + return self.output_rgb + + +class ColorizeImageCaffeDist(ColorizeImageCaffe): + # caffe model which includes distribution prediction + def __init__(self, Xd=256): + ColorizeImageCaffe.__init__(self, Xd) + self.dist_ab_set = False + self.scale_S_layer = 'scale_S' + self.dist_ab_S_layer = 'dist_ab_S' # softened distribution layer + self.pts_grid = np.load('./data/color_bins/pts_grid.npy') # 529x2, all points + self.in_hull = np.load('./data/color_bins/in_hull.npy') # 529 bool + self.AB = self.pts_grid.shape[0] # 529 + self.A = int(np.sqrt(self.AB)) # 23 + self.B = int(np.sqrt(self.AB)) # 23 + self.dist_ab_full = np.zeros((self.AB, self.Xd, self.Xd)) + self.dist_ab_grid = np.zeros((self.A, self.B, self.Xd, self.Xd)) + self.dist_entropy = np.zeros((self.Xd, self.Xd)) + + def prep_net(self, gpu_id, prototxt_path='', caffemodel_path='', S=.2): + ColorizeImageCaffe.prep_net(self, gpu_id, prototxt_path=prototxt_path, caffemodel_path=caffemodel_path) + self.S = S + self.net.params[self.scale_S_layer][0].data[...] = S + + def net_forward(self, input_ab, input_mask): + # INPUTS + # ab 2xXxX input color patches (non-normalized) + # mask 1xXxX input mask, indicating which points have been provided + # assumes self.img_l_mc has been set + + function_return = ColorizeImageCaffe.net_forward(self, input_ab, input_mask) + if np.array(function_return).flatten()[0] == -1: # errored out + return -1 + + # set distribution + # in-gamut, CxXxX, C = 313 + self.dist_ab = self.net.blobs[self.dist_ab_S_layer].data[0, :, :, :] + self.dist_ab_set = True + + # full grid, ABxXxX, AB = 529 + self.dist_ab_full[self.in_hull, :, :] = self.dist_ab + + # gridded, AxBxXxX, A = 23 + self.dist_ab_grid = self.dist_ab_full.reshape((self.A, self.B, self.Xd, self.Xd)) + + # return + return function_return + + # def get_ab_reccs(self, h, w, K=5, N=25000, return_conf=False): + # ''' Recommended colors at point (h,w) + # Call this after calling net_forward + # ''' + # if not self.dist_ab_set: + # print('Need to set prediction first') + # return 0 + # + # # randomly sample from pdf + # cmf = np.cumsum(self.dist_ab[:, h, w]) # CMF + # cmf = cmf / cmf[-1] + # cmf_bins = cmf + # + # # randomly sample N points + # rnd_pts = np.random.uniform(low=0, high=1.0, size=N) + # inds = np.digitize(rnd_pts, bins=cmf_bins) + # rnd_pts_ab = self.pts_in_hull[inds, :] + # + # # run k-means + # kmeans = KMeans(n_clusters=K).fit(rnd_pts_ab) + # + # # sort by cluster occupancy + # k_label_cnt = np.histogram(kmeans.labels_, np.arange(0, K + 1))[0] + # k_inds = np.argsort(k_label_cnt, axis=0)[::-1] + # + # cluster_per = 1. 
* k_label_cnt[k_inds] / N # percentage of points within cluster + # cluster_centers = kmeans.cluster_centers_[k_inds, :] # cluster centers + # + # # cluster_centers = np.random.uniform(low=-100,high=100,size=(N,2)) + # if return_conf: + # return cluster_centers, cluster_per + # else: + # return cluster_centers + + def compute_entropy(self): + # compute the distribution entropy (really slow right now) + self.dist_entropy = np.sum(self.dist_ab * np.log(self.dist_ab), axis=0) + + # def plot_dist_grid(self, h, w): + # Plots distribution at a given point + # plt.figure() + # plt.imshow(self.dist_ab_grid[:, :, h, w], extent=[-110, 110, 110, -110], interpolation='nearest') + # plt.colorbar() + # plt.ylabel('a') + # plt.xlabel('b') + + # def plot_dist_entropy(self): + # Plots distribution at a given point + # plt.figure() + # plt.imshow(-self.dist_entropy, interpolation='nearest') + # plt.colorbar() diff --git a/gimp-plugins/ideepcolor/data/colorize_image.pyc b/gimp-plugins/ideepcolor/data/colorize_image.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e34865ae8577c0d86cf365064e858c52e738c948 GIT binary patch literal 21215 zcmdU1Ym8i1eLr_*_A%>SuOGW>yLpYt!y6Lp#7-Ls0mpWNV@f9JU57ZPFwD-}-JSKk zJom0+(*D`RP5kE<|Lt837ypX*_Yqv-w~;jQ*K=*>E_zA6 z;4T)De9>JjDqnEzlIx7PizCi!qNwCvfj#ue-#yhRAj73#%^6(bDw5}&JNr12$hlR|tzsQK z_q^w>7u?(n1$XtBi;8ZwHhZ0j#mu(4k!pLP zX4tPsD{2;8ZG}-7ji6cQ65goZqAA?6Ni7OG{dPSHBCbs}8b{h#8`M}gkGxY7R*^gQ zbQlc6W0wZ4sC6mqb?V(?PhD%Q)CZlX2EA1bG(6S~o@?~ly}|Kgt!5DPl|R<3NA+XM zw%!xeTAliGaI}9dnq^O%X`nIG(#e)l5$;Cfybzk1>71?Apx-@W(Wci%fVwW}|x`gqpxb!fM~ z?UPTR-_*w-h}H((pjo+6Z?6TFR#-t#S5H(#?K+4o25OOvEpmw%W2xP%M<Co<$VdrF60`$ZUdz0k~28<2rL%%dtrd3L+zF0#wk~#Go-f|tsX|# z`hhs>%#&|>(yzVk)am2?+0z&sUG&~Mk`g>43pRyJyhk^QsSc+?Snu0?_Psc%`j64+-hNOfWz0-@>M4Si}bB-G3 zbUh3jtmTg^dm2~x9V9`{z(M41n2jsaYS6&Jnjuq4tQpjRSu>Dt%mpxLux5A!#;vTD zFln-67iGoxEDV>d*hSc?DYb!lk_Ed6i!`m0Y1fqXy10jyi%;!!Opc_F&FJ7xGf&nXyc0CLYb#SE~ zyV?l)QLEQgk$)G8hCf@V)qV|BIf33nvVt>H$}Cr3vm_P*X8`wH7xJlzC!o@B>&5od ziU4<{*kEik-IhK!O0z>I8dtNjp63G-%H{ZpM#d~d4`5&#cd^3Y{6EG?jReL+$`5ofSwtFbXJVH8Qw$Z6A2QY* ztuCdaw-!-R0XFWCt@YoGUN7S^8=H-mtE|y-Ex}tHFg#Frqd=QxQ{iHwu;iA&!HO`% z2t!OHedFJRhkXVI%ta#)yeAm+LfO6@nxY_F!l29G-ZhM3<8(Z_h>?2vIEmLK}UYYj#iRp1-!{1EjpGi!r z$A1Tv+2m!seoTdh^)iLU@R$^qqAYkT6c*NYR6tDeh*%#oc8f=#`f^r+Bdl(r)g)HJ z*eL-lxS6WAoRAR#8yk{$eHxNX$(hU%jbtEuXQXZheI}Mx=snhNzm0Mey292%cK{8H zTPbk>uQ|b^NvgJvBef|CQ)mGmV3`)+F6MAqGK{!CJpxWGT= zHN54Y#>fp*#)!P7MzVm;U_q6 zK+y=c&$137C@2pEXSJKd$9Ac6L}@*@jCSBK=Y9oyKS>7N5SiBCMdGb!r@DNLO-&Kt zr4KTWg^Zm+3RA3pUOs6WiF+`H=xO^5-XaqR(d{crUqy_v#Z z??~Z+!hB|jQ(f=@G=ye@P9s_5J{j#v?0CNgF)viWiT^|yT)qj&2f z&LR*+usT11)PFY<*|&$0Q{barV?@oDm&*0V)-^X#<2nXf{GVo9J|EM95jWB?3n15a ziYNr3e~rtCQhd~VVhai3c)5DAnXjnY6o25NL8FqF4H@9^*eangjz}`}j(KO{;hmas zfFXPtm+kR+TWN%2s7~h#Exnvm;wi5r+FY7GIch;xFjdT%`2$;C$_libp9j*Cdvjw*z=bz)N$?CBS4@=?` zxa{~kpnqDcuc7al^7}mPYsx~^YTbG#sMR#oTCLM-uC;ONA3<~fZYK9LIl<(OOb#=7 zoC(8o{?9V0GFf2qG?TY8d4|ceOfE8c2a|i5kjJzS;*%l$6($g{oOzU?90E}GZ$me) z#T9ZOqu%s*xm2Dm?<((~oK-L6%(dd{}7=g91SbUYE z>7tp8M{SJS3Q&*fgi^pZ?bTx@*8%2HzP<##0ynH;i+)m>DfNQ=d8$JO2&y61fAcC< zTHP?hwoD5rW17oog5S$0)lO?KQmJ0Ug1RL< z7v+to>%66n@<}~eB>xS_Wm`QBY#iiF+c+vaLAbI!XfNvOqyiXb+%+ByAEU$Ct zWy$(ch`sRI3byHUUT+2U5;uCDe7{(HXncMI-Tf1;IP540H$;EudhKm=nK{=8A{( zDsRHsKy9f#(Q5|n3ieD7tA8QV7?C$n0d^-oYTbEhbtSj zY5$5Vq?LB?koG|~c4<>=+A9|idACDaCUH%9(lZSXoHCNi$-m=<9&`ylx>y}TbATuU z0^n)$^lvC8KosUvL26Jwc##{R5FG5ThEFJ8QsWo3y8y|Ocr>vD<7$k@B{RhAdNW?) 
zU4D^rv&GxLt2d77jmU5P@ONGJb_oM~H~?12Y|^dGhZDJ8iVrrJWKgpSQ=N+$rpjIO zNt6!Ox?LQpwJmjm_@v-Uec)3sM-+wbTBo*1j>DNH90Y7!3Yyxr6;HRJ2`P28nvV4V zc3k3f)aPJ}K@>jM!jW4J1o~FY{yx@|Px`&SSZ}r0>iXAFAn(zFKflgKBo|}~XvTjp z?>Ui{{{f^!<*%rM9)6IJ{Wsc}pYBa~bKt#$xNgQZ>D^qAk*8Fp4EOuE$=i2O4O$Sy z;zs>#%;-dh9C)faa~Q#z$eRj9qz3wYD=-g!!(X7>C9pH&Jb0!Y>ti0}wa^X%H()`7 z3$p90yQNFn!xXeADPmewGBcmqTw%r+IRTO}*@Is~-hYtELrlQZM%4y(m^pRnQwko~ zr++QCsd0|YSCM2<7mNQX=qh}T%Av*j1cASY4(#a6=uKW}>;fz*j?R$3lIsjxaK2n0 zS{YW@S$X*dJLCfhq3=WA`K?n9CFLxN0yP(g`QvV&E70#(@WP#%9bJ#PY<6*Af*QmJ zfQdboBh0;z$=yt-Da?;ay_ZZKC^rnhqk~?*flFKizhn2l{gb=Pv*l7WhtkbP`cYW& z25R)97@Wq10PuFc1#mfh3t1O{`x~XLza4=mF^*Sk<4K&wO6HXeoe1p7fi)1)fKW2l zBvi)~9RhC2nV~PRsU-FV;KsmNfGtuium(Q@E#Wm_qyU9<6NDE{qf9s`6}T9R6i;(w zmaPTWh~!fLeBbs}`}N3-_<36CY@AxSi<6nlwb`fI{p(xm025mt$hA!0BU{eegEXFl zhdelH^qPcOYljR}(tuUBbx;JcnmpMrWx0&T_`P&;qvP^ZDD}xY=C~Y2PA&$aCp~)L z(#%cqdu-Tea+%41tx0Y=BywG-SApE@Tb9LMuPu!n`zm%u0p624bz1fnA2s-=_ztjz zJEin=CgK7B$v7C}URR**2ocl*HhzEusZ#PATp}L~H(Yb9rMnq(up@GEgdT^v-S6Fm zKX-VqMUdQI!zh+N`Vs!Veo}akGRMV}BEKD(R5HN#^fVwC;(j5sqMZcg0$2U_BhA3B zo%IwEsef#<`YtwE-%bm>AO)mk7+Qz}xSLBsGCMo$$uXza;7(Mg6U8ilM4a*S`Ur?J z0+CDJB&rpFy$}0@A+K(ptB{Bk?!{$nTqm%T%22>&R4$khw7cqT+qt zgWj9qO3fEvS4?3M8BNl344IV4;2<$?1ea=^F~bsD27p$=ry`!I`qU^bT(K0PQv;3R zTF3vBG11ILw1 zhM@@PPu#>ojw1s&GBsN&zj1Sj^GtndDQIvw{E-HmVF;*o0})J!xcxrJ`YZrr$NlvE{`+L1O-D$*IqVd89;>0#YNa6G^yGD zaG}XxddDNOsru}$z8D_Hn~&x`)~qce>T)a>Aw1gcUz;aeznb{u6xzja#=E3uz$2`ytPgZ8^fNy2rO7pS_d&%L0R|b_JD?l2hJN-Kb%dLW8a@sR!x2Uim;y}{ zuz@=ayvi!Yx0ykR+xP-8#>Ck@)Wm)&99w|tL-cgf`-q(FTZvbJez$80zB>e#qhEe2 zks(`8Y68eiCnjdA=QkN93;};(c^-y$eD$Mx6&~D+5a|aQi)ApW>~6!Sa_sFM4v)0P-Xnolwu*yVdX{Cx(M z3krM2anyT2R}1^YTd<)@|#*Y1$KpMTe51`h<=>=5*pSQ8LT53C1{Dk6pf5j0KGR2=la44jy#>fQ&k8+s*KjV2{$Yy7sU}ezx z2D>Ne}N8vfg_{F9rY&nOdl-IAbQ6L&ARBFK;v1+!K3*I9;tO2 zmlRKsoDKk>xsgI=;AZ5D-0hbkB|CO{U)77)k!@vfYz}45^An1Srj~@nGbE}gh>+$)s<1If`!9E5n#O9>AOp9r zEyrdt2PF^WD&%TlO`ZQRlQ%ID9QX=z|G-3+|L>U-V@T9qS#y`sSUkd*#&CIpqxl+n zLK?epn|H5wOM%xx*l=U#4f=vlWMdkdg7_-B`7#G4#w<*ZY!2*8HLOz$Y(<8ZYF9S7LjS!Xz#o_cr- z7It%!ID*a=5;ej0Os(31ImYw^iQ~>=51x1`9fFnR9QZ#(t;GNZ#XrH@KVou<$tRh7 zipg79c|_9yc3!bUZ%e6Dg%hJu466PYk@{akBHA1^P$AWJH*5ZwiB#YhnEOX2RDl}< z_33N$M5a-OVfi?)|KLiR<+~ZHn0h0r(femK%CD5g>bYZY#{kR{LPXYC!q*ce=&%5e zMWf@J{v%hOkySR2Op*m?4u5SuVJmT$Ku%fBs~v`uIOQtRU?a4F0i0>HeSMRl_ATjnjXT(B^>5;`{tk>5V%R zIrsl8U8z53PWnP?d)LtC{WLazw&xN#<6IDO3dv=65WI(Y_$5%;`v9PnITm=B+R}QJyMK?Em;&b?)7B IZ<(9@U#fK`TL1t6 literal 0 HcmV?d00001 diff --git a/gimp-plugins/ideepcolor/data/lab_gamut.py b/gimp-plugins/ideepcolor/data/lab_gamut.py new file mode 100644 index 0000000..9786277 --- /dev/null +++ b/gimp-plugins/ideepcolor/data/lab_gamut.py @@ -0,0 +1,90 @@ +import numpy as np +from skimage import color +import warnings + + +def qcolor2lab_1d(qc): + # take 1d numpy array and do color conversion + c = np.array([qc.red(), qc.green(), qc.blue()], np.uint8) + return rgb2lab_1d(c) + + +def rgb2lab_1d(in_rgb): + # take 1d numpy array and do color conversion + # print('in_rgb', in_rgb) + return color.rgb2lab(in_rgb[np.newaxis, np.newaxis, :]).flatten() + + +def lab2rgb_1d(in_lab, clip=True, dtype='uint8'): + warnings.filterwarnings("ignore") + tmp_rgb = color.lab2rgb(in_lab[np.newaxis, np.newaxis, :]).flatten() + if clip: + tmp_rgb = np.clip(tmp_rgb, 0, 1) + if dtype == 'uint8': + tmp_rgb = np.round(tmp_rgb * 255).astype('uint8') + return tmp_rgb + + +def snap_ab(input_l, input_rgb, return_type='rgb'): + ''' given an input lightness and rgb, snap the color into a region where l,a,b is in-gamut + ''' + T = 20 + warnings.filterwarnings("ignore") + 
input_lab = rgb2lab_1d(np.array(input_rgb)) # convert input to lab + conv_lab = input_lab.copy() # keep ab from input + for t in range(T): + conv_lab[0] = input_l # overwrite input l with input ab + old_lab = conv_lab + tmp_rgb = color.lab2rgb(conv_lab[np.newaxis, np.newaxis, :]).flatten() + tmp_rgb = np.clip(tmp_rgb, 0, 1) + conv_lab = color.rgb2lab(tmp_rgb[np.newaxis, np.newaxis, :]).flatten() + dif_lab = np.sum(np.abs(conv_lab - old_lab)) + if dif_lab < 1: + break + # print(conv_lab) + + conv_rgb_ingamut = lab2rgb_1d(conv_lab, clip=True, dtype='uint8') + if (return_type == 'rgb'): + return conv_rgb_ingamut + + elif(return_type == 'lab'): + conv_lab_ingamut = rgb2lab_1d(conv_rgb_ingamut) + return conv_lab_ingamut + + +class abGrid(): + def __init__(self, gamut_size=110, D=1): + self.D = D + self.vals_b, self.vals_a = np.meshgrid(np.arange(-gamut_size, gamut_size + D, D), + np.arange(-gamut_size, gamut_size + D, D)) + self.pts_full_grid = np.concatenate((self.vals_a[:, :, np.newaxis], self.vals_b[:, :, np.newaxis]), axis=2) + self.A = self.pts_full_grid.shape[0] + self.B = self.pts_full_grid.shape[1] + self.AB = self.A * self.B + self.gamut_size = gamut_size + + def update_gamut(self, l_in): + warnings.filterwarnings("ignore") + thresh = 1.0 + pts_lab = np.concatenate((l_in + np.zeros((self.A, self.B, 1)), self.pts_full_grid), axis=2) + self.pts_rgb = (255 * np.clip(color.lab2rgb(pts_lab), 0, 1)).astype('uint8') + pts_lab_back = color.rgb2lab(self.pts_rgb) + pts_lab_diff = np.linalg.norm(pts_lab - pts_lab_back, axis=2) + + self.mask = pts_lab_diff < thresh + mask3 = np.tile(self.mask[..., np.newaxis], [1, 1, 3]) + self.masked_rgb = self.pts_rgb.copy() + self.masked_rgb[np.invert(mask3)] = 255 + return self.masked_rgb, self.mask + + def ab2xy(self, a, b): + y = self.gamut_size + a + x = self.gamut_size + b + # print('ab2xy (%d, %d) -> (%d, %d)' % (a, b, x, y)) + return x, y + + def xy2ab(self, x, y): + a = y - self.gamut_size + b = x - self.gamut_size + # print('xy2ab (%d, %d) -> (%d, %d)' % (x, y, a, b)) + return a, b diff --git a/gimp-plugins/ideepcolor/models/__init__.py b/gimp-plugins/ideepcolor/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gimp-plugins/ideepcolor/models/__init__.pyc b/gimp-plugins/ideepcolor/models/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..613eafd814ac0177187f80861541d0d4325fb14e GIT binary patch literal 161 zcmZSn%*(|$@qSz~0~9a1^h1kNi;DHLi!w_x zvy1a{6Z7-~Dw8u3i*f^s^0QKtON#aLQp=O`bMlLf^fOaZQwzXs{oMSN)SP1d`1s7c c%#!$cy@JXT4xn*1K=IO?R6CHRB|yvo0C3wUG5`Po literal 0 HcmV?d00001 diff --git a/gimp-plugins/ideepcolor/models/pytorch/__init__.py b/gimp-plugins/ideepcolor/models/pytorch/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gimp-plugins/ideepcolor/models/pytorch/__init__.pyc b/gimp-plugins/ideepcolor/models/pytorch/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f51000990deef76979f2fe2efb905f2631d6d6c GIT binary patch literal 169 zcmZSn%*(|$@qSz~0~9aRSS9)AVI#-BkAqK$+%XeUD(8L~5^GPIMWO@r(#y(D@;^kgH)3bolBl%0R^vmT6t zt$lg_e)Z17FYYT>c~;Q$lI1cOy^F`c45F}*!Dg}UOM^DDYzzxApQCDRmYu7!%4s|$ zJooVUzYY=w_%kSk7jP=Wz8Im%W|o2+butv>X{SIPh+qaH%kbQW%XzX3WCH{}MIrPv zxh;b_ISO+!{}dZ}VGzJdib6pWak!N(g?=+39Kdpk<;XdhEpl29b1qL|zTde7g>x`* zl$A;+QRs0_PZ6__~6N~M!1^f;HGZ@>)4IGDt!eb#1& zj*4-B!h!x66BJfp;wUSXPNL8gV}iZ`GaTbk5~KF{fZ1WPN4mZP@X>dW=kEYKoWpn1 z<6?rI#8%~<8S9N&`+pKoJ4FhM{ZS|ANo-Z#nX%rewf_e^ecfp{Na0|Aya{?dE0s>7 
z(6d<*^bMHljZyoIgASjXK2cCK5F_?5);e&a4@rQ)k8%8Th7V?=GeY6W2`p4ng`Q-* zNi&>`^Z?V|AsL8IpZ$j~q9{{XjxBWO0Vg8%&i{NXd?Hpli5e1JB6}1qhY<}#H5>zDQbKfVF-~^3 z$?oHIvTJ?pTBPDgl_?6RL<{FS>9$16!`!m`Oxy~YdNTmY$B2I8KngLUUn`J-7}3cG zawbS?$hjDqh>`O#G8rQmVq_{tE=CBy!b@aNNSjzDdlFgB;Ppm{z1%hJ3fWWq!u0!( zCS2tfwl>Rc>bfJ%@EX`)2xM1zu4mPNMG| z%XglGaR}oMq5lm|LA0HQ6*x`T;f6TiZrD{@K%OeoZ!}wK>-XRI=g-faam;WqPQfsU zU$+{{+_dWG!)1ZJrmbXES6QC&gMi(#oxR!jSS8OY?UwSYVD!qS>9|f{nxbtx&|p6w zd2JQ2FRm+r-nD}I_IlIXTe2m_drkK#Se{x{4;}}jU>JMkS+3t|`U-tVeR9cPGdrKhRGQwXPKO1 za-PWrCKs8!&*T!5%S^5@xx(ZslMk5iZmC{pG7G|wCjQsp<{$gY^XGRxCvbNC=APxw zKiscxTi)J7ueqb@fj{r6XZ2>I=`GFUQ&278=Os7(d~2UCjoVth(b})_MwSof4_?P8 z8%3jF6ti>2X=4#jIoHqi;v!|2F}D&u5s<}wpvJCU@D<}sb{RIx zn0M$ek7uqM@3B8|VtgSmddvx$MhSScW`J;kZnNJ%}?DPOyj`c~Uns%!MV| z%utOdyxnEfC2M#)wOQW&>suiBd9tRN=ijVV`Q(?fC{->kX)f?tKC+}f$>AV}_ao){ zQCVAWx=PMBsb_U7IN3SJM`!gzCLb}8gY7bNuRZ(Z*yC@xItL=Bvg5YefqAEkzpHDE ze&t%PBkuC=GozwqudP1Kk+IwDiH(&tvgS5jW8 z_c5AAC)``9WmtyTH$MkbFovItr_}{W;;!uO8?IaBj!Cx9SK_fWWbeR1apGg0rBAs< K6-S4S(d>V^1p3$j literal 0 HcmV?d00001 diff --git a/gimp-plugins/monodepth.py b/gimp-plugins/monodepth.py index 015c21c..92110b5 100755 --- a/gimp-plugins/monodepth.py +++ b/gimp-plugins/monodepth.py @@ -1,107 +1,52 @@ -import os -baseLoc = os.path.dirname(os.path.realpath(__file__))+'/' +import os +baseLoc = os.path.dirname(os.path.realpath(__file__)) + '/' from gimpfu import * import sys -sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'monodepth2']) +sys.path.extend([baseLoc + 'gimpenv/lib/python2.7', baseLoc + 'gimpenv/lib/python2.7/site-packages', + baseLoc + 'gimpenv/lib/python2.7/site-packages/setuptools', baseLoc + 'MiDaS']) -import PIL.Image as pil -import networks -import torch -from torchvision import transforms -import os +from run import run_depth +from monodepth_net import MonoDepthNet +import MiDaS_utils as MiDaS_utils import numpy as np import cv2 -# import matplotlib as mpl -# import matplotlib.cm as cm def getMonoDepth(input_image): - if torch.cuda.is_available(): - device = torch.device("cuda") - else: - device = torch.device("cpu") - loc=baseLoc+'monodepth2/' - - model_path = os.path.join(loc+"models", 'mono+stereo_640x192') - encoder_path = os.path.join(model_path, "encoder.pth") - depth_decoder_path = os.path.join(model_path, "depth.pth") - - # LOADING PRETRAINED MODEL - encoder = networks.ResnetEncoder(18, False) - loaded_dict_enc = torch.load(encoder_path, map_location=device) - - # extract the height and width of image that this model was trained with - feed_height = loaded_dict_enc['height'] - feed_width = loaded_dict_enc['width'] - filtered_dict_enc = {k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()} - encoder.load_state_dict(filtered_dict_enc) - encoder.to(device) - encoder.eval() - - depth_decoder = networks.DepthDecoder(num_ch_enc=encoder.num_ch_enc, scales=range(4)) - - loaded_dict = torch.load(depth_decoder_path, map_location=device) - depth_decoder.load_state_dict(loaded_dict) - - depth_decoder.to(device) - depth_decoder.eval() - - with torch.no_grad(): - input_image = pil.fromarray(input_image) - # input_image = pil.open(image_path).convert('RGB') - original_width, original_height = input_image.size - input_image = input_image.resize((feed_width, feed_height), pil.LANCZOS) - input_image = transforms.ToTensor()(input_image).unsqueeze(0) - - # PREDICTION - input_image = input_image.to(device) - features = encoder(input_image) - outputs = 
depth_decoder(features) - - disp = outputs[("disp", 0)] - disp_resized = torch.nn.functional.interpolate( - disp, (original_height, original_width), mode="bilinear", align_corners=False) - - # Saving colormapped depth image - disp_resized_np = disp_resized.squeeze().cpu().numpy() - vmax = np.percentile(disp_resized_np, 95) - vmin = disp_resized_np.min() - disp_resized_np = vmin + (disp_resized_np - vmin) * (vmax - vmin) / (disp_resized_np.max() - vmin) - disp_resized_np = (255 * (disp_resized_np - vmin) / (vmax - vmin)).astype(np.uint8) - colormapped_im = cv2.applyColorMap(disp_resized_np, cv2.COLORMAP_HOT) - colormapped_im = cv2.cvtColor(colormapped_im, cv2.COLOR_BGR2RGB) - # normalizer = mpl.colors.Normalize(vmin=disp_resized_np.min(), vmax=vmax) - # mapper = cm.ScalarMappable(norm=normalizer, cmap='magma') - # colormapped_im = (mapper.to_rgba(disp_resized_np)[:, :, :3] * 255).astype(np.uint8) - return colormapped_im - -def channelData(layer):#convert gimp image to numpy - region=layer.get_pixel_rgn(0, 0, layer.width,layer.height) - pixChars=region[:,:] # Take whole layer - bpp=region.bpp + image = input_image / 255.0 + out = run_depth(image, baseLoc+'MiDaS/model.pt', MonoDepthNet, MiDaS_utils, target_w=640) + out = np.repeat(out[:, :, np.newaxis], 3, axis=2) + d1,d2 = input_image.shape[:2] + out = cv2.resize(out,(d2,d1)) + # cv2.imwrite("/Users/kritiksoman/PycharmProjects/new/out.png", out) + return out + + +def channelData(layer): # convert gimp image to numpy + region = layer.get_pixel_rgn(0, 0, layer.width, layer.height) + pixChars = region[:, :] # Take whole layer + bpp = region.bpp # return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp) - return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp) + return np.frombuffer(pixChars, dtype=np.uint8).reshape(layer.height, layer.width, bpp) + -def createResultLayer(image,name,result): - rlBytes=np.uint8(result).tobytes(); - rl=gimp.Layer(image,name,image.width,image.height,image.active_layer.type,100,NORMAL_MODE) - region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) - region[:,:]=rlBytes - image.add_layer(rl,0) +def createResultLayer(image, name, result): + rlBytes = np.uint8(result).tobytes(); + rl = gimp.Layer(image, name, image.width, image.height, image.active_layer.type, 100, NORMAL_MODE) + region = rl.get_pixel_rgn(0, 0, rl.width, rl.height, True) + region[:, :] = rlBytes + image.add_layer(rl, 0) gimp.displays_flush() -def MonoDepth(img, layer) : - gimp.progress_init("Generating disparity map for " + layer.name + "...") +def MonoDepth(img, layer): + gimp.progress_init("Generating disparity map for " + layer.name + "...") imgmat = channelData(layer) - cpy=getMonoDepth(imgmat) - - createResultLayer(img,'new_output',cpy) - + cpy = getMonoDepth(imgmat) + createResultLayer(img, 'new_output', cpy) - register( "MonoDepth", @@ -111,11 +56,11 @@ register( "Your", "2020", "MonoDepth...", - "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. - [ (PF_IMAGE, "image", "Input image", None), - (PF_DRAWABLE, "drawable", "Input drawable", None), - ], + "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. 
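+ # Registration is unchanged apart from formatting; the disparity map itself now comes from the
+ # bundled MiDaS model (MiDaS/model.pt) via run_depth() above, replacing the monodepth2 encoder/decoder.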
+ [(PF_IMAGE, "image", "Input image", None), + (PF_DRAWABLE, "drawable", "Input drawable", None), + ], [], MonoDepth, menu="/Layer/GIML-ML") -main() +main() \ No newline at end of file diff --git a/gimp-plugins/monodepth2/LICENSE.txt b/gimp-plugins/monodepth2/LICENSE.txt deleted file mode 100644 index 08db21d..0000000 --- a/gimp-plugins/monodepth2/LICENSE.txt +++ /dev/null @@ -1,181 +0,0 @@ -Copyright © Niantic, Inc. 2018. Patent Pending. - -All rights reserved. - - - -================================================================================ - - - -This Software is licensed under the terms of the following Monodepth2 license -which allows for non-commercial use only. For any other use of the software not -covered by the terms of this license, please contact partnerships@nianticlabs.com - - - -================================================================================ - - - -Monodepth v2 License - - - This Agreement is made by and between the Licensor and the Licensee as -defined and identified below. - - -1. Definitions. - - In this Agreement (“the Agreement”) the following words shall have the -following meanings: - - "Authors" shall mean C. Godard, O. Mac Aodha, M. Firman, G. Brostow - "Licensee" Shall mean the person or organization agreeing to use the -Software in accordance with these terms and conditions. - "Licensor" shall mean Niantic Inc., a company organized and existing under -the laws of Delaware, whose principal place of business is at 1 Ferry Building, -Suite 200, San Francisco, 94111. - "Software" shall mean the MonoDepth v2 Software uploaded by Licensor to the -GitHub repository at [URL] on [DATE] in source code or object code form and any -accompanying documentation as well as any modifications or additions uploaded -to the same GitHub repository by Licensor. - - -2. License. - - 2.1 The Licensor has all necessary rights to grant a license under: (i) -copyright and rights in the nature of copyright subsisting in the Software; and -(ii) certain patent rights resulting from a patent application filed by the -Licensor in the United States in connection with the Software. The Licensor -grants the Licensee for the duration of this Agreement, a free of charge, -non-sublicenseable, non-exclusive, non-transferable copyright and patent -license (in consequence of said patent application) to use the Software for -non-commercial purpose only, including teaching and research at educational -institutions and research at not-for-profit research institutions in accordance -with the provisions of this Agreement. Non-commercial use expressly excludes -any profit-making or commercial activities, including without limitation sale, -license, manufacture or development of commercial products, use in -commercially-sponsored research, use at a laboratory or other facility owned or -controlled (whether in whole or in part) by a commercial entity, provision of -consulting service, use for or on behalf of any commercial entity, and use in -research where a commercial party obtains rights to research results or any -other benefit. Any use of the Software for any purpose other than -non-commercial research shall automatically terminate this License. - - - 2.2 The Licensee is permitted to make modifications to the Software -provided that any distribution of such modifications is in accordance with -Clause 3. 
- - 2.3 Except as expressly permitted by this Agreement and save to the -extent and in the circumstances expressly required to be permitted by law, the -Licensee is not permitted to rent, lease, sell, offer to sell, or loan the -Software or its associated documentation. - - -3. Redistribution and modifications - - 3.1 The Licensee may reproduce and distribute copies of the Software, with -or without modifications, in source format only and only to this same GitHub -repository , and provided that any and every distribution is accompanied by an -unmodified copy of this License and that the following copyright notice is -always displayed in an obvious manner: Copyright © Niantic, Inc. 2018. All -rights reserved. - - - 3.2 In the case where the Software has been modified, any distribution must -include prominent notices indicating which files have been changed. - - 3.3 The Licensee shall cause any work that it distributes or publishes, -that in whole or in part contains or is derived from the Software or any part -thereof (“Work based on the Software”), to be licensed as a whole at no charge -to all third parties entitled to a license to the Software under the terms of -this License and on the same terms provided in this License. - - -4. Duration. - - This Agreement is effective until the Licensee terminates it by destroying -the Software, any Work based on the Software, and its documentation together -with all copies. It will also terminate automatically if the Licensee fails to -abide by its terms. Upon automatic termination the Licensee agrees to destroy -all copies of the Software, Work based on the Software, and its documentation. - - -5. Disclaimer of Warranties. - - The Software is provided as is. To the maximum extent permitted by law, -Licensor provides no warranties or conditions of any kind, either express or -implied, including without limitation, any warranties or condition of title, -non-infringement or fitness for a particular purpose. - - -6. LIMITATION OF LIABILITY. - - IN NO EVENT SHALL THE LICENSOR AND/OR AUTHORS BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING -BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -7. Indemnity. - - The Licensee shall indemnify the Licensor and/or Authors against all third -party claims that may be asserted against or suffered by the Licensor and/or -Authors and which relate to use of the Software by the Licensee. - - -8. Intellectual Property. - - 8.1 As between the Licensee and Licensor, copyright and all other -intellectual property rights subsisting in or in connection with the Software -and supporting information shall remain at all times the property of the -Licensor. The Licensee shall acquire no rights in any such material except as -expressly provided in this Agreement. - - 8.2 No permission is granted to use the trademarks or product names of the -Licensor except as required for reasonable and customary use in describing the -origin of the Software and for the purposes of abiding by the terms of Clause -3.1. 
- - 8.3 The Licensee shall promptly notify the Licensor of any improvement or -new use of the Software (“Improvements”) in sufficient detail for Licensor to -evaluate the Improvements. The Licensee hereby grants the Licensor and its -affiliates a non-exclusive, fully paid-up, royalty-free, irrevocable and -perpetual license to all Improvements for non-commercial academic research and -teaching purposes upon creation of such improvements. - - 8.4 The Licensee grants an exclusive first option to the Licensor to be -exercised by the Licensor within three (3) years of the date of notification of -an Improvement under Clause 8.3 to use any the Improvement for commercial -purposes on terms to be negotiated and agreed by Licensee and Licensor in good -faith within a period of six (6) months from the date of exercise of the said -option (including without limitation any royalty share in net income from such -commercialization payable to the Licensee, as the case may be). - - -9. Acknowledgements. - - The Licensee shall acknowledge the Authors and use of the Software in the -publication of any work that uses, or results that are achieved through, the -use of the Software. The following citation shall be included in the -acknowledgement: “Digging Into Self-Supervised Monocular Depth Estimation, -by C. Godard, O. Mac Aodha, M. Firman, G. Brostow, arXiv:1806.01260”. - - -10. Governing Law. - - This Agreement shall be governed by, construed and interpreted in -accordance with English law and the parties submit to the exclusive -jurisdiction of the English courts. - - -11. Termination. - - Upon termination of this Agreement, the licenses granted hereunder will -terminate and Sections 5, 6, 7, 8, 9, 10 and 11 shall survive any termination -of this Agreement. diff --git a/gimp-plugins/monodepth2/evaluate_depth.py b/gimp-plugins/monodepth2/evaluate_depth.py deleted file mode 100755 index 7746ef9..0000000 --- a/gimp-plugins/monodepth2/evaluate_depth.py +++ /dev/null @@ -1,230 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import os -import cv2 -import numpy as np - -import torch -from torch.utils.data import DataLoader - -from layers import disp_to_depth -from utils import readlines -from options import MonodepthOptions -import datasets -import networks - -cv2.setNumThreads(0) # This speeds up evaluation 5x on our unix systems (OpenCV 3.3.1) - - -splits_dir = os.path.join(os.path.dirname(__file__), "splits") - -# Models which were trained with stereo supervision were trained with a nominal -# baseline of 0.1 units. The KITTI rig has a baseline of 54cm. Therefore, -# to convert our stereo predictions to real-world scale we multiply our depths by 5.4. 
-STEREO_SCALE_FACTOR = 5.4 - - -def compute_errors(gt, pred): - """Computation of error metrics between predicted and ground truth depths - """ - thresh = np.maximum((gt / pred), (pred / gt)) - a1 = (thresh < 1.25 ).mean() - a2 = (thresh < 1.25 ** 2).mean() - a3 = (thresh < 1.25 ** 3).mean() - - rmse = (gt - pred) ** 2 - rmse = np.sqrt(rmse.mean()) - - rmse_log = (np.log(gt) - np.log(pred)) ** 2 - rmse_log = np.sqrt(rmse_log.mean()) - - abs_rel = np.mean(np.abs(gt - pred) / gt) - - sq_rel = np.mean(((gt - pred) ** 2) / gt) - - return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 - - -def batch_post_process_disparity(l_disp, r_disp): - """Apply the disparity post-processing method as introduced in Monodepthv1 - """ - _, h, w = l_disp.shape - m_disp = 0.5 * (l_disp + r_disp) - l, _ = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h)) - l_mask = (1.0 - np.clip(20 * (l - 0.05), 0, 1))[None, ...] - r_mask = l_mask[:, :, ::-1] - return r_mask * l_disp + l_mask * r_disp + (1.0 - l_mask - r_mask) * m_disp - - -def evaluate(opt): - """Evaluates a pretrained model using a specified test set - """ - MIN_DEPTH = 1e-3 - MAX_DEPTH = 80 - - assert sum((opt.eval_mono, opt.eval_stereo)) == 1, \ - "Please choose mono or stereo evaluation by setting either --eval_mono or --eval_stereo" - - if opt.ext_disp_to_eval is None: - - opt.load_weights_folder = os.path.expanduser(opt.load_weights_folder) - - assert os.path.isdir(opt.load_weights_folder), \ - "Cannot find a folder at {}".format(opt.load_weights_folder) - - print("-> Loading weights from {}".format(opt.load_weights_folder)) - - filenames = readlines(os.path.join(splits_dir, opt.eval_split, "test_files.txt")) - encoder_path = os.path.join(opt.load_weights_folder, "encoder.pth") - decoder_path = os.path.join(opt.load_weights_folder, "depth.pth") - - encoder_dict = torch.load(encoder_path) - - dataset = datasets.KITTIRAWDataset(opt.data_path, filenames, - encoder_dict['height'], encoder_dict['width'], - [0], 4, is_train=False) - dataloader = DataLoader(dataset, 16, shuffle=False, num_workers=opt.num_workers, - pin_memory=True, drop_last=False) - - encoder = networks.ResnetEncoder(opt.num_layers, False) - depth_decoder = networks.DepthDecoder(encoder.num_ch_enc) - - model_dict = encoder.state_dict() - encoder.load_state_dict({k: v for k, v in encoder_dict.items() if k in model_dict}) - depth_decoder.load_state_dict(torch.load(decoder_path)) - - encoder.cuda() - encoder.eval() - depth_decoder.cuda() - depth_decoder.eval() - - pred_disps = [] - - print("-> Computing predictions with size {}x{}".format( - encoder_dict['width'], encoder_dict['height'])) - - with torch.no_grad(): - for data in dataloader: - input_color = data[("color", 0, 0)].cuda() - - if opt.post_process: - # Post-processed results require each image to have two forward passes - input_color = torch.cat((input_color, torch.flip(input_color, [3])), 0) - - output = depth_decoder(encoder(input_color)) - - pred_disp, _ = disp_to_depth(output[("disp", 0)], opt.min_depth, opt.max_depth) - pred_disp = pred_disp.cpu()[:, 0].numpy() - - if opt.post_process: - N = pred_disp.shape[0] // 2 - pred_disp = batch_post_process_disparity(pred_disp[:N], pred_disp[N:, :, ::-1]) - - pred_disps.append(pred_disp) - - pred_disps = np.concatenate(pred_disps) - - else: - # Load predictions from file - print("-> Loading predictions from {}".format(opt.ext_disp_to_eval)) - pred_disps = np.load(opt.ext_disp_to_eval) - - if opt.eval_eigen_to_benchmark: - eigen_to_benchmark_ids = np.load( - os.path.join(splits_dir, 
"benchmark", "eigen_to_benchmark_ids.npy")) - - pred_disps = pred_disps[eigen_to_benchmark_ids] - - if opt.save_pred_disps: - output_path = os.path.join( - opt.load_weights_folder, "disps_{}_split.npy".format(opt.eval_split)) - print("-> Saving predicted disparities to ", output_path) - np.save(output_path, pred_disps) - - if opt.no_eval: - print("-> Evaluation disabled. Done.") - quit() - - elif opt.eval_split == 'benchmark': - save_dir = os.path.join(opt.load_weights_folder, "benchmark_predictions") - print("-> Saving out benchmark predictions to {}".format(save_dir)) - if not os.path.exists(save_dir): - os.makedirs(save_dir) - - for idx in range(len(pred_disps)): - disp_resized = cv2.resize(pred_disps[idx], (1216, 352)) - depth = STEREO_SCALE_FACTOR / disp_resized - depth = np.clip(depth, 0, 80) - depth = np.uint16(depth * 256) - save_path = os.path.join(save_dir, "{:010d}.png".format(idx)) - cv2.imwrite(save_path, depth) - - print("-> No ground truth is available for the KITTI benchmark, so not evaluating. Done.") - quit() - - gt_path = os.path.join(splits_dir, opt.eval_split, "gt_depths.npz") - gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1')["data"] - - print("-> Evaluating") - - if opt.eval_stereo: - print(" Stereo evaluation - " - "disabling median scaling, scaling by {}".format(STEREO_SCALE_FACTOR)) - opt.disable_median_scaling = True - opt.pred_depth_scale_factor = STEREO_SCALE_FACTOR - else: - print(" Mono evaluation - using median scaling") - - errors = [] - ratios = [] - - for i in range(pred_disps.shape[0]): - - gt_depth = gt_depths[i] - gt_height, gt_width = gt_depth.shape[:2] - - pred_disp = pred_disps[i] - pred_disp = cv2.resize(pred_disp, (gt_width, gt_height)) - pred_depth = 1 / pred_disp - - if opt.eval_split == "eigen": - mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH) - - crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height, - 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32) - crop_mask = np.zeros(mask.shape) - crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1 - mask = np.logical_and(mask, crop_mask) - - else: - mask = gt_depth > 0 - - pred_depth = pred_depth[mask] - gt_depth = gt_depth[mask] - - pred_depth *= opt.pred_depth_scale_factor - if not opt.disable_median_scaling: - ratio = np.median(gt_depth) / np.median(pred_depth) - ratios.append(ratio) - pred_depth *= ratio - - pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH - pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH - - errors.append(compute_errors(gt_depth, pred_depth)) - - if not opt.disable_median_scaling: - ratios = np.array(ratios) - med = np.median(ratios) - print(" Scaling ratios | med: {:0.3f} | std: {:0.3f}".format(med, np.std(ratios / med))) - - mean_errors = np.array(errors).mean(0) - - print("\n " + ("{:>8} | " * 7).format("abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3")) - print(("&{: 8.3f} " * 7).format(*mean_errors.tolist()) + "\\\\") - print("\n-> Done!") - - -if __name__ == "__main__": - options = MonodepthOptions() - evaluate(options.parse()) diff --git a/gimp-plugins/monodepth2/evaluate_pose.py b/gimp-plugins/monodepth2/evaluate_pose.py deleted file mode 100755 index 4b852a0..0000000 --- a/gimp-plugins/monodepth2/evaluate_pose.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. 
- -from __future__ import absolute_import, division, print_function - -import os -import numpy as np - -import torch -from torch.utils.data import DataLoader - -from layers import transformation_from_parameters -from utils import readlines -from options import MonodepthOptions -from datasets import KITTIOdomDataset -import networks - - -# from https://github.com/tinghuiz/SfMLearner -def dump_xyz(source_to_target_transformations): - xyzs = [] - cam_to_world = np.eye(4) - xyzs.append(cam_to_world[:3, 3]) - for source_to_target_transformation in source_to_target_transformations: - cam_to_world = np.dot(cam_to_world, source_to_target_transformation) - xyzs.append(cam_to_world[:3, 3]) - return xyzs - - -# from https://github.com/tinghuiz/SfMLearner -def compute_ate(gtruth_xyz, pred_xyz_o): - - # Make sure that the first matched frames align (no need for rotational alignment as - # all the predicted/ground-truth snippets have been converted to use the same coordinate - # system with the first frame of the snippet being the origin). - offset = gtruth_xyz[0] - pred_xyz_o[0] - pred_xyz = pred_xyz_o + offset[None, :] - - # Optimize the scaling factor - scale = np.sum(gtruth_xyz * pred_xyz) / np.sum(pred_xyz ** 2) - alignment_error = pred_xyz * scale - gtruth_xyz - rmse = np.sqrt(np.sum(alignment_error ** 2)) / gtruth_xyz.shape[0] - return rmse - - -def evaluate(opt): - """Evaluate odometry on the KITTI dataset - """ - assert os.path.isdir(opt.load_weights_folder), \ - "Cannot find a folder at {}".format(opt.load_weights_folder) - - assert opt.eval_split == "odom_9" or opt.eval_split == "odom_10", \ - "eval_split should be either odom_9 or odom_10" - - sequence_id = int(opt.eval_split.split("_")[1]) - - filenames = readlines( - os.path.join(os.path.dirname(__file__), "splits", "odom", - "test_files_{:02d}.txt".format(sequence_id))) - - dataset = KITTIOdomDataset(opt.data_path, filenames, opt.height, opt.width, - [0, 1], 4, is_train=False) - dataloader = DataLoader(dataset, opt.batch_size, shuffle=False, - num_workers=opt.num_workers, pin_memory=True, drop_last=False) - - pose_encoder_path = os.path.join(opt.load_weights_folder, "pose_encoder.pth") - pose_decoder_path = os.path.join(opt.load_weights_folder, "pose.pth") - - pose_encoder = networks.ResnetEncoder(opt.num_layers, False, 2) - pose_encoder.load_state_dict(torch.load(pose_encoder_path)) - - pose_decoder = networks.PoseDecoder(pose_encoder.num_ch_enc, 1, 2) - pose_decoder.load_state_dict(torch.load(pose_decoder_path)) - - pose_encoder.cuda() - pose_encoder.eval() - pose_decoder.cuda() - pose_decoder.eval() - - pred_poses = [] - - print("-> Computing pose predictions") - - opt.frame_ids = [0, 1] # pose network only takes two frames as input - - with torch.no_grad(): - for inputs in dataloader: - for key, ipt in inputs.items(): - inputs[key] = ipt.cuda() - - all_color_aug = torch.cat([inputs[("color_aug", i, 0)] for i in opt.frame_ids], 1) - - features = [pose_encoder(all_color_aug)] - axisangle, translation = pose_decoder(features) - - pred_poses.append( - transformation_from_parameters(axisangle[:, 0], translation[:, 0]).cpu().numpy()) - - pred_poses = np.concatenate(pred_poses) - - gt_poses_path = os.path.join(opt.data_path, "poses", "{:02d}.txt".format(sequence_id)) - gt_global_poses = np.loadtxt(gt_poses_path).reshape(-1, 3, 4) - gt_global_poses = np.concatenate( - (gt_global_poses, np.zeros((gt_global_poses.shape[0], 1, 4))), 1) - gt_global_poses[:, 3, 3] = 1 - gt_xyzs = gt_global_poses[:, :3, 3] - - gt_local_poses = [] - for i in 
range(1, len(gt_global_poses)): - gt_local_poses.append( - np.linalg.inv(np.dot(np.linalg.inv(gt_global_poses[i - 1]), gt_global_poses[i]))) - - ates = [] - num_frames = gt_xyzs.shape[0] - track_length = 5 - for i in range(0, num_frames - 1): - local_xyzs = np.array(dump_xyz(pred_poses[i:i + track_length - 1])) - gt_local_xyzs = np.array(dump_xyz(gt_local_poses[i:i + track_length - 1])) - - ates.append(compute_ate(gt_local_xyzs, local_xyzs)) - - print("\n Trajectory error: {:0.3f}, std: {:0.3f}\n".format(np.mean(ates), np.std(ates))) - - save_path = os.path.join(opt.load_weights_folder, "poses.npy") - np.save(save_path, pred_poses) - print("-> Predictions saved to", save_path) - - -if __name__ == "__main__": - options = MonodepthOptions() - evaluate(options.parse()) diff --git a/gimp-plugins/monodepth2/export_gt_depth.py b/gimp-plugins/monodepth2/export_gt_depth.py deleted file mode 100755 index 4263b74..0000000 --- a/gimp-plugins/monodepth2/export_gt_depth.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. - -from __future__ import absolute_import, division, print_function - -import os - -import argparse -import numpy as np -import PIL.Image as pil - -from utils import readlines -from kitti_utils import generate_depth_map - - -def export_gt_depths_kitti(): - - parser = argparse.ArgumentParser(description='export_gt_depth') - - parser.add_argument('--data_path', - type=str, - help='path to the root of the KITTI data', - required=True) - parser.add_argument('--split', - type=str, - help='which split to export gt from', - required=True, - choices=["eigen", "eigen_benchmark"]) - opt = parser.parse_args() - - split_folder = os.path.join(os.path.dirname(__file__), "splits", opt.split) - lines = readlines(os.path.join(split_folder, "test_files.txt")) - - print("Exporting ground truth depths for {}".format(opt.split)) - - gt_depths = [] - for line in lines: - - folder, frame_id, _ = line.split() - frame_id = int(frame_id) - - if opt.split == "eigen": - calib_dir = os.path.join(opt.data_path, folder.split("/")[0]) - velo_filename = os.path.join(opt.data_path, folder, - "velodyne_points/data", "{:010d}.bin".format(frame_id)) - gt_depth = generate_depth_map(calib_dir, velo_filename, 2, True) - elif opt.split == "eigen_benchmark": - gt_depth_path = os.path.join(opt.data_path, folder, "proj_depth", - "groundtruth", "image_02", "{:010d}.png".format(frame_id)) - gt_depth = np.array(pil.open(gt_depth_path)).astype(np.float32) / 256 - - gt_depths.append(gt_depth.astype(np.float32)) - - output_path = os.path.join(split_folder, "gt_depths.npz") - - print("Saving to {}".format(opt.split)) - - np.savez_compressed(output_path, data=np.array(gt_depths)) - - -if __name__ == "__main__": - export_gt_depths_kitti() diff --git a/gimp-plugins/monodepth2/layers.py b/gimp-plugins/monodepth2/layers.py deleted file mode 100755 index 070cadb..0000000 --- a/gimp-plugins/monodepth2/layers.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. 
- -from __future__ import absolute_import, division, print_function - -import numpy as np - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -def disp_to_depth(disp, min_depth, max_depth): - """Convert network's sigmoid output into depth prediction - The formula for this conversion is given in the 'additional considerations' - section of the paper. - """ - min_disp = 1 / max_depth - max_disp = 1 / min_depth - scaled_disp = min_disp + (max_disp - min_disp) * disp - depth = 1 / scaled_disp - return scaled_disp, depth - - -def transformation_from_parameters(axisangle, translation, invert=False): - """Convert the network's (axisangle, translation) output into a 4x4 matrix - """ - R = rot_from_axisangle(axisangle) - t = translation.clone() - - if invert: - R = R.transpose(1, 2) - t *= -1 - - T = get_translation_matrix(t) - - if invert: - M = torch.matmul(R, T) - else: - M = torch.matmul(T, R) - - return M - - -def get_translation_matrix(translation_vector): - """Convert a translation vector into a 4x4 transformation matrix - """ - T = torch.zeros(translation_vector.shape[0], 4, 4).to(device=translation_vector.device) - - t = translation_vector.contiguous().view(-1, 3, 1) - - T[:, 0, 0] = 1 - T[:, 1, 1] = 1 - T[:, 2, 2] = 1 - T[:, 3, 3] = 1 - T[:, :3, 3, None] = t - - return T - - -def rot_from_axisangle(vec): - """Convert an axisangle rotation into a 4x4 transformation matrix - (adapted from https://github.com/Wallacoloo/printipi) - Input 'vec' has to be Bx1x3 - """ - angle = torch.norm(vec, 2, 2, True) - axis = vec / (angle + 1e-7) - - ca = torch.cos(angle) - sa = torch.sin(angle) - C = 1 - ca - - x = axis[..., 0].unsqueeze(1) - y = axis[..., 1].unsqueeze(1) - z = axis[..., 2].unsqueeze(1) - - xs = x * sa - ys = y * sa - zs = z * sa - xC = x * C - yC = y * C - zC = z * C - xyC = x * yC - yzC = y * zC - zxC = z * xC - - rot = torch.zeros((vec.shape[0], 4, 4)).to(device=vec.device) - - rot[:, 0, 0] = torch.squeeze(x * xC + ca) - rot[:, 0, 1] = torch.squeeze(xyC - zs) - rot[:, 0, 2] = torch.squeeze(zxC + ys) - rot[:, 1, 0] = torch.squeeze(xyC + zs) - rot[:, 1, 1] = torch.squeeze(y * yC + ca) - rot[:, 1, 2] = torch.squeeze(yzC - xs) - rot[:, 2, 0] = torch.squeeze(zxC - ys) - rot[:, 2, 1] = torch.squeeze(yzC + xs) - rot[:, 2, 2] = torch.squeeze(z * zC + ca) - rot[:, 3, 3] = 1 - - return rot - - -class ConvBlock(nn.Module): - """Layer to perform a convolution followed by ELU - """ - def __init__(self, in_channels, out_channels): - super(ConvBlock, self).__init__() - - self.conv = Conv3x3(in_channels, out_channels) - self.nonlin = nn.ELU(inplace=True) - - def forward(self, x): - out = self.conv(x) - out = self.nonlin(out) - return out - - -class Conv3x3(nn.Module): - """Layer to pad and convolve input - """ - def __init__(self, in_channels, out_channels, use_refl=True): - super(Conv3x3, self).__init__() - - if use_refl: - self.pad = nn.ReflectionPad2d(1) - else: - self.pad = nn.ZeroPad2d(1) - self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) - - def forward(self, x): - out = self.pad(x) - out = self.conv(out) - return out - - -class BackprojectDepth(nn.Module): - """Layer to transform a depth image into a point cloud - """ - def __init__(self, batch_size, height, width): - super(BackprojectDepth, self).__init__() - - self.batch_size = batch_size - self.height = height - self.width = width - - meshgrid = np.meshgrid(range(self.width), range(self.height), indexing='xy') - self.id_coords = np.stack(meshgrid, axis=0).astype(np.float32) - self.id_coords = 
nn.Parameter(torch.from_numpy(self.id_coords), - requires_grad=False) - - self.ones = nn.Parameter(torch.ones(self.batch_size, 1, self.height * self.width), - requires_grad=False) - - self.pix_coords = torch.unsqueeze(torch.stack( - [self.id_coords[0].view(-1), self.id_coords[1].view(-1)], 0), 0) - self.pix_coords = self.pix_coords.repeat(batch_size, 1, 1) - self.pix_coords = nn.Parameter(torch.cat([self.pix_coords, self.ones], 1), - requires_grad=False) - - def forward(self, depth, inv_K): - cam_points = torch.matmul(inv_K[:, :3, :3], self.pix_coords) - cam_points = depth.view(self.batch_size, 1, -1) * cam_points - cam_points = torch.cat([cam_points, self.ones], 1) - - return cam_points - - -class Project3D(nn.Module): - """Layer which projects 3D points into a camera with intrinsics K and at position T - """ - def __init__(self, batch_size, height, width, eps=1e-7): - super(Project3D, self).__init__() - - self.batch_size = batch_size - self.height = height - self.width = width - self.eps = eps - - def forward(self, points, K, T): - P = torch.matmul(K, T)[:, :3, :] - - cam_points = torch.matmul(P, points) - - pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(1) + self.eps) - pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.width) - pix_coords = pix_coords.permute(0, 2, 3, 1) - pix_coords[..., 0] /= self.width - 1 - pix_coords[..., 1] /= self.height - 1 - pix_coords = (pix_coords - 0.5) * 2 - return pix_coords - - -def upsample(x): - """Upsample input tensor by a factor of 2 - """ - return F.interpolate(x, scale_factor=2, mode="nearest") - - -def get_smooth_loss(disp, img): - """Computes the smoothness loss for a disparity image - The color image is used for edge-aware smoothness - """ - grad_disp_x = torch.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:]) - grad_disp_y = torch.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :]) - - grad_img_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True) - grad_img_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True) - - grad_disp_x *= torch.exp(-grad_img_x) - grad_disp_y *= torch.exp(-grad_img_y) - - return grad_disp_x.mean() + grad_disp_y.mean() - - -class SSIM(nn.Module): - """Layer to compute the SSIM loss between a pair of images - """ - def __init__(self): - super(SSIM, self).__init__() - self.mu_x_pool = nn.AvgPool2d(3, 1) - self.mu_y_pool = nn.AvgPool2d(3, 1) - self.sig_x_pool = nn.AvgPool2d(3, 1) - self.sig_y_pool = nn.AvgPool2d(3, 1) - self.sig_xy_pool = nn.AvgPool2d(3, 1) - - self.refl = nn.ReflectionPad2d(1) - - self.C1 = 0.01 ** 2 - self.C2 = 0.03 ** 2 - - def forward(self, x, y): - x = self.refl(x) - y = self.refl(y) - - mu_x = self.mu_x_pool(x) - mu_y = self.mu_y_pool(y) - - sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2 - sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2 - sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y - - SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2) - SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2) - - return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1) - - -def compute_depth_errors(gt, pred): - """Computation of error metrics between predicted and ground truth depths - """ - thresh = torch.max((gt / pred), (pred / gt)) - a1 = (thresh < 1.25 ).float().mean() - a2 = (thresh < 1.25 ** 2).float().mean() - a3 = (thresh < 1.25 ** 3).float().mean() - - rmse = (gt - pred) ** 2 - rmse = torch.sqrt(rmse.mean()) - - rmse_log = (torch.log(gt) - torch.log(pred)) ** 2 - rmse_log = torch.sqrt(rmse_log.mean()) - - 
abs_rel = torch.mean(torch.abs(gt - pred) / gt) - - sq_rel = torch.mean((gt - pred) ** 2 / gt) - - return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 diff --git a/gimp-plugins/monodepth2/layers.pyc b/gimp-plugins/monodepth2/layers.pyc deleted file mode 100644 index 651ca83ef62306fbe7644a907d5a2c1afaaa6720..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10823 zcmcgyO>7)#TK=lrZo6&ANo;3qkHe^T8A#7A<1oo+#qMS&A zyPWQ-PJPvmZEXIM0STl%a75a@ES!**8`=XB7fx`26A}^bHDvp`@h&ScoL^VY&nP|quXmZ)?dM27><`ZMSwXx z{3z}h9lxK)QKAXW`S|;=6Z^Ya-Wzm7zWt(;SihzIxDFqwcCsHMlAQdC0WArv7ZKzWr=h)%#&T&No$(fV@ltk7;ldiYWw9V*5oMkzSHEH9waM56Tqx zRAsEF1dEuMM~Q8P-8gdTX?DjIx*?D<_Dcw^e`Mp_Uhjpi>-Vz9X*UZad%cY#d$r#k zw38G?S(>TkjqBa;5S2Ikhs6bUhjE%k=B9Fq9nK*z({tXOw}j`}13f}orH+7CeMjH^Y!TRE^QkY+Ss6R68!36QefW5Nc9nd8<>f zV`PF99f7cmao0TWiRP-Ei0lgzR-mv@_@To0&xa6R8jedY(SEr__rpBw#RbUaDr#8b zgQ+ff3zZsZ<3fe+E0v{6i<2Y6b9G|+zeEHy0=fX1I9gZ-F_0cW8-M{EH43IcHr|P= zgp)@OkQSr@EU2;O37X*8K}a+F|JV_gXLhUR)+YdtmY$+F^kNc{vd%b)_mfsE z(o49Hf{|^}V}pPY5Q;@bM{%B6#cT(#Q;=kfELi7@j0?CblJ+1QSYgk85 zom4$RV@)^hfYcjYL2xb<6;L$%G#*PxG1V_tHY)3$`+eTg%QEuGN}`u(Gf zVC$e)N3xh6^azj|%j)6@z#rlm84b`Hi&{mkW9SJYXL7=be9M{`bl$9;QL8E3cM!&cVKvg=Z)l78}v1k*byRhw&I(rbPvv_4dv z2eE(m;I)IBPH+(b+He2SpZwlG|Mk%u!i~)YSq@x;3)_MeDN?Sej7n8u^Gydd6h01U zh)XFXfso_Fd=Nuo6I!g^76?Zu9HjQqAdZjX;6=9FU~rXzz=-NkS>7P4t0}RDJTLjI z-npX!u~w*;4JFsM6dou%RCuJ~16TRby^h@LU`u6(?sc@Kp$?GbtPjid5o$4wgF7FD z<96M_BL&w?uX&4=i{3?VuCnamSE2rN>Q~vUe8Kxk{l@S0infv@cDI|g9=6b-{&+Zd zsXqIN#L(tJwFo5upCSU*sYrF7GQoWTyS<5;?@_lSkw7D*;PFH0xm4k)=i){xULGyzAeqxM=?E}sNM3IGp<`g9o1vapHhM>hv znMO`)-$pLzFzth~V+l_uw+5J7K&wEyZnK%BNzrVoGWX%;!A(&HB-y^sHBEJJm~ZfT zwxW`eW@$G`oj@bC+PJ$bZ5b+ds}rVa+_l953Sb+IGS27RzlL6W9Fp@eIO!#?MK0p@ z@vSZ7M6n%v2%TusmQfwT_CXjSN#lm6ye)PNZX+7pMW96mTTH%+Kx@Y;&Smo2WYERq zFpn%cFV{f2ZICa^6G@@;;Y6rUG@HFF8g!ZDX*Qct)@n8bZbI-Dg7LHz`xMrQNCF$? zCRKxKS+4~o#TFj@ad*WNKaR*GPs9==PhoOH18{6HJz|TOQF+W3VdRHtR5HbW4EP4V zIzAvB4Idq12N`GZ0c1+P$#a3?0otd)#l}p_WI?7FVwA~hGu&542aJdUoF>eMkU9pW zK=RoZeml74<_EVJa1Y#u-$6`Pc@Xb*oelay7~P0OBmFQEmh3=7bh*;T!9`^Sz_zGN z7L+5fd+hq_>~$8wz{brSJ&(`y(;O#B1nVp9N$`nmg>&H*T7PaS+_f>U1aBgFKC>rn ze~f36L%5&8+YH`e@N*2v=${EU@YE0SXaeMhg?d3<`fk{I*w3>)9REFf0!G~Ne~=hI zgZ@UWF)~*Cu=JuNy|7IVfj9~Meujt-yLS+c*@c)PDx;(Vx}C``jz1tghGGK@b}S(NAudbkKvNioL!WcHv|pK*N-^!A~F(yo5lL?k5Ljml`*Z`*BzZ z7NJ*4v^kXch^>B}!8aLv3&Hu|1-I|pcr2$xt)TAV2aWG#Zw>yE2A&1)tKJ6EQn!b{+V5R3xG(<^+1~7Dytisr=sz2vaLxY+~Y1FI= z^5F!WWiUY#JSZJNK&T0bE!yKi`xZ0#0KtR=p&_8-ft#xNu5&?zy{6E@p3l}2iob`) zlH!f`x_8yvsJJtI8yOQ|{8?xvWRBm#Gs5Be6-TfRgKysBCh70F>IqJfS_#w>V2isZ zkk-y4u-sXP!H+vhi>_;7yY+A0bC_#~2;72JmWTdhSdGkr-`yrH>%S|7CM;0f%4zL? zD1aUrg)MRqpDG?HHeW^LEXC&eNDbsAR6(~P^f(7`Ytq6kKa zzyZD`c))(FdJEOzEE;AbG0v`;HH@$_bg7exODrG}J3v6hY_=I1^LMwO<33=^3BV92 z^GqjDV}!O61F-#ixK5JuTaYZgc5op(|6O?^75yOK@Is;Up^>-H-5=wz^nn`hHMp(= z?{fg?Rs0s63G_(?@3X)Z^2;A`9wbWXcC#(=6rT~D%8TO2PJ+Mzyp*5HeQ}iiBIk!^7 z&1zf%^fTP0#NvLLrsr}f&hjW1@n=|&B?+1;=+>Vm{oT0K{kDV6=TmnH3Zigf0jLh0unefXz+kEz{%(^n? z$crFij)D(9B%EPWx_g@@KKmG%W#db{_Ajq||E<42dU{(}+y22zAOG+hKltwL_N!Z0 zfA|kydhvGqcmLv7-~Q8+_is0N6wV&|6~-iY?r_xvz@Oc3e~@KecygHP4Vni~v9se9ixNb9ue^O0G%^_qL#a2UUZ3g<$8TFl>acqe$5y(_~X zL~z|%M4asu9v%0;A%bn*8~IP2ri7$UfGFZbqw}shBM6zU)M-)36m~81$OKr+x)?-5 zRDc5J85KdFLTru75Q>0Vs1D`Gwc@qk+6i#;mTKz;odSK^=Cu zTGj)~K@(F-^*~Gb00>?`Oc!nHW@8Q2Hwc`V{n#|4{2Oax4Q@~HF@s-Y@aqhIgTWI9 zPZ|6sgWqB>x-M>Yp&YojJWYx1U=uM1UZA64iV#v5AIM11K`4JR-_hgrGx;DL^-XPk83al*1a)Bts6UWs znW>6MvVtfWan}eS=NjSDwNWFN>Ey7;9&^{dE9Z|+0fL(I#OPWYir@ni)nXF7AIFL? 
zA>7Z-z%Jx=S;C4F*zST9$R6i8L@qug$!V|-k-z*%3j&!Q`F5TS@G9~F%v*h_2|1g?Z~elq`W`2PN9;mC6*49dqr}=gdn9$dugfG| z^|*Tt`5rs0&HJd}gn{JWcM%)!o?@TkHbQ50%Uxx?K^E4%>Q#Kfv;d%4s@4}4R_d4P UUs%7mzO?SIuh(n!OY5KePxEY@vH$=8 diff --git a/gimp-plugins/monodepth2/networks/__init__.py b/gimp-plugins/monodepth2/networks/__init__.py deleted file mode 100755 index 2386870..0000000 --- a/gimp-plugins/monodepth2/networks/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .resnet_encoder import ResnetEncoder -from .depth_decoder import DepthDecoder -from .pose_decoder import PoseDecoder -from .pose_cnn import PoseCNN diff --git a/gimp-plugins/monodepth2/networks/__init__.pyc b/gimp-plugins/monodepth2/networks/__init__.pyc deleted file mode 100644 index 834ff2c107aa63dab8300b814d502232ea54be0e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 397 zcmY+AT}s3-6oqfv=~UDam(dpu_$;DWUyD@cr4NRo31cxorO7zB1$XCK+<+&|AY*<` zIA7r0+~hy2)35K3*Pad|!+DSGYly^Rq=^ipjuDGKBb|}SsmqhkNiWD0)D_7uNH58h z)RpiBy;MsC4GdC62`fmeyj9`J_D-rP;d9_xjp40UNAwKz*?IL3mIwIQG);8^6D}ar zoGDZaZ9!5k8AF9ksqOrPSZhS;AB|0im^9Pg+BS!&u#dz)!1A?MljmmB^V;pLb{pyW yb}-}BsCV1J;>uZcg_<`UJ?`D~?zs?y9fAMp*<$N#xxasC47epaIZ diff --git a/gimp-plugins/monodepth2/networks/depth_decoder.py b/gimp-plugins/monodepth2/networks/depth_decoder.py deleted file mode 100755 index 498ec38..0000000 --- a/gimp-plugins/monodepth2/networks/depth_decoder.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. - -from __future__ import absolute_import, division, print_function - -import numpy as np -import torch -import torch.nn as nn - -from collections import OrderedDict -from layers import * - - -class DepthDecoder(nn.Module): - def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True): - super(DepthDecoder, self).__init__() - - self.num_output_channels = num_output_channels - self.use_skips = use_skips - self.upsample_mode = 'nearest' - self.scales = scales - - self.num_ch_enc = num_ch_enc - self.num_ch_dec = np.array([16, 32, 64, 128, 256]) - - # decoder - self.convs = OrderedDict() - for i in range(4, -1, -1): - # upconv_0 - num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1] - num_ch_out = self.num_ch_dec[i] - self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out) - - # upconv_1 - num_ch_in = self.num_ch_dec[i] - if self.use_skips and i > 0: - num_ch_in += self.num_ch_enc[i - 1] - num_ch_out = self.num_ch_dec[i] - self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out) - - for s in self.scales: - self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels) - - self.decoder = nn.ModuleList(list(self.convs.values())) - self.sigmoid = nn.Sigmoid() - - def forward(self, input_features): - self.outputs = {} - - # decoder - x = input_features[-1] - for i in range(4, -1, -1): - x = self.convs[("upconv", i, 0)](x) - x = [upsample(x)] - if self.use_skips and i > 0: - x += [input_features[i - 1]] - x = torch.cat(x, 1) - x = self.convs[("upconv", i, 1)](x) - if i in self.scales: - self.outputs[("disp", i)] = self.sigmoid(self.convs[("dispconv", i)](x)) - - return self.outputs diff --git a/gimp-plugins/monodepth2/networks/depth_decoder.pyc b/gimp-plugins/monodepth2/networks/depth_decoder.pyc deleted file mode 100644 index 
10a9d2c03733f74f053982e83a3c3c94f1548ef9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2358 zcmcIlO>Z1U5Url^etI3}J61cyd)Bivo9Q0M zMz$2mKJh2`6Z``19Jp}jH*g2ut62wg;}CXddaJ9etEykutM&g_>HqNS!8friK0baQ ze`~~k4oB<;#{AQK}=Th2sIMZOxZ(CyBsgYX%18ZjfciXr;WmU->cFq_TG%DFCd zNp_IgtSsCmOy*TqxNuSzv4iT_$Z+Q}hR>@ct$2RkbUNO1{{_o@tHX=2zEx?mgzwMaSKBaZyWDZdix1#hsVe< zTzN9}cw}?za~@(`QUOlYB5cXAr<%4LH=2&%)YBfrBX9ysfZvwg7Bi!hJ`W71lm~M7 zn>gUvkpvgfl~Z4~zy4WH;1}CEvhPXtBTc7jF)S|Wq_4|8xivgNV63WQc!J1C^X&w5T&P4UcQzWhnpI|jq`jU zCtcRZB6wGZl>7@R4&=Bi6Hm5Yeqmm?%QC$p?&>1{={2GS@D})87aH6+hwvsZeP^_S z8+_(il0_O-sdd?9$k|;Ek2$>NAh_P<`v1ZwV+}aoeLauM;y{aeQL|Pxqg%9KmDqZo zRskh72*a$%To}4*&=mD7ENeHfT^R30MUm##v8rmDhIT)jTih^X*l0G-({P4D)~Gg) za%9?HxQO?{6uj^_oompjs-mNd>q#01KGt=WDO;7QsMt+4@KNKk zP)DDYNu8&kX4W~@FxV+1d27psC{L4;>qFfJR$k5Dy8f%a-uB){Q|5LK?K6$ptt6|g>R}Q-C!q&_Cg69PTkXCHq&oU(op$XpfeC1BS zWi5pyv-74fXwi_t2G_ZT3{lRU6QHRPrBYeNdq4)AIC4!9y@QE%>EK=NV_{_lnKelx zSCiN*uEXFC?eMI=zn&@2oU0!8R~Ri|2-0n75A)Kz+$|xss?o+3i%Nu%76oEEEGCVHxJk@`1+q5D=kVi*gt~QdU!Dy%a{|>3SZ8ac& T%!;$@JGZ@o8JJb@x$plCxd`F! diff --git a/gimp-plugins/monodepth2/networks/pose_cnn.py b/gimp-plugins/monodepth2/networks/pose_cnn.py deleted file mode 100755 index 16baec7..0000000 --- a/gimp-plugins/monodepth2/networks/pose_cnn.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. - -from __future__ import absolute_import, division, print_function - -import torch -import torch.nn as nn - - -class PoseCNN(nn.Module): - def __init__(self, num_input_frames): - super(PoseCNN, self).__init__() - - self.num_input_frames = num_input_frames - - self.convs = {} - self.convs[0] = nn.Conv2d(3 * num_input_frames, 16, 7, 2, 3) - self.convs[1] = nn.Conv2d(16, 32, 5, 2, 2) - self.convs[2] = nn.Conv2d(32, 64, 3, 2, 1) - self.convs[3] = nn.Conv2d(64, 128, 3, 2, 1) - self.convs[4] = nn.Conv2d(128, 256, 3, 2, 1) - self.convs[5] = nn.Conv2d(256, 256, 3, 2, 1) - self.convs[6] = nn.Conv2d(256, 256, 3, 2, 1) - - self.pose_conv = nn.Conv2d(256, 6 * (num_input_frames - 1), 1) - - self.num_convs = len(self.convs) - - self.relu = nn.ReLU(True) - - self.net = nn.ModuleList(list(self.convs.values())) - - def forward(self, out): - - for i in range(self.num_convs): - out = self.convs[i](out) - out = self.relu(out) - - out = self.pose_conv(out) - out = out.mean(3).mean(2) - - out = 0.01 * out.view(-1, self.num_input_frames - 1, 1, 6) - - axisangle = out[..., :3] - translation = out[..., 3:] - - return axisangle, translation diff --git a/gimp-plugins/monodepth2/networks/pose_cnn.pyc b/gimp-plugins/monodepth2/networks/pose_cnn.pyc deleted file mode 100644 index dd6d711ee56355532c88f51128e080263b85c2c1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1873 zcmcIk-HzKt6h3yGzwL(Iw$W-qy{G^o-*hAO4yl4dD{)g+$lD4L35-dc?F4(AddArT zU8&NR8ykP@y;zBxlEjlU=X`VK%zWQ*=kL+ZZ-1`8o5|+W!Tv$%to%I~xv*f=h|N(mPC=t1$w?d{JmbKphvxwoy~J{lUm!0~ZYax; zMF%f*o_1I<0%;^(Pl<#Gjd36x7H7o#qD#aQc319gPzd8X`ih7yAqHCn!b7fRj=?2F zv_&Aia}^@kA`rfI6=HXbK=?{T|349<%ZL#VjWQ3%thJ*z(ihOx9{i%ctfAh~j`(Y` z@$9N+B;c||F86r5h;9J;r0OIl)yIaiF#8-xvms5Y7jv7I%7ceK{4HsH8B_d|GA&|6A? 
zXTl=4>SdaC757i_M^8MB$90pdQRfi$4m@90^QOoj8Ryks(BKa1tZ1-1bK2aiIu+pZ zVv*=^5RsUJ`wYY6iOXv@DYMz+VRdGUDx15>DH`X_s%TD)MX0jXJYV^zcPGerR@Fbc zNn5dGM=bkjv7 zV##fs1`xZ&oH{_zK&$4>{A+~ZiFi3b1+yviy!D09T?1M0-RyVz2(FN!Y9xY8v zRco56SP}~5O@(iB>(^b diff --git a/gimp-plugins/monodepth2/networks/pose_decoder.py b/gimp-plugins/monodepth2/networks/pose_decoder.py deleted file mode 100755 index 4b03b60..0000000 --- a/gimp-plugins/monodepth2/networks/pose_decoder.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. - -from __future__ import absolute_import, division, print_function - -import torch -import torch.nn as nn -from collections import OrderedDict - - -class PoseDecoder(nn.Module): - def __init__(self, num_ch_enc, num_input_features, num_frames_to_predict_for=None, stride=1): - super(PoseDecoder, self).__init__() - - self.num_ch_enc = num_ch_enc - self.num_input_features = num_input_features - - if num_frames_to_predict_for is None: - num_frames_to_predict_for = num_input_features - 1 - self.num_frames_to_predict_for = num_frames_to_predict_for - - self.convs = OrderedDict() - self.convs[("squeeze")] = nn.Conv2d(self.num_ch_enc[-1], 256, 1) - self.convs[("pose", 0)] = nn.Conv2d(num_input_features * 256, 256, 3, stride, 1) - self.convs[("pose", 1)] = nn.Conv2d(256, 256, 3, stride, 1) - self.convs[("pose", 2)] = nn.Conv2d(256, 6 * num_frames_to_predict_for, 1) - - self.relu = nn.ReLU() - - self.net = nn.ModuleList(list(self.convs.values())) - - def forward(self, input_features): - last_features = [f[-1] for f in input_features] - - cat_features = [self.relu(self.convs["squeeze"](f)) for f in last_features] - cat_features = torch.cat(cat_features, 1) - - out = cat_features - for i in range(3): - out = self.convs[("pose", i)](out) - if i != 2: - out = self.relu(out) - - out = out.mean(3).mean(2) - - out = 0.01 * out.view(-1, self.num_frames_to_predict_for, 1, 6) - - axisangle = out[..., :3] - translation = out[..., 3:] - - return axisangle, translation diff --git a/gimp-plugins/monodepth2/networks/pose_decoder.pyc b/gimp-plugins/monodepth2/networks/pose_decoder.pyc deleted file mode 100644 index dfc6000f79184642284a7c3290691a47e86a65e2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2111 zcmcIl-EJFI5T3K^-~5n}Mp1%5ty(~PLoD^W6a+zBPy*VhLRe~-W%oE)XT9s}IZjH6 zuM%!}7~X|P;2C%T_`b1AQp6>-;*&WubLRJ(aro!@%CEnlKhI?O3Gx5aAB?yh1irbE zQe-i}W!e;#Pe!vU^I#oD zhA&rfeADE%vH3xfdA)yIJ9}WW8r;`X{ER8y!vibKEDx8tUL!@&0fR(h@ln)*QbGG! 
z8wbA(_;bGB0^8%y$R$z=WE?OtbE)?vl*@pEq$QVX{BiUncMy^zK+!Ivu(NYDbLfNLgEthBmv_Rk?v{qheQT(48tG6|JjAi>Z zKWhv5sw^X0)5z8Z9a#7nZ%}jvuZxT~S;V7g9rZcXk`;g9QwYn{1=F3)tbJ*V5VU7q zr|6EhzEBVZU`0IQ8%*3>*@IhBRel>h&z`v1)HX3~Dp-=Fs0yDXeg&LrHc7IR!~!f^ z%WF|hXTXn5{j9N0V>zrV>-U!OQIk%rOMIP7fhK?{8P!d!anoB?SLe<%f~xWg2QVJy zYA3c&o+{B;K$!l&uJc)GpA^n3za+s&=V>{Eb6SP#h=x_c*>V*3af|snUL&9S&NoGF zBT%A;`vTP9sk4n6Oww#{P+wGKo#t+EjJf$}TF#D(3Zl9K{HOlp;Q($f>gLo9SokDg z%*FonrAm z3h%w=ft+;(Ag3^d@V*KGO-1<_BNxaXlo#ls`uj3(35p8T6U!kd3LMb$w!HX?wE^^@ zfAH*MnRgU}x=$`UGNQ3Ih0w)4@5pH=&6z5}<15kGnc(j`+{f*uH4>fW%e+ffEHTUh zA>P%=>B&_~wrLoueO(kS`6W=NbA3TBqLY8Pm?j8%R5^;+#X}v1m_1%h2D`q1Ics|S z^Y(9F{_*nq6CH&8i2aBO?wG@*2={f9ohVqc)GMb+t7EH(nb@>a;=HgI3YbXov%|;S zm;b^$M``3~x=h`zIUv+v{bn#2Wj*uQaIk@;gVp?^aPV7NU9TQ7xH9DueiOLVt%1qA zNSpitoG;<|UNA6QLC4%NduGk-1iQiAU 1: - self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images) - else: - self.encoder = resnets[num_layers](pretrained) - - if num_layers > 34: - self.num_ch_enc[1:] *= 4 - - def forward(self, input_image): - self.features = [] - x = (input_image - 0.45) / 0.225 - x = self.encoder.conv1(x) - x = self.encoder.bn1(x) - self.features.append(self.encoder.relu(x)) - self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1]))) - self.features.append(self.encoder.layer2(self.features[-1])) - self.features.append(self.encoder.layer3(self.features[-1])) - self.features.append(self.encoder.layer4(self.features[-1])) - - return self.features diff --git a/gimp-plugins/monodepth2/networks/resnet_encoder.pyc b/gimp-plugins/monodepth2/networks/resnet_encoder.pyc deleted file mode 100644 index fd81848e2dab08a93aaa30440f46d49ed42d6125..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4657 zcmcIo&2wBw5%1ZRR;w?|mSji5hP*gXtdm%4$*~9~mDnPIa)DLhnUF$N@OXM}b~U?i z-&@|yN>(gYrI0Uh;>e9N2X6cs;lQ2ZzzGhVsN%u_e%-UH-Pi>udp|}!{d#(Odb)qz ztLi@&=YI94!=ENJd{y!H-p>@--+@T+QIrw&6zwVMRcNn5y(;ZhN%6A^WmB{_Epni1 zv{xI^Gt{ZmUVSv3rOq7f&5fq>)LEd;BJC|wS|vS4oeR`blupr8qBEi&?=6wONGVj; z=qa|nMClCaI(3#su6{1J!d5(L%)(m3M`-W?+!BU(-xk@x>B#i@Md@4}ywn^TYl_@m zz@%TA+(oTHo;a{}-DU7XZFjYMILMrN(2LvpLEay@FF0q1o%3agYGR+j4e$xP1T%0H zwzW#(qs9~@>~ArAyU4982Z^&m9F*GT+6BEL)md<4+(B>{m&Yb=2l=44ugjol1%|f_ z4DW6?<^sI#r*YruG-#DYFF0_nZ|^pnZLA*bZzM&p**|tgnH)6z*_!)VvES^)*6Fe- z4~Bf%)i%8i8~tO^ZT<#_83jX=X$&=d=hB&0Tg0Y7W}ep1s$$f|iDwJ)Y>w8wjb^_m-xWUg#vs&K_EVAtl;W)%{`0;*UIiB>gqDrS!B(p+RQMM z?-(IzQf(?cXJ$W6Q-mRQAlf&G9cCgDvpCTb9gC;7Mv=*liy}!;*)-SIv2A%S)*^=vw^CX9 zoj9{vs07SSIpI$1l7ro%>_LW=?&q7bX{f*du@FBj2RdZC9OJ~p`0+WX zt#KBKiX&~>$Z0kVC*oBu3fZs*-;=DjI%~<1k=aRiys;IdfgDki#nxi;i&(;Q!4Lo+ zV+hh0r)*##8SHmKH9xkxv_OfZ`9X1%XGNS^KuWKFv!4yxu)f(Va@=g+9c(oLSw}_L z9YIH=^8}AdKuE|q%D8E-V<5GzW>rnSsg~7c^_se-uJgOHg86lPuPA{ek4v zF(v}!fL^~)gcM@58-3a!6m5_3YMPi;p=~HwKdrJnE(A#00Zc-2IGK={;v4~-o*!4iMC90EnWl&a!PVW*y(t8o{BRc{)KM2IjQ|8CN}0K-E>xf0v;M zzz9%Qq;i~Y8sM>rW7>c7p(jh0uZX^64a-s4ws)m?aB<>rlRU9O1NHRA-C*~;lnu+B zf4mVq99S3Z>tOSppeTbox6XC+OYO=SfRP4`eIz54JZJ@+wQr$hx?d*R8cWdst`+VDv);nrZBGw0jNEl7KE9~oV8XYyfL zqJLqXzJd$sl}h&P#MUG`0u7u@JB4$Z&UMoD2+PPK3!_hqLUNC@JsOmmtoDyEv!b`f z(BuSdq<0WNO~B!nN*2OzXsS|^xCUp5C>aNVOvm>rUJ%8CT^=5VjNEbG1G!irGOS;x z|`Z<6odYO?I_(oCV#o2R5guL5qrS-kEX?_r#obfVE7y4$0ZZ*tht4KAXgT>5>cxWxHpq%Ua^mt}l>9`(rq zm&35hLgQrLd}m^E`|XLzom*1s{ob3mHYaCywnXiZ;%uNL>BDU{;2!JdR2v3HgF_+w z8V`4Q_!bX*SP}A??aN6baRz(EUc*3YrSe*3QO(JFv9h4nCbsqS~_C}tE3>1zl_r$O4 z4XI|<1A<(iNDrLJtbZnDn6pB7;cZ?o4TJpaL27o$N`2n*lZR3y(w(eL;GU W;l}d$u2mM0%}aPMSJ&5 Loading model from ", model_path) - encoder_path = os.path.join(model_path, "encoder.pth") - 
depth_decoder_path = os.path.join(model_path, "depth.pth") - - # LOADING PRETRAINED MODEL - print(" Loading pretrained encoder") - encoder = networks.ResnetEncoder(18, False) - loaded_dict_enc = torch.load(encoder_path, map_location=device) - - # extract the height and width of image that this model was trained with - feed_height = loaded_dict_enc['height'] - feed_width = loaded_dict_enc['width'] - filtered_dict_enc = {k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()} - encoder.load_state_dict(filtered_dict_enc) - encoder.to(device) - encoder.eval() - - print(" Loading pretrained decoder") - depth_decoder = networks.DepthDecoder( - num_ch_enc=encoder.num_ch_enc, scales=range(4)) - - loaded_dict = torch.load(depth_decoder_path, map_location=device) - depth_decoder.load_state_dict(loaded_dict) - - depth_decoder.to(device) - depth_decoder.eval() - - # FINDING INPUT IMAGES - if os.path.isfile(args.image_path): - # Only testing on a single image - paths = [args.image_path] - output_directory = os.path.dirname(args.image_path) - elif os.path.isdir(args.image_path): - # Searching folder for images - paths = glob.glob(os.path.join(args.image_path, '*.{}'.format(args.ext))) - output_directory = args.image_path - else: - raise Exception("Can not find args.image_path: {}".format(args.image_path)) - - print("-> Predicting on {:d} test images".format(len(paths))) - - # PREDICTING ON EACH IMAGE IN TURN - with torch.no_grad(): - for idx, image_path in enumerate(paths): - - if image_path.endswith("_disp.jpg"): - # don't try to predict disparity for a disparity image! - continue - - # Load image and preprocess - # input_image = cv2.imread(image_path) - # input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB) - input_image = pil.open(image_path).convert('RGB') - original_width, original_height = input_image.size - # input_image = cv2.resize(input_image, (feed_width, feed_height)) - input_image = input_image.resize((feed_width, feed_height), pil.LANCZOS) - input_image = transforms.ToTensor()(input_image).unsqueeze(0) - - # PREDICTION - input_image = input_image.to(device) - features = encoder(input_image) - outputs = depth_decoder(features) - - disp = outputs[("disp", 0)] - disp_resized = torch.nn.functional.interpolate( - disp, (original_height, original_width), mode="bilinear", align_corners=False) - - # Saving numpy file - output_name = os.path.splitext(os.path.basename(image_path))[0] - name_dest_npy = os.path.join(output_directory, "{}_disp.npy".format(output_name)) - scaled_disp, _ = disp_to_depth(disp, 0.1, 100) - np.save(name_dest_npy, scaled_disp.cpu().numpy()) - - # Saving colormapped depth image - disp_resized_np = disp_resized.squeeze().cpu().numpy() - vmax = np.percentile(disp_resized_np, 95) - normalizer = mpl.colors.Normalize(vmin=disp_resized_np.min(), vmax=vmax) - mapper = cm.ScalarMappable(norm=normalizer, cmap='magma') - colormapped_im = (mapper.to_rgba(disp_resized_np)[:, :, :3] * 255).astype(np.uint8) - im = pil.fromarray(colormapped_im) - - name_dest_im = os.path.join(output_directory, "{}_disp.jpeg".format(output_name)) - im.save(name_dest_im) - # cv2.imwrite('/Users/kritiksoman/Downloads/gimp-plugins/out5.jpg',cv2.cvtColor(colormapped_im, cv2.COLOR_RGB2BGR)) - - print(" Processed {:d} of {:d} images - saved prediction to {}".format( - idx + 1, len(paths), name_dest_im)) - - print('-> Done!') - - -if __name__ == '__main__': - args = parse_args() - test_simple(args) diff --git a/gimp-plugins/monodepth2/train.py b/gimp-plugins/monodepth2/train.py deleted file 
mode 100755 index ee1425e..0000000 --- a/gimp-plugins/monodepth2/train.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. - -from __future__ import absolute_import, division, print_function - -from trainer import Trainer -from options import MonodepthOptions - -options = MonodepthOptions() -opts = options.parse() - - -if __name__ == "__main__": - trainer = Trainer(opts) - trainer.train() diff --git a/gimp-plugins/monodepth2/trainer.py b/gimp-plugins/monodepth2/trainer.py deleted file mode 100755 index a726dad..0000000 --- a/gimp-plugins/monodepth2/trainer.py +++ /dev/null @@ -1,630 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. - -from __future__ import absolute_import, division, print_function - -import numpy as np -import time - -import torch -import torch.nn.functional as F -import torch.optim as optim -from torch.utils.data import DataLoader -from tensorboardX import SummaryWriter - -import json - -from utils import * -from kitti_utils import * -from layers import * - -import datasets -import networks -from IPython import embed - - -class Trainer: - def __init__(self, options): - self.opt = options - self.log_path = os.path.join(self.opt.log_dir, self.opt.model_name) - - # checking height and width are multiples of 32 - assert self.opt.height % 32 == 0, "'height' must be a multiple of 32" - assert self.opt.width % 32 == 0, "'width' must be a multiple of 32" - - self.models = {} - self.parameters_to_train = [] - - self.device = torch.device("cpu" if self.opt.no_cuda else "cuda") - - self.num_scales = len(self.opt.scales) - self.num_input_frames = len(self.opt.frame_ids) - self.num_pose_frames = 2 if self.opt.pose_model_input == "pairs" else self.num_input_frames - - assert self.opt.frame_ids[0] == 0, "frame_ids must start with 0" - - self.use_pose_net = not (self.opt.use_stereo and self.opt.frame_ids == [0]) - - if self.opt.use_stereo: - self.opt.frame_ids.append("s") - - self.models["encoder"] = networks.ResnetEncoder( - self.opt.num_layers, self.opt.weights_init == "pretrained") - self.models["encoder"].to(self.device) - self.parameters_to_train += list(self.models["encoder"].parameters()) - - self.models["depth"] = networks.DepthDecoder( - self.models["encoder"].num_ch_enc, self.opt.scales) - self.models["depth"].to(self.device) - self.parameters_to_train += list(self.models["depth"].parameters()) - - if self.use_pose_net: - if self.opt.pose_model_type == "separate_resnet": - self.models["pose_encoder"] = networks.ResnetEncoder( - self.opt.num_layers, - self.opt.weights_init == "pretrained", - num_input_images=self.num_pose_frames) - - self.models["pose_encoder"].to(self.device) - self.parameters_to_train += list(self.models["pose_encoder"].parameters()) - - self.models["pose"] = networks.PoseDecoder( - self.models["pose_encoder"].num_ch_enc, - num_input_features=1, - num_frames_to_predict_for=2) - - elif self.opt.pose_model_type == "shared": - self.models["pose"] = networks.PoseDecoder( - self.models["encoder"].num_ch_enc, self.num_pose_frames) - - elif self.opt.pose_model_type == "posecnn": - self.models["pose"] = networks.PoseCNN( - 
self.num_input_frames if self.opt.pose_model_input == "all" else 2) - - self.models["pose"].to(self.device) - self.parameters_to_train += list(self.models["pose"].parameters()) - - if self.opt.predictive_mask: - assert self.opt.disable_automasking, \ - "When using predictive_mask, please disable automasking with --disable_automasking" - - # Our implementation of the predictive masking baseline has the the same architecture - # as our depth decoder. We predict a separate mask for each source frame. - self.models["predictive_mask"] = networks.DepthDecoder( - self.models["encoder"].num_ch_enc, self.opt.scales, - num_output_channels=(len(self.opt.frame_ids) - 1)) - self.models["predictive_mask"].to(self.device) - self.parameters_to_train += list(self.models["predictive_mask"].parameters()) - - self.model_optimizer = optim.Adam(self.parameters_to_train, self.opt.learning_rate) - self.model_lr_scheduler = optim.lr_scheduler.StepLR( - self.model_optimizer, self.opt.scheduler_step_size, 0.1) - - if self.opt.load_weights_folder is not None: - self.load_model() - - print("Training model named:\n ", self.opt.model_name) - print("Models and tensorboard events files are saved to:\n ", self.opt.log_dir) - print("Training is using:\n ", self.device) - - # data - datasets_dict = {"kitti": datasets.KITTIRAWDataset, - "kitti_odom": datasets.KITTIOdomDataset} - self.dataset = datasets_dict[self.opt.dataset] - - fpath = os.path.join(os.path.dirname(__file__), "splits", self.opt.split, "{}_files.txt") - - train_filenames = readlines(fpath.format("train")) - val_filenames = readlines(fpath.format("val")) - img_ext = '.png' if self.opt.png else '.jpg' - - num_train_samples = len(train_filenames) - self.num_total_steps = num_train_samples // self.opt.batch_size * self.opt.num_epochs - - train_dataset = self.dataset( - self.opt.data_path, train_filenames, self.opt.height, self.opt.width, - self.opt.frame_ids, 4, is_train=True, img_ext=img_ext) - self.train_loader = DataLoader( - train_dataset, self.opt.batch_size, True, - num_workers=self.opt.num_workers, pin_memory=True, drop_last=True) - val_dataset = self.dataset( - self.opt.data_path, val_filenames, self.opt.height, self.opt.width, - self.opt.frame_ids, 4, is_train=False, img_ext=img_ext) - self.val_loader = DataLoader( - val_dataset, self.opt.batch_size, True, - num_workers=self.opt.num_workers, pin_memory=True, drop_last=True) - self.val_iter = iter(self.val_loader) - - self.writers = {} - for mode in ["train", "val"]: - self.writers[mode] = SummaryWriter(os.path.join(self.log_path, mode)) - - if not self.opt.no_ssim: - self.ssim = SSIM() - self.ssim.to(self.device) - - self.backproject_depth = {} - self.project_3d = {} - for scale in self.opt.scales: - h = self.opt.height // (2 ** scale) - w = self.opt.width // (2 ** scale) - - self.backproject_depth[scale] = BackprojectDepth(self.opt.batch_size, h, w) - self.backproject_depth[scale].to(self.device) - - self.project_3d[scale] = Project3D(self.opt.batch_size, h, w) - self.project_3d[scale].to(self.device) - - self.depth_metric_names = [ - "de/abs_rel", "de/sq_rel", "de/rms", "de/log_rms", "da/a1", "da/a2", "da/a3"] - - print("Using split:\n ", self.opt.split) - print("There are {:d} training items and {:d} validation items\n".format( - len(train_dataset), len(val_dataset))) - - self.save_opts() - - def set_train(self): - """Convert all models to training mode - """ - for m in self.models.values(): - m.train() - - def set_eval(self): - """Convert all models to testing/evaluation mode - """ - for m in 
self.models.values(): - m.eval() - - def train(self): - """Run the entire training pipeline - """ - self.epoch = 0 - self.step = 0 - self.start_time = time.time() - for self.epoch in range(self.opt.num_epochs): - self.run_epoch() - if (self.epoch + 1) % self.opt.save_frequency == 0: - self.save_model() - - def run_epoch(self): - """Run a single epoch of training and validation - """ - self.model_lr_scheduler.step() - - print("Training") - self.set_train() - - for batch_idx, inputs in enumerate(self.train_loader): - - before_op_time = time.time() - - outputs, losses = self.process_batch(inputs) - - self.model_optimizer.zero_grad() - losses["loss"].backward() - self.model_optimizer.step() - - duration = time.time() - before_op_time - - # log less frequently after the first 2000 steps to save time & disk space - early_phase = batch_idx % self.opt.log_frequency == 0 and self.step < 2000 - late_phase = self.step % 2000 == 0 - - if early_phase or late_phase: - self.log_time(batch_idx, duration, losses["loss"].cpu().data) - - if "depth_gt" in inputs: - self.compute_depth_losses(inputs, outputs, losses) - - self.log("train", inputs, outputs, losses) - self.val() - - self.step += 1 - - def process_batch(self, inputs): - """Pass a minibatch through the network and generate images and losses - """ - for key, ipt in inputs.items(): - inputs[key] = ipt.to(self.device) - - if self.opt.pose_model_type == "shared": - # If we are using a shared encoder for both depth and pose (as advocated - # in monodepthv1), then all images are fed separately through the depth encoder. - all_color_aug = torch.cat([inputs[("color_aug", i, 0)] for i in self.opt.frame_ids]) - all_features = self.models["encoder"](all_color_aug) - all_features = [torch.split(f, self.opt.batch_size) for f in all_features] - - features = {} - for i, k in enumerate(self.opt.frame_ids): - features[k] = [f[i] for f in all_features] - - outputs = self.models["depth"](features[0]) - else: - # Otherwise, we only feed the image with frame_id 0 through the depth encoder - features = self.models["encoder"](inputs["color_aug", 0, 0]) - outputs = self.models["depth"](features) - - if self.opt.predictive_mask: - outputs["predictive_mask"] = self.models["predictive_mask"](features) - - if self.use_pose_net: - outputs.update(self.predict_poses(inputs, features)) - - self.generate_images_pred(inputs, outputs) - losses = self.compute_losses(inputs, outputs) - - return outputs, losses - - def predict_poses(self, inputs, features): - """Predict poses between input frames for monocular sequences. - """ - outputs = {} - if self.num_pose_frames == 2: - # In this setting, we compute the pose to each source frame via a - # separate forward pass through the pose network. 
- - # select what features the pose network takes as input - if self.opt.pose_model_type == "shared": - pose_feats = {f_i: features[f_i] for f_i in self.opt.frame_ids} - else: - pose_feats = {f_i: inputs["color_aug", f_i, 0] for f_i in self.opt.frame_ids} - - for f_i in self.opt.frame_ids[1:]: - if f_i != "s": - # To maintain ordering we always pass frames in temporal order - if f_i < 0: - pose_inputs = [pose_feats[f_i], pose_feats[0]] - else: - pose_inputs = [pose_feats[0], pose_feats[f_i]] - - if self.opt.pose_model_type == "separate_resnet": - pose_inputs = [self.models["pose_encoder"](torch.cat(pose_inputs, 1))] - elif self.opt.pose_model_type == "posecnn": - pose_inputs = torch.cat(pose_inputs, 1) - - axisangle, translation = self.models["pose"](pose_inputs) - outputs[("axisangle", 0, f_i)] = axisangle - outputs[("translation", 0, f_i)] = translation - - # Invert the matrix if the frame id is negative - outputs[("cam_T_cam", 0, f_i)] = transformation_from_parameters( - axisangle[:, 0], translation[:, 0], invert=(f_i < 0)) - - else: - # Here we input all frames to the pose net (and predict all poses) together - if self.opt.pose_model_type in ["separate_resnet", "posecnn"]: - pose_inputs = torch.cat( - [inputs[("color_aug", i, 0)] for i in self.opt.frame_ids if i != "s"], 1) - - if self.opt.pose_model_type == "separate_resnet": - pose_inputs = [self.models["pose_encoder"](pose_inputs)] - - elif self.opt.pose_model_type == "shared": - pose_inputs = [features[i] for i in self.opt.frame_ids if i != "s"] - - axisangle, translation = self.models["pose"](pose_inputs) - - for i, f_i in enumerate(self.opt.frame_ids[1:]): - if f_i != "s": - outputs[("axisangle", 0, f_i)] = axisangle - outputs[("translation", 0, f_i)] = translation - outputs[("cam_T_cam", 0, f_i)] = transformation_from_parameters( - axisangle[:, i], translation[:, i]) - - return outputs - - def val(self): - """Validate the model on a single minibatch - """ - self.set_eval() - try: - inputs = self.val_iter.next() - except StopIteration: - self.val_iter = iter(self.val_loader) - inputs = self.val_iter.next() - - with torch.no_grad(): - outputs, losses = self.process_batch(inputs) - - if "depth_gt" in inputs: - self.compute_depth_losses(inputs, outputs, losses) - - self.log("val", inputs, outputs, losses) - del inputs, outputs, losses - - self.set_train() - - def generate_images_pred(self, inputs, outputs): - """Generate the warped (reprojected) color images for a minibatch. - Generated images are saved into the `outputs` dictionary. 
- """ - for scale in self.opt.scales: - disp = outputs[("disp", scale)] - if self.opt.v1_multiscale: - source_scale = scale - else: - disp = F.interpolate( - disp, [self.opt.height, self.opt.width], mode="bilinear", align_corners=False) - source_scale = 0 - - _, depth = disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth) - - outputs[("depth", 0, scale)] = depth - - for i, frame_id in enumerate(self.opt.frame_ids[1:]): - - if frame_id == "s": - T = inputs["stereo_T"] - else: - T = outputs[("cam_T_cam", 0, frame_id)] - - # from the authors of https://arxiv.org/abs/1712.00175 - if self.opt.pose_model_type == "posecnn": - - axisangle = outputs[("axisangle", 0, frame_id)] - translation = outputs[("translation", 0, frame_id)] - - inv_depth = 1 / depth - mean_inv_depth = inv_depth.mean(3, True).mean(2, True) - - T = transformation_from_parameters( - axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0) - - cam_points = self.backproject_depth[source_scale]( - depth, inputs[("inv_K", source_scale)]) - pix_coords = self.project_3d[source_scale]( - cam_points, inputs[("K", source_scale)], T) - - outputs[("sample", frame_id, scale)] = pix_coords - - outputs[("color", frame_id, scale)] = F.grid_sample( - inputs[("color", frame_id, source_scale)], - outputs[("sample", frame_id, scale)], - padding_mode="border") - - if not self.opt.disable_automasking: - outputs[("color_identity", frame_id, scale)] = \ - inputs[("color", frame_id, source_scale)] - - def compute_reprojection_loss(self, pred, target): - """Computes reprojection loss between a batch of predicted and target images - """ - abs_diff = torch.abs(target - pred) - l1_loss = abs_diff.mean(1, True) - - if self.opt.no_ssim: - reprojection_loss = l1_loss - else: - ssim_loss = self.ssim(pred, target).mean(1, True) - reprojection_loss = 0.85 * ssim_loss + 0.15 * l1_loss - - return reprojection_loss - - def compute_losses(self, inputs, outputs): - """Compute the reprojection and smoothness losses for a minibatch - """ - losses = {} - total_loss = 0 - - for scale in self.opt.scales: - loss = 0 - reprojection_losses = [] - - if self.opt.v1_multiscale: - source_scale = scale - else: - source_scale = 0 - - disp = outputs[("disp", scale)] - color = inputs[("color", 0, scale)] - target = inputs[("color", 0, source_scale)] - - for frame_id in self.opt.frame_ids[1:]: - pred = outputs[("color", frame_id, scale)] - reprojection_losses.append(self.compute_reprojection_loss(pred, target)) - - reprojection_losses = torch.cat(reprojection_losses, 1) - - if not self.opt.disable_automasking: - identity_reprojection_losses = [] - for frame_id in self.opt.frame_ids[1:]: - pred = inputs[("color", frame_id, source_scale)] - identity_reprojection_losses.append( - self.compute_reprojection_loss(pred, target)) - - identity_reprojection_losses = torch.cat(identity_reprojection_losses, 1) - - if self.opt.avg_reprojection: - identity_reprojection_loss = identity_reprojection_losses.mean(1, keepdim=True) - else: - # save both images, and do min all at once below - identity_reprojection_loss = identity_reprojection_losses - - elif self.opt.predictive_mask: - # use the predicted mask - mask = outputs["predictive_mask"]["disp", scale] - if not self.opt.v1_multiscale: - mask = F.interpolate( - mask, [self.opt.height, self.opt.width], - mode="bilinear", align_corners=False) - - reprojection_losses *= mask - - # add a loss pushing mask to 1 (using nn.BCELoss for stability) - weighting_loss = 0.2 * nn.BCELoss()(mask, torch.ones(mask.shape).cuda()) - loss += 
weighting_loss.mean() - - if self.opt.avg_reprojection: - reprojection_loss = reprojection_losses.mean(1, keepdim=True) - else: - reprojection_loss = reprojection_losses - - if not self.opt.disable_automasking: - # add random numbers to break ties - identity_reprojection_loss += torch.randn( - identity_reprojection_loss.shape).cuda() * 0.00001 - - combined = torch.cat((identity_reprojection_loss, reprojection_loss), dim=1) - else: - combined = reprojection_loss - - if combined.shape[1] == 1: - to_optimise = combined - else: - to_optimise, idxs = torch.min(combined, dim=1) - - if not self.opt.disable_automasking: - outputs["identity_selection/{}".format(scale)] = ( - idxs > identity_reprojection_loss.shape[1] - 1).float() - - loss += to_optimise.mean() - - mean_disp = disp.mean(2, True).mean(3, True) - norm_disp = disp / (mean_disp + 1e-7) - smooth_loss = get_smooth_loss(norm_disp, color) - - loss += self.opt.disparity_smoothness * smooth_loss / (2 ** scale) - total_loss += loss - losses["loss/{}".format(scale)] = loss - - total_loss /= self.num_scales - losses["loss"] = total_loss - return losses - - def compute_depth_losses(self, inputs, outputs, losses): - """Compute depth metrics, to allow monitoring during training - - This isn't particularly accurate as it averages over the entire batch, - so is only used to give an indication of validation performance - """ - depth_pred = outputs[("depth", 0, 0)] - depth_pred = torch.clamp(F.interpolate( - depth_pred, [375, 1242], mode="bilinear", align_corners=False), 1e-3, 80) - depth_pred = depth_pred.detach() - - depth_gt = inputs["depth_gt"] - mask = depth_gt > 0 - - # garg/eigen crop - crop_mask = torch.zeros_like(mask) - crop_mask[:, :, 153:371, 44:1197] = 1 - mask = mask * crop_mask - - depth_gt = depth_gt[mask] - depth_pred = depth_pred[mask] - depth_pred *= torch.median(depth_gt) / torch.median(depth_pred) - - depth_pred = torch.clamp(depth_pred, min=1e-3, max=80) - - depth_errors = compute_depth_errors(depth_gt, depth_pred) - - for i, metric in enumerate(self.depth_metric_names): - losses[metric] = np.array(depth_errors[i].cpu()) - - def log_time(self, batch_idx, duration, loss): - """Print a logging statement to the terminal - """ - samples_per_sec = self.opt.batch_size / duration - time_sofar = time.time() - self.start_time - training_time_left = ( - self.num_total_steps / self.step - 1.0) * time_sofar if self.step > 0 else 0 - print_string = "epoch {:>3} | batch {:>6} | examples/s: {:5.1f}" + \ - " | loss: {:.5f} | time elapsed: {} | time left: {}" - print(print_string.format(self.epoch, batch_idx, samples_per_sec, loss, - sec_to_hm_str(time_sofar), sec_to_hm_str(training_time_left))) - - def log(self, mode, inputs, outputs, losses): - """Write an event to the tensorboard events file - """ - writer = self.writers[mode] - for l, v in losses.items(): - writer.add_scalar("{}".format(l), v, self.step) - - for j in range(min(4, self.opt.batch_size)): # write a maxmimum of four images - for s in self.opt.scales: - for frame_id in self.opt.frame_ids: - writer.add_image( - "color_{}_{}/{}".format(frame_id, s, j), - inputs[("color", frame_id, s)][j].data, self.step) - if s == 0 and frame_id != 0: - writer.add_image( - "color_pred_{}_{}/{}".format(frame_id, s, j), - outputs[("color", frame_id, s)][j].data, self.step) - - writer.add_image( - "disp_{}/{}".format(s, j), - normalize_image(outputs[("disp", s)][j]), self.step) - - if self.opt.predictive_mask: - for f_idx, frame_id in enumerate(self.opt.frame_ids[1:]): - writer.add_image( - 
"predictive_mask_{}_{}/{}".format(frame_id, s, j), - outputs["predictive_mask"][("disp", s)][j, f_idx][None, ...], - self.step) - - elif not self.opt.disable_automasking: - writer.add_image( - "automask_{}/{}".format(s, j), - outputs["identity_selection/{}".format(s)][j][None, ...], self.step) - - def save_opts(self): - """Save options to disk so we know what we ran this experiment with - """ - models_dir = os.path.join(self.log_path, "models") - if not os.path.exists(models_dir): - os.makedirs(models_dir) - to_save = self.opt.__dict__.copy() - - with open(os.path.join(models_dir, 'opt.json'), 'w') as f: - json.dump(to_save, f, indent=2) - - def save_model(self): - """Save model weights to disk - """ - save_folder = os.path.join(self.log_path, "models", "weights_{}".format(self.epoch)) - if not os.path.exists(save_folder): - os.makedirs(save_folder) - - for model_name, model in self.models.items(): - save_path = os.path.join(save_folder, "{}.pth".format(model_name)) - to_save = model.state_dict() - if model_name == 'encoder': - # save the sizes - these are needed at prediction time - to_save['height'] = self.opt.height - to_save['width'] = self.opt.width - to_save['use_stereo'] = self.opt.use_stereo - torch.save(to_save, save_path) - - save_path = os.path.join(save_folder, "{}.pth".format("adam")) - torch.save(self.model_optimizer.state_dict(), save_path) - - def load_model(self): - """Load model(s) from disk - """ - self.opt.load_weights_folder = os.path.expanduser(self.opt.load_weights_folder) - - assert os.path.isdir(self.opt.load_weights_folder), \ - "Cannot find folder {}".format(self.opt.load_weights_folder) - print("loading model from folder {}".format(self.opt.load_weights_folder)) - - for n in self.opt.models_to_load: - print("Loading {} weights...".format(n)) - path = os.path.join(self.opt.load_weights_folder, "{}.pth".format(n)) - model_dict = self.models[n].state_dict() - pretrained_dict = torch.load(path) - pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} - model_dict.update(pretrained_dict) - self.models[n].load_state_dict(model_dict) - - # loading adam state - optimizer_load_path = os.path.join(self.opt.load_weights_folder, "adam.pth") - if os.path.isfile(optimizer_load_path): - print("Loading Adam weights") - optimizer_dict = torch.load(optimizer_load_path) - self.model_optimizer.load_state_dict(optimizer_dict) - else: - print("Cannot find Adam weights so Adam is randomly initialized") diff --git a/gimp-plugins/monodepth2/utils.py b/gimp-plugins/monodepth2/utils.py deleted file mode 100755 index e309cf7..0000000 --- a/gimp-plugins/monodepth2/utils.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. - -from __future__ import absolute_import, division, print_function -import os -import hashlib -import zipfile -from six.moves import urllib - - -def readlines(filename): - """Read all the lines in a text file and return as a list - """ - with open(filename, 'r') as f: - lines = f.read().splitlines() - return lines - - -def normalize_image(x): - """Rescale image pixels to span range [0, 1] - """ - ma = float(x.max().cpu().data) - mi = float(x.min().cpu().data) - d = ma - mi if ma != mi else 1e5 - return (x - mi) / d - - -def sec_to_hm(t): - """Convert time in seconds to time in hours, minutes and seconds - e.g. 
10239 -> (2, 50, 39) - """ - t = int(t) - s = t % 60 - t //= 60 - m = t % 60 - t //= 60 - return t, m, s - - -def sec_to_hm_str(t): - """Convert time in seconds to a nice string - e.g. 10239 -> '02h50m39s' - """ - h, m, s = sec_to_hm(t) - return "{:02d}h{:02d}m{:02d}s".format(h, m, s) - - -def download_model_if_doesnt_exist(model_name): - """If pretrained kitti model doesn't exist, download and unzip it - """ - # values are tuples of (, ) - download_paths = { - "mono_640x192": - ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono_640x192.zip", - "a964b8356e08a02d009609d9e3928f7c"), - "stereo_640x192": - ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/stereo_640x192.zip", - "3dfb76bcff0786e4ec07ac00f658dd07"), - "mono+stereo_640x192": - ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono%2Bstereo_640x192.zip", - "c024d69012485ed05d7eaa9617a96b81"), - "mono_no_pt_640x192": - ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono_no_pt_640x192.zip", - "9c2f071e35027c895a4728358ffc913a"), - "stereo_no_pt_640x192": - ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/stereo_no_pt_640x192.zip", - "41ec2de112905f85541ac33a854742d1"), - "mono+stereo_no_pt_640x192": - ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono%2Bstereo_no_pt_640x192.zip", - "46c3b824f541d143a45c37df65fbab0a"), - "mono_1024x320": - ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono_1024x320.zip", - "0ab0766efdfeea89a0d9ea8ba90e1e63"), - "stereo_1024x320": - ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/stereo_1024x320.zip", - "afc2f2126d70cf3fdf26b550898b501a"), - "mono+stereo_1024x320": - ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono%2Bstereo_1024x320.zip", - "cdc5fc9b23513c07d5b19235d9ef08f7"), - } - - if not os.path.exists("models"): - os.makedirs("models") - - model_path = os.path.join("models", model_name) - - def check_file_matches_md5(checksum, fpath): - if not os.path.exists(fpath): - return False - with open(fpath, 'rb') as f: - current_md5checksum = hashlib.md5(f.read()).hexdigest() - return current_md5checksum == checksum - - # see if we have the model already downloaded... 
- if not os.path.exists(os.path.join(model_path, "encoder.pth")): - - model_url, required_md5checksum = download_paths[model_name] - - if not check_file_matches_md5(required_md5checksum, model_path + ".zip"): - print("-> Downloading pretrained model to {}".format(model_path + ".zip")) - urllib.request.urlretrieve(model_url, model_path + ".zip") - - if not check_file_matches_md5(required_md5checksum, model_path + ".zip"): - print(" Failed to download a file which matches the checksum - quitting") - quit() - - print(" Unzipping model...") - with zipfile.ZipFile(model_path + ".zip", 'r') as f: - f.extractall(model_path) - - print(" Model unzipped to {}".format(model_path)) diff --git a/gimp-plugins/monodepth2/utils.pyc b/gimp-plugins/monodepth2/utils.pyc deleted file mode 100755 index d416cdda14ad9ee7d52be972bb9ab814ee9fedbb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5685 zcmcgwTXP$?6~>aZZk8L59p7RnSvz&5#1gq&a(PM9jGJ^k?X>kwSCcwb-O%6yOG4TU zy#O6GmL59pTOab<`dj+a=~KRgC26U4rY}@26F7^@!NGS9&N)E!Uzaxi@y{3EL~Qo2 zg8Jbzhv~n85cuz~gt63N`wmMhY`?Z<9S8aX+rKcUSJ?iFrK>Di zW$7B*U&EXl`#WR%>r7m*#0C>9mbl2oswFm=ShK_>;=Z@N4k;9gwp~SG-a)~KI!~rX z4pchH3uD&MCe#b1Ri2rj;A2v#%nagb78x`>-gao_3W{lwBx)GZH*vdw-tm9O=IfuM2qhOTPZHORrJJZsYFA}0 zca1zUZmbgN@=UmeG}8jj8a;{9X2ZolLxL2K%bnFg=M$M3A__1y8zA%~QO0sDN6}u1 zX~t7&r^kkNQH}t?wyDs+K+*V0%R)B}3uV-y&QqQ>ewBZlB{>(mF#>kGlVmzlnQo+c zmJ2yCW4|#qD$(`HUk%|-QdrIeswMzg^@?-Tu>g{oWGfcHZ$Oj)o<{7T%3e8yJ1^t&GN@NM;x%plv z%e97l=TsYjKHUab0eoDAAzNIZ5zjn(Aq(Rgl>*$%)iTO6VWFKjjq_=tcidEEIDRl? z81`(G#ah-!b+_sHt-ibaxx4M}xB(Q=>VIr|)n{0wJd3mi9MRIG?di0jYFnmDg_i6X zG7d~W7^ff7$0iC^+jKT&e+z3Sg!?0vViVOTOuU2Y0TWkH?J#i-)h-h^QPr8a%{ry~0POI>;tn|!JtFPEgOs06X0=5l z5Qy%Op~HnkVd8xXBv2cTw)1C1joga(fQAvf)FF*BcIzpo)!4}e2HhT`#Gnt^4UE$N zAxLZ!!AXjK5N87!!?IJtaoU<1n zk#yg`%TBQ33Ol$&9kk#R@l%8#;&gu|4O*!KOQLIUy8-z1mneP{yAy<+f+Hpg_fQ$5 z+!TI3aYZh5wq;y-gh;f5&oV05^E}PIQxjK}0ZRW7e8$F1^rwx6HhBTZR3GK}D3N@k zbUn(`My7aXRJ5Dq*{+8BR8galTJj_V z=+3eSViDL7zz_rUb%yM952-A^k*iPyxkr%@;i8OO2dfl!Fcd)x1Y+Y8!RSh3lkG#g z*w8|cZK_)g?d{9bxwZ-NgqyLI%7zXNzKlKMiI$59FzMPnHq>S=2E#Kb(Le9e#_i>5_^+t45t!F*rv^x)@TF z^gZHOPSAzrb=)oQiQkS@GhJMSZZ-hsAzEyODYQ1QTChs?l+v=&C_M<`gWT4B!W|M+d-J?wJj zT4jChZsl_2s&m764<%j}DjUu%=W4~O%R(@~dnBF!B5QW7? ztD}0FzmVE$EhJ5a^dw*}{^9#X{Tjt+9>A>uWy%yH)Ir diff --git a/gimp-plugins/moveWeights.sh b/gimp-plugins/moveWeights.sh index 4e2c82a..b622c1a 100644 --- a/gimp-plugins/moveWeights.sh +++ b/gimp-plugins/moveWeights.sh @@ -1,14 +1,13 @@ unzip weights.zip mkdir -p CelebAMask-HQ/MaskGAN_demo/checkpoints/label2face_512p -mkdir -p monodepth2/models/mono+stereo_640x192 mkdir -p pytorch-SRResNet/model mkdir deeplabv3 -mv weights/colorize/* neural-colorization/ +mv weights/colorize/* ideepcolor/models/pytorch/ mv weights/deblur/* DeblurGANv2/ mv weights/deeplabv3/* deeplabv3 mv weights/facegen/* CelebAMask-HQ/MaskGAN_demo/checkpoints/label2face_512p/ mv weights/faceparse/* face-parsing.PyTorch/ -mv weights/monodepth/* monodepth2/models/mono+stereo_640x192/ +mv weights/MiDaS/* MiDaS/ mv weights/super_resolution/* pytorch-SRResNet/model/ rm -rf weights/ diff --git a/gimp-plugins/neural-colorization/LICENSE.txt b/gimp-plugins/neural-colorization/LICENSE.txt deleted file mode 100644 index 6187c76..0000000 --- a/gimp-plugins/neural-colorization/LICENSE.txt +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2016, Richard Zhang, Phillip Isola, Alexei A. Efros -All rights reserved. 
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/gimp-plugins/neural-colorization/__pycache__/model.cpython-38.pyc b/gimp-plugins/neural-colorization/__pycache__/model.cpython-38.pyc
deleted file mode 100755
index d4f9ebd5e36d6c30dea708935df53d6b1debb37e..0000000000000000000000000000000000000000
Binary files a/gimp-plugins/neural-colorization/__pycache__/model.cpython-38.pyc and /dev/null differ
diff --git a/gimp-plugins/neural-colorization/build_dataset_directory.py b/gimp-plugins/neural-colorization/build_dataset_directory.py
deleted file mode 100755
index 06d2bc4..0000000
--- a/gimp-plugins/neural-colorization/build_dataset_directory.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-import shutil
-import argparse
-image_extensions = {'.jpg', '.jpeg', '.JPG', '.JPEG'}
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="Put all places 365 images in single folder.")
-    parser.add_argument("-i",
-                        "--input_dir",
-                        required=True,
-                        type=str,
-                        help="input folder: the folder containing unzipped places 365 files")
-    parser.add_argument("-o",
-                        "--output_dir",
-                        required=True,
-                        type=str,
-                        help="output folder: the folder to put all images")
-    args = parser.parse_args()
-    return args
-
-def genlist(image_dir):
-    image_list = []
-    for filename in os.listdir(image_dir):
-        path = os.path.join(image_dir,filename)
-        if os.path.isdir(path):
-            image_list = image_list + genlist(path)
-        else:
-            ext = os.path.splitext(filename)[1]
-            if ext in image_extensions:
-                image_list.append(os.path.join(image_dir, filename))
-    return image_list
-
-
-args = parse_args()
-if not os.path.exists(args.output_dir):
-    os.makedirs(args.output_dir)
-flist = genlist(args.input_dir)
-for i,p in enumerate(flist):
-    if os.path.getsize(p) != 0:
-        os.rename(p,os.path.join(args.output_dir,str(i)+'.jpg'))
-shutil.rmtree(args.input_dir)
-print('done')
\ No newline at end of file
diff --git a/gimp-plugins/neural-colorization/colorize.py b/gimp-plugins/neural-colorization/colorize.py
deleted file mode 100755
index 86320c5..0000000
--- a/gimp-plugins/neural-colorization/colorize.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import torch
-from model import generator
-from torch.autograd import Variable
-from scipy.ndimage import zoom
-import cv2
-import os
-from PIL import Image
-import argparse
-import numpy as np
-from skimage.color import rgb2yuv,yuv2rgb
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="Colorize images")
-    parser.add_argument("-i",
-                        "--input",
-                        type=str,
-                        required=True,
-                        help="input image/input dir")
-    parser.add_argument("-o",
-                        "--output",
-                        type=str,
-                        required=True,
-                        help="output image/output dir")
-    parser.add_argument("-m",
-                        "--model",
-                        type=str,
-                        required=True,
-                        help="location for model (Generator)")
-    parser.add_argument("--gpu",
-                        type=int,
-                        default=-1,
-                        help="which GPU to use? 
[-1 for cpu]") - args = parser.parse_args() - return args - -args = parse_args() - -G = generator() - -if torch.cuda.is_available(): -# args.gpu>=0: - G=G.cuda(args.gpu) - G.load_state_dict(torch.load(args.model)) -else: - G.load_state_dict(torch.load(args.model,map_location=torch.device('cpu'))) - -def inference(G,in_path,out_path): - p=Image.open(in_path).convert('RGB') - img_yuv = rgb2yuv(p) - H,W,_ = img_yuv.shape - infimg = np.expand_dims(np.expand_dims(img_yuv[...,0], axis=0), axis=0) - img_variable = Variable(torch.Tensor(infimg-0.5)) - if args.gpu>=0: - img_variable=img_variable.cuda(args.gpu) - res = G(img_variable) - uv=res.cpu().detach().numpy() - uv[:,0,:,:] *= 0.436 - uv[:,1,:,:] *= 0.615 - (_,_,H1,W1) = uv.shape - uv = zoom(uv,(1,1,H/H1,W/W1)) - yuv = np.concatenate([infimg,uv],axis=1)[0] - rgb=yuv2rgb(yuv.transpose(1,2,0)) - cv2.imwrite(out_path,(rgb.clip(min=0,max=1)*256)[:,:,[2,1,0]]) - - -if not os.path.isdir(args.input): - inference(G,args.input,args.output) -else: - if not os.path.exists(args.output): - os.makedirs(args.output) - for f in os.listdir(args.input): - inference(G,os.path.join(args.input,f),os.path.join(args.output,f)) - diff --git a/gimp-plugins/neural-colorization/model.py b/gimp-plugins/neural-colorization/model.py deleted file mode 100755 index afee57b..0000000 --- a/gimp-plugins/neural-colorization/model.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch -import torch.nn as nn -from functools import reduce -from torch.autograd import Variable - - -class shave_block(nn.Module): - def __init__(self, s): - super(shave_block, self).__init__() - self.s=s - def forward(self,x): - return x[:,:,self.s:-self.s,self.s:-self.s] - -class LambdaBase(nn.Sequential): - def __init__(self, fn, *args): - super(LambdaBase, self).__init__(*args) - self.lambda_func = fn - - def forward_prepare(self, input): - output = [] - for module in self._modules.values(): - output.append(module(input)) - return output if output else input - -class Lambda(LambdaBase): - def forward(self, input): - return self.lambda_func(self.forward_prepare(input)) - -class LambdaMap(LambdaBase): - def forward(self, input): - return list(map(self.lambda_func,self.forward_prepare(input))) - -class LambdaReduce(LambdaBase): - def forward(self, input): - return reduce(self.lambda_func,self.forward_prepare(input)) - -def generator(): - G = nn.Sequential( # Sequential, - nn.ReflectionPad2d((40, 40, 40, 40)), - nn.Conv2d(1,32,(9, 9),(1, 1),(4, 4)), - nn.BatchNorm2d(32), - nn.ReLU(), - nn.Conv2d(32,64,(3, 3),(2, 2),(1, 1)), - nn.BatchNorm2d(64), - nn.ReLU(), - nn.Conv2d(64,128,(3, 3),(2, 2),(1, 1)), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(128,128,(3, 3)), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3)), - nn.BatchNorm2d(128), - ), - shave_block(2), - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(128,128,(3, 3)), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3)), - nn.BatchNorm2d(128), - ), - shave_block(2), - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - ), - nn.Sequential( # Sequential, - LambdaMap(lambda x: x, # ConcatTable, - nn.Sequential( # Sequential, - nn.Conv2d(128,128,(3, 3)), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Conv2d(128,128,(3, 3)), - nn.BatchNorm2d(128), - ), - shave_block(2), - ), - LambdaReduce(lambda x,y: x+y), # CAddTable, - ), 
-        nn.Sequential( # Sequential,
-            LambdaMap(lambda x: x, # ConcatTable,
-                nn.Sequential( # Sequential,
-                    nn.Conv2d(128,128,(3, 3)),
-                    nn.BatchNorm2d(128),
-                    nn.ReLU(),
-                    nn.Conv2d(128,128,(3, 3)),
-                    nn.BatchNorm2d(128),
-                ),
-                shave_block(2),
-            ),
-            LambdaReduce(lambda x,y: x+y), # CAddTable,
-        ),
-        nn.Sequential( # Sequential,
-            LambdaMap(lambda x: x, # ConcatTable,
-                nn.Sequential( # Sequential,
-                    nn.Conv2d(128,128,(3, 3)),
-                    nn.BatchNorm2d(128),
-                    nn.ReLU(),
-                    nn.Conv2d(128,128,(3, 3)),
-                    nn.BatchNorm2d(128),
-                ),
-                shave_block(2),
-            ),
-            LambdaReduce(lambda x,y: x+y), # CAddTable,
-        ),
-        nn.ConvTranspose2d(128,64,(3, 3),(2, 2),(1, 1),(1, 1)),
-        nn.BatchNorm2d(64),
-        nn.ReLU(),
-        nn.ConvTranspose2d(64,32,(3, 3),(2, 2),(1, 1),(1, 1)),
-        nn.BatchNorm2d(32),
-        nn.ReLU(),
-        nn.Conv2d(32,2,(9, 9),(1, 1),(4, 4)),
-        nn.Tanh(),
-    )
-    return G
\ No newline at end of file
diff --git a/gimp-plugins/neural-colorization/model.pyc b/gimp-plugins/neural-colorization/model.pyc
deleted file mode 100755
index 33c2db96b3ad7875a248f95cbbc3fd13b4adf9fd..0000000000000000000000000000000000000000
Binary files a/gimp-plugins/neural-colorization/model.pyc and /dev/null differ
diff --git a/gimp-plugins/neural-colorization/resize_all_imgs.py b/gimp-plugins/neural-colorization/resize_all_imgs.py
deleted file mode 100755
index fc944d8..0000000
--- a/gimp-plugins/neural-colorization/resize_all_imgs.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from multiprocessing import Pool
-from PIL import Image
-import os
-import argparse
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="Resize all colorful imgs to 256*256 for training")
-    parser.add_argument("-d",
-                        "--dir",
-                        required=True,
-                        type=str,
-                        help="The directory includes all jpg images")
-    parser.add_argument("-n",
-                        "--nprocesses",
-                        default=10,
-                        type=int,
-                        help="Using how many processes")
-    args = parser.parse_args()
-    return args
-
-def doit(x):
-    a=Image.open(x)
-    if a.getbands()!=('R','G','B'):
-        os.remove(x)
-        
return - a.resize((256,256),Image.BICUBIC).save(x) - return - -args=parse_args() -pool = Pool(processes=args.nprocesses) -jpgs = [] -flist = os.listdir(args.dir) -full_flist = [os.path.join(args.dir,x) for x in flist] -pool.map(doit, full_flist) -print('done') \ No newline at end of file diff --git a/gimp-plugins/neural-colorization/train.py b/gimp-plugins/neural-colorization/train.py deleted file mode 100755 index d5f5ae2..0000000 --- a/gimp-plugins/neural-colorization/train.py +++ /dev/null @@ -1,186 +0,0 @@ -import torch -import torch.nn as nn -import argparse -from torch.autograd import Variable -import torchvision.models as models -import os -from torch.utils import data -from model import generator -import numpy as np -from PIL import Image -from skimage.color import rgb2yuv,yuv2rgb -import cv2 - -def parse_args(): - parser = argparse.ArgumentParser(description="Train a GAN based model") - parser.add_argument("-d", - "--training_dir", - type=str, - required=True, - help="Training directory (folder contains all 256*256 images)") - parser.add_argument("-t", - "--test_image", - type=str, - default=None, - help="Test image location") - parser.add_argument("-c", - "--checkpoint_location", - type=str, - required=True, - help="Place to save checkpoints") - parser.add_argument("-e", - "--epoch", - type=int, - default=120, - help="Epoches to run training") - parser.add_argument("--gpu", - type=int, - default=0, - help="which GPU to use?") - parser.add_argument("-b", - "--batch_size", - type=int, - default=20, - help="batch size") - parser.add_argument("-w", - "--num_workers", - type=int, - default=6, - help="Number of workers to fetch data") - parser.add_argument("-p", - "--pixel_loss_weights", - type=float, - default=1000.0, - help="Pixel-wise loss weights") - parser.add_argument("--g_every", - type=int, - default=1, - help="Training generator every k iteration") - parser.add_argument("--g_lr", - type=float, - default=1e-4, - help="learning rate for generator") - parser.add_argument("--d_lr", - type=float, - default=1e-4, - help="learning rate for discriminator") - parser.add_argument("-i", - "--checkpoint_every", - type=int, - default=100, - help="Save checkpoint every k iteration (checkpoints for same epoch will overwrite)") - parser.add_argument("--d_init", - type=str, - default=None, - help="Init weights for discriminator") - parser.add_argument("--g_init", - type=str, - default=None, - help="Init weights for generator") - args = parser.parse_args() - return args - -# define data generator -class img_data(data.Dataset): - def __init__(self, path): - files = os.listdir(path) - self.files = [os.path.join(path,x) for x in files] - def __len__(self): - return len(self.files) - - def __getitem__(self, index): - img = Image.open(self.files[index]) - yuv = rgb2yuv(img) - y = yuv[...,0]-0.5 - u_t = yuv[...,1] / 0.43601035 - v_t = yuv[...,2] / 0.61497538 - return torch.Tensor(np.expand_dims(y,axis=0)),torch.Tensor(np.stack([u_t,v_t],axis=0)) - - -args = parse_args() -if not os.path.exists(os.path.join(args.checkpoint_location,'weights')): - os.makedirs(os.path.join(args.checkpoint_location,'weights')) - -# Define G, same as torch version -G = generator().cuda(args.gpu) - -# define D -D = models.resnet18(pretrained=False,num_classes=2) -D.fc = nn.Sequential(nn.Linear(512, 1), nn.Sigmoid()) -D = D.cuda(args.gpu) - -trainset = img_data(args.training_dir) -params = {'batch_size': args.batch_size, - 'shuffle': True, - 'num_workers': args.num_workers} -training_generator = data.DataLoader(trainset, 
**params)
-if args.test_image is not None:
-    test_img = Image.open(args.test_image).convert('RGB').resize((256,256))
-    test_yuv = rgb2yuv(test_img)
-    test_inf = test_yuv[...,0].reshape(1,1,256,256)
-    test_var = Variable(torch.Tensor(test_inf-0.5)).cuda(args.gpu)
-if args.d_init is not None:
-    D.load_state_dict(torch.load(args.d_init))
-if args.g_init is not None:
-    G.load_state_dict(torch.load(args.g_init))
-
-# save test image for beginning
-if args.test_image is not None:
-    test_res = G(test_var)
-    uv=test_res.cpu().detach().numpy()
-    uv[:,0,:,:] *= 0.436
-    uv[:,1,:,:] *= 0.615
-    test_yuv = np.concatenate([test_inf,uv],axis=1).reshape(3,256,256)
-    test_rgb = yuv2rgb(test_yuv.transpose(1,2,0))
-    cv2.imwrite(os.path.join(args.checkpoint_location,'test_init.jpg'),(test_rgb.clip(min=0,max=1)*256)[:,:,[2,1,0]])
-
-i=0
-adversarial_loss = torch.nn.BCELoss()
-optimizer_G = torch.optim.Adam(G.parameters(), lr=args.g_lr, betas=(0.5, 0.999))
-optimizer_D = torch.optim.Adam(D.parameters(), lr=args.d_lr, betas=(0.5, 0.999))
-for epoch in range(args.epoch):
-    for y, uv in training_generator:
-        # Adversarial ground truths
-        valid = Variable(torch.Tensor(y.size(0), 1).fill_(1.0), requires_grad=False).cuda(args.gpu)
-        fake = Variable(torch.Tensor(y.size(0), 1).fill_(0.0), requires_grad=False).cuda(args.gpu)
-
-        yvar = Variable(y).cuda(args.gpu)
-        uvvar = Variable(uv).cuda(args.gpu)
-        real_imgs = torch.cat([yvar,uvvar],dim=1)
-
-        optimizer_G.zero_grad()
-        uvgen = G(yvar)
-        # Generate a batch of images
-        gen_imgs = torch.cat([yvar.detach(),uvgen],dim=1)
-
-        # Loss measures generator's ability to fool the discriminator
-        g_loss_gan = adversarial_loss(D(gen_imgs), valid)
-        g_loss = g_loss_gan + args.pixel_loss_weights * torch.mean((uvvar-uvgen)**2)
-        if i%args.g_every==0:
-            g_loss.backward()
-            optimizer_G.step()
-
-        optimizer_D.zero_grad()
-
-        # Measure discriminator's ability to classify real from generated samples
-        real_loss = adversarial_loss(D(real_imgs), valid)
-        fake_loss = adversarial_loss(D(gen_imgs.detach()), fake)
-        d_loss = (real_loss + fake_loss) / 2
-        d_loss.backward()
-        optimizer_D.step()
-        i+=1
-        if i%args.checkpoint_every==0:
-            print ("Epoch: %d: [D loss: %f] [G total loss: %f] [G GAN Loss: %f]" % (epoch, d_loss.item(), g_loss.item(), g_loss_gan.item()))
-
-            torch.save(D.state_dict(), os.path.join(args.checkpoint_location,'weights','D'+str(epoch)+'.pth'))
-            torch.save(G.state_dict(), os.path.join(args.checkpoint_location,'weights','G'+str(epoch)+'.pth'))
-            if args.test_image is not None:
-                test_res = G(test_var)
-                uv=test_res.cpu().detach().numpy()
-                uv[:,0,:,:] *= 0.436
-                uv[:,1,:,:] *= 0.615
-                test_yuv = np.concatenate([test_inf,uv],axis=1).reshape(3,256,256)
-                test_rgb = yuv2rgb(test_yuv.transpose(1,2,0))
-                cv2.imwrite(os.path.join(args.checkpoint_location,'test_epoch_'+str(epoch)+'.jpg'),(test_rgb.clip(min=0,max=1)*256)[:,:,[2,1,0]])
-torch.save(D.state_dict(), os.path.join(args.checkpoint_location,'D_final.pth'))
-torch.save(G.state_dict(), os.path.join(args.checkpoint_location,'G_final.pth'))
diff --git a/gimp-plugins/pytorch-SRResNet/__pycache__/srresnet.cpython-38.pyc b/gimp-plugins/pytorch-SRResNet/__pycache__/srresnet.cpython-38.pyc
deleted file mode 100755
index a558116dc11bde5c0f90883b469b6e4b7ca88dc1..0000000000000000000000000000000000000000
Binary files a/gimp-plugins/pytorch-SRResNet/__pycache__/srresnet.cpython-38.pyc and /dev/null differ
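
For reference, the neural-colorization inference removed by this patch reduces to the following minimal sketch (not part of the patch itself); it assumes the deleted generator() from neural-colorization/model.py is importable, runs on CPU only, and saves with PIL instead of cv2:

# Minimal sketch of the removed YUV-based colorization: predict UV from the Y
# channel, rescale and upsample UV, then convert back to RGB.
import numpy as np
import torch
from PIL import Image
from scipy.ndimage import zoom
from skimage.color import rgb2yuv, yuv2rgb

from model import generator  # deleted module, referenced here for illustration only


def colorize(in_path, out_path, weights_path):
    G = generator()
    G.load_state_dict(torch.load(weights_path, map_location='cpu'))
    G.eval()

    yuv = rgb2yuv(np.asarray(Image.open(in_path).convert('RGB')))
    H, W, _ = yuv.shape
    y = yuv[..., 0][None, None, ...]                   # 1 x 1 x H x W luminance
    with torch.no_grad():
        uv = G(torch.Tensor(y - 0.5)).numpy()          # predicted chrominance
    uv[:, 0] *= 0.436                                  # rescale U to its range
    uv[:, 1] *= 0.615                                  # rescale V to its range
    _, _, h, w = uv.shape
    uv = zoom(uv, (1, 1, float(H) / h, float(W) / w))  # match input resolution
    rgb = yuv2rgb(np.concatenate([y, uv], axis=1)[0].transpose(1, 2, 0))
    Image.fromarray((rgb.clip(0, 1) * 255).astype(np.uint8)).save(out_path)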