GIMP3-ML
DESKTOP-F04AGRR\Kritik Soman 3 years ago
parent 8730796215
commit 23586ce863

.gitignore vendored

@@ -3,3 +3,6 @@
*$py.class
*.so
*.pyc
dist/
.idea/
gimpml.egg-info/

.idea/.gitignore vendored

@@ -1,3 +0,0 @@
# Default ignored files
/shelf/
/workspace.xml

@@ -1,14 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="Python 3.8 (gimpenv3)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module" module-name="gimp" />
<orderEntry type="module" module-name="GIMP-ML" />
</component>
<component name="PyDocumentationSettings">
<option name="format" value="PLAIN" />
<option name="myDocStringFormat" value="Plain" />
</component>
</module>

@@ -1,114 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="CsvFileAttributes">
<option name="attributeMap">
<map>
<entry key="/gimpml/plugins/monodepth/monodepth.py">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
<entry key="/gimpml/plugins/semseg/semseg.py">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
<entry key="/gimpml/tools/complete_install.py">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
<entry key="/gimpml/tools/inpainting.py">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
<entry key="/gimpml/tools/monodepth.py">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
<entry key="/install.bat">
<value>
<Attribute>
<option name="separator" value=";" />
</Attribute>
</value>
</entry>
<entry key="/testscases/test.py">
<value>
<Attribute>
<option name="separator" value=":" />
</Attribute>
</value>
</entry>
<entry key="C:\Users\Kritik Soman\AppData\Roaming\JetBrains\PyCharmCE2020.2\scratches\backtranslate.py">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
<entry key="C:\Users\Kritik Soman\AppData\Roaming\JetBrains\PyCharmCE2020.2\scratches\scratch.py">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
<entry key="D:\PycharmProjects\gimp\plug-ins\python\foggify.py">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
<entry key="D:\PycharmProjects\gimp\plug-ins\python\histogram-export.py">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
<entry key="\gimpml\tools\complete_install.py">
<value>
<Attribute>
<option name="separator" value=":" />
</Attribute>
</value>
</entry>
<entry key="\gimpml\tools\model_info.csv">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
<entry key="\tmp.py">
<value>
<Attribute>
<option name="separator" value=":" />
</Attribute>
</value>
</entry>
<entry key="\tmponedrive.py">
<value>
<Attribute>
<option name="separator" value="," />
</Attribute>
</value>
</entry>
</map>
</option>
</component>
</project>

@@ -1,6 +0,0 @@
<component name="InspectionProjectProfileManager">
<settings>
<option name="USE_PROJECT_PROFILE" value="false" />
<version value="1.0" />
</settings>
</component>

@@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (gimpenv3)" project-jdk-type="Python SDK" />
</project>

@@ -1,10 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/../GIMP-ML/.idea/GIMP-ML.iml" filepath="$PROJECT_DIR$/../GIMP-ML/.idea/GIMP-ML.iml" />
<module fileurl="file://$PROJECT_DIR$/.idea/GIMP3-ML-pip.iml" filepath="$PROJECT_DIR$/.idea/GIMP3-ML-pip.iml" />
<module fileurl="file://$PROJECT_DIR$/../gimp/.idea/gimp.iml" filepath="$PROJECT_DIR$/../gimp/.idea/gimp.iml" />
</modules>
</component>
</project>

@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
<mapping directory="$PROJECT_DIR$/gimpml/tools/DPT" vcs="Git" />
<mapping directory="$PROJECT_DIR$/../gimp" vcs="Git" />
</component>
</project>

Binary file not shown.

Binary file not shown.

@@ -1,142 +0,0 @@
Metadata-Version: 2.1
Name: gimpml
Version: 0.0.6
Summary: A.I. for GIMP
Home-page: https://github.com/kritiksoman/GIMP-ML
Author: Kritik Soman
Author-email: kritiksoman@ieee.org
License: UNKNOWN
Keywords: sample,setuptools,development
Platform: UNKNOWN
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: Topic :: Software Development :: Build Tools
Classifier: Programming Language :: Python :: 3.8
Requires-Python: >=2.7
Description-Content-Type: text/markdown
This branch is under development and is dedicated to GIMP 3 and Python 3. :star: :star: :star: :star: are welcome. <br>
<img src="https://github.com/kritiksoman/tmp/blob/master/cover.png" width="1280" height="180"> <br>
# Objectives
[1] Model Ensembling. <br>
[2] Deep learning inference package for different computer vision tasks. <br>
[3] Bridge the gap between CV research and real-world data. <br>
[4] Add AI to routine image editing workflows. <br>
# Contribution
[<img src="http://img.youtube.com/vi/vFFNp0xhEiU/0.jpg" width="800" height="600">](http://www.youtube.com/watch?v=vFFNp0xhEiU)<br> <br>
People interested in contributing are welcome!
Join us on Slack --> [<img src="https://woocommerce.com/wp-content/uploads/2015/02/Slack_RGB.png" width="130" height="50">](https://join.slack.com/t/gimp-mlworkspace/shared_invite/zt-rbaxvztx-GRvj941idw3sQ0trS686YA)<br>
Contribution guidelines are available --> [Link](https://github.com/kritiksoman/GIMP-ML/blob/GIMP3-ML/CONTRIBUTION.md).<br>
# Screenshot of Menu
![image1](https://github.com/kritiksoman/GIMP-ML/blob/GIMP3-ML/screenshot.png)
# Installation Steps
[1] Install [GIMP](https://www.gimp.org/downloads/devel/) 2.99.6 (Windows and Linux only). <br>
[2] Clone this repository: <br>
```git clone https://github.com/kritiksoman/GIMP-ML.git``` <br>
[3] Change the branch: <br>
```git checkout GIMP3-ML``` <br>
[4] On Linux, run for GPU/CPU: <br>
```bash GIMP-ML/install.bat```<br>
On Windows, run for CPU: <br>
```GIMP-ML\install.bat```<br>
On Windows, run for GPU: <br>
```GIMP-ML\install.bat gpu```<br>
[5] Follow the steps printed in the terminal or cmd window. <br>
FYI: weights link --> [Link](https://drive.google.com/drive/folders/10IiBO4fuMiGQ-spBStnObbk9R-pGp6u8?usp=sharing)
| Windows | Linux |
| ------------- |:-------------:|
|[<img src="http://img.youtube.com/vi/Rc88_qHSEjc/0.jpg" width="400" height="300">](http://www.youtube.com/watch?v=Rc88_qHSEjc)| [<img src="http://img.youtube.com/vi/MUdUzxYDwaU/0.jpg" width="400" height="300">](http://www.youtube.com/watch?v=MUdUzxYDwaU) |
# Use as a Python Package
```Python
import cv2
import gimpml
image = cv2.imread('sampleinput/img.png')
alpha = cv2.imread('sampleinput/alpha.png')
out = gimpml.kmeans(image)
cv2.imwrite('output/tmp-kmeans.jpg', out)
out = gimpml.deblur(image)
cv2.imwrite('output/tmp-deblur.jpg', out)
out = gimpml.deepcolor(image)
cv2.imwrite('output/tmp-deepcolor.jpg', out)
out = gimpml.dehaze(image)
cv2.imwrite('output/tmp-dehaze.jpg', out)
out = gimpml.denoise(image)
cv2.imwrite('output/tmp-denoise.jpg', out)
out = gimpml.matting(image, alpha)
cv2.imwrite('output/tmp-matting.png', out) # save as png
out = gimpml.enlighten(image)
cv2.imwrite('output/tmp-enlighten.jpg', out)
face = cv2.imread('sampleinput/face.png')
out = gimpml.parseface(face[:, :, ::-1])
cv2.imwrite('output/tmp-parseface.png', out[:, :, ::-1])
mask1 = cv2.imread('sampleinput/mask1.png')
mask2 = cv2.imread('sampleinput/mask2.png')
out = gimpml.interpolateframe(mask1, mask2, 'output/interpolateframes')
face = cv2.imread('sampleinput/face.png')
out = gimpml.depth(face[:, :, ::-1])
cv2.imwrite('output/tmp-depth.png', out[:, :, ::-1])
image = cv2.imread('sampleinput/face.png')
out = gimpml.semseg(image[:, :, ::-1])
cv2.imwrite('output/tmp-semseg.png', out[:, :, ::-1])
image = cv2.imread('sampleinput/face.png')
out = gimpml.super(image[:, :, ::-1])
cv2.imwrite('output/tmp-super.png', out[:, :, ::-1])
image = cv2.imread('sampleinput/inpaint.png')
mask = cv2.imread('sampleinput/inpaint-mask.png')
out = gimpml.inpaint(image[:, :, ::-1], mask[:, :, 0])
cv2.imwrite('output/tmp-inpaint.png', out[:, :, ::-1])
```
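Note that `cv2.imwrite` does not create missing directories, so the snippet above assumes the `output/` and `output/interpolateframes` folders already exist. One way to create them beforehand (a usage note, not part of gimpml):
```Python
import os
os.makedirs("output/interpolateframes", exist_ok=True)  # also creates "output/"
```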
# Model Zoo
| Name | License | Dataset |
| ------------- |:-------------:| :-------------:|
| [deblur](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#de-blur) | [BSD 3-clause](https://github.com/VITA-Group/DeblurGANv2/blob/master/LICENSE) | GoPro |
| [faceparse](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#face-parsing) | [MIT](https://github.com/zllrunning/face-parsing.PyTorch/blob/master/LICENSE) | CelebAMask-HQ |
| [coloring](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#deep-image-coloring) | [MIT](https://github.com/junyanz/interactive-deep-colorization/blob/master/LICENSE) | ImageNet |
| [monodepth](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#monodepth) | [MIT](https://github.com/intel-isl/DPT/blob/main/LICENSE) | [Multiple](https://arxiv.org/pdf/1907.01341v3.pdf) |
| [super-resolution](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#image-super-resolution) | [MIT](https://github.com/twtygqyy/pytorch-SRResNet/blob/master/LICENSE) | ImageNet |
| [matting](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#deep-image-matting) | [Non-commercial purposes](https://github.com/poppinace/indexnet_matting/blob/master/Adobe%20Deep%20Image%20Mattng%20Dataset%20License%20Agreement.pdf) | Adobe Deep Image Matting |
| [semantic-segmentation](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#semantic-segmentation) | [MIT](https://github.com/intel-isl/DPT/blob/main/LICENSE) | ADE20K |
| [kmeans](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#k-means-clustering) | [BSD](https://github.com/scipy/scipy/blob/master/LICENSE.txt) | - |
| [dehazing](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#de-haze) | [MIT](https://github.com/MayankSingal/PyTorch-Image-Dehazing/blob/master/LICENSE) | [Custom](https://sites.google.com/site/boyilics/website-builder/project-page) |
| [denoising](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#de-noise) | [GPL3](https://github.com/SaoYan/DnCNN-PyTorch/blob/master/LICENSE) | BSD68 |
| [enlighten](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#enlightening) | [BSD](https://github.com/VITA-Group/EnlightenGAN/blob/master/License) | [Custom](https://arxiv.org/pdf/1906.06972.pdf) |
| [interpolate-frames](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#interpolate-frames) | [MIT](https://github.com/hzwer/arXiv2020-RIFE/blob/main/LICENSE) | [HD](https://arxiv.org/pdf/2011.06294.pdf) |
| [inpainting](https://github.com/kritiksoman/GIMP-ML/wiki/User-Manual#in-painting) | [CC BY-NC 4.0](https://github.com/knazeri/edge-connect/blob/master/LICENSE.md) | [CelebA, CelebHQ, Places2, Paris StreetView](https://openaccess.thecvf.com/content_ICCVW_2019/papers/AIM/Nazeri_EdgeConnect_Structure_Guided_Image_Inpainting_using_Edge_Prediction_ICCVW_2019_paper.pdf) |
# Citation
Please cite using the following bibtex entry:
```
@article{soman2020GIMPML,
title={GIMP-ML: Python Plugins for using Computer Vision Models in GIMP},
author={Soman, Kritik},
journal={arXiv preprint arXiv:2004.13060},
year={2020}
}
```

@@ -1,198 +0,0 @@
MANIFEST.in
README.md
setup.py
gimpml/__init__.py
gimpml.egg-info/PKG-INFO
gimpml.egg-info/SOURCES.txt
gimpml.egg-info/dependency_links.txt
gimpml.egg-info/requires.txt
gimpml.egg-info/top_level.txt
gimpml/plugins/__init__.py
gimpml/plugins/coloring/__init__.py
gimpml/plugins/coloring/coloring.py
gimpml/plugins/colorpalette/__init__.py
gimpml/plugins/colorpalette/color_palette.png
gimpml/plugins/colorpalette/colorpalette.py
gimpml/plugins/deblur/__init__.py
gimpml/plugins/deblur/deblur.py
gimpml/plugins/dehaze/__init__.py
gimpml/plugins/dehaze/dehaze.py
gimpml/plugins/denoise/__init__.py
gimpml/plugins/denoise/denoise.py
gimpml/plugins/enlighten/__init__.py
gimpml/plugins/enlighten/enlighten.py
gimpml/plugins/faceparse/__init__.py
gimpml/plugins/faceparse/faceparse.py
gimpml/plugins/images/__init__.py
gimpml/plugins/images/error_icon.png
gimpml/plugins/images/plugin_logo.png
gimpml/plugins/inpainting/__init__.py
gimpml/plugins/inpainting/inpainting.py
gimpml/plugins/interpolation/__init__.py
gimpml/plugins/interpolation/interpolation.py
gimpml/plugins/kmeans/__init__.py
gimpml/plugins/kmeans/kmeans.py
gimpml/plugins/matting/__init__.py
gimpml/plugins/matting/matting.py
gimpml/plugins/monodepth/__init__.py
gimpml/plugins/monodepth/monodepth.py
gimpml/plugins/semseg/__init__.py
gimpml/plugins/semseg/semseg.py
gimpml/plugins/superresolution/__init__.py
gimpml/plugins/superresolution/superresolution.py
gimpml/tools/__init__.py
gimpml/tools/coloring.py
gimpml/tools/complete_install.py
gimpml/tools/deblur.py
gimpml/tools/dehaze.py
gimpml/tools/denoise.py
gimpml/tools/enlighten.py
gimpml/tools/faceparse.py
gimpml/tools/inpainting.py
gimpml/tools/interpolation.py
gimpml/tools/kmeans.py
gimpml/tools/matting.py
gimpml/tools/model_info.csv
gimpml/tools/monodepth.py
gimpml/tools/semseg.py
gimpml/tools/superresolution.py
gimpml/tools/DPT/__init__.py
gimpml/tools/DPT/monodepth_run.py
gimpml/tools/DPT/semseg_run.py
gimpml/tools/DPT/dpt/__init__.py
gimpml/tools/DPT/dpt/base_model.py
gimpml/tools/DPT/dpt/blocks.py
gimpml/tools/DPT/dpt/midas_net.py
gimpml/tools/DPT/dpt/models.py
gimpml/tools/DPT/dpt/transforms.py
gimpml/tools/DPT/dpt/vit.py
gimpml/tools/DPT/dpt_util/__init__.py
gimpml/tools/DPT/dpt_util/io.py
gimpml/tools/DPT/dpt_util/misc.py
gimpml/tools/DPT/dpt_util/pallete.py
gimpml/tools/DeblurGANv2/__init__.py
gimpml/tools/DeblurGANv2/__int__.py
gimpml/tools/DeblurGANv2/adversarial_trainer.py
gimpml/tools/DeblurGANv2/aug.py
gimpml/tools/DeblurGANv2/dataset.py
gimpml/tools/DeblurGANv2/metric_counter.py
gimpml/tools/DeblurGANv2/predict.py
gimpml/tools/DeblurGANv2/predictorClass.py
gimpml/tools/DeblurGANv2/schedulers.py
gimpml/tools/DeblurGANv2/test_aug.py
gimpml/tools/DeblurGANv2/test_dataset.py
gimpml/tools/DeblurGANv2/test_metrics.py
gimpml/tools/DeblurGANv2/testing.py
gimpml/tools/DeblurGANv2/train.py
gimpml/tools/DeblurGANv2/config/__init__.py
gimpml/tools/DeblurGANv2/models/__init__.py
gimpml/tools/DeblurGANv2/models/fpn_densenet.py
gimpml/tools/DeblurGANv2/models/fpn_inception.py
gimpml/tools/DeblurGANv2/models/fpn_inception_simple.py
gimpml/tools/DeblurGANv2/models/fpn_mobilenet.py
gimpml/tools/DeblurGANv2/models/losses.py
gimpml/tools/DeblurGANv2/models/mobilenet_v2.py
gimpml/tools/DeblurGANv2/models/models.py
gimpml/tools/DeblurGANv2/models/networks.py
gimpml/tools/DeblurGANv2/models/senet.py
gimpml/tools/DeblurGANv2/models/unet_seresnext.py
gimpml/tools/DeblurGANv2/util/__init__.py
gimpml/tools/DeblurGANv2/util/image_pool.py
gimpml/tools/DeblurGANv2/util/metrics.py
gimpml/tools/EnlightenGAN/__init__.py
gimpml/tools/EnlightenGAN/enlighten_data/__init__.py
gimpml/tools/EnlightenGAN/enlighten_data/base_dataset.py
gimpml/tools/EnlightenGAN/enlighten_models/__init__.py
gimpml/tools/EnlightenGAN/enlighten_models/base_model.py
gimpml/tools/EnlightenGAN/enlighten_models/models.py
gimpml/tools/EnlightenGAN/enlighten_models/networks.py
gimpml/tools/EnlightenGAN/enlighten_models/single_model.py
gimpml/tools/EnlightenGAN/enlighten_util/__init__.py
gimpml/tools/EnlightenGAN/enlighten_util/image_pool.py
gimpml/tools/EnlightenGAN/enlighten_util/util.py
gimpml/tools/EnlightenGAN/lib/__init__.py
gimpml/tools/EnlightenGAN/lib/nn/__init__.py
gimpml/tools/EnlightenGAN/lib/nn/modules/__init__.py
gimpml/tools/EnlightenGAN/lib/nn/modules/batchnorm.py
gimpml/tools/EnlightenGAN/lib/nn/modules/comm.py
gimpml/tools/EnlightenGAN/lib/nn/modules/replicate.py
gimpml/tools/EnlightenGAN/lib/nn/modules/unittest.py
gimpml/tools/EnlightenGAN/lib/nn/parallel/__init__.py
gimpml/tools/EnlightenGAN/lib/nn/parallel/data_parallel.py
gimpml/tools/Inpainting/DFNet_core.py
gimpml/tools/Inpainting/RefinementNet_core.py
gimpml/tools/Inpainting/__init__.py
gimpml/tools/MiDaS/MiDaS_utils.py
gimpml/tools/MiDaS/__init__.py
gimpml/tools/MiDaS/mono_run.py
gimpml/tools/MiDaS/monodepth_net.py
gimpml/tools/PD-Denoising-pytorch/__init__.py
gimpml/tools/PD-Denoising-pytorch/denoise_models.py
gimpml/tools/PD-Denoising-pytorch/denoiser.py
gimpml/tools/PD-Denoising-pytorch/models_denoise.py
gimpml/tools/PD-Denoising-pytorch/utils.py
gimpml/tools/PyTorch-Image-Dehazing/__init__.py
gimpml/tools/PyTorch-Image-Dehazing/dataloader.py
gimpml/tools/PyTorch-Image-Dehazing/net.py
gimpml/tools/RIFE/__init__.py
gimpml/tools/RIFE/rife_model/IFNet.py
gimpml/tools/RIFE/rife_model/IFNet2F.py
gimpml/tools/RIFE/rife_model/RIFE.py
gimpml/tools/RIFE/rife_model/RIFE2F.py
gimpml/tools/RIFE/rife_model/__init__.py
gimpml/tools/RIFE/rife_model/loss.py
gimpml/tools/RIFE/rife_model/warplayer.py
gimpml/tools/edge-connect/__init__.py
gimpml/tools/edge-connect/src/__init__.py
gimpml/tools/edge-connect/src/canny_opencv.py
gimpml/tools/edge-connect/src/config.py
gimpml/tools/edge-connect/src/dataset.py
gimpml/tools/edge-connect/src/edge_connect.py
gimpml/tools/edge-connect/src/loss.py
gimpml/tools/edge-connect/src/metrics.py
gimpml/tools/edge-connect/src/models.py
gimpml/tools/edge-connect/src/networks.py
gimpml/tools/edge-connect/src/utils.py
gimpml/tools/face-parsing-PyTorch/__init__.py
gimpml/tools/face-parsing-PyTorch/evaluate.py
gimpml/tools/face-parsing-PyTorch/face_dataset.py
gimpml/tools/face-parsing-PyTorch/makeup.py
gimpml/tools/face-parsing-PyTorch/model.py
gimpml/tools/face-parsing-PyTorch/optimizer.py
gimpml/tools/face-parsing-PyTorch/prepropess_data.py
gimpml/tools/face-parsing-PyTorch/resnet.py
gimpml/tools/face-parsing-PyTorch/test.py
gimpml/tools/face-parsing-PyTorch/train.py
gimpml/tools/face-parsing-PyTorch/transform.py
gimpml/tools/face-parsing-PyTorch/parse_modules/__init__.py
gimpml/tools/face-parsing-PyTorch/parse_modules/bn.py
gimpml/tools/face-parsing-PyTorch/parse_modules/deeplab.py
gimpml/tools/face-parsing-PyTorch/parse_modules/dense.py
gimpml/tools/face-parsing-PyTorch/parse_modules/functions.py
gimpml/tools/face-parsing-PyTorch/parse_modules/misc.py
gimpml/tools/face-parsing-PyTorch/parse_modules/residual.py
gimpml/tools/face-parsing-PyTorch/parse_modules/src/__init__.py
gimpml/tools/face-parsing-PyTorch/parse_modules/src/utils/__init__.py
gimpml/tools/ideepcolor/__init__.py
gimpml/tools/ideepcolor/data/__init__.py
gimpml/tools/ideepcolor/data/colorize_image.py
gimpml/tools/ideepcolor/data/lab_gamut.py
gimpml/tools/ideepcolor/data/color_bins/__init__.py
gimpml/tools/ideepcolor/pytorch/__init__.py
gimpml/tools/ideepcolor/pytorch/model.py
gimpml/tools/pytorch-SRResNet/__init__.py
gimpml/tools/pytorch-SRResNet/dataset.py
gimpml/tools/pytorch-SRResNet/demo.py
gimpml/tools/pytorch-SRResNet/eval.py
gimpml/tools/pytorch-SRResNet/main_srresnet.py
gimpml/tools/pytorch-SRResNet/srresnet.py
gimpml/tools/pytorch-SRResNet/data/__init__.py
gimpml/tools/pytorch-deep-image-matting/__init__.py
gimpml/tools/pytorch-deep-image-matting/data.py
gimpml/tools/pytorch-deep-image-matting/deepmatting_net.py
gimpml/tools/pytorch-deep-image-matting/demo.py
gimpml/tools/pytorch-deep-image-matting/deploy.py
gimpml/tools/pytorch-deep-image-matting/tools/__init__.py
gimpml/tools/pytorch-deep-image-matting/tools/chg_model.py
gimpml/tools/pytorch-deep-image-matting/tools/composite.py
gimpml/tools/pytorch-deep-image-matting/tools/loss_draw.py

@@ -1,13 +0,0 @@
numpy
scipy
gdown
typing
requests
opencv-python<=4.3
pretrainedmodels
scikit-image
timm==0.4.5
[:python_version <= "2.7"]
future
enum

@@ -8,7 +8,7 @@ sys.path.extend([plugin_loc])
import numpy as np
import torch
import cv2
from data import colorize_image as CI
from color_data import colorize_image as CI
from gimpml.tools.tools_utils import get_weight_path
def get_deepcolor(layerimg, layerc=None, cpu_flag=False, weight_path=None):

@@ -1,30 +1,28 @@
model,file_id,fileSize,mFName,md5sum
deepmatting,11dxJKH8p7xkcGtMtvzMUw-ua6pZ0vrfw,108,stage1_sad_57.1.pth,0cffb16b30fad67fa3eb5cfeeae0c7d5
MiDaS,11eap5jc-4SCX_sMMxYE6Bi5q_BKw894a,143,model.pt,74a1fcdde0b743212851d4b81be5f6eb
MiDaS,1hmv768Mrfv56SitcoMUlYp6zhUrj0HQv,493,dpt_hybrid-midas-501f0c75.pt,39beff6ce86ce03f6b94a29c6d11ee12
semseg,19rLDu8COUFSrOMnk124SBiEN9S9_QHBH,496,dpt_hybrid-ade20k-53898607.pt,d59f62cfba50297ffdef053bc4818a21
colorize,12tKfNIDewgJPbW3FiITV_AMbOtZWP0Eg,130,caffemodel.pth,efb7de5b0f3827d8a7e392ac287bca81
superresolution,11GwnqKsYo2jujACD_GMB9uMTQfsuk2RY,6,model_srresnet.pth,8f7ed4feb00402863e771a46013a93a8
faceparse,115nnWD0FoDkplTJYBY7lTQu1VNXFbCA_,51,79999_iter.pth,ff26a222ce48a618a1fa820b46223cae
deblur,11Tt4a_URCer4ZxZA2l3dLMRVeSwoBFYP,233,mymodel.pth,190be4c1b076ffd7b1cb5e40ea31ce4c
deblur,11MCHMVhs4aaMGSusqiu0rtAo97xuC1GA,234,best_fpn.h5,96f747f38a0119669265cbb5fc7b3c5c
deeplabv3,11rX1MHjhmtaoFTQ7ao4p6b31Oz300i0G,233,deeplabv3+model.pt,32f62b9f15f2e39085476d71b902c83c
deepdehaze,1hrd310nYCbh6ui_ZsZci7Zna2AFP1sMS,0.008,dehazer.pth,77a898308869d223fdda17436eae34ee
deepdenoise,1acZ1FTNMuAQaYtE3RYLA8fs8cQrW2tZ_,0.166,est_net.pth,bf681d25ed09015b9b152b991fc032a8
deepdenoise,1tBoyDxYJ92pvopBJeK9PmG_jMA_Ut38_,3,net.pth,bbca3ca4b3a92a5a26af605cbf823242
enlightening,1V8ARc2tDgUUpc11xiT5Y9HFQgC6Ug2T6,35,200_net_G_A.pth,5e3e6d3ab04492f4a693316515ac8571
interpolateframes,1bHmO9-_ENTYoN1-BNwSk3nLN9-NDUnRg,1.6,contextnet.pkl,e0a3353054c460b4600ab57c686c4f7e
interpolateframes,1cQvDPBKsz3TAi0Q5bJXsu6A-Z7lpk_cE,25.4,flownet.pkl,f3e13948d14bb5f0bf3fa7da455f1649
interpolateframes,1mlA8VtxIcvJfz51OsQMvWX24oqxZ429r,15,unet.pkl,491a6e475fc88eb6ff587d1912a60ff7
edgeconnect/places2,1cmaFsyjKpC6wQhCiuITUI_pmC7g54eOU,10.8,EdgeModel_dis.pth,5777b1d8fd48ec684899c80cf8e0af60
edgeconnect/places2,1z4xI1P-LfOpZRsc6hHjcGMS37PxOMePG,42.2,EdgeModel_gen.pth,8ae8a8f5f73b2c23bb20fe64e9662c36
edgeconnect/places2,1NkK9WMOEiPUfcp5Ga3cmesabb8C4g2yC,10.8,InpaintingModel_dis.pth,ec8203fca31511050d116698bb56344b
edgeconnect/places2,1bfLDxISWagkkHj5SlFz3Oc26C15HVsYu,42,InpaintingModel_gen.pth,f681314e7ff76c13302096ebf49dff2d
edgeconnect/celeba,1n_zyNXKiMG_lWekW6WepCuDGUSaK3zJK,10.8,EdgeModel_dis.pth,e3d40f777c618eac1e0a92a8b3c067b0
edgeconnect/celeba,19UyiBlj4t2DlfCbhqRlkX3xdiUdzrZLG,42,EdgeModel_gen.pth,10851c18fe7aa9e9322ed1774a0cd233
edgeconnect/celeba,1ofvNuHiY_jKg8-LVDE7CIVdWMk3ZXCRL,10.8,InpaintingModel_dis.pth,2f886c89399a29fac357a5c87c671d17
edgeconnect/celeba,1ZrVNGUO1bP84PfQBTP-VARZmGl4rarou,42,InpaintingModel_gen.pth,8e898b12fd9b86dee56887123ff72fb5
edgeconnect/psv,1lNjsFK0x4WXCFYF64Ns3KB_Flc2PuUcl,10.8,EdgeModel_dis.pth,ea6c734b66eeb8aed48e532f089957ec
edgeconnect/psv,1shppHfINx_r9Xr1uUodLt0MyHRh60tZQ,42,EdgeModel_gen.pth,40c83e91ace061acd2bd95421750ffaf
edgeconnect/psv,16pEDKygTnHXqxjoV3xN-3Ewy93qZ_kLg,10.8,InpaintingModel_dis.pth,bf9bb863592605237e620b8d73db225e
edgeconnect/psv,1LrY_vplAiGcfX0B9c_m9HJsbfuvjITa2,42,InpaintingModel_gen.pth,afc0ce9b90413298972a2ef1fc65a3c7
deepmatting,1-sS7sYCaVJJhH6Z3AZTTd3MGG6xwMT61,108,stage1_sad_57.1.pth,0cffb16b30fad67fa3eb5cfeeae0c7d5
MiDaS,10-7q6pRIlXndv7C10XasGwsJ3mtaHxG4,493,dpt_hybrid-midas-501f0c75.pt,39beff6ce86ce03f6b94a29c6d11ee12
semseg,12HgzvAr9oxlcVm1RdZc4cHpPQii72Qe6,496,dpt_hybrid-ade20k-53898607.pt,d59f62cfba50297ffdef053bc4818a21
colorize,10VhmSmmBTSZPvEedJCu9FbU6M3W3l2oV,130,caffemodel.pth,efb7de5b0f3827d8a7e392ac287bca81
superresolution,1-lx-VremVOXZQ9B4cW-qzP_md47L2iG9,6,model_srresnet.pth,8f7ed4feb00402863e771a46013a93a8
faceparse,1-VSMiqSQGSW8TM2FdL2-I0m2tAS_5QHv,51,79999_iter.pth,ff26a222ce48a618a1fa820b46223cae
deblur,1-dBhVu5J7MGfLQvQad76zVgwp494hN10,233,mymodel.pth,190be4c1b076ffd7b1cb5e40ea31ce4c
deblur,1-naBEr4n12QDRCTp05TkyaUrAs5aYCTJ,234,best_fpn.h5,96f747f38a0119669265cbb5fc7b3c5c
deepdehaze,10ohRP-xJImHTgMmVkJAtsD2o4UOytJ7y,0.008,dehazer.pth,77a898308869d223fdda17436eae34ee
deepdenoise,10fEOlEJ17kisfacmwin2TPn3jipnIliX,0.166,est_net.pth,bf681d25ed09015b9b152b991fc032a8
deepdenoise,10dhN1JxtWXS1bPkRXipZUcHkIXxTdCzS,3,net.pth,bbca3ca4b3a92a5a26af605cbf823242
enlightening,114oSrl3x8UxUDHD-FDk5wpeykAE--a7n,35,200_net_G_A.pth,5e3e6d3ab04492f4a693316515ac8571
interpolateframes,11GNIe-BqZaxDirQuvAf8uhEudHXR9ykK,1.6,contextnet.pkl,e0a3353054c460b4600ab57c686c4f7e
interpolateframes,117RKruiaYF3ajG7o38yhG6CCi4jah6cu,25.4,flownet.pkl,f3e13948d14bb5f0bf3fa7da455f1649
interpolateframes,11KUlDt45szlWd3c36RZZZ9CN0p6oIOoJ,15,unet.pkl,491a6e475fc88eb6ff587d1912a60ff7
edgeconnect/places2,11SN5DlL1HsH9Ikx9MvPlCv0VTXs4SwoD,10.8,EdgeModel_dis.pth,5777b1d8fd48ec684899c80cf8e0af60
edgeconnect/places2,11RC0cMS-v9kuInGUcljGXpficDSvUH1p,42.2,EdgeModel_gen.pth,8ae8a8f5f73b2c23bb20fe64e9662c36
edgeconnect/places2,11ojOhIbf5l0SweGyzrbqcEKNbciqZan6,10.8,InpaintingModel_dis.pth,ec8203fca31511050d116698bb56344b
edgeconnect/places2,11V9p3pK5uZLWg4nWSOAlrT5GkEuEUSZM,42,InpaintingModel_gen.pth,f681314e7ff76c13302096ebf49dff2d
edgeconnect/celeba,112NfyhcwB_p2Yein7Gjx43SHw3mzaKPw,10.8,EdgeModel_dis.pth,e3d40f777c618eac1e0a92a8b3c067b0
edgeconnect/celeba,111Kubp5jVxUgsD0LpjFi2ytuHJIqYDwH,42,EdgeModel_gen.pth,10851c18fe7aa9e9322ed1774a0cd233
edgeconnect/celeba,11Qe0qO-EZKccBCzzAyBtkUMnhdI5j7-I,10.8,InpaintingModel_dis.pth,2f886c89399a29fac357a5c87c671d17
edgeconnect/celeba,11LVO9vRbnw3zFKSojjBLhdYcBkzh65WJ,42,InpaintingModel_gen.pth,8e898b12fd9b86dee56887123ff72fb5
edgeconnect/psv,11vIrpFaAHhgPN0yixFbN7ZNY78ooefiq,10.8,EdgeModel_dis.pth,ea6c734b66eeb8aed48e532f089957ec
edgeconnect/psv,11taSeBwJXj3AC1qucNHFUgTOTDIbyOLz,42,EdgeModel_gen.pth,40c83e91ace061acd2bd95421750ffaf
edgeconnect/psv,122BaS0KobaZqsU5mZcIR4pTsoQz__1k8,10.8,InpaintingModel_dis.pth,bf9bb863592605237e620b8d73db225e
edgeconnect/psv,11wPatO4UAIuPc_9Se99QvlKwGRiw62PY,42,InpaintingModel_gen.pth,afc0ce9b90413298972a2ef1fc65a3c7
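Each row of model_info.csv pairs a model with a Google Drive file_id, an approximate file size in MB, the weight filename (mFName), and its md5sum. Below is a minimal sketch of how such a row could be fetched and checked; the `weights/<model>/<mFName>` layout and the `verify_row` helper are assumptions for illustration, not code from this repository.
```Python
import csv
import hashlib
import os

import gdown  # listed in requires.txt; fetches files by Google Drive id


def verify_row(row, weight_dir="weights"):
    """Download (if missing) and md5-check one model_info.csv row.

    The weights/<model>/<mFName> layout is an assumption for this sketch.
    """
    path = os.path.join(weight_dir, row["model"], row["mFName"])
    if not os.path.isfile(path):
        os.makedirs(os.path.dirname(path), exist_ok=True)
        gdown.download("https://drive.google.com/uc?id=" + row["file_id"], path, quiet=False)
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            md5.update(chunk)
    return md5.hexdigest() == row["md5sum"]


with open("gimpml/tools/model_info.csv", newline="") as f:
    for row in csv.DictReader(f):
        print(row["model"], row["mFName"], "ok" if verify_row(row) else "md5 mismatch")
```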


@@ -1,214 +0,0 @@
import torch
import cv2
import os
import random
import numpy as np
from torchvision import transforms
import logging
def gen_trimap(alpha):
k_size = random.choice(range(2, 5))
iterations = np.random.randint(5, 15)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k_size, k_size))
dilated = cv2.dilate(alpha, kernel, iterations=iterations)
eroded = cv2.erode(alpha, kernel, iterations=iterations)
trimap = np.zeros(alpha.shape)
trimap.fill(128)
# trimap[alpha >= 255] = 255
trimap[eroded >= 255] = 255
trimap[dilated <= 0] = 0
"""
alpha_unknown = alpha[trimap == 128]
num_all = alpha_unknown.size
num_0 = (alpha_unknown == 0).sum()
num_1 = (alpha_unknown == 255).sum()
print("Debug: 0 : {}/{} {:.3f}".format(num_0, num_all, float(num_0)/num_all))
print("Debug: 255: {}/{} {:.3f}".format(num_1, num_all, float(num_1)/num_all))
"""
return trimap
def compute_gradient(img):
x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
y = cv2.Sobel(img, cv2.CV_16S, 0, 1)
absX = cv2.convertScaleAbs(x)
absY = cv2.convertScaleAbs(y)
grad = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
grad = cv2.cvtColor(grad, cv2.COLOR_BGR2GRAY)
return grad
class MatTransform(object):
def __init__(self, flip=False):
self.flip = flip
def __call__(self, img, alpha, fg, bg, crop_h, crop_w):
h, w = alpha.shape
# trimap is dilated maybe choose some bg region(0)
# random crop in the unknown region center
target = np.where((alpha > 0) & (alpha < 255))
delta_h = center_h = crop_h / 2
delta_w = center_w = crop_w / 2
if len(target[0]) > 0:
rand_ind = np.random.randint(len(target[0]))
center_h = min(max(target[0][rand_ind], delta_h), h - delta_h)
center_w = min(max(target[1][rand_ind], delta_w), w - delta_w)
# choose unknown point as center not as left-top
start_h = int(center_h - delta_h)
start_w = int(center_w - delta_w)
end_h = int(center_h + delta_h)
end_w = int(center_w + delta_w)
# print("Debug: center({},{}) start({},{}) end({},{}) alpha:{} alpha-len:{} unknown-len:{}".format(center_h, center_w, start_h, start_w, end_h, end_w, alpha[int(center_h), int(center_w)], alpha.size, len(target[0])))
img = img[start_h:end_h, start_w:end_w]
fg = fg[start_h:end_h, start_w:end_w]
bg = bg[start_h:end_h, start_w:end_w]
alpha = alpha[start_h:end_h, start_w:end_w]
# random flip
if self.flip and random.random() < 0.5:
img = cv2.flip(img, 1)
alpha = cv2.flip(alpha, 1)
fg = cv2.flip(fg, 1)
bg = cv2.flip(bg, 1)
return img, alpha, fg, bg
def get_files(mydir):
res = []
for root, dirs, files in os.walk(mydir, followlinks=True):
for f in files:
if (
f.endswith(".jpg")
or f.endswith(".png")
or f.endswith(".jpeg")
or f.endswith(".JPG")
):
res.append(os.path.join(root, f))
return res
# Dataset not composite online
class MatDatasetOffline(torch.utils.data.Dataset):
def __init__(self, args, transform=None, normalize=None):
self.samples = []
self.transform = transform
self.normalize = normalize
self.args = args
self.size_h = args.size_h
self.size_w = args.size_w
self.crop_h = args.crop_h
self.crop_w = args.crop_w
self.logger = logging.getLogger("DeepImageMatting")
assert len(self.crop_h) == len(self.crop_w)
fg_paths = get_files(self.args.fgDir)
self.cnt = len(fg_paths)
for fg_path in fg_paths:
alpha_path = fg_path.replace(self.args.fgDir, self.args.alphaDir)
img_path = fg_path.replace(self.args.fgDir, self.args.imgDir)
bg_path = fg_path.replace(self.args.fgDir, self.args.bgDir)
assert os.path.exists(alpha_path)
assert os.path.exists(fg_path)
assert os.path.exists(bg_path)
assert os.path.exists(img_path)
self.samples.append((alpha_path, fg_path, bg_path, img_path))
self.logger.info("MatDatasetOffline Samples: {}".format(self.cnt))
assert self.cnt > 0
def __getitem__(self, index):
alpha_path, fg_path, bg_path, img_path = self.samples[index]
img_info = [fg_path, alpha_path, bg_path, img_path]
# read fg, alpha
fg = cv2.imread(fg_path)[:, :, :3]
bg = cv2.imread(bg_path)[:, :, :3]
img = cv2.imread(img_path)[:, :, :3]
alpha = cv2.imread(alpha_path)[:, :, 0]
assert bg.shape == fg.shape and bg.shape == img.shape
img_info.append(fg.shape)
(
bh,
bw,
bc,
) = fg.shape
rand_ind = random.randint(0, len(self.crop_h) - 1)
cur_crop_h = self.crop_h[rand_ind]
cur_crop_w = self.crop_w[rand_ind]
# if ratio!=1: make the img (h==croph and w>=cropw)or(w==cropw and h>=croph)
wratio = float(cur_crop_w) / bw
hratio = float(cur_crop_h) / bh
ratio = wratio if wratio > hratio else hratio
if ratio > 1:
nbw = int(bw * ratio + 1.0)
nbh = int(bh * ratio + 1.0)
fg = cv2.resize(fg, (nbw, nbh), interpolation=cv2.INTER_LINEAR)
bg = cv2.resize(bg, (nbw, nbh), interpolation=cv2.INTER_LINEAR)
img = cv2.resize(img, (nbw, nbh), interpolation=cv2.INTER_LINEAR)
alpha = cv2.resize(alpha, (nbw, nbh), interpolation=cv2.INTER_LINEAR)
# random crop(crop_h, crop_w) and flip
if self.transform:
img, alpha, fg, bg = self.transform(
img, alpha, fg, bg, cur_crop_h, cur_crop_w
)
# resize to (size_h, size_w)
if self.size_h != img.shape[0] or self.size_w != img.shape[1]:
# resize
img = cv2.resize(
img, (self.size_w, self.size_h), interpolation=cv2.INTER_LINEAR
)
fg = cv2.resize(
fg, (self.size_w, self.size_h), interpolation=cv2.INTER_LINEAR
)
bg = cv2.resize(
bg, (self.size_w, self.size_h), interpolation=cv2.INTER_LINEAR
)
alpha = cv2.resize(
alpha, (self.size_w, self.size_h), interpolation=cv2.INTER_LINEAR
)
trimap = gen_trimap(alpha)
grad = compute_gradient(img)
if self.normalize:
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# first, 0-255 to 0-1
# second, x-mean/std and HWC to CHW
img_norm = self.normalize(img_rgb)
else:
img_norm = None
# img_id = img_info[0].split('/')[-1]
# cv2.imwrite("result/debug/{}_img.png".format(img_id), img)
# cv2.imwrite("result/debug/{}_alpha.png".format(img_id), alpha)
# cv2.imwrite("result/debug/{}_fg.png".format(img_id), fg)
# cv2.imwrite("result/debug/{}_bg.png".format(img_id), bg)
# cv2.imwrite("result/debug/{}_trimap.png".format(img_id), trimap)
# cv2.imwrite("result/debug/{}_grad.png".format(img_id), grad)
alpha = torch.from_numpy(alpha.astype(np.float32)[np.newaxis, :, :])
trimap = torch.from_numpy(trimap.astype(np.float32)[np.newaxis, :, :])
grad = torch.from_numpy(grad.astype(np.float32)[np.newaxis, :, :])
img = torch.from_numpy(img.astype(np.float32)).permute(2, 0, 1)
fg = torch.from_numpy(fg.astype(np.float32)).permute(2, 0, 1)
bg = torch.from_numpy(bg.astype(np.float32)).permute(2, 0, 1)
return img, alpha, fg, bg, trimap, grad, img_norm, img_info
def __len__(self):
return len(self.samples)

@@ -21,7 +21,7 @@ def get_super(input_image, s=4, cpu_flag=False, fFlag=True, weight_path=None):
weight_path = get_weight_path()
opt = Namespace(
cuda=torch.cuda.is_available() and not cpu_flag,
model=os.path.join(weight_path, "super_resolution", "model_srresnet.pth"),
model=os.path.join(weight_path, "superresolution", "model_srresnet.pth"),
dataset="Set5",
scale=s,
gpus=0,

@@ -4,8 +4,8 @@ import gimpml
image = cv2.imread("sampleinput/img.png")
alpha = cv2.imread("sampleinput/alpha.png")
out = gimpml.kmeans(image)
cv2.imwrite("output/tmp-kmeans.jpg", out)
# out = gimpml.kmeans(image)
# cv2.imwrite("output/tmp-kmeans.jpg", out)
# #
# out = gimpml.deblur(image)
# cv2.imwrite('output/tmp-deblur.jpg', out)
@@ -19,9 +19,9 @@ cv2.imwrite("output/tmp-kmeans.jpg", out)
# out = gimpml.denoise(image)
# cv2.imwrite('output/tmp-denoise.jpg', out)
#
# out = gimpml.matting(image, alpha)
# cv2.imwrite('output/tmp-matting.png', out) # save as png
#
out = gimpml.matting(image, alpha)
cv2.imwrite('output/tmp-matting.png', out) # save as png
# out = gimpml.enlighten(image)
# cv2.imwrite('output/tmp-enlighten.jpg', out)
