From ccd3c980ca15cb26853dac0a1ca529a96b384094 Mon Sep 17 00:00:00 2001 From: kritiksoman Date: Mon, 27 Apr 2020 10:02:33 +0530 Subject: [PATCH] First upload --- .../MaskGAN_demo/data/__init__.py | 0 .../MaskGAN_demo/data/__init__.pyc | Bin 0 -> 172 bytes .../data/__pycache__/__init__.cpython-37.pyc | Bin 0 -> 172 bytes .../data/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 176 bytes .../__pycache__/base_dataset.cpython-37.pyc | Bin 0 -> 3579 bytes .../__pycache__/base_dataset.cpython-38.pyc | Bin 0 -> 3611 bytes .../__pycache__/data_loader.cpython-37.pyc | Bin 0 -> 437 bytes .../__pycache__/data_loader.cpython-38.pyc | Bin 0 -> 445 bytes .../MaskGAN_demo/data/aligned_dataset.py | 100 +++ .../MaskGAN_demo/data/base_data_loader.py | 14 + .../MaskGAN_demo/data/base_dataset.py | 97 +++ .../MaskGAN_demo/data/base_dataset.pyc | Bin 0 -> 5509 bytes .../data/custom_dataset_data_loader.py | 31 + .../MaskGAN_demo/data/data_loader.py | 7 + .../MaskGAN_demo/data/data_loader.pyc | Bin 0 -> 555 bytes .../MaskGAN_demo/data/image_folder.py | 82 ++ .../MaskGAN_demo/models/__init__.py | 0 .../MaskGAN_demo/models/__init__.pyc | Bin 0 -> 174 bytes .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 174 bytes .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 178 bytes .../__pycache__/base_model.cpython-37.pyc | Bin 0 -> 3558 bytes .../__pycache__/base_model.cpython-38.pyc | Bin 0 -> 3612 bytes .../models/__pycache__/models.cpython-37.pyc | Bin 0 -> 640 bytes .../models/__pycache__/models.cpython-38.pyc | Bin 0 -> 648 bytes .../__pycache__/networks.cpython-37.pyc | Bin 0 -> 27118 bytes .../__pycache__/networks.cpython-38.pyc | Bin 0 -> 26577 bytes .../pix2pixHD_model.cpython-37.pyc | Bin 0 -> 9545 bytes .../pix2pixHD_model.cpython-38.pyc | Bin 0 -> 9557 bytes .../MaskGAN_demo/models/base_model.py | 94 ++ .../MaskGAN_demo/models/base_model.pyc | Bin 0 -> 4999 bytes .../MaskGAN_demo/models/models.py | 20 + .../MaskGAN_demo/models/models.pyc | Bin 0 -> 833 bytes .../MaskGAN_demo/models/networks.py | 818 ++++++++++++++++++ .../MaskGAN_demo/models/networks.pyc | Bin 0 -> 36361 bytes .../MaskGAN_demo/models/pix2pixHD_model.py | 326 +++++++ .../MaskGAN_demo/models/pix2pixHD_model.pyc | Bin 0 -> 12701 bytes .../MaskGAN_demo/options/__init__.py | 0 .../MaskGAN_demo/options/__init__.pyc | Bin 0 -> 175 bytes .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 175 bytes .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 179 bytes .../__pycache__/base_options.cpython-37.pyc | Bin 0 -> 4284 bytes .../__pycache__/base_options.cpython-38.pyc | Bin 0 -> 4290 bytes .../__pycache__/test_options.cpython-37.pyc | Bin 0 -> 1483 bytes .../__pycache__/test_options.cpython-38.pyc | Bin 0 -> 1495 bytes .../MaskGAN_demo/options/base_options.py | 89 ++ .../MaskGAN_demo/options/base_options.pyc | Bin 0 -> 5455 bytes .../MaskGAN_demo/options/test_options.py | 19 + .../MaskGAN_demo/options/test_options.pyc | Bin 0 -> 1912 bytes .../MaskGAN_demo/options/train_options.py | 36 + .../MaskGAN_demo/util/__init__.py | 0 .../MaskGAN_demo/util/__init__.pyc | Bin 0 -> 172 bytes .../util/__pycache__/__init__.cpython-37.pyc | Bin 0 -> 172 bytes .../util/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 176 bytes .../__pycache__/image_pool.cpython-37.pyc | Bin 0 -> 1078 bytes .../__pycache__/image_pool.cpython-38.pyc | Bin 0 -> 1090 bytes .../util/__pycache__/util.cpython-37.pyc | Bin 0 -> 3751 bytes .../util/__pycache__/util.cpython-38.pyc | Bin 0 -> 3837 bytes .../MaskGAN_demo/util/image_pool.py | 31 + .../MaskGAN_demo/util/image_pool.pyc | Bin 0 -> 1487 
bytes .../CelebAMask-HQ/MaskGAN_demo/util/util.py | 107 +++ .../CelebAMask-HQ/MaskGAN_demo/util/util.pyc | Bin 0 -> 5383 bytes gimp-plugins/DeblurGANv2/README.md | 131 +++ .../DeblurGANv2/adversarial_trainer.py | 99 +++ gimp-plugins/DeblurGANv2/aug.py | 93 ++ gimp-plugins/DeblurGANv2/aug.pyc | Bin 0 -> 3636 bytes gimp-plugins/DeblurGANv2/config/config.yaml | 68 ++ gimp-plugins/DeblurGANv2/dataset.py | 142 +++ gimp-plugins/DeblurGANv2/metric_counter.py | 56 ++ gimp-plugins/DeblurGANv2/models/__init__.py | 0 gimp-plugins/DeblurGANv2/models/__init__.pyc | Bin 0 -> 165 bytes .../DeblurGANv2/models/fpn_densenet.py | 135 +++ .../DeblurGANv2/models/fpn_densenet.pyc | Bin 0 -> 5309 bytes .../DeblurGANv2/models/fpn_inception.py | 167 ++++ .../DeblurGANv2/models/fpn_inception.pyc | Bin 0 -> 7044 bytes .../models/fpn_inception_simple.py | 160 ++++ .../models/fpn_inception_simple.pyc | Bin 0 -> 6926 bytes .../DeblurGANv2/models/fpn_mobilenet.py | 147 ++++ .../DeblurGANv2/models/fpn_mobilenet.pyc | Bin 0 -> 5963 bytes .../DeblurGANv2/models/fpn_mobilenet.py~HEAD | 147 ++++ gimp-plugins/DeblurGANv2/models/losses.py | 300 +++++++ .../DeblurGANv2/models/mobilenet_v2.py | 126 +++ .../DeblurGANv2/models/mobilenet_v2.pyc | Bin 0 -> 4623 bytes gimp-plugins/DeblurGANv2/models/models.py | 35 + gimp-plugins/DeblurGANv2/models/models.pyc | Bin 0 -> 2209 bytes gimp-plugins/DeblurGANv2/models/networks.py | 330 +++++++ gimp-plugins/DeblurGANv2/models/networks.pyc | Bin 0 -> 11391 bytes gimp-plugins/DeblurGANv2/models/senet.py | 430 +++++++++ gimp-plugins/DeblurGANv2/models/senet.pyc | Bin 0 -> 15348 bytes .../DeblurGANv2/models/unet_seresnext.py | 153 ++++ .../DeblurGANv2/models/unet_seresnext.pyc | Bin 0 -> 5851 bytes gimp-plugins/DeblurGANv2/mymodel.pt | Bin 0 -> 387153 bytes gimp-plugins/DeblurGANv2/predict.py | 108 +++ gimp-plugins/DeblurGANv2/predictorClass.py | 67 ++ gimp-plugins/DeblurGANv2/predictorClass.pyc | Bin 0 -> 4832 bytes gimp-plugins/DeblurGANv2/requirements.txt | 13 + gimp-plugins/DeblurGANv2/schedulers.py | 59 ++ gimp-plugins/DeblurGANv2/test.sh | 3 + gimp-plugins/DeblurGANv2/test_aug.py | 20 + gimp-plugins/DeblurGANv2/test_dataset.py | 76 ++ gimp-plugins/DeblurGANv2/test_metrics.py | 90 ++ gimp-plugins/DeblurGANv2/testing.py | 9 + gimp-plugins/DeblurGANv2/train.py | 181 ++++ gimp-plugins/DeblurGANv2/util/__init__.py | 0 gimp-plugins/DeblurGANv2/util/__init__.pyc | Bin 0 -> 163 bytes gimp-plugins/DeblurGANv2/util/image_pool.py | 33 + gimp-plugins/DeblurGANv2/util/metrics.py | 54 ++ gimp-plugins/DeblurGANv2/util/metrics.pyc | Bin 0 -> 2473 bytes gimp-plugins/colorize.py | 111 +++ gimp-plugins/deblur.py | 53 ++ gimp-plugins/deeplabv3.py | 88 ++ .../__pycache__/model.cpython-38.pyc | Bin 0 -> 9174 bytes .../__pycache__/resnet.cpython-38.pyc | Bin 0 -> 3614 bytes gimp-plugins/face-parsing.PyTorch/evaluate.py | 95 ++ .../face-parsing.PyTorch/face_dataset.py | 106 +++ gimp-plugins/face-parsing.PyTorch/logger.py | 23 + gimp-plugins/face-parsing.PyTorch/logger.pyc | Bin 0 -> 1020 bytes gimp-plugins/face-parsing.PyTorch/loss.py | 75 ++ gimp-plugins/face-parsing.PyTorch/makeup.py | 130 +++ gimp-plugins/face-parsing.PyTorch/model.py | 283 ++++++ gimp-plugins/face-parsing.PyTorch/model.pyc | Bin 0 -> 13335 bytes .../face-parsing.PyTorch/modules/__init__.py | 5 + .../face-parsing.PyTorch/modules/bn.py | 130 +++ .../face-parsing.PyTorch/modules/deeplab.py | 84 ++ .../face-parsing.PyTorch/modules/dense.py | 42 + .../face-parsing.PyTorch/modules/functions.py | 234 +++++ .../face-parsing.PyTorch/modules/misc.py | 
21 + .../face-parsing.PyTorch/modules/residual.py | 88 ++ .../face-parsing.PyTorch/modules/src/checks.h | 15 + .../modules/src/inplace_abn.cpp | 95 ++ .../modules/src/inplace_abn.h | 88 ++ .../modules/src/inplace_abn_cpu.cpp | 119 +++ .../modules/src/inplace_abn_cuda.cu | 333 +++++++ .../modules/src/inplace_abn_cuda_half.cu | 275 ++++++ .../modules/src/utils/checks.h | 15 + .../modules/src/utils/common.h | 49 ++ .../modules/src/utils/cuda.cuh | 71 ++ .../face-parsing.PyTorch/optimizer.py | 69 ++ .../face-parsing.PyTorch/prepropess_data.py | 38 + gimp-plugins/face-parsing.PyTorch/resnet.py | 109 +++ gimp-plugins/face-parsing.PyTorch/resnet.pyc | Bin 0 -> 4822 bytes gimp-plugins/face-parsing.PyTorch/test.py | 100 +++ gimp-plugins/face-parsing.PyTorch/train.py | 179 ++++ .../face-parsing.PyTorch/transform.py | 129 +++ gimp-plugins/facegen.py | 175 ++++ gimp-plugins/faceparse.py | 167 ++++ gimp-plugins/installGimpML-mac.sh | 39 + gimp-plugins/installGimpML-ubuntu.sh | 41 + gimp-plugins/invert.py | 25 + gimp-plugins/monodepth.py | 113 +++ gimp-plugins/monodepth2/evaluate_depth.py | 230 +++++ gimp-plugins/monodepth2/evaluate_pose.py | 134 +++ gimp-plugins/monodepth2/export_gt_depth.py | 65 ++ gimp-plugins/monodepth2/layers.py | 269 ++++++ gimp-plugins/monodepth2/layers.pyc | Bin 0 -> 10823 bytes gimp-plugins/monodepth2/networks/__init__.py | 4 + gimp-plugins/monodepth2/networks/__init__.pyc | Bin 0 -> 397 bytes .../monodepth2/networks/depth_decoder.py | 65 ++ .../monodepth2/networks/depth_decoder.pyc | Bin 0 -> 2358 bytes gimp-plugins/monodepth2/networks/pose_cnn.py | 50 ++ gimp-plugins/monodepth2/networks/pose_cnn.pyc | Bin 0 -> 1873 bytes .../monodepth2/networks/pose_decoder.py | 54 ++ .../monodepth2/networks/pose_decoder.pyc | Bin 0 -> 2111 bytes .../monodepth2/networks/resnet_encoder.py | 98 +++ .../monodepth2/networks/resnet_encoder.pyc | Bin 0 -> 4657 bytes gimp-plugins/monodepth2/options.py | 208 +++++ gimp-plugins/monodepth2/test_simple.py | 160 ++++ gimp-plugins/monodepth2/train.py | 18 + gimp-plugins/monodepth2/trainer.py | 630 ++++++++++++++ gimp-plugins/monodepth2/utils.py | 114 +++ gimp-plugins/monodepth2/utils.pyc | Bin 0 -> 5685 bytes gimp-plugins/moveWeights.sh | 9 + .../__pycache__/model.cpython-38.pyc | Bin 0 -> 4119 bytes .../build_dataset_directory.py | 42 + gimp-plugins/neural-colorization/colorize.py | 73 ++ gimp-plugins/neural-colorization/model.py | 123 +++ gimp-plugins/neural-colorization/model.pyc | Bin 0 -> 6371 bytes .../neural-colorization/resize_all_imgs.py | 35 + gimp-plugins/neural-colorization/train.py | 186 ++++ .../__pycache__/srresnet.cpython-38.pyc | Bin 0 -> 3791 bytes .../data/generate_train_srresnet.m | 92 ++ gimp-plugins/pytorch-SRResNet/data/modcrop.m | 12 + .../pytorch-SRResNet/data/store2hdf5.m | 59 ++ gimp-plugins/pytorch-SRResNet/dataset.py | 16 + gimp-plugins/pytorch-SRResNet/demo.py | 85 ++ gimp-plugins/pytorch-SRResNet/eval.py | 93 ++ .../pytorch-SRResNet/main_srresnet.py | 166 ++++ gimp-plugins/pytorch-SRResNet/srresnet.py | 141 +++ gimp-plugins/pytorch-SRResNet/srresnet.pyc | Bin 0 -> 5412 bytes gimp-plugins/super_resolution.py | 122 +++ 189 files changed, 12468 insertions(+) create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__init__.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__init__.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/__init__.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/__init__.cpython-38.pyc create mode 100755 
gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/base_dataset.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/base_dataset.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/data_loader.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/data_loader.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/aligned_dataset.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_data_loader.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_dataset.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_dataset.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/custom_dataset_data_loader.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/data_loader.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/data_loader.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/image_folder.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__init__.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__init__.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/__init__.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/__init__.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/base_model.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/base_model.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/models.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/models.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/networks.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/networks.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/pix2pixHD_model.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/pix2pixHD_model.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/base_model.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/base_model.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/models.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/models.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/networks.py create mode 100644 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/networks.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/pix2pixHD_model.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/pix2pixHD_model.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__init__.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__init__.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__pycache__/__init__.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__pycache__/__init__.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__pycache__/base_options.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__pycache__/base_options.cpython-38.pyc create mode 100755 
gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__pycache__/test_options.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__pycache__/test_options.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/base_options.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/base_options.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/test_options.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/test_options.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/train_options.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__init__.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__init__.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/__init__.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/__init__.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/image_pool.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/image_pool.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/util.cpython-37.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/util.cpython-38.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/image_pool.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/image_pool.pyc create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/util.py create mode 100755 gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/util.pyc create mode 100644 gimp-plugins/DeblurGANv2/README.md create mode 100644 gimp-plugins/DeblurGANv2/adversarial_trainer.py create mode 100644 gimp-plugins/DeblurGANv2/aug.py create mode 100644 gimp-plugins/DeblurGANv2/aug.pyc create mode 100644 gimp-plugins/DeblurGANv2/config/config.yaml create mode 100644 gimp-plugins/DeblurGANv2/dataset.py create mode 100644 gimp-plugins/DeblurGANv2/metric_counter.py create mode 100644 gimp-plugins/DeblurGANv2/models/__init__.py create mode 100644 gimp-plugins/DeblurGANv2/models/__init__.pyc create mode 100644 gimp-plugins/DeblurGANv2/models/fpn_densenet.py create mode 100644 gimp-plugins/DeblurGANv2/models/fpn_densenet.pyc create mode 100644 gimp-plugins/DeblurGANv2/models/fpn_inception.py create mode 100644 gimp-plugins/DeblurGANv2/models/fpn_inception.pyc create mode 100644 gimp-plugins/DeblurGANv2/models/fpn_inception_simple.py create mode 100644 gimp-plugins/DeblurGANv2/models/fpn_inception_simple.pyc create mode 100644 gimp-plugins/DeblurGANv2/models/fpn_mobilenet.py create mode 100644 gimp-plugins/DeblurGANv2/models/fpn_mobilenet.pyc create mode 100644 gimp-plugins/DeblurGANv2/models/fpn_mobilenet.py~HEAD create mode 100644 gimp-plugins/DeblurGANv2/models/losses.py create mode 100755 gimp-plugins/DeblurGANv2/models/mobilenet_v2.py create mode 100644 gimp-plugins/DeblurGANv2/models/mobilenet_v2.pyc create mode 100755 gimp-plugins/DeblurGANv2/models/models.py create mode 100644 gimp-plugins/DeblurGANv2/models/models.pyc create mode 100644 gimp-plugins/DeblurGANv2/models/networks.py create mode 100644 gimp-plugins/DeblurGANv2/models/networks.pyc create mode 100755 gimp-plugins/DeblurGANv2/models/senet.py create mode 100644 gimp-plugins/DeblurGANv2/models/senet.pyc create mode 100644 gimp-plugins/DeblurGANv2/models/unet_seresnext.py create mode 100644 gimp-plugins/DeblurGANv2/models/unet_seresnext.pyc create mode 100644 
gimp-plugins/DeblurGANv2/mymodel.pt create mode 100644 gimp-plugins/DeblurGANv2/predict.py create mode 100644 gimp-plugins/DeblurGANv2/predictorClass.py create mode 100644 gimp-plugins/DeblurGANv2/predictorClass.pyc create mode 100644 gimp-plugins/DeblurGANv2/requirements.txt create mode 100644 gimp-plugins/DeblurGANv2/schedulers.py create mode 100755 gimp-plugins/DeblurGANv2/test.sh create mode 100644 gimp-plugins/DeblurGANv2/test_aug.py create mode 100644 gimp-plugins/DeblurGANv2/test_dataset.py create mode 100644 gimp-plugins/DeblurGANv2/test_metrics.py create mode 100644 gimp-plugins/DeblurGANv2/testing.py create mode 100644 gimp-plugins/DeblurGANv2/train.py create mode 100644 gimp-plugins/DeblurGANv2/util/__init__.py create mode 100644 gimp-plugins/DeblurGANv2/util/__init__.pyc create mode 100644 gimp-plugins/DeblurGANv2/util/image_pool.py create mode 100644 gimp-plugins/DeblurGANv2/util/metrics.py create mode 100644 gimp-plugins/DeblurGANv2/util/metrics.pyc create mode 100755 gimp-plugins/colorize.py create mode 100755 gimp-plugins/deblur.py create mode 100755 gimp-plugins/deeplabv3.py create mode 100755 gimp-plugins/face-parsing.PyTorch/__pycache__/model.cpython-38.pyc create mode 100755 gimp-plugins/face-parsing.PyTorch/__pycache__/resnet.cpython-38.pyc create mode 100755 gimp-plugins/face-parsing.PyTorch/evaluate.py create mode 100755 gimp-plugins/face-parsing.PyTorch/face_dataset.py create mode 100755 gimp-plugins/face-parsing.PyTorch/logger.py create mode 100755 gimp-plugins/face-parsing.PyTorch/logger.pyc create mode 100755 gimp-plugins/face-parsing.PyTorch/loss.py create mode 100755 gimp-plugins/face-parsing.PyTorch/makeup.py create mode 100755 gimp-plugins/face-parsing.PyTorch/model.py create mode 100755 gimp-plugins/face-parsing.PyTorch/model.pyc create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/__init__.py create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/bn.py create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/deeplab.py create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/dense.py create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/functions.py create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/misc.py create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/residual.py create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/src/checks.h create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn.cpp create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn.h create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cpu.cpp create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cuda.cu create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cuda_half.cu create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/src/utils/checks.h create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/src/utils/common.h create mode 100755 gimp-plugins/face-parsing.PyTorch/modules/src/utils/cuda.cuh create mode 100755 gimp-plugins/face-parsing.PyTorch/optimizer.py create mode 100755 gimp-plugins/face-parsing.PyTorch/prepropess_data.py create mode 100755 gimp-plugins/face-parsing.PyTorch/resnet.py create mode 100755 gimp-plugins/face-parsing.PyTorch/resnet.pyc create mode 100755 gimp-plugins/face-parsing.PyTorch/test.py create mode 100755 gimp-plugins/face-parsing.PyTorch/train.py create mode 100755 gimp-plugins/face-parsing.PyTorch/transform.py create mode 100755 gimp-plugins/facegen.py create mode 100755 
gimp-plugins/faceparse.py create mode 100644 gimp-plugins/installGimpML-mac.sh create mode 100644 gimp-plugins/installGimpML-ubuntu.sh create mode 100755 gimp-plugins/invert.py create mode 100755 gimp-plugins/monodepth.py create mode 100755 gimp-plugins/monodepth2/evaluate_depth.py create mode 100755 gimp-plugins/monodepth2/evaluate_pose.py create mode 100755 gimp-plugins/monodepth2/export_gt_depth.py create mode 100755 gimp-plugins/monodepth2/layers.py create mode 100644 gimp-plugins/monodepth2/layers.pyc create mode 100755 gimp-plugins/monodepth2/networks/__init__.py create mode 100644 gimp-plugins/monodepth2/networks/__init__.pyc create mode 100755 gimp-plugins/monodepth2/networks/depth_decoder.py create mode 100644 gimp-plugins/monodepth2/networks/depth_decoder.pyc create mode 100755 gimp-plugins/monodepth2/networks/pose_cnn.py create mode 100644 gimp-plugins/monodepth2/networks/pose_cnn.pyc create mode 100755 gimp-plugins/monodepth2/networks/pose_decoder.py create mode 100644 gimp-plugins/monodepth2/networks/pose_decoder.pyc create mode 100755 gimp-plugins/monodepth2/networks/resnet_encoder.py create mode 100644 gimp-plugins/monodepth2/networks/resnet_encoder.pyc create mode 100755 gimp-plugins/monodepth2/options.py create mode 100755 gimp-plugins/monodepth2/test_simple.py create mode 100755 gimp-plugins/monodepth2/train.py create mode 100755 gimp-plugins/monodepth2/trainer.py create mode 100755 gimp-plugins/monodepth2/utils.py create mode 100755 gimp-plugins/monodepth2/utils.pyc create mode 100644 gimp-plugins/moveWeights.sh create mode 100755 gimp-plugins/neural-colorization/__pycache__/model.cpython-38.pyc create mode 100755 gimp-plugins/neural-colorization/build_dataset_directory.py create mode 100755 gimp-plugins/neural-colorization/colorize.py create mode 100755 gimp-plugins/neural-colorization/model.py create mode 100755 gimp-plugins/neural-colorization/model.pyc create mode 100755 gimp-plugins/neural-colorization/resize_all_imgs.py create mode 100755 gimp-plugins/neural-colorization/train.py create mode 100755 gimp-plugins/pytorch-SRResNet/__pycache__/srresnet.cpython-38.pyc create mode 100755 gimp-plugins/pytorch-SRResNet/data/generate_train_srresnet.m create mode 100755 gimp-plugins/pytorch-SRResNet/data/modcrop.m create mode 100755 gimp-plugins/pytorch-SRResNet/data/store2hdf5.m create mode 100755 gimp-plugins/pytorch-SRResNet/dataset.py create mode 100755 gimp-plugins/pytorch-SRResNet/demo.py create mode 100755 gimp-plugins/pytorch-SRResNet/eval.py create mode 100755 gimp-plugins/pytorch-SRResNet/main_srresnet.py create mode 100755 gimp-plugins/pytorch-SRResNet/srresnet.py create mode 100755 gimp-plugins/pytorch-SRResNet/srresnet.pyc create mode 100755 gimp-plugins/super_resolution.py diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__init__.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__init__.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..a8c2f8205f453aa4c3bac894c5c646f637593f27 GIT binary patch literal 172 zcmZSn%*(|kSRI$l00oRd+5w1*S%5?e14FO|NW@PANHCxg#i2kk{m|mnqGJ8*qRf)a z?Be{~#5{eM{PMh<{KS-E{q)S-0^Nd~()7%{Vtwb-oYW*o-^Aig`k0xrSoI1v39M8E(ekl_Ht#VkM~g&~+hlhJP_LlH-n(JQ_H literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/__init__.cpython-38.pyc 
b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/__init__.cpython-38.pyc
new file mode 100755
index 0000000000000000000000000000000000000000..9507d0f8b9ba337358aafc70539411666c2c6640
GIT binary patch
literal 176
[base85 data omitted]

literal 0
HcmV?d00001

diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/base_dataset.cpython-37.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/base_dataset.cpython-37.pyc
new file mode 100755
index 0000000000000000000000000000000000000000..eb87a4c190c9a4833b91884291a859f33308dc12
GIT binary patch
literal 3579
[base85 data omitted]

literal 0
HcmV?d00001

diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/base_dataset.cpython-38.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/base_dataset.cpython-38.pyc
new file mode 100755
index 0000000000000000000000000000000000000000..c7645e28828a60ca792fcd06f189d9380468bef0
GIT binary patch
literal 3611
[base85 data omitted]

literal 0
HcmV?d00001

diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/data_loader.cpython-37.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/__pycache__/data_loader.cpython-37.pyc
new file mode 100755
index 0000000000000000000000000000000000000000..84d072de8c3be5fa4b4f90e52debb94aa53d7ca6
GIT binary patch
literal 437
[base85 data omitted]

literal 0
HcmV?d00001

diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/aligned_dataset.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/aligned_dataset.py
new file mode 100755
index 0000000..dc0ad44
--- /dev/null
+++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/aligned_dataset.py
@@ -0,0 +1,100 @@
+## Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
+### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
+import os.path
+from data.base_dataset import BaseDataset, get_params, get_transform, normalize
+from data.image_folder import make_dataset, make_dataset_test
+from PIL import Image
+import torch
+import numpy as np
+
+class AlignedDataset(BaseDataset):
+    def initialize(self, opt):
+        self.opt = opt
+        self.root = opt.dataroot
+
+        ### input A (label maps)
+        if opt.isTrain or opt.use_encoded_image:
+            dir_A = '_A' if self.opt.label_nc == 0 else '_label'
+            self.dir_A = os.path.join(opt.dataroot, opt.phase + dir_A)
+            self.A_paths = sorted(make_dataset(self.dir_A))
+            self.AR_paths = make_dataset(self.dir_A)
+
+        ### input A inter 1 (label maps)
+        if opt.isTrain or opt.use_encoded_image:
+            dir_A_inter_1 = '_label_inter_1'
+            self.dir_A_inter_1 = os.path.join(opt.dataroot, opt.phase + dir_A_inter_1)
+            self.A_paths_inter_1 = sorted(make_dataset(self.dir_A_inter_1))
+
+        ### input A inter 2 (label maps)
+        if opt.isTrain or opt.use_encoded_image:
+            dir_A_inter_2 = '_label_inter_2'
+            self.dir_A_inter_2 = os.path.join(opt.dataroot, opt.phase + dir_A_inter_2)
+            self.A_paths_inter_2 = sorted(make_dataset(self.dir_A_inter_2))
+
+        ### input A test (label maps)
+        if not (opt.isTrain or opt.use_encoded_image):
+            dir_A = '_A' if self.opt.label_nc == 0 else '_label'
+            self.dir_A = os.path.join(opt.dataroot, opt.phase + dir_A)
+            self.A_paths = sorted(make_dataset_test(self.dir_A))
+            dir_AR = '_AR' if self.opt.label_nc == 0 else '_labelref'
+            self.dir_AR = os.path.join(opt.dataroot, opt.phase + dir_AR)
+            self.AR_paths = sorted(make_dataset_test(self.dir_AR))
+
+        ### input B (real images)
+        dir_B = '_B' if self.opt.label_nc == 0 else '_img'
+        self.dir_B = os.path.join(opt.dataroot, opt.phase + dir_B)
+        self.B_paths = sorted(make_dataset(self.dir_B))
+        self.BR_paths = sorted(make_dataset(self.dir_B))
+
+        self.dataset_size = len(self.A_paths)
+
+    def __getitem__(self, index):
+        ### input A (label maps)
+        A_path = self.A_paths[index]
+        AR_path = self.AR_paths[index]
+        A = Image.open(A_path)
+        AR = Image.open(AR_path)
+
+        if self.opt.isTrain:
+            A_path_inter_1 = self.A_paths_inter_1[index]
+            A_path_inter_2 = self.A_paths_inter_2[index]
+            A_inter_1 = Image.open(A_path_inter_1)
+            A_inter_2 = Image.open(A_path_inter_2)
+
+        params = get_params(self.opt, A.size)
+        if self.opt.label_nc == 0:
+            transform_A = get_transform(self.opt, params)
+            A_tensor = transform_A(A.convert('RGB'))
+            if self.opt.isTrain:
+                A_inter_1_tensor = transform_A(A_inter_1.convert('RGB'))
+                A_inter_2_tensor = transform_A(A_inter_2.convert('RGB'))
+            AR_tensor = transform_A(AR.convert('RGB'))
+        else:
+            transform_A = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
+            A_tensor = transform_A(A) * 255.0
+            if self.opt.isTrain:
+                A_inter_1_tensor = transform_A(A_inter_1) * 255.0
+                A_inter_2_tensor = transform_A(A_inter_2) * 255.0
+            AR_tensor = transform_A(AR) * 255.0
+        B_tensor = inst_tensor = feat_tensor = 0
+        ### input B (real images)
+        B_path = self.B_paths[index]
+        BR_path = self.BR_paths[index]
+        B = Image.open(B_path).convert('RGB')
+        BR = Image.open(BR_path).convert('RGB')
+        transform_B = get_transform(self.opt, params)
+        B_tensor = transform_B(B)
+        BR_tensor = transform_B(BR)
+
+        if self.opt.isTrain:
+            input_dict = {'inter_label_1': A_inter_1_tensor, 'label': A_tensor, 'inter_label_2': A_inter_2_tensor, 'label_ref': AR_tensor, 'image': B_tensor, 'image_ref': BR_tensor, 'path': A_path, 'path_ref': AR_path}
+        else:
+            input_dict = {'label': A_tensor, 'label_ref': AR_tensor, 'image': B_tensor, 'image_ref': BR_tensor, 'path': A_path, 'path_ref': AR_path}
+
+        return input_dict
+
+    def __len__(self):
+        return len(self.A_paths) // self.opt.batchSize * self.opt.batchSize
+
+    def name(self):
+        return 'AlignedDataset'
diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_data_loader.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_data_loader.py
new file mode 100755
index 0000000..0e1deb5
--- /dev/null
+++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_data_loader.py
@@ -0,0 +1,14 @@
+
+class BaseDataLoader():
+    def __init__(self):
+        pass
+
+    def initialize(self, opt):
+        self.opt = opt
+        pass
+
+    def load_data(self):
+        return None
+
+
+
diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_dataset.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_dataset.py
new file mode 100755
index 0000000..aa38b25
--- /dev/null
+++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_dataset.py
@@ -0,0 +1,97 @@
+### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
+### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
+import torch.utils.data as data
+from PIL import Image
+import torchvision.transforms as transforms
+import numpy as np
+import random
+
+class BaseDataset(data.Dataset):
+    def __init__(self):
+        super(BaseDataset, self).__init__()
+
+    def name(self):
+        return 'BaseDataset'
+
+    def initialize(self, opt):
+        pass
+
+def get_params(opt, size):
+    w, h = size
+    new_h = h
+    new_w = w
+    if opt.resize_or_crop == 'resize_and_crop':
+        new_h = new_w = opt.loadSize
+    elif opt.resize_or_crop == 'scale_width_and_crop':
+        new_w = opt.loadSize
+        new_h = opt.loadSize * h // w
+
+    x = random.randint(0, np.maximum(0, new_w - opt.fineSize))
+    y = random.randint(0, np.maximum(0, new_h - opt.fineSize))
+
+    #flip = random.random() > 0.5
+    flip = 0
+    return {'crop_pos': (x, y), 'flip': flip}
+
+def get_transform(opt, params, method=Image.BICUBIC, normalize=True, normalize_mask=False):
+    transform_list = []
+    if 'resize' in opt.resize_or_crop:
+        osize = [opt.loadSize, opt.loadSize]
+        transform_list.append(transforms.Scale(osize, method))
+    elif 'scale_width' in opt.resize_or_crop:
+        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize, method)))
+
+    if 'crop' in opt.resize_or_crop:
+        transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.fineSize)))
+
+    if opt.resize_or_crop == 'none':
+        base = float(2 ** opt.n_downsample_global)
+        if opt.netG == 'local':
+            base *= (2 ** opt.n_local_enhancers)
+        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
+
+    if opt.isTrain and not opt.no_flip:
+        transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
+
+    transform_list += [transforms.ToTensor()]
+
+    if normalize:
+        transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
+                                                (0.5, 0.5, 0.5))]
+    if normalize_mask:
+        transform_list += [transforms.Normalize((0, 0, 0),
+                                                (1 / 255., 1 / 255., 1 / 255.))]
+
+    return transforms.Compose(transform_list)
+
+def normalize():
+    return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+
+def __make_power_2(img, base, method=Image.BICUBIC):
+    ow, oh = img.size
+    h = int(round(oh / base) * base)
+    w = int(round(ow / base) * base)
+    if (h == oh) and (w == ow):
+        return img
+    return img.resize((w, h), method)
+
+def __scale_width(img, target_width, method=Image.BICUBIC):
+    ow, oh = img.size
+    if (ow == target_width):
+        return img
+    w = target_width
+    h = int(target_width * oh / ow)
+    return img.resize((w, h), method)
+
+def __crop(img, pos, size):
+    ow, oh = img.size
+    x1, y1 = pos
+    tw = th = size
+    if (ow > tw or oh > th):
+        return img.crop((x1, y1, x1 + tw, y1 + th))
+    return img
+
+def __flip(img, flip):
+    if flip:
+        return img.transpose(Image.FLIP_LEFT_RIGHT)
+    return img
diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_dataset.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/data/base_dataset.pyc
new file mode 100755
index 0000000000000000000000000000000000000000..ad0f39239aa85539295d63b6bc05962b5a7d99fa
GIT binary patch
literal 5509
z-!&gS+4(AX@|#Dyfxq+ki(PB4TbNzwYnsmj^(mUZ@iXkNIBkNs)DWH*{e#WpG8vjp z{&%E#OK&yL>JWAOYUhbnOjmNz9}<(~+2%B}7>(4rK08kF{}bN|eBFa_^P&A?O8;f@ z-{bZieXz6r4Vvz?TO7x_{82cB8Sim9qp(kK_;doFr>FYN@S@ZOXVC_>y=VWu`IN@c b*W9c6dfReZ?z>CP)s34?Z@IDDXg2-_8%cdHWee>=3#|y^!4kWn#BMgrCPmckss6Dapl{NG ze_&wVyve+MGn>|Tx7|Mbd|uMw2;>`t?*ek7FQO+9LHB8bn&zXYJ2CsYtw zL0dth5ws)PCe$SKnlK+vn6{`P+OZ&kAKOgL4^GOi;T(`{d~kN8g7P|Wx^*hob$Y~t z3qX#$!7{2A%pr?;QAFV!5(=y?mDQmQVoDQ?D$IwD69bQRRoE*#DA?xkd5V%E_(w4L z;C1cgx`xBrJF7}La&IM%@^WQt)vt=p%9LJay3jAfJLT8?$xLz{52uS!!BfqHUg-P_pspyEVn8t^hALH(Cvwv9c1Oug`k0xrSoI1v39M8E(ekl_Ht#VkM~g&~+hlhJP_LlHWHof~7gBb|4Es12F>tEQ2hn literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/__init__.cpython-38.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/__init__.cpython-38.pyc new file mode 100755 index 0000000000000000000000000000000000000000..2d0946a2b428645da588c3d2304112136dff0fde GIT binary patch literal 178 zcmWIL<>g`k0xrSoI1v39L?8o3AjbiSi&=m~3PUi1CZpdH!>k+6AqTL<+ r;!{#{^YwG{Q&Mw^_2c6+^D;}~T{s|5tU z;L@M}WUUeM9~{g-2Riql$=d)JVKgD_0@F4%W2e*+ z0GC;vtpg6&CAI;$!Zz6!;3~V!8h~r;8vBTCzoG3lc7=TmGj(Vdya5>#l>Hv0O)>U5D zpOk?nfL56zWt9;y5Z!Clw4Du<_6`QaFlJJFv3w??IMpSUiO!*RIztxe%9o(5dd5?k z3GL3Xl}YUkA_YX<40hBzhrH7pKv&6-#X`7nPVLL+h~uzj3DiJ%2o}~AB_s;s6l@zM z@rYMoEX_-$F7@Hpprfs&RhEQjY=ibOw!xi+ZRQ$MU5eAeP|f9U0mXUwWyw`&s5Vgi z>@qf&Vi)WG;~fbVda?KmC@&U|UJ(EP$Fi}?kPDFsd2>bb)urP9O?bvP z&PKac!G%ZaQWCx7$$6h}&~bmdB5~y<;Zg@Z7e939MQqxG0i(H{Tl-dnj7th4(gfp@ zotLK8*j3J?l+%ejB@=H-a(8M`qWs)BrQ)%2y5&hQCE_tql&27;jSpLUquNawrf?HC z<<3AIwkiS=7!h>@>j=TMvP+S$A^}_rqx_-KzK(NcD1f11R-5N&h|LNSHsLeSLe*;l+(VT4R4) z&dZb9l)NRmzfG_QC)Ifw$9Kpp{hX+^Zk?5TrFT>`)cPqI2k8b2SS7jC-xyawb_M5N zKYmM!lfAe*;|pi)dGDxf7ZP%BA;AV-pXb43GxsN3tU9GE*e0mQJNoK#kVw?!JUFGs zc3`QuX6+l2T7Mt~G;g$a{*8_{ZpvnwsV09NOC_5pTz#VHqtWF@Jmn&S*JHHMf27A6}fEILY<@7D-xx&ez_jl`+^K>PTwQOze+}5O=yt$txNp_69a6cM%Sq-pXx2$hXy2WrO5P%u%r(9{e(`x9_KtNqm1d6l4L%Xp= zht_qY{4#2ofTA6o6_|3Mj!?WWO#!35BQ9QMl0&r!$w}MtMCvNc$v8`)lGx8+3S>hq zsieTFTH6t0x+Dilth5Vtl;W&aEy@JUDB?#5ZXhtx3}e0cF@Sb`urPUXp+CbVj4ACp z7y+csYbRzT%`1>wNQU>WtE<=g3z(4)01#S*^2&iSYYlqX_iYQxEtdw+#~$K!7*`=+ zZPBU)G|*gYgEr|E=&c5^RS_NJKL6gEepW)Jxj1vwt>J)S*iLvPQn>06e1*RNUh|^t z`tq?}M7jrw9FtaSUHf5(K{^a|5Ms$aOt4=K!&i{SixpF%3UoGsHJNB2_yK~C5MUi8 zt|0gr0p=F*69o4V+(%$ez67X)`6q!%QQ}s#s_#K_pv`}d?*}bI*K&(oW|G&hfL>c^ cs@*5%xQ5~)5!$~~D0CM^ODt%xRQ9d!0VBOME&u=k literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/base_model.cpython-38.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/base_model.cpython-38.pyc new file mode 100755 index 0000000000000000000000000000000000000000..c7413d26d3c2746ffd206e5603bbbefd39eb16cd GIT binary patch literal 3612 zcmbss%Z?kzv3q7XGaQop7Ok}tOKwB@A@f$%GT0r0n z*8l!jYlV;>aI*Y4F!>Cc{1^Zuj3%UA^t4UQ+-h6*2(y^|j4+$iuHAJ`>~@LKhotHJ z2&c$y2jTK(!{jq)@(=(gNU3cx%B*LkZ8MuW&^xTeT%QD-?hGxT*y{W#47H|tRJPVyV+5i zWD%3C{kT8)Xpju|<5afp@Pt44{6QppAN}#`7WVf(-whe>XRSU;B3n;DsL+ge2FKb- zqdxah668+kXysJs9o*`eW!!=f$0)Fo{4NY~nv)P<7kdi51wFJb74Io9RJW8m(7Uke zDzEEL%77C_ zM?968(C)0aGO3+GqyVX#!HLz*0q^t%FjO*Ru@D}}seKt8a-23TQGx|gMsTWO;UZEH zr=Z^`iATHwb7?%6y3~hXosPB^npq&eq94?e(GQ+3^n;3^fF0C~ly>`jzh#C~I$C>J6V^2QSP zD~s#Diug+0oXv=5n9rg`my+lSPtH5BgQ5NY65l27!mSR*`~1UXfLAdQx(fqFb33>8 ztU4K&6y&W5W-mK0O|7x3oJlFC6L(4`-jw9-)S^WBxpP9redTn^lVD23eZVMBA)#ZG zH?7^DYBywT^x5$gyrpv4w~DuM<8ZFdGk9Y_@^%ugMq?S!YIhFjHG zhS6aZ!?hFM^i1N?9?(9@L{HadgV1P2SNI_79266QxkuMb&fAND9yq5in<>V}c@0+0 z&WzkmI6BLGRk}vE=ms6t7nWQ`)#QwB@lM##U*x~Ro@aHNmrf`pxg)RP`!IHt)wL&1 
z?#y(Jvl8f-%0~*G``oP)gAWgX&k|oT=O6S1O+Ovg(IU#5_=r0Nir*4gf0u#KvP_d!bPRKY&H(0ZGty=L7(1KL|w{* z6KZq@8hUPaz9XsiXHr1ppS4|a9n3S@xFH*9rW*Wju~f2g%++;G?~N|q<0%&rJi3kk zP|mimpGg#GU5ubjwDL6oF=+5{9i*pYE2qy%&lL{5d)9xfrw+V(CVL=g+S=OrSks=S zhc9V6FeQj_`2weSf^T6Abh7^7_Ls;2UriT&KGYtg4;F5Sl@CT4M#9dmBNKLpVH*Gfh6N}f410A2a#$-f;Wbe zv(f$6liTA*T%3}6bif;XS(0Q&NDI%Sniur~`?P8OXmo-JPatSgZ9n8n)178rmj(hV zmm*1=O&r>dB?hxzHJ4vx43kW>gRA0~Ko4pR#rx8fINCep;z=es)Rgd0XTtx6u-gKcEp%2$w3k;?LwucIBQmmqUl#C@~;tKzAc1o;^+ea?bX4l z$_ul$a0~1D_9`d=xXr68W;hjQ{{)eY?W@a1MN#Az00^x@`Q<20PumA^|Ypu}+y$qw}^tH^QgVdKlG{zlhMNxj{30k=ElCy3N2Mp78!XuG_ zxkHE?{ul{h5eT*PwS~i8gu2^=fCj|on)bsGlXMvBAjIN)nBce?hEL&5FLq2JE3i}( zml0q=CEiBx4g#!s#8m_sl;&*_AL8gPf p4PMhN-ZD(`;`acwm8RN#V8k^T7qQU(t>Qwrk+npwSWx@c{{i`IE|>rS literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/models.cpython-37.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/models.cpython-37.pyc new file mode 100755 index 0000000000000000000000000000000000000000..bab26fdf620636cf590b60d72689e037e0b0e752 GIT binary patch literal 640 zcmYjPJ8#=C5GJXYquP$shi5MZsv!~3xhV25P-lpdqRrx>H5id~X;YLy%1N5ale2c| zFKEX8C9a+F7drJQr;h{f`*`2!cy|I1JDnJT#XDag=4!_imT_~ zqv4&KEg`mh3JUmWcv3JfZTDDxTAK zGfCG5&3BXSzYV61pxx>^0I-oTZ#5Ol#zKioP${mtjV`!8FATRqsfDVnuV~J#C%LlG ztX#UwZBP|D`(%kywmqa3J*ApT$z|eOuPCd3Ep!BZejxn_=+{|#X1F%#LPN8KDRQdP zVR5NsL77QsA}@DKxts}Q(jzYU`N0V_i`~)d)bXo>@s#nrNSh_hJ?)jBZF}SNeUlW9WyWpXLhTediX8;T_EYTD( z!n`Xq#SFhkiGK$fG|sT2d-H*vAAy?+phgR*T@tP7H(x?*_Y@TH(eR{TT-xrj`oc9=8E@jte(vPK#~0?);DVY#ra7%ROR$X@KNlIF z31*V64Vv$!yZ;)@8bQ0YbpT)^VLoUol#PWFm7r2wavPm9Dv^vY^bQbCH*OrCiO0GU*YQ{Os_An&sZ;UF!Jl;dsV)UZl+u=AQP;@3y`D0o(1| zUZxL#aEyC6#4Qq%7`R7z_zBU^A(Ifj3)GyO$sT}vM7DJRO;(b>_8>CufwjnArqrit literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/networks.cpython-37.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/networks.cpython-37.pyc new file mode 100755 index 0000000000000000000000000000000000000000..833b3bbbb99f88a47b9c06c08dd72fd8e1314bba GIT binary patch literal 27118 zcmeHwdvqMvdEd=TOxumnL8q^K1|Q3NgV0a7no5+#x%Et^7U3X*L2v0m&f77H$R zmv;sv2rn90lr1_o9ozA<$;koSyqu

$(rS=k&N~)22lj#)Qm^sl*S%~+-m$7W)Pv+DL@d?p^?5;I9jpIWpm!!#DtGa1Ch9Q$Qs zX25yENjS-u&6%u|a?(x)FkUt063&2=1$NMhJA=Ru0vC63&Jb`pgiUAI89{gm>Gd17 zlXtcNGwh5yTaoj~S;HA~w!LgP+bY&V{#tBi3ozTA9l-1eFr&bXJ3E2d8DO>ov&*>y zm^%W@7%;n?JAt_~z-$Ai;M@hwT>)mhV>o;2I~K=h#!UmUcRPC#ySHL2=r8XC?jC0! zaQlKcb^&v*vmcoK0p<>1CY%Gn90)MGfq8>-A29a?m^*>F-#G}(!2nYL=8$t3n8N|) zE?|y0lfX;{m_5K8b&dgZEWq3i%mdDYz&sdW_5$;e^DriLqYm1gUevPl#!P>o^ZK84$T8$jDw zFc!>Jvu&pUgxmvwmoUnXzaV9M} z@ZHZFY6$uM;DtlaxfSIex~OVi?V{UUEHw_DZeD5Bn`Mv zZOxRKGxNBIO*MjNKZUHlQlor}9yyIPZH(%RmKyiaFP^m!b1Wxz!&)^P`w(Lz=2#G8 zV^r#xtQU6@ZDY*1X1;`;MC&J~{P--JcN%1nD9xeaDk_iM&#c^|8?3fc@@maSpn76W~ z+6C;*7(tqVbW#>ZiNFAEVuh^VgMeqWQ;vDVT+Ou6fM~RRp4m3qX4`7p?RYzLEwLJF z>_uD*ac4ZMD=yJaB0hz9+p%9Vy;wVT4Mg3Jt=S$107GctIL4P6$Cvy>wceaX)5Pn| za;d&DUag?1OI}k+V}hi@Ho0peA(ill&dM8S!-O+~=lJSVjd@ar^5ZR4Yj}RV)N0kQ z3f)W$kaAi}Xr;2BZZ3I2FxIHf`8EjgtiPpEbU?{&i61NmWg2P}XHl$+ZipCZ6s4HO zO7Jn)&qZk@2HBy@{A9JYRIE9!+KD>$_gBiPkf4fZcO%jr1u(1(`e_t**34SAnX)sc zIt2Wga*ENo*jEfoAH5R+^ifRuC>8ZhN_r=8ExDRTpQPFDX#eCjvL#zhdGmh#d%g#L+z{i7L5Mz{R7XO`+-%|&BZPUA<_7HbW5mAV_{ z@niK$!;j;!Jn!4(C8y+PYi_Z0xm2r@iTb%GU2qSM8X<4B7cc4V$*6lIgN8E)Ixh;5 zyT#M$eqslf+)B}{RTrBz$B#Fb7K^9-K~VhD7$|D-Or_+hgUpF7EuDG#R_%S7E$VW} z7-lAD?HsrZ+WJx8%H;06I7&E5rKwLM;DM)rkARb0vsW!Nwv>KvIZ$1Tn$z&j~@Xis}hq%SixU9P12A1!oWdEyabFk`XP%KubxC z^Asp46;M(lprmsVB|R>blmaCsB1%d{l$5$9CFM?nSe$4Gzj_07QA%1j#Xgi--Agb* zaDafs(nkp=`DZY0ecOE+BP5-tO_jJrn%AC8R*#=xBY z3iVpZ)|b;Wkg84+T5JN^Z5qGHSJ0RAKUA>JkL-J&rwI{ACaDcHa!( zq3zL@7TX$v1Y)NM@d9W&=$B$#U;>0|6q9WXYA>~xUS~|EkwJJshef|KrzWy~++Au_ zlw#IYlc;<^P0FYW6e94fO5M-Ql`d9-aIS+dUM?v=*NKzo#B`QX;3 ztelyM{%i=7mEoYJ4~7*ivpso{1w_>SIXpPVYOHN3>Zb@_Ottnfv}U5+Ou~z?Rl9Ag zbEI})K;!EsIGwQo!M+;z5@aHdb%XtaR$oAFjW^>LmJ{-7Ftj1B&uqvmg}f5TD-q?D zMX%q0no;^m5QPwsh%MqXIJM5rT`|A z>N))QanT9jgwGGa$VbL=8y)=!gFCHNr0K$*F>Gx)k{a z#+Qw8mIoZeNJ~Nbhv9^|vLo!U4LS7m9LeAmv#bMvFPY>=!kgX%o)lX?+&00F_Or*x znpfk}(($&nZmlLbYSv;<FpW+!Qp9NJ&9|+%4q@4*{ zoyL!Pir`%Y&l0O!SW&j9cZArPq*lUezTHw4M^TIEZ#E1=s2BBO@d_d}Nu!0>TIA8A zrfm(I-6X9Og91p*ex_K2b+H5uuIOir#Y;=2y0(2puKELKV5xZ1dYE$D=n@zC>R@0o zl`H&ur&!^DquL;x;spd;YGNsjvwSv{7i<}b=n4mzyBDaCNeW{`Q@)&ndqF!6z%0QK z<7m>jpM3u8Su%`B3n9&h98kDzFZc6e*Nin=I2uOYT2ew#+R2H@f2-*4Vp{quF%|IE zwG%<$uZ4lHhJmkzf$yG}$S8^fXAo8E%9TQt5wb766cNxhf6~dgsg&I z=O+U@Zo-lgCgM13K+#yF6`AMDxTO^hbZ-{jyD}U!z~+1^QhiBMpi7X_*XL zV@_(WN{wnot7bJI)I6~Xr{a~$^C2HGmwofh}$NU`dEa;~Ya zl$7&hcyKcS1~`C?(SN{P844R9NP{wvsMUK2N(4f(O@>HroxH;)meysrUBm6#02nk0 z>Bt`XBw6utsoaM2*%9K7Jr@^hg#Z^Zo^(lrW<*>9BXkSf%b;5|XlLjAPY5>9O0 zgvtq-4wDD{?8I-N2ieQ*RMlQhFHd&~s1z`x`F(F7{Nhe%fk8>ooHrotdgHpv^IM=y)TFBwAfv-p06x2JRHfvo z?O;3o5$ZcRZ^+3a|3T3FFz7!A&lCKP^f%ssG%b#JdBo@8mE6*U-y-~CRF8pa>C2m> zLAW9YE>={dQimnBQlUe`acYg~O%bejGDS?e`0*32r)mz&JU+xrPN~)aGZ0HZ8@wd` z49E?pSCmc9H)YVkq;6DvTUF{y>JqT{mRN@IQrW9r_EUI6@>Gv8vldf~ zishvF7->@v9ns9}3zc)v`9l{pae`}|FFBKrB2lUZ0Ds^@#f4gYl9sGE1M4MiJ+fdWEkyh_x4C zxrc0`mlJ;y@h-n%z)b`b$l41XGG!Lb@fh82yYQ4X_nEtKXJZ9xWqY6dWh1|)X2Sav zZufHl%Rkk&)(o+aK1w}q!CbJ!OP7Ie0iELg7Bu(RLhSw495of%&VnTlHWT0|(nwsq zZBy#ew+j9rD}ulGJcCE9rQlVC9Mtc@I`4cX!10Tcn|UdN(N3KvKIM-+>6BU^s7kMI zPA#DZqU$Ij^t7MUI)F{_hKeoW9TdOvD2ZB(SB;swV7m=NS6aEd zzpFNRJJL5OE_h3?(fI&*YAqi?d+MIs9rYX?f;sRY_9S`+JkYw71DB$o!`8e*eGC~$ z%VD5@oM94#mS;bK5U^|ksq@Tm{}54Zg}$n*4+8i1aBHy%CmHE7Z?5d^ujFQfuFRV8 zVsDYNNG%^kAk-n=X|39#BZI2wMw8x6IU;aUeTwhNcoGF(z z`|A?%T_W3xgu9~b*&x%^Rbq#_WU=(SsIfw&FY*KLBZv`D{Hm7-t`WQp;3uEeu3z;5 zqGX6|Djd}f#(#uBeC{le`tk4`k%H=@c-&kF{xih5r2kt8!B|HKu59b|U2c?r)*yLRsfBRm9f5Jhx{XT8~$-SuE8$-b$*Fe7Ot^FhIbFgU>q-(j`nVj zq4@=k*15!Ma~-O~$fCnLIx{4k#fr>}ui~NCU?v+6-G!K=g1d0}CAyhJy}t}YgXRSk 
z1Tuf|d_&1sKejxBG?-`E28A64<>V?HiE#Z|Fa_Wh-v)DHn}KkKVGkyPV~0=ItZOmT zpgD2rfV>kA-)W~81|;qPoWt3LK^P<^Op+u>1CCOdHc;SYWM-yh^ubRFH%G-!=$R22 z1eh=5%%f(Xq?R+#iaW~mNEx$`9z2@m`jS(D=ILMh8P&)d8wy>yFKdR_D_<2kZ1#`euq4}?NRTa7y62bo7X3sY_` zwjLieQ4$s_VXPdzRm%tcb?{M{Bz7N1Abej7*w3*5+P=_3LS&(gNf|Iv0o5-tPqO*>77!pG4jw-zF>kCLAFUOA1v9im_-=0iaIDKE8<7GaWvjRb4-MjW$G z8LT&f54}njlWtq9ndS4|fRlt5%YtAz34c{)HS1;D83$T{lfpou$B8)shc5& zbmbR%q2|DhUOk7TF3YWQ2)>pn0xGGjI7e6ul=Y|IrgPjY!?D2##E2-8oC;fRUYkr;a0 zIDnZaJ?fAKu#m?vfNi(}IG8OA5v$2AvAP+bdN^umaK__} zGd&`oeJFfxY6G9+dKQZI8|FeBD_#N3K{rSF3rM&zMttFTezW4nb{=ehl7-GEp(+E*d*oO3Q~gWcBRa2 zlJXl!c>rI>ynv+KCFLC4m=gR5{=&BTQPX|%@^)BtBjVG{V(tm^55+v1~@NV{ylf{-7_G}4hr{8Z>w6gyCJqz^DY z;nr*A3ami+oeX}D2EWIG-v>0i%D`VAOpR(bb%q~%8$dWZ7@paNZ<6N(WsW?Ws?OwZ zV)9MbAH0R6yC2cyJv5cWzZ^8u;huXJ#uU_v-1Ds6M&6P_9&y~KaJ#1gXgiY+Ko!MG z1kU!ccVJ6XMTDsY#T?#V@!Y|1Lnsj$afxbt6|xK(AG~#O_4kpR`elNDK=2OzlJUn#ur6XZrG7KCI1INO^1AYt>79BN4{IkM9N^A_xKr(Qm=|5C?P**u* z8RnVSIl)Idc{9JEc|~qahIS8!;Hc&$Ef{i$%_g9!lYmdeIU{zPDZUfPugtFD#b2sS z{E&Mcuun~b2!O54g&>@Ag{=){Zxj#OKT9)i5 zNR!oRh?R7MIvt;xS5MkpkQP`;I~1hFGt$~UX@`Tfz)ISYAT6GeHr|sqAEX6V(ryXT z;u&co2Dz{;p#pT3Wq*(haXOcYo?H%e5fE(g1EITYl=cE^&>u3QcWxe%vwIntBMWLGXngIq|VR@Ab`f*kbQ5A?kK zVAtCZ>9;Wr7PjWYz&zp%2!$L+_>CfrA4TXfOyp*zmmU{IX>dyYbqq=^ZfH>6BuiEA zC3u{GMh`?&R(wjuIZPK~%^HoM>K}jpd%yY2?D-Rs<_yDFq!z)%39U7g4TzM2nq1av zFzMFuN9$N51(y?6#>t)unJDg3psmUgz4Sdc+c@G|l}R6gNd%5Ea1?=K3>-rMtaY~W z00Iv(@E`&YG4PO|I0s3Jm3OhZ@(}{O?i}&AJlB$C+i;CcA-3XroTgoB9r1@RR9X;{ zG8rnfynd{O7c56U{xB0E)WW0XNHHEE3ZWJrE$0|EMKq2aCJtq5;n{K?5FEa~)Iu>^ zc($Ae1&5rLS}1V~&zAF$;E>f)3x#jt*>WCMUt&w8>og@~Hv2_3=Bgl{DJ=)yv*coF zxdlgBD4BnaiS8v}L#tm0*jx(NDHP@rZJ0Ue_3{@Rh3>i?dOjuvH{9cx;~ketLzwxm z=@-k@BptUDWUQ4e)*Ek>;k2JA98TeOe-%JvIHk9&8(fDphn%K7XAGq|SXY4M6R}*a z2FGiFrJN@gZd))d$a~3wX^2hZ&QSIXmUDK*4&csW)tbigO=7tsBG<+~8%djLr`Cqx ze>WC}WvJ8g#*GznaPqE=w6PWuzj^+OXCAYg6uR1Rz+nd6?T?B04FXOgsIL+HbAsO_ z_!k7fMer{PzE1G32!5O3UlaHQzeDgz0_o&`!_dD4fLrA`hN*!0);x%J9@IJy@|*`b z&V&5sL2mO8^(i8L0*ef=2H|g$B=UHdQvlt60e7?CcvSCenUvajEXxmE~4Z zE3!wCxuWqXGvbQQds(LG+f-Ep+V`!+B|lMbR>4)&XOP5?Ra$O;U9lQ8SPiYPIOUgF zW%4|Zf>EqtSQ%kOd(uQ^5J{DvmingfCc2w9T!um*7V2*afoPosKF)P)0YD0pq$bjk zECUjj_cT$5q!|)pK+cka@AK9KX{^Czf~7hEX-f*`&J-iKvD|!Z3+~a7%q0a$ohi2B z9>cv2_x6zFB?ZZzDR$r<$GsExu8{mC1quBQ+`Dn#NkZozROXGOUJy>D{vDc2Q>ySJ zA^&d?_f3L-Pw)+b|3vU@g8xYH9|(S*;6D?5hu|i`cM1L*!G9&7k6-;yg8xDA-wD1) z@O^^+P4EW<|BK*{2!25DhXBD?@y!+Wr^I0$8Mavvz{bQBBY{nTO`Tm1NWHQGYx+1~ zehpE!7&@$2c1i8v>j_aAb z2Z^l`EEpZiitboNf()gASYX(5YN!E%9Dz81A}^4iadC3S=IB(1i52TA{}4VNEOXJuV_(s~jBoUnUyP^UWub%7 zi$X)j46xXt(X?T)!#U>DX*PaV4HGmOQSSLlC^d(Pr5N{tr9vWW@jXq{Xs4^ zXFQQ^^Bn9;pMVR5U!zky%|)6)iFB#O)y-K{SRJrO*5hF;+_(_Q3o&uz#)2nXp6uW$ zE>9>w+9T@?USqrDg5M$0Zc|g@LeXh(xYde_HOIlkYDPa{!>s40x!_OM{^{iioW<}*o(z_TMRS~b!GbtqGQq+{r?%*) zo;@=ylc(5%Fy|c+I2rP#Y}fL=3RVQkjzqo{BS_1Vz#@XWMt^}NmKrq>3TZbQ?NXU< zk%0FinnPS`e0vevkCBrH95Z)ATf?7~upkGwb}vrcsJ4+hBUVQLkY;6^?+ny6vNBk? zJB&HHVJHP5AA=CBeJpA1iW8+yOA_{k?lbldSQSUkRSoF(kX z$2MaLSltUnt4h-yqFx4g^3_;El;$-ufm@}Q8K5p!(+-#h7zUI(2dh)A_WeWtm2!!dpmLY0eAf#Q$QV-FrxfcU7B{zpChbw%8Te}wm zZ$305@w^>*g<1v%A%~&(Ma4pa7`F82>K{((4wie9;WWw;GI#F%TN&*s$)9Z?vBhIy z?ifhJ(g$UJD!k;oc<7zWNOx^NfWu6DlbyMYjP5K{$58BGZ9&`E@zpF-)bh&UrWzq4 z_iISnJHDvW!$Fyn&Fu#`&Zt;(oHZT?mfPB&05%z6_j5iJ*yn-8o`4{2-if>OUfM~( z{)Fl*=LcArEyYX%(!PYW*o7czvyFLRQ^0-(Sj=$-*bBg>f&C>tgNhPlQG$e%0q(1q zN5!nF;0^#caM5zVhTkmWep~iBW*aQQcTj>slmND_lpyKkE?StGwK1ZGp*{?W^KYba z9SX#-#1s&dcSa-*^iv#Z)Df4LxCc?bE#h7pX*>_?7Qvp!8>3x$J&Kr7iJ3$CtwH*I zz@n7+_Og_3q%rDX=Ggr(O1KpzgxxJAlsX{Qm%K6LyG^T+bP;0_$=D4jH7L{e#`ewP z7$baO+tjq|2Jq7`?ZnGdQM*avq>GhZ&o!{Qv(;2MO#%C9Ww%0s2Pqscym!^NW*h1! 
zk<7R14Rr+pp(EdN8!LBozsSA(rFwAI1K$)l0vcz2GS)%na0Ql0AO#174&ga=Wh^;6s)I*SuIG@E)KaWPaw{T@T<8IZa-v}9?e zpTIC(#6EaRZ_Sy=BQR&pWn5E;)6216Wb}^{h#x9`UUov**r@uPdOu&tKHI{69aXB! zI~+Qg2ZuU@9bqDsg$I%|_D!|-sbE-ChHp}n*<5QGCrfxUa+hUX&`h}BKyE7^!{G-E zLvI1YkQ*D3fyZiOk@Oi_1rp|O3Ko6_TFy#7Qrk9iPkgSU*c`)yaL2%I)O6tUK#0>H{`Ll zP;Q5#2{pgYIU-Whj)?PGxue1|Ujye&3g^W{iOeGF*`W^SP5A@b1*;Y0i1|kILP=DU zwxqD#3n)ZC+r>m3JSkvclU@wNTU(Gkh=mivWWV`dEOA&Y!!?*>zwZXKNs`e4pK44~ zF#*;C7ECw5J`QXG*y~u2iPfEw@&IsP$?j+H%df8qdrE#N$%392jQ`jB^TQ8$sD%Z?kN|`0`+Ud(gB2^H2qnBe|Kn ziy`S01dP|l%GBL_X)i#;#MC`R?IXAs;8ht|Fn2x4#N=G+5JNPY)b9fL)?B&kq@p8^ zjGNx$Z-BgyFFnAQ-oVg{3`LAe-A~*>0&=Nvhed}OMMF_hkyb|lHfKC9a6~+eJgAG= zU_9G93`ZDH7DJTGCdFTv6=Xr(%m&(#%s>eFZ)7y2dfumSyWP$p+DZ#J@FrB%_u42mhQ5KOvdW)m)FT!*wxQRti{aj zQ^b@}wCIYII<`SL{p?`BD2|5cTcE;47o32Wy&(y-L0UEdoQXIJWm?2h_Tr(Jqi7~g zqX=UJ3IvaW&@CC)GW8|AB9!S5&o0&KPLTplAL}Fgw1RNxW)ls6obN0H=)-3m)oj7W z-<+mi37dj6wK964vv;sVAFKl})?)y-&<6z|mcT*r^tf^u%U0Y3YPcy55Q_da2Yy1br66gTQ@=b*6UB zlMHrK{O;iQL1a z3&;ZmvQ9fV-uIm7drtN}r({TAIeH5@hl`BGBxWHr+ZKA@=lvnLu!?$em_y}AKVmXK z%-7~T8BAe{2r-F~LX~Lq0rP3LCe1Sr7;aR=fes^~+pIHYkV-Dn3$=<5(n68F zg5hx-zrK0K&&a&3ED%zUu;Cshcq2f#o3V};)EWSGY_K)@$4(PbKOGX2VHWVWG8|Ps zB-q#Iv|?9&ig~laAh(iAC=3tL#TZ__w)Sn-4f=zTrdmW%dl@skQD~N0bwACD`xYAj zBIYLh*1^8wH{h-=CWI=%w!~MvgbcKYFyK;=1QZ`AhA;_s2HUoyz1b{|CSD<9@g|bD z`EjA!%{jU-w#^CiXF5GY<6~v4r{WtalNINn70Tq&3eC$%aiFotOB&HC6%g~-c zLH<&Q;Bs+AOc^zZuGdF!9aD}yj0r8*`+rPz-BLX&(`KZnq<4ZXJ)20>%?aoqV$*@)0WV8f z9Mem~9mctKOiiQb;D`ifAjAX$+s~?{Mg!}bigF;pK=_2!5I7-1B>2bhbxnys!B7_M z;HMUwJVtbBQJ*+K--AD-Dpl+_s;J`Z(%c+24o(sqr_U5;6ySUfHxyz~KI$wo+ni$m zE*qcIsM@KGnJ2C{vM>*GXE;;Vdx*kD`lb3v@gdyv3kbkMGH_5>T8+|HV#wX)S&p{J zao8AV8hPj#*DvD0ADLXop}9QJN)`eyB!Vym4v#}xfE$-~!6;^{_})5>f7P1{e%kto zfg{G)13WAa^Og(zI`jfS&QWI9Q(QQ_fc25wlDqM3(YIHQo`*)@Rtox5g@Qi&s8DJ+ zg;{v+3bPdudZoY>RqR^#KuL0l%G(Pq81d6R^eOEzjUV zeS+vu5>OWS89AO-PJNYrlAaG%GV9qhbJjP@l96OElUau9;?9>^Sc`ruL-HT9U#XZI z<}S=0Vik9cdqX&bxU%~eJsXVX9$hwu9He7uNwIm+xZEI1GZ>B@Y@3DXLnFqfeqiqZ zfjI%%<9O0)`p+{&Dg^COjqKB+cv2r4Miq-+XK8Vxw68*UIxW0(r`U|A0d8RrP&-p^ zWg>Et&R4@BAj<|g-oC=`2KGQUH6+}EuS8eKCwdjF+ql-m2@)}#=^keDDz-tuY-ks7 zeF>LZ8w)(_6=#BRf}Sj8EyLwnh{G-o!DjXHCSV3bb2+E^iFjhfJQ6-IiaH5tiA%>% zpAS!_)Qh;@ifH(%8#NHYV$qLRON)yo^)w08p2ZH~jXZ@Jh7Br~rL-zP=(AOG2z2ul z>|4)AUhIubgzXZkU|g8wm{q7=m@zeZY6*AcabG<)<56OxcfqVR8|r?iCl-qy)@gb$N1S5Aef=`45#MI)#A%@DnS}CTjF{N! z-6^HrDJ4=Q_4`;j3>K!tsDbuDLq#NQX+;^W92~Kd))SM$IfO-gno*x2pg>U4i_bCi zJV2-z(^{dS0*;_MFZ7xF92DPT&x|2pz+r|pb>^5wqdyOxKMp>zGScaxP_$d>0=_i; zI$ca8!r$RXSy>p}89g>ezH|TkKKtoQk463jBejrnvX*|;W zvTlvu8u|nE$#9yd@(27Vb2YH95rx=%RG&nATsCd|@{wZdC)@_X}Q>AnKa zWiO%fz%;Lx7J+#CQms5+E4-Zvostxb@{A=qe^rilE<{Ii>jTi|J+I{+KZMiYz4@it zgV+*%s64NrYSeHVchjw^=H<$vmVWnsH<(|#KWuTy<9?iiDxcD#kj8iq_lp6++dT$w zJ5>t=kl;DY_=OS>oU{waYD)7raZd}o;m$go6p16Fan`Os+-hEda>RoSrA#i)k6bLt z(Fr(p9&JrXTm-{99ytzFHge~2mg_k8)tA@t4Ie( z2yDKA;tAqi$Kqb9SUl?p(J=qf%{?NBzzyz2K*1|Si-}yR$X2= zW)oO(jhu1jT`cI2#FTFpmvCS9?cyxf_Ojx_+Hg$8FEHF`TDF6FfsMSt5L;Ttrq*Ds z#mm0wiDHChR83F)91{6ytTP7P7E5k0Ti}~Z>g{;bH?R1KdHoL%Ob9HVE8f4x$R8*! zweZgoD#fMZ0Ed3@Eg>8FBhL4)3VVE_oF>eLW=M zD&JwtsCj~3-l%x`(kNR%%Vq8%<{ve}l^wmc?;0#o^-0E&c=qwosXWM2NR8V}L7o({ z(BvULL14gP9T`@eW7{dok^7V&tkZCSg7~1t$MFvxuu3tC1#$Jo#xeJ2Q`N+_%oxIP z3C9tJ0wg3ZhkM~5I_mq@V=}xz${b(`cxB|@c94VvToO185ZY=HE(IJm^amuJ4!k#! 
ze8xG&vca;iZ=mq^_KfNfxX5nt#6F9L_i>W<6}>Q?vIt}uta1mh_w$iG7a;>u*Bx2o z8}TbO$D3DCAJ4?E$Y|^9d<*MVGdXQ7Y2jWPnkolrXR81y6#H}o@K6R2faX?UE(@a_9YlprhM9s`kJ*wsH zfr-|5DwRp&G=kyL3=UMzq_X+@@+0|&^M(9@{83QjUHN5%$MTQm59f#T QgZY7cHh*XSj!f+T0M)P5@Bjb+ literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/networks.cpython-38.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/networks.cpython-38.pyc new file mode 100755 index 0000000000000000000000000000000000000000..b8d570f8607278f9faea4ad6ab76ed76e59cf23a GIT binary patch literal 26577 zcmeHwdvqMvdEdh73NP-l#q$rA@B|aePX_}NMii&Isp+riyH?~)codvMq zVzIn4AVqlL*rsgJj;T718{0|a0#4d<*rbkYCr#6wx=GJT+`2E<=QJmov}sbiRq4Ls zK2k?Xtp0xAotfPQ7NVS!{@3N14|l$M=gysbzt{clcQ2nC9LyT{{pMTW|GxKp&@jHm zo9}-#w^2d4cBZKCH>c2uu7Jx(}_|7c~-+-NS2ZzE>%iPd1t}04AWT1l(NW4 zxb{m%X~2EdO}Xip%u>#E+>DzAOwJlJDR;ol0Xyg>-9canflIo1cL=yV(xyA?jvzgR z^7;weEx22N8FokA+feh!lZHFyZhgscx7Mur!qr4+3ozT-8F<^GNcK~xoh}jBE(cKNq?hv!hHQYUo?F-|jannHVo$g-b?yVW~`pr9l zyUX1N+`jOMoxt4f?gwUnh`Ak@3HJan2SUs)VBX^11I#@k<_=))bq@k_FvJvrIpiJ& z=5UDF4a^aD5}3&lvj>>_+@ru84Ka5DbHDolFb{;7y}&%^J_O7|A?7Y%9(ErA=8+Jy z&wb22=DrPmd^hrryC;x$BFx+Gn&*s(lQ&5*Qxj(3JYP}u%5x!k-k3L6%(l5|x#pVbS{D*(Kd^~;do@WKSp#J??~(jun14RbPw9Nzr%*oarq@i; z99rT`1*yjuo0lfto3G>dD&8f(+4qlr@yS12{`84p;AF+G&P^?@6c?0p>=Pou{2w6 zdWRmbHEPq3JzeoG-1|e%9OB<6ADb$>wS~n)3yW^8;T>w${L72#f_JcWCCKSEdF6Vu z?hoQsUXEXDI%eK1;2Jj77TgC8s`e|*>Kly8DU@r2yDwO3+{f7X)_lUX+{ATj#cb|F zj*Xn7VU7*%)Hyjn>89Gom~qwoFh&z2ke&*X)9m8&Acs_C2Ax+^qp1D#@?E;q>dO_s zzSt}_DpzW%_#=0D?=9B7Vsp_i&MYo9-HB9?nOSO9{l&$G7o=Mi<<~2Xz-l%XWl@lS zD*PBdhQXGpMgOUVR-?91Yx*_!38fYlMk2$PmHjKNT9B2byiNv0gMw5xTl34E?Aw5g z`>=q1ELb^H?E>~@f*?acTB(ZAM1sEoT+9*jegO&JXgjWX-CW7G(TV7|eZJW?+Gg8o z+wEjKdo{I^XzoQ`0(qx>t1B-4iMKkYMOZdE&{aU}^9SAAO=ZM+#9v zvZd-xKS)+ut;Q9hpNRodP-_W2RShzWOMaM4G-qc58wB}eu%%gcLD61?FD!>`8fuoO z(X0zzgcxa-rJ3bg_%biZ$7Li3)nV9z^lWRXTz5TnJHB!Km^p5ZL{)y0Q_PKG0Z zkw{;qpt9AJZ_g)JQ)`CzsGkPeXI7K#v}8=ZUH7af4kJ?y8@xz?)PMo!qdmJt6ufhx0AUXptJa!v%6RYa3Mc|&pI9>-?f$X%s=&)6go5Gesy4=wJ}H_ek+BcOn^N>t3V>1G`c8 z{2<#YBP>yCNFPqXngwl?vg8se0u3*J*LTIB$jWl4liz`MczFQwg=5;%AC9S>2Bu1M z_m927pGV@X870#-O0b2<88vXgjQujLU1-{uG9~C(Hx1i11)Dd6zv-9JnI(|JL^epn zKsdALdFK^LP?6cH1ksOa_C7D~?EpUd9eruBr=d=ue9BNVfVPV0^8cj4sU^Hb_4dxLfR;>|aXDSzJVLIQz zmoHURkniNled>Iz=|L23XLaeP&pp9!!*CI%Pd+ZU7)#r$HD-bgWm?0Vtu#v`ennB2 zcdCz3oi_w2UuuW)bWneX>D4p1rD5vPDln|PnTr2m7_H^uu$K=;A6R97MnCp^Ox<6= zjccqV+Lofliu47iwTG!y6MbhAUP!FiZCjloy#oU(Uo#=+jCrW|m874d_;9W392@lf zJZfvc9e**OLS0R!Hq`am4RtxFD}}mJab0OXiI&lXNKBo+VFrV$c5$h$YEaRt;!b4L zd9+@Ukk#`5=;!)!O%3t>Spv#-^&G)DfQhuCxC@eE9R&Gs44|$!48dS{uRFbzs{-qX zgisogkqWa0WQ_F9dLA3$u=dnc!#lk_00wx>2CogjZdtZD2HE4l66oBe%;oLTz-_2y z!wAy4e4JI!0YE;PluII`PC+&$+9u@EevTVO^GZ_sJK46@td$h`XEg!SF12PsUL`J$ zv5)nLuyk08WiYO|N4ayL3>gQJUyAfIVJpf;^%Q|iK$;ADSyhObjZzK^#^7cwYKXKJq zwMD{#3s=*Uf+m?+)`7sx{0eN!o-)N#IHw*Z=4XJ41IkTWz_`&_EeC3 zes=cA!(yBSiA%GyTHK}191&@kfk_Cx1q&}ohcmnhOE^udX6Zy3YUxSV_!2IUGR?pM z=fD!n!{O&|F0oi#=$)JnS_Q?gQ3I1&VMeb&MuF~=KIVd)gj^;49{{bR59Sl?6sQ<| z11L3(CUU9Kqidm0r2Gw1t=;osU>JEY;K#*3o8F*Iwp6Lu{{A3}ri1%@rLew1> zl3va?Fn#3MQP)Gm&~}1v1!D^%)Lu=wi8T|3Cv-ffKNx8@c^#w3k#0M)HYPrp1j(46 zT*+dNpWR}t474-yM4~+~ib)fpo5Gc#ed0RTA^$p9sV4zi$Xq;&=WRZZI+If8PE3t6 zekLrP!SpG6Juz!a>7R?=d{*9Ux3k-f`0bcd^S%B+^rjuK1;ZAgBd_2zZ8_abLQOSm4a~)sYjkh8ZoN5slY&X@W{HGq<4=(Cd{uXA zYK1q+id(5SZ&-THp9>!n@5T*V%Bv5eoGzRXXXsJYB+i?|tOk?oW-YK)t+Ax80DM>x|i!3oJ^Cm67NRygr#5%K|t(|!;73ut zc_FmJfgB%XrBlRJrGA2sF9DS71ta&+%f&-HEomFKKVRuFgG$}ru0eLbMxlBCB8cc_66*Uk65tc z6Z46WSTnR$ID-}}ahN{=j-rg@CEGSN9=)!R{)sZ=d(S=O!>R*+D|DcK4s-O*TS6S) 
zD7Bdv9ijZ>R513KTWNu;YQ6qC^#QbAtQ{fuiA<2zR)(i-axpz}9P1fai%T#~gKTvX zv@yH1xP+O&rF!kM;v`EA5WD~Y?ILDquA4gam}#oO887-{3w*ux*=7$} zeFZh@S*;A$n7I@BWEg&q|e?Ulsi**C=D;D<422vLW7Mt=M8GgRbaMn*oKve&C!3a6UmFKki@? zufcPyei)D9Lq3Z?>L&?)mVm6+Z2^}u+U|ue3^MhF%4`jjjhT`SYB?fsQmyj2jy4w~ ziE940aK%Q_Fqm|C>-vukZ3S3tq{A*-ff7&r&sqRV)7XW#upcNgKaESgNDk~W>-GFD zXTWhp!B>&f^J6#i=S>3i*bx?;0&XI-Bi6YE#BNA~TpaZT9u&!~?FLHu&CP{p|G&BL zmuxPG@XK|peYMOHc|UkUT?j?YUp)N%$gZK=ygzP@S)OCUc0d=c- zh2XCcd=el?pVOXQ^(mr6U44$JK8os}1MeLGcmqY1=v7q9TYEi?{dE*&Lad{x2`LAB ztnks3Hw*o=M%xqI-iC~Ww8P(R#;CCsnreo!4s!)|4vZ3MKP5?!s8;+?xCJR;{3GGt z$-YvJZ2l@PvDY~nSRPx~^k_EV+=9bd)8_L6j?h2jdnkuPz8)9m+rZQ868^kn0Np&T zIhs?;nbeze_Nk+&qQl+2>l##t@kHS7;ih+LOOYEn`mn|XM;~27VvH+H2sjOZGN+gp zKQ&Bp^%6>8Wn?Q1Y)t(oSK$7Gv(|#y4iENLnEhJ~q_a%>Fy~!6y1Qy!O_;{Tsf+vN ziDdLdJ2O8ZIs5(G{2(R`CZ-K(kOthEn0&z87XHaj39G=f2!}*1Na>Xfp$e>Xan(=@ zH&V-Kkjn@0SDU^gRjeTT@Ih7^OKuHzq6@w{%WUa6VX3#_Hh{sWmq*lb=8~nbka&i5 z_bkZ-X%BN#-&Zsr6xOBmou*JSXOMa9((G9*Rl=gp#x`YbVxY_Je1xy~MS|N2NC%3o z(C?;MsZIa{=6pYX{jbRK#sFfsBtFag)`g{!E%gJu#C`;RcG>P>+H^ z!J0B%#6(}ldmLtCg37s##TU}ScF+OWwJvVM9SqEmz_f&u3v_eUoP`3Hv4cCFazJx3 zR&$sZfptGpSPyQsuw*fj?3gR!)(TRv^JrL0XN>S60b-;L!EHZ_rLuC!y5C23xGF<0 z?XbCx9@@J%>xOkt_ZVpG4Vo{MtKP=}u37WO$IMw6U~TD-Yq~F*A7hTCPQGlsY`$#G zpf~7u@=ajjd(s?o)m*Wg5T&4rvrRc7aft_7yVioAaL zk6RaQRGCmej|XLJ)Gr_fuhLhD__Cw}>)>Jacagl2(BmL9Dt2XOFtW$Ea#CGfXdNGf z^&oOKg(cVJ`!?(UaMTVSlLcPyI1*6}S|k2#HbEqRMze*`i%^a*P25EFD=hh|1iuUr zQBj2m3cS*YHVr#$=xRol=KmUtk)Xt6OSAF9RdhY#Uk-owSmWR<>b~VWZt*#xlXP23u= z;*WluRMF}cX80)jXLALDc}&_D4=ESEw?P%`zzPN!!10~=jHr?{W;2*KUQc4pD2sRz zc)VvRWHN0Fie$h|!_#Czt(=5cDZ7&MbM34PyTEn8D)bGp9^g=aSP$3+s-~;9$Y12% z;`y|0$7aum@5-V@P)54!ECd>TQSIa>O|^|BRtMo7im)kQpY;d)JVOISSnh>yYY56Q z;}5q7L?Jr!BkfFkV7u{(L6!GO^WuT#VF;GNp63eqdia`H4(}U8scC%qNPAGcPlZq& z-V~9_vCCqABPtz3%JnJrO_YM$y{pt;gDF5sDTLh4zDd4XGfB6`CUt z2TtU$(~&o{jzwI+mEEXUYnZsn-^uXrec|7u;otkUXD>>JC1N=FuC;h)m~ z;2Im$A0Bf5gn3$V5xJ{|jsPGzyuBj;@?*lQ8`z#~%Aa&q#P zAMRNf%bS!bVVRsRL#&h=)a7{3x_Zjy!?M6i*`cs3?orn6DLWjN1y;(Agk^D$vdNyZ zg|IBJQg%yN7WXL2<+)CKN5itfO4-}OvbaatbWho_uq?1rb}Q~V^OI66iDwX|jo#V5 zY0kLhPz829CC}{e)AKvgkIr@W@9ikJOBNuE@*PM!`Uya`KZ6{OQV}`X5SNu&cK6h> zr>mAb!&>-dmh@Ou%igdS_M)!kuAW-6KNuse`5`)4=4`^~4P&z?|U#T_QCqM!&SPH0<~ zVn7rU!o;gOrjbsUV6=gNBRGT*X(qc8WYM+D?6qn~bWnL>x_Kn9YLfvHlStgh#C=E{ zW#T9jkf77e`;mBni3gB)kckI_)EOvEM6xAjsz(U$xO*hn@?1-Tq2aoiLT=6Tx#YRj zIuZ;&TWdid%EF?omBPdF3XuiXE;X4mhlDo zHukYdS*Pu+4#7We%nu8rb6$)Kk!Enfu8y=3*@wRc{-x*q2384WzCaEVCWE1tLC>q- zBO)O9w*>!&;NKDaKEWRn`~ktY2>w06e<1ia!K(!Sk>IZr$S~hz>N@~%f0US}wG~)% zAm2IA>l_Gl4g@&|0-OWE%{|yS8@1dP$V# zSIY3QHCzJc=Al&tRATxCPaMOMRvCn3GCF_7hF2RQEU>(6bTS5wI-X07H>@al>km#~ z0Xng;)Cla_i>19${A@}+a>HoCF_%a9u{~vaW;i5S`t{|T9XyR;=LN@}5Q>F9u!K;wWddL1 z8p7&Pf~2X5H6qWDl;t^1+!2XJL>ZE`l;HEcG+`R6aBd&~CM0nw!P;431Q&w1SGV9A zjmTX}kmOn7He6%4w&L0rk-n55`Lo1!T;sTQ;My6nfs`P@-;Qe+t~*HZ{0A#Jht8NN zikMRW89k(lR0NVx{&$J{BZB`*C3#EcBrzP$joR5haxVv>gL89v`cA*Jju!x=~uKR%B z!dupTg*c0Uk4+Ex7EOni8DQh1xzUDTfoE7xr}Ow)bqB#4`-3@J7x=0i5(pT22Lc5$ zxRQ!FRa`y4M0ocD1jVXP;!>OI&0qCj{C8!SyymH?T4Ys?jqG zhCRb|YF~HgG__FPPs7JSf5}lQtgt-hEnN78&jLY+8=paK$%h#_eTHk1Tsx5(_J=jt zT#H1R{!iN%Uy=9F*_>gBW7rbiT5&;hZ4>hi%tY3bQ7&AqP{;ELakeJHJ6rDT@GdEL zXgT^J=MP?Gf8@izLt@uDQ+kPiH+oRaRV<&7T-8qz{1}1Qt7813gN=z{ZM1SMv=`+k zkP33*Eh{h7T^Ea7_J!R)Ueo!tkni{ZU5;e2SUbFa*%8F9goH z)8}Pj6T$p5{tPI0#e2djk?1BHcYIem<&+jk7_n$!xoAdo%ytE(R z`v@53vIl4$x?K1;{YHErmwA)CzFB!$is_Ok?NBx#ngFA@PBOrE^9GV3b@5B`mUX4p zy&;8=UzqE$B+z#$;eE;-Y<}v`>#B&Z>*DWg=x40`pT!@=KJV~%S11o@O5I6tkwwU{ z>Mq_+5cJ_H8`h_{6U8)F<;=a{C`T@G)n0NGxT!k^c5`lub?P?M7uf~i8*+~LEIFmn 
zy3^7uA?>|~ZR^$WqRvLyHrB~aZ_A@SZMY@Hvx^c@1)%t0>9i_q>;6ggK=dvcWV%A? z;g%9#VQ0Z0t9ux&HWWui&uDGzW?HOL=|#UR5>)M?0Kb9ay^KKP8V*B8ws9XIL(s$} zLo|;A%MIL*0-KJp`?-P$?DN1P+C3~=aFgDgpK()|5yFU--SQUJ7vaE1*$<;EV(X=B zt~m#+1MFvk#oAzqeHPdZuvi@6sv}yELkm)F7Pw!>+GB(}0NlU@%liiY=8*S0vY#*4 zWDCB777U^Vn0-nM(r*5Og|$o@)k1^I^#;BB$VvNFE5JJko3+uON93 zpnY4!-80gB9@s5{J&PwsyXrcIoKeY{LHXOl^80{AEAj3ZrF|pKQ8$T&icg@0x1oiY z2}=v*8&K+({4vzKRU3D7*&^&>>^h7Lv}s#&TfaQ!2-9wzIxo@gK?V~o@x0X3y`*N+ z#`4bRn%KbDT2weYpoo=9M&lQGY``PM_pb!jbW{Be6br0IQ+)^tp{2m`ny=m7{UCR( zR~q5T27FS;4IAvZORG_-))2TP&XupSGHkJj-yWJ+AWmkOdu_bCwoc?ZtHN$YuGLL! z*J4Ek2QAFN;r7RAoIjyu%XQ)s_yqOen97tu>f&IM$ig56Zd|}FZ`##bosUZ~u??n> zr-O{e`y-~yZ!kM~R-*iEY{Yy{{WvkXb1m#aQI&=~L$<+cG7J>N%!-ygz^)Z>)<&NC zAi)YjuTg{$EWA@}i#`%>oI>I4cuInvU@LI{wOm#q!3}KO>c@M=*fVa~0iHGn^=_6n zx}^En!Av^~yJNW!n^FCRBVO4tHICv&gkxv24O&A8P*DsFOhUO^c^;-7?j?emr?J2aJ*YZfG)zN&z);k7csivuUQ?r5uvpuTY z;aBqadj6-sk+Du8tFwB6rXy^fU_g68wUrvTtY>M4zBdw|NLq#-ua~q~ z7=s%FGHcRLfTQ(Pjwg+9sg)A9(2FGw(IgB~q{Mw6=3+pnV?gOG6pA}{mJ zmoT-Uto=6dL)Z(%97@Z4a~?U6vFdwh#jwb}JY=Qq4vFmJ_fWP*^&CTvmb?2=z7Uq* z2Q1nLDfo!AEDu@fKvqtnZ6jzKCcbQ&$hrb#-4RmVBLs&CD83YDc3KT0VW>&oevGO6 zn7W^Ul1V+l)Pn>M5j+eKE&4t|1m%u8$&@Vb{wLf9)=ahQRGlNPP<-z(EkH8nK!-RV zXR24ss7Hx=jDRvGiudn`l9-9qTUqE7K_9X4PLlaUsDXx(4Y9GUBQ8XIM54l>$bhJz z@aPs5Fkr-+FLc~rOptteJ&sE@{D_#KK){gCn~Hucz4ps{OC1n_p=TtqWtC8;`3do7 zbx$$WzedgdOs$oOiD_!Z0%A*diz5*M3TGuU#G48Rj_C8OQ_nZ@`xG%%JlOM#tuH!~ zqIFvrn-w7t)`e>@Ab~5lvLPXbX;wx8)lc`x0Z!*)Ij|Quy>dXyQ%*BL+0gVnmmrd> zNv#53!9#kvXLx$4-f+v*Kl-d0*(?;MZ-il{Z(&!{d{$Qh^hqeLT5NGeyblq*9CZna zXnFM6&L*}FLGT-*AHOAb(o$z3$_rMa&(K)JWYXz?r8YV*!YNIkTuQz;*T-(IHXIKU z1lkF8)}tvC4i4@_LR+}Ph@3-2o~_=`RZsKl-$AfW=WxmX$M~%d9_Cj1UDT30yT5mR z@Af$8$ONPfVFY?C@x8{XxEZ%HfHo#}Q@{SHK5d{w>{C7cy`}s85jmTuArfe{cg0(Z zDB?HV!mCDN`6yq(af0DsJ82=x4Rp@$;qM3Wr|rZ0tGMDH(+aL*?)Bz6#8}Hp&u&x?4m*oKiVe}yS?GbU4~F3RDeKi#GRTqjh{+H! 
zSD*2Pg`yHMViLBe7Tn85#XnL@yaLB&lU{_{#fH*O z9m*8&0yM5TbL=o5*1(2**j;WC!RZoiu{Gou_DUEW`Kt(y6a~jo0ek!~{s!jhAS;WV z5#76shJcUgN?2YxjL%RKZL{x@Z#J}a?_~S#e-8^E3qxF+~oIL^^ zl$P<^K#|PluXo;ii$J+xFt!-WXwgR5%P|x3q>a0tJjKC@@by@B$sTD)r{s0Ga?2^o z;v1>;OZi&cP&9#0Ij|*c3y$~xvKg8-g-XT4Mx!)H3R$W%NcR)OKiBDCn)=IQJs;Me z#Q0(4zsGTT#{slRBI$rwVlQD#bkNH}Zde!51lC62d{h#jil9uiPRFNfV!B}~`GWIo zwJ6wkGqst2}w%(8ENVv_Z<(;ViXFJrAPby;mlBlIKE8;L=y~gwZACQcC z7g`+AMz>0hU)qmAl=SRoNHUX zf4ol<-wXx32QT?z4+RjjIR*4sYT8c*nc6*d3;kssOD!wY|5v{|_pNKc{Jn1@O8s+SPDQ~B5xq7;ZqoKH(#Ur8^9fw0%Wc7BQZb;53=m<-N zX^3ndSThf&B#q}$tcUQqH5}ck_iBCKdd0wjP#gmu?}fFodA=Pg6p-s+xiyC|sPl-v zC-p7%|Ua%IABP??yK2Nj+pitfhSvgu%PVCep#jy`VVs$WzIUSf) zsYog)WnYAz_vR`s1U7FrvHBXvwumfpg9v52w2DGTl!Uy$U))`@eRxGYRDvzlda5b_Y} zFW8(8qyc8IGt10tDI*>WnPtcrjPc7!ZsPg~GH0Xn1$EHS^T>urwpj-$ER=)fY-M4g zqNY(EL3h|Sx>3?dh0>|g$I`QWUBI5D{OXo8*vVXoJ+=Ks3;QNEeSRM|t~m>n2g`RR zPn_UBE$+6ap|c93hbMZ#Gll&uo!}45J1Hb!SBE$o0!8EO<}qU9XCcJ3aO!@hCl|}@ zYn;ilMw~#x-QME4Pi{4K;7mu1PD%x^_DyU6?zGbGv=SxK`gsJaLa^yHzCb*!I0RQm zz3s-QYWea3q%a|p3GfTVir`pd>I(!ideZF+9LFX=cRUMieaH#bd7wl5GCG$~$+tN= ztiynF2~k|;n8kT>0oq_3(qnm~GfI(W*F1NAa|4f*_PeZCPY3c@VS!gizH{$~KKGf6 zZ|j+LqgcoNM16$AgAI;v8`5M4WQkt7GsU;W6R9uh&+h${V4yJ>ty$Fmn6GBNhN;+* zo3rsrl!@8c#5SG0c!BhB6n5nO2~VlS+LSv0Z%`V6z=QbPIo@hWT!B1gRSZu&#nCd*E|+k?C!>2C#i{UQ>^;TlE5-9T za#Af7pRQlQ$-4Ix&o5#cccET<2M+IEyimEPh@-VD=m20AuT&O*c=uAhI#(~gn+4s9 zG>a;YEjoKePU9`crzz|6tLJ>bG%TC)u5a!~Jk56pl#^hFgo5VJGq2Icb@PBmWjEvWFhWsOmc$;;BzVjbxd}%6x() zfiK)p^o@RH$iMCeH}M&r$Kq*g20gd)Mgh3N)+2htu74r zWYbo{fjZa)h8^sP2BkvKCmx)}v38;qxSbTyZv$|#C)W@`N?VHu(_m@9VjPnwj;{PR z%P`k0BT@~er9=X%32GT0@D{Uhlrg;Up8t68HBQecFPqHDxU6} zz+6%n@lar14pMXa9D5TwipN2(4`vAl%1bSrabGJhl`o}0cwy`zHwiMtI*)LJVq%-Q zye;BTGU&mshkA@4k!WbbeEYpO%BhF2>D0(40}j@kf2vqs!PC)vM01s z=4Mm=OVK^Ny|)*-Sf-~NNY1ft4sg@y1dyo^9XBh2OsQdE+(UPQ)PO_q5`vqtd69(4 zjWMudGH@V*47rsM=kp^pF3ts^t;LhTahw?$1LF(SK1$`z2?4e&&~L$2+-sFinNhTN^~ZYriSPN7Fb{a z_RR8v3}VO2SaxFBj4Sa|;sSDc$UjI`enC>HJf^qJp(Mb+(=ak zmVKr#r*EfEpIh@rcXfQV zw$U+NQzaa|ZFO>P4tax_ZM&0q^D=L>ole0mAfIFQBgHLlDlE^OM+$R-{GRTXP%f|{ z%Eh3Ba+#?&m1^k;W>&VUs<1!ydDFks4v^2k)A1X@2ipqnVh+tdWkAh2A;YJ3((}SWGNtjI@D;7P&^O#lVW5T3Ou~*s;?p z4a+0tvC0jZpF!{0#2#Ut!#V6OpX7)0*pr3TFJR3jtT~4{^Wj-weg$@lndEU0_7*2@ zctR9?q66@Z@`nGQ88sEuH&Bb;uimUK3L|Rv17X~22KR*)_CzkayBh>l#r)V8ddGht z^jdFMnBi`x_fS}VuNQ<&m_FzJLt(~UUfUKb^;&uF{EfoTpzp6gJO5D>@aX&w zZ^q4?sN3>hWHXdwUU&X1MPl{zTG^hfqUPjsx^*t%8AqXYfu zOGmMQ6w%x%^{auw+3*^E=sNP_)dN4Ks(X!wYc=l3 zrb7Bn+o)1DGLHv-8*Jp&{ha_zw%>@RjhLgmG)5i1|Q3;>ySZQ0T5{{gYp@vWbS8L6;(u zHdUb5{tD@?b^ zI>@Q%I?Sr`k5TgiXlP)@U&LoH>f)ZtN?G|uR^C&8pvGqjYY!*mRgrrM-H0n1o+B(Th zl9wbaNlvomQ66otp~vfTg&O|{$YZ7bt%04`tvA@pj`>*Qm*U^pgT@c%(YD&Y)cN+n z0W}<|i!T$EhHs7_Zw3X}BE<*qCWYaZ5m`Q$AzVs>a#9*zWyj)cm}PEHJ*tdUjM++x zNx7lEh90ma-U7}BTZ0WmmJv?uv39owi>1Pj)7q9Wx=N)J@!Ls$Pu&CWhUNIWoO@Hw zT?7SgwBEs}L_M|jE<0J*Smg`i;@^~*$A({x`QG%HGJibn1y2348Qy1*tJPCmP!7=K zx5hS1uF>1|nk?cu+`uOl08Z9o|J?qsvX$AC6}s|GzYQycRrcGF6N*qbg?de>8`4(l zpScc!Rv|OBvek_%?{s?Ypc8~KI@Y_q^8Ri+Zf<_?QB`yEBx~9c1dy;|X~($crU!e# zb>zppF2(7l_wnYYYi(YWO@;KEwo#>Q+{nb~MkY=-GI6TOEsb}q-wa}5O0z;0Qg zZKU#BVf$ev=03I>HY)LUP?6f85`^1+SPOV1Gq5XXX&*1JO3>@pwxjA(^yr_yMIHTz zu!k^)A6I-HRO(Ii-3>tkDm7mIeCc2Q>A(Kt&8jA}HW%idAoio`7B@kD;p~%IlzBi# zm)GT9jdx*CHltg#K)y(r_8r)UP#TdiVIPK}$lnRV+IGk1JN%?XNNu&S#z{ECJ_%?c z-loWRcVifYFa(7jHtJxEAl?)Up~qmgMLr#w*JyX|_-!#0f|T9bPBgBV3!xjJCA~3S zB2k{;$QI%=-r?VnbB5jYh$pjexO&Ly!oe_-O5TPvhw+&KE|`H%U8*0stc^*>YqESz z=-pl{a`ys$ryIfM?eS(9i*l_S#?5dy@Z<@)C9*v|7_(`yD2v$vio*TvW^LP(=O^+# zuxe`C>QKRA0bD!9v~P3X<(%vjktbE{5jua9%2v0{yf)`wr$PaXe@bt>>M`^)&-_iIdO?!IuWcK(-DFkJa#R;q@a90L@sWm_Rxc@)SELl7@tb 
zUK-B{tKz!M=MWvRlhTMh^t@bwJ#-i>EzDVfhmKV-6CS#KC^q@?giWiS#?Bv#?Ie!G zrkzb=JBMNyU=h&RwEJo7LO8^lzhd3vH9v!QV8s#-t9=IVz*;39*7*$HfmKSpf}}mH z%pDCj%VJ_voCBwpm)|_7KxGH}qi-QDCy2MO93nS1^7$;EN8ZWu4)TR8Uy#%S-(c(z z^{~C^lwe62C|Q$|6)vW&`z^>>7~jUY2FA1M7VHniQply1)<&w8$>F0*ir=r^d@_p< zg|Y5++kV_c#A_xM3#7@UzT-zJN6sN9ewSejzCyWG0>=of5jYMY@)UCM;)gv%U$%WX z%H#u9%bDW?l>yN-iE?-s_-Qn|a|i1$3WxkOuSuadQQGgogF0#FaTCmsI)zZ`0p~B! z^rh*ME<9blC}yUDDAYCG%k$`XK=z0tG&U%thhLqh2UR3bSJm)f1OF*Ld=rg)_uGKb4a{wd>UKQt0eEO%D1q3#4@o9 zOMVH@ObmHXP2{aCZ^;;nUZ?2GSKK8&!0A*~{(JQKe_bPaYxwst4*xz4mQVFHT^9ZW z0>qO15dmU*{y71Mz+uchH_c9797OTo0X3p1!sP*n zSLNU}5-{{5)kt~Cu-A}@p&ThRHPl8595IBpAQX*6=NEA_P?m{-Ft&-jIRx292v%iz z)ZawBUk0zFyF-y6*6dVssY$LQ@+vF_dmeGsPcf4)5d!89IR&;v4vxcppR=l!l95Q} zIz)#|z}RH&kb&tIQwag979+Zjv^>+1q+K#GPDD|LL}6=%h>!j&28@XO3W#UW^?vqB zPvcJvi09CSLU@uw*EPD*Lpb*P!Ml<5iG^4k!heJOz!{M0QHKbVK*;Bzwu@OAj=3zC zva+6)Wq1YnaH~fe9PgRIEHDhTmL&D{!5n5ZlR2j20bo9Bo5|V`K0(`pY+FR0`sIOl zMAjd{^Mp27LLY0W#km%(p@dUK(Dn;@jyNi@j!LZMr?6(GuvR42io{xF*5DY@8nE*% z2jPl;L>Pzi1!f#ojwyrV451#Bi^!jN@XMI?i_)Hoz`{=lCm;MUE-p(Ey$stfwQh>*MHKcbv;8@@xi4+wk+AWZIuje!3SRWJfS0dNa#zjKHA zGSJCAstyQ{B5s`RtB5K{1YStAwpBkE4~m;XKiDfdVgxL%5G1!TyzUE-kaTf({J~-eYY$hWhP!GR!y<#ET%#JO%^52L`2pq2XgbW zSq7EI-DDxHr!h1CF^jP5kCsi7#aCwJs8jKm*|b7NU(%>ck;2^G{h-;{jw3g}G0qCL zDb&_v8gg0FrKC}s1E!5U;3ptibOk`sE7~j^t66PcpM~H;f`e4lSCL234Tv*`YT+iW zs+xKc>2g-PtS;;Qr32a^6SWf^K&}!QE>gmU$l!TG&y=x>yt+qFLy%LKQpfm29~c7| zDXBpN*`snY3Cxl5vn3gBwT8L$VS^qsam$nsmG3CuRYH<%PqigE+yp$B;3g2;^z~QP zU#;4yYW*=PQq@uLys`7{@eZm*j^ZKQXZ!a7y+tA>M$#B70$v!oq5dhlmzJ z`>3^B2vY+(OocwtWKfs%quna52b>y?QBqPx$hk$}Nl~|p_@PU|y|+`T857X43;Qy}Y% zk~{_44d_QU8bJ*@R-QwmF5^EiJRltoXB7Ih2vZu*i%h+w+eAWh-o}3#Xd;x#f)m>#dx%sZxX%dF_9ue zQS{24zI4y4{pSv+`cwV!KTEFf|0@gI^C%|hdBXAN)qNM?Ur!W0@9wVOPFpy=JjzI? z42Pw9S_&_zou#t=GeY@u0>30MBCyXrGWR0YRRKt3MN0rM=XKk%Z#l0!XB^wPXqRl$ z&P$vcnkPTHr&o}oMSy3Y>FIWf@;l^!laNcF)2R>r^ejp4BGH%7t1<~M*oWw;f*_n$ zU)7~(5jw2zRLJhHUoM^Yd5x$~%u4_3puhC=oa>q1*{T}+FMz`TlE7aPkTlvKZ=W82 zjn=C)D6Q94IhEnI%~2=1RjGc^x{+I!|3hTm-;KLcF;EA0W*6a-_0%+x!D-sAaPHv$ y2BrUnLcS~T*Jvb*z*z!x2y&*wj!4*-QZ~3uV~x)VmpTg=X!h#t_muA`C;kTrx{sy+ literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/pix2pixHD_model.cpython-38.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/__pycache__/pix2pixHD_model.cpython-38.pyc new file mode 100755 index 0000000000000000000000000000000000000000..af3bee1f3826458a6ee7793c10bc0f40cbd7549f GIT binary patch literal 9557 zcmbta-IE;GRqyZV?)lhHt#+lAJd&}Zv8|OQ?AUP>S(4XUnb?aH$xdkOG`(|sW@mP$ zXLP$)vNk;=um&d~B1#HU6!0(uRDt0ipo&+DDqeV`iu*=ERZ36rKowLTFu!xVXLokB z*j3QG_x8E>p8Iv~x#z3r-E!Gg@Mu5$_P752nxgy*6(%18g=FV`gQ&rV9JLfVYB;P6*g}vl}$ScY3|fV-?L*b0dhuw|gw? 
z5NZ@A_j`ORu4~jge`u>V((6d^IrM8P11(jC%19aLsj{kkaBE#lq~ct$e#CrUrH@zj5J_d`Py`5U@ZsM)G!8i;l@BsPfqF#XR#LxtDeJJ%UEjx^EuIZV15pEhM77z0(+ekA3P>nKGu=( zoaRRGVJmJasQ(#i@%;O1wRvI0t({O9_gdk7p+$XBi0^KPAyu&;354DaJ`{Srzb(vY zyW4*ttf1cyBPPs%^WcFnlOC^c3b(;~T|dJx<0PnW38TKv0#RzkesC{nb%+&2@n)wN zBzM9n?r~wu(3~)`eSNyGZY~>+3!XyS7jn{x(r!JEZ9ql!wxPvT|7af;eWg0Fk?g2CK#+7Ck-X>dOHgi5wJ&T=S zE-L~mu#>FB%7ARP%qnaOki(|g4Dekx%jWPdvUzq4?-E;J$MG(+MR3?DQF#X&M!K~s z?{J+sZ2S;R>2;)-I8h1)ZJpDmCGBzLjX3NyWFgDWqfHo5&<)Q~Hz^0yown&X*GzZpcHD_?#i3=*ok*KB%Lb3-;2vv<}; zm9mlfJPbNuEx!?Lg_Y#4C?{H(T* zBP}A$fxp%E64HqhSXPc?W2BS7Tn67$PYb31t#&+90qv)Nd%FD;|Bs}~>|JP6n)#8+ zX{Bc#sWH9JqTJ5v<3;Akot+&Nl4@GWdJLaNYlXSM{RL=NV{a7nl5MA0ak5R;L3&Nr zVOE`g9W~E@h6ZN*X>tyu&hM(MoR`1AD!b}$s>yl6+QrHEH9*he{T$|b9x&})ZC@xU zBUl>#3if^xB$yH4%%Kuk5}XhHU4*y(n{w`13R_bud<~r^O44{BwyZzEfCG3?L_BF_bUSz)NrXT zxk^+TzBYp785Cikl+xnx+K4Qns}L~dK_x8@ud|cM4a_mKs~%TIDn{H)OKGL4zD)Se z@O9vMr;;sw6k%ylZma5_W&9{U*jF z%Bk&NVoxJaOr2w@HT^Ft({qis(^mKGq!9B&HlFE zVlgk^_C2m5<2JIL3;R!XJ-0Edbmdz?2et>R?zaOHicr^tdPAtI(t_GKcN5~QN@iV-k^Y!2v8e3f$QI%= z-sLaLIip^7#Cx-^dV0hf!o@I>O8%-elJS}XE|`H%T`C{Btku1aH)Q#S(0lzv6z+w5 zqZh*x?(&#ST(bFbtqsl z2d39Hv(euwjys8Gb>4`@xV08V}jriHL_)4f!O)g1Cx zFG3642C)5Gmn24cuSK$a0X!}1_j?eRyV49Gb1oLsxR0>=8?6t==c30CVo?HFVocz- zqDD{XahP~88k3k`r8OF?RZq|rBY-vm#~XuyeK1xx7A{;6p2YWC$tJocT<8H~!i@W! zRw67oAGpJCG+i`c??P>|W5>4l{d){2giU@HYTmM2f$QOo5{@4L8G97Ysw2RSlSxMwb7#ZCa{@ zHdmcT4GhjtZLO-oUC|fSS@f8fXdw*;##JVnZ`MhEkDm$vy7tnp`UxyX=p&^K+kQ*q?~|uLG~iWV zOSQH+)Zt0n&`TNQq*Zc(Dp%qI!to8}KLu-|ISmzUXhgK=^ zijwxQI=3`fG4rWS5fPl4Bi|0FKxG&Eqjxc>q=?F}0%AKhU?+ziz-|t^fQva?l+*&> zVC)F>u*K<=U|AU`S$icbn$KGITL4)Y-^REG#bYX(WUoh71xj zB?ytli6mnq5JY=_5H%D}!5_vL|B=YSSlZ6XL&9cq_Bxp>xLKOeT;7`^SkJoW%u6mYAt| zq;xQk#HJ|ZQIf|>N@H0F{?#LI?-u^AGs#h#U9g$ape}r-PZ_!{*rkv>}@Q*3`9%bLB zjCh*=kg_6WM{(-RB(L~M7$<)N)R@8uV|FE0&DfN}b{U&a>toh*q49FI)OM<$!H9D3 zI!TW4Lwb+Ok{fRTNTB&BA~n=T3Op}FwIK0~ROgpLcWA=YKpfiyYz{%w5n@z1jQZOM z_{*4;^luQK(OIioD`YmfjsUE%7z}uXR|Cu>OhkhD1O5$y3-A=~2b|TcjFdz+&m~&q z62vC^hAc|2l!*YCu^7>FrKy>XBn^>0YebY}WE93#gfQtpW5Afir2+yPbbp_`vy=D- zECf1OIzCCItI|Rk!js>R+l{REErj3@{Tny~cR;#D9U?9QDegcom+~?^aak_sWj!y; zcna|0Nsl!6+EasRU>ImE%M;KCGw}J%bcX5p5-^*$P33Kfm!NGg>P^gYCIt@arw-Lq?R>h3b)QL3U?peMg>*KZM;xYr(atj^q7yBr z74)2jMkj92n=WpMvwo+h^4|wX-IP~_{|KN|@J#@CrmsoWUOUDgqQuuJ`#s7?hw-0M z7E(rcCsXl1CP;b*?-6u|vUiaQlLt`~Dw`@cDf1{{WNIkqS?0%aDk`j-kpjVO?G*jxA@F9mhL)q;>%Vn=9 zgSz=`H-k=pGw>?%i_D%giB(eoIuBit8Z z{n7Gi@-WJj9CaehGM!b(AWIfpDK*FuzaO@mn@Q|Bt7A~8EupserXkNXTS^v;Ib^ZO z1AYvW#cv@~^r|)u&t_Vi)u$n@kl^=}^b>%Qba+xQVX~G*NdMEHYO3|A1s^JdDn^E} zs4nU|3x{+<2UE#OYziwW)ovC)qH!pe&^J5#d1 zi;7Hv6n%f}v-|umszrgqAUt6E_XEA2qYi&S*&kB&N0jX=VX2t^nCcEG3rCuu(8S?M z*km#JgbLixs;Xsg{jAc@)l8n~?KuD0tf;$NTptQA-3_r^{d{lm- z&R__{kMX2BHP-p77>8e>?90e%rdNuCd!e64X|nB85Gj)fBqI>dlZ5zyY7cGIpw8!M zm$XntTDVVq3nQEZpPp!ACX(Ek2LRk4wVd>7XvsQ1mlBUabCLD1OEFDmak(5P6o-&S0;mxh?L&1;`!;2? 
zW&W$k#-#mqsyIa2x$Qo@r;wsepa1}Ys;61{5#VqW`!g}z4Z=g3% zk%^KFBH`Bdw)^p6!sega^bTiP+%StjW!Q&FJ)cu-ycnMr^F^ZdEGAMQ8sXvP$p+PT zo<5r52m0feB{%o~E5i1DiUIn*aDDprzKw9NFG{|DcRT20E&O@f97Q%|_$yP`Qff&x zEw%QaQ0t#k_Ggs+Ic58tBcbzD_bf8<5aI=7u;_K$vhTRBxaVBky=<3l({?0I9nA+G zx}4XL;(26Geypb}B`R-{lT8vX`>0NR#*Yqw((6c4_NDYanIsoXMD$ic7*4yW=~Au; z9Y%O2W%u_Xw?O;7L8m~>O20waSvWZFO0Im?YX<)dpzu#9`%B6st@g*;r^{cV^*Rko zYqnKRWq5FO(7a3;MLC4li@l2cy~s+io%Ez;pbp;DHo_t+nXMv&(zI3KZs0dU*$*n@ v_XPeW8i_=r{6R$adjgv2sG}10m5d#((pcm3!o|)51Dbte`a8;Zl+*tMjr)dX literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/base_model.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/base_model.py new file mode 100755 index 0000000..17098d7 --- /dev/null +++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/base_model.py @@ -0,0 +1,94 @@ +### Copyright (C) 2017 NVIDIA Corporation. All rights reserved. +### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). +import os +import torch +import sys + +class BaseModel(torch.nn.Module): + def name(self): + return 'BaseModel' + + def initialize(self, opt): + self.opt = opt + self.gpu_ids = opt.gpu_ids + self.isTrain = opt.isTrain + self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor + self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) + + def set_input(self, input): + self.input = input + + def forward(self): + pass + + # used in test time, no backprop + def test(self): + pass + + def get_image_paths(self): + pass + + def optimize_parameters(self): + pass + + def get_current_visuals(self): + return self.input + + def get_current_errors(self): + return {} + + def save(self, label): + pass + + # helper saving function that can be used by subclasses + def save_network(self, network, network_label, epoch_label, gpu_ids): + save_filename = '%s_net_%s.pth' % (epoch_label, network_label) + save_path = os.path.join(self.save_dir, save_filename) + torch.save(network.cpu().state_dict(), save_path) + if len(gpu_ids) and torch.cuda.is_available(): + network.cuda() + + # helper loading function that can be used by subclasses + def load_network(self, network, network_label, epoch_label, save_dir=''): + save_filename = '%s_net_%s.pth' % (epoch_label, network_label) + print (save_filename) + if not save_dir: + save_dir = self.save_dir + save_path = os.path.join(save_dir, save_filename) + if not os.path.isfile(save_path): + print('%s not exists yet!' 
% save_path) + if network_label == 'G': + raise('Generator must exist!') + else: + #network.load_state_dict(torch.load(save_path)) + try: + network.load_state_dict(torch.load(save_path)) + except: + pretrained_dict = torch.load(save_path) + model_dict = network.state_dict() + try: + pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} + network.load_state_dict(pretrained_dict) + if self.opt.verbose: + print('Pretrained network %s has excessive layers; Only loading layers that are used' % network_label) + except: + print('Pretrained network %s has fewer layers; The following are not initialized:' % network_label) + for k, v in pretrained_dict.items(): + if v.size() == model_dict[k].size(): + model_dict[k] = v + + if sys.version_info >= (3,0): + not_initialized = set() + else: + from sets import Set + not_initialized = Set() + + for k, v in model_dict.items(): + if k not in pretrained_dict or v.size() != pretrained_dict[k].size(): + not_initialized.add(k.split('.')[0]) + + print(sorted(not_initialized)) + network.load_state_dict(model_dict) + + def update_learning_rate(): + pass diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/base_model.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/base_model.pyc new file mode 100755 index 0000000000000000000000000000000000000000..8a4e932a00433818a13b448fba0af16a4daf2d3e GIT binary patch literal 4999 zcmd5=TW=dx5T4^(Vkc?SLJMgtaEdC-r42&B15}7!pa>}ewnc@Avf6r2lC{^n?w;eM zfkZ;IPxRF(}N3Y0gMdFHBBfYBhYO;o*s@xa3C-TK=or|w`YS<|G6Q32|1bR7g zb~%r2y3evgDjxSOfCV#UfL*w|;dZN4vLR9%GvMYmr8Fe2D>5Z8Trxa;QJgrDe z;-(@q61NnYl^FP}+=o7M63;2|hy>R=M<7Jk0PHtTW zk?y_#So+M_!Y%cS#3y~1529@8YQB}Fc@(>)^<*$SKTJpKN#>TW*wo&>v>dtq`42x` z;`g;nD`9L0`O<*4a!a>iwovg4!);}lMFTrcRuoB0mtTD7L6tcMvSVaNBvhnV;(+j1 zMd=DbFWwTb9_-a6Ybf5(3BaeMH!Z!Uu4*Zqk$dHGSW)0%+F+$K2Z&8X@}b8fk@evy zOk$^a;%*dClKDC(*xWPuLH$}Juydnk#Y^NsoP?1xe*OXq1 z!+7O_CUKLElPs;W;#fP*xiCX~oO2h3ext*Z6R_k2ip>&ZTNALf2Fx>Br@JUAoaKDv zSund^$*Ii(>gKsFabdI>C9rsEJEWFgAO?n8dC~XeKgL7#j8=3z%)1-qd!QMP(`d$U zEdd9fK`kq!Fp(UZJE@h*F_!sNzL)iR-aKp0n9~N1Lwi9Vb26o`;&Fc@oayGTVPhtmjx>e~{q(E+mdX7-8uU5bY2F>WOoLFq1Du}yCF9jYjEq+ z3C=h$Q_`Oj_bWM-j6s=@qAJA$vjhE(liGWVeLu9um0*F&-+93QoBnCR91!3Qn6XZN zI{eUlo*DT(W-jF^`j->unC8y8c9#3Ly_-1i+S}GIA__#VIrbpeY-WoHoo{Y5d79>1 z+>Tq&X9wMS{B}Z1!Uq7nw3Y@W?Yi}9(+k>$A581ezh!>?d(PFHOyZdD^uj_V!h>Y1 z*;wy390*Qh7p-oIw*#j)7QH!b+TwPO?wc{<(7|YqcH2(VGX`CfXXwn=@^Sz|>P+ih z@G`fg=usT&)^3<4Ud7@?%f=n%xAKPc95J>;L7PC!#Uin@04n})-)R~}!HXPaEa@oB z2r2e1J+bmG!;?WCyae!gLr(3XCLV{>8)SJIK+L?Un+C34C(QBk5>~C6Q>JQ;f>uS! zpTqY&u%=nS+p!Ahm3h;wESQd2tjw4$cj1S+T)|WR9`KLuiW3e_c!%I3z=5`kJzkXqFw~~YodVJ@&1!QBPYuuBU#&Sk`%*`{96gGPy^h(M g8M917e_6HH-40cL^~A-}_V4mkI*P|%1ZFFL0VQ8$6951J literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/models.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/models.py new file mode 100755 index 0000000..0a59a6a --- /dev/null +++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/models.py @@ -0,0 +1,20 @@ +### Copyright (C) 2017 NVIDIA Corporation. All rights reserved. +### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
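The load_network helper above is deliberately tolerant of checkpoints that do not match the current architecture: it first attempts a strict load, then a load restricted to keys present in both state dicts (which only succeeds when the checkpoint merely carries extra layers), and finally copies the tensors whose shapes agree while printing the top-level modules that stay uninitialized. Note that raise('Generator must exist!') passes a plain string to raise, which Python rejects with a TypeError; an exception class such as RuntimeError is needed to produce the intended error. A condensed sketch of the same fallback idea, using a hypothetical helper name and written independently of this class:

import torch

def load_partial_state_dict(network, save_path):
    # Keep only checkpoint entries whose name and shape both match the current model.
    checkpoint = torch.load(save_path, map_location='cpu')   # assumed to be a plain state_dict
    model_dict = network.state_dict()
    filtered = {k: v for k, v in checkpoint.items()
                if k in model_dict and v.size() == model_dict[k].size()}
    model_dict.update(filtered)
    network.load_state_dict(model_dict)
    skipped = sorted({k.split('.')[0] for k in model_dict if k not in filtered})
    if skipped:
        print('layers left at their initial values:', skipped)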
+import torch + +def create_model(opt): + if opt.model == 'pix2pixHD': + from .pix2pixHD_model import Pix2PixHDModel, InferenceModel + if opt.isTrain: + model = Pix2PixHDModel() + else: + model = InferenceModel() + + model.initialize(opt) + if opt.verbose: + print("model [%s] was created" % (model.name())) + + if opt.isTrain and len(opt.gpu_ids): + model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids) + + return model diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/models.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/models.pyc new file mode 100755 index 0000000000000000000000000000000000000000..28ee6b5e6e2f668498b648a10e5cdd5ee0b72f0c GIT binary patch literal 833 zcmcgpOK;Oa5T0G9j|7JThp1doE=axJ^4e$;f4nNUh6gf>I z%&I?lCOJd5vz7z2?grHopbX7(4)&0miwHD>io*(Z1S<}ViaKClz>>i=z$$_-hp^;p z-yli^!<(1ItyY!U0 zl=h-_(i8VaRMOL%XlX)n)+}gXnB{AmfA$1vjE~Wbgcm2m3azD1s6DjSjlUb7Q8+nFMDp|w3oR3 NLd#dgv?D@e{ueW1y$b*U literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/networks.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/networks.py new file mode 100755 index 0000000..7aa8df0 --- /dev/null +++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/networks.py @@ -0,0 +1,818 @@ +### Copyright (C) 2017 NVIDIA Corporation. All rights reserved. +### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). +import torch +import torch.nn as nn +import functools +from torch.autograd import Variable +import numpy as np +import torch.nn.functional as F + +############################################################################### +# Functions +############################################################################### +def weights_init(m): + classname = m.__class__.__name__ + if classname.find('Conv2d') != -1: + m.weight.data.normal_(0.0, 0.02) + elif classname.find('BatchNorm2d') != -1: + m.weight.data.normal_(1.0, 0.02) + m.bias.data.fill_(0) + +def get_norm_layer(norm_type='instance'): + if norm_type == 'batch': + norm_layer = functools.partial(nn.BatchNorm2d, affine=True) + elif norm_type == 'instance': + norm_layer = functools.partial(nn.InstanceNorm2d, affine=False) + else: + raise NotImplementedError('normalization layer [%s] is not found' % norm_type) + return norm_layer + +def define_G(input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1, + n_blocks_local=3, norm='instance', gpu_ids=[]): + norm_layer = get_norm_layer(norm_type=norm) + if netG == 'global': + netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer) + elif netG == 'local': + netG = LocalEnhancer(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, + n_local_enhancers, n_blocks_local, norm_layer) + else: + raise('generator not implemented!') + print(netG) + # if len(gpu_ids) > 0: + # assert(torch.cuda.is_available()) + # netG.cuda(gpu_ids[0]) + netG.apply(weights_init) + return netG + +def define_D(input_nc, ndf, n_layers_D, norm='instance', use_sigmoid=False, num_D=1, getIntermFeat=False, gpu_ids=[]): + norm_layer = get_norm_layer(norm_type=norm) + netD = MultiscaleDiscriminator(input_nc, ndf, n_layers_D, norm_layer, use_sigmoid, num_D, getIntermFeat) + print(netD) + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + netD.cuda(gpu_ids[0]) + netD.apply(weights_init) + return netD + +def define_VAE(input_nc, gpu_ids=[]): + netVAE = VAE(19, 32, 32, 1024) + print(netVAE) + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + 
netVAE.cuda(gpu_ids[0]) + return netVAE + +def define_B(input_nc, output_nc, ngf, n_downsample_global=3, n_blocks_global=3, norm='instance', gpu_ids=[]): + norm_layer = get_norm_layer(norm_type=norm) + netB = BlendGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer) + print(netB) + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + netB.cuda(gpu_ids[0]) + netB.apply(weights_init) + return netB + +def print_network(net): + if isinstance(net, list): + net = net[0] + num_params = 0 + for param in net.parameters(): + num_params += param.numel() + print(net) + print('Total number of parameters: %d' % num_params) + +############################################################################## +# Losses +############################################################################## +class GANLoss(nn.Module): + def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0, + tensor=torch.FloatTensor): + super(GANLoss, self).__init__() + self.real_label = target_real_label + self.fake_label = target_fake_label + self.real_label_var = None + self.fake_label_var = None + self.Tensor = tensor + if use_lsgan: + self.loss = nn.MSELoss() + else: + self.loss = nn.BCELoss() + + def get_target_tensor(self, input, target_is_real): + target_tensor = None + if target_is_real: + create_label = ((self.real_label_var is None) or + (self.real_label_var.numel() != input.numel())) + if create_label: + real_tensor = self.Tensor(input.size()).fill_(self.real_label) + self.real_label_var = Variable(real_tensor, requires_grad=False) + target_tensor = self.real_label_var + else: + create_label = ((self.fake_label_var is None) or + (self.fake_label_var.numel() != input.numel())) + if create_label: + fake_tensor = self.Tensor(input.size()).fill_(self.fake_label) + self.fake_label_var = Variable(fake_tensor, requires_grad=False) + target_tensor = self.fake_label_var + return target_tensor + + def __call__(self, input, target_is_real): + if isinstance(input[0], list): + loss = 0 + for input_i in input: + pred = input_i[-1] + target_tensor = self.get_target_tensor(pred, target_is_real) + loss += self.loss(pred, target_tensor) + return loss + else: + target_tensor = self.get_target_tensor(input[-1], target_is_real) + return self.loss(input[-1], target_tensor) + +class VGGLoss(nn.Module): + def __init__(self, gpu_ids): + super(VGGLoss, self).__init__() + self.vgg = Vgg19().cuda() + self.criterion = nn.L1Loss() + self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0] + + def forward(self, x, y): + x_vgg, y_vgg = self.vgg(x), self.vgg(y) + loss = 0 + for i in range(len(x_vgg)): + loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()) + return loss + +############################################################################## +# Generator +############################################################################## +class GlobalGenerator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, + padding_type='reflect'): + assert(n_blocks >= 0) + super(GlobalGenerator, self).__init__() + activation = nn.ReLU(True) + + model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation] + ### downsample + for i in range(n_downsampling): + mult = 2**i + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1), + norm_layer(ngf * mult * 2), activation] + + ### resnet blocks + mult = 2**n_downsampling + for i in range(n_blocks): + 
model += [ResnetBlock(ngf * mult, norm_type='adain', padding_type=padding_type)] + ### upsample + for i in range(n_downsampling): + mult = 2**(n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1), + norm_layer(int(ngf * mult / 2)), activation] + model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()] + self.model = nn.Sequential(*model) + + # style encoder + self.enc_style = StyleEncoder(5, 3, 16, self.get_num_adain_params(self.model), norm='none', activ='relu', pad_type='reflect') + # label encoder + self.enc_label = LabelEncoder(5, 19, 16, 64, norm='none', activ='relu', pad_type='reflect') + + def assign_adain_params(self, adain_params, model): + # assign the adain_params to the AdaIN layers in model + for m in model.modules(): + if m.__class__.__name__ == "AdaptiveInstanceNorm2d": + mean = adain_params[:, :m.num_features] + std = adain_params[:, m.num_features:2*m.num_features] + m.bias = mean.contiguous().view(-1) + m.weight = std.contiguous().view(-1) + if adain_params.size(1) > 2*m.num_features: + adain_params = adain_params[:, 2*m.num_features:] + + def get_num_adain_params(self, model): + # return the number of AdaIN parameters needed by the model + num_adain_params = 0 + for m in model.modules(): + if m.__class__.__name__ == "AdaptiveInstanceNorm2d": + num_adain_params += 2*m.num_features + return num_adain_params + + def forward(self, input, input_ref, image_ref): + fea1, fea2 = self.enc_label(input_ref) + adain_params = self.enc_style((image_ref, fea1, fea2)) + self.assign_adain_params(adain_params, self.model) + return self.model(input) + +class BlendGenerator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=3, norm_layer=nn.BatchNorm2d, + padding_type='reflect'): + assert(n_blocks >= 0) + super(BlendGenerator, self).__init__() + activation = nn.ReLU(True) + + model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation] + ### downsample + for i in range(n_downsampling): + mult = 2**i + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1), + norm_layer(ngf * mult * 2), activation] + + ### resnet blocks + mult = 2**n_downsampling + for i in range(n_blocks): + model += [ResnetBlock(ngf * mult, norm_type='in', padding_type=padding_type)] + + ### upsample + for i in range(n_downsampling): + mult = 2**(n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1), + norm_layer(int(ngf * mult / 2)), activation] + model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Sigmoid()] + self.model = nn.Sequential(*model) + + def forward(self, input1, input2): + m = self.model(torch.cat([input1, input2], 1)) + return input1 * m + input2 * (1-m), m + +# Define the Multiscale Discriminator. 
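The style-conditioning machinery above works in two passes: get_num_adain_params walks the decoder and counts 2 * num_features for every AdaptiveInstanceNorm2d module, which fixes the width of the style encoder's output, and assign_adain_params later slices that flat vector into consecutive (mean, std) chunks, writing them into each AdaIN layer's bias and weight just before self.model(input) runs. A minimal sketch of the slicing convention, using made-up layer sizes:

import torch

adain_feature_sizes = [512, 512, 256]                        # illustrative; the real sizes come from the decoder
num_adain_params = sum(2 * n for n in adain_feature_sizes)   # what get_num_adain_params would report

style_vector = torch.randn(1, num_adain_params)              # stand-in for the style encoder output

offset, per_layer = 0, []
for n in adain_feature_sizes:
    mean = style_vector[:, offset:offset + n]                # becomes that layer's bias
    std = style_vector[:, offset + n:offset + 2 * n]         # becomes that layer's weight
    per_layer.append((mean, std))
    offset += 2 * n
assert offset == num_adain_params

The BlendGenerator above ends in a Sigmoid, so its output m acts as a per-pixel alpha mask and the forward pass returns input1 * m + input2 * (1 - m) together with m itself; the multiscale discriminator defined next applies the same PatchGAN classifier to progressively downsampled copies of its input.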
+class MultiscaleDiscriminator(nn.Module): + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, + use_sigmoid=False, num_D=3, getIntermFeat=False): + super(MultiscaleDiscriminator, self).__init__() + self.num_D = num_D + self.n_layers = n_layers + self.getIntermFeat = getIntermFeat + + for i in range(num_D): + netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat) + if getIntermFeat: + for j in range(n_layers+2): + setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j))) + else: + setattr(self, 'layer'+str(i), netD.model) + + self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False) + + def singleD_forward(self, model, input): + if self.getIntermFeat: + result = [input] + for i in range(len(model)): + result.append(model[i](result[-1])) + return result[1:] + else: + return [model(input)] + + def forward(self, input): + num_D = self.num_D + result = [] + input_downsampled = input + for i in range(num_D): + if self.getIntermFeat: + model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_layers+2)] + else: + model = getattr(self, 'layer'+str(num_D-1-i)) + result.append(self.singleD_forward(model, input_downsampled)) + if i != (num_D-1): + input_downsampled = self.downsample(input_downsampled) + return result + +# Define the PatchGAN discriminator with the specified arguments. +class NLayerDiscriminator(nn.Module): + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False): + super(NLayerDiscriminator, self).__init__() + self.getIntermFeat = getIntermFeat + self.n_layers = n_layers + + kw = 4 + padw = int(np.ceil((kw-1.0)/2)) + sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]] + + nf = ndf + for n in range(1, n_layers): + nf_prev = nf + nf = min(nf * 2, 512) + sequence += [[ + nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw), + norm_layer(nf), nn.LeakyReLU(0.2, True) + ]] + + nf_prev = nf + nf = min(nf * 2, 512) + sequence += [[ + nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw), + norm_layer(nf), + nn.LeakyReLU(0.2, True) + ]] + + sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] + + if use_sigmoid: + sequence += [[nn.Sigmoid()]] + + if getIntermFeat: + for n in range(len(sequence)): + setattr(self, 'model'+str(n), nn.Sequential(*sequence[n])) + else: + sequence_stream = [] + for n in range(len(sequence)): + sequence_stream += sequence[n] + self.model = nn.Sequential(*sequence_stream) + + def forward(self, input): + if self.getIntermFeat: + res = [input] + for n in range(self.n_layers+2): + model = getattr(self, 'model'+str(n)) + res.append(model(res[-1])) + return res[1:] + else: + return self.model(input) + +from torchvision import models +class Vgg19(torch.nn.Module): + def __init__(self, requires_grad=False): + super(Vgg19, self).__init__() + vgg_pretrained_features = models.vgg19(pretrained=True).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + for x in range(2): + self.slice1.add_module(str(x), vgg_pretrained_features[x]) + for x in range(2, 7): + self.slice2.add_module(str(x), vgg_pretrained_features[x]) + for x in range(7, 12): + self.slice3.add_module(str(x), vgg_pretrained_features[x]) + for x in range(12, 21): + 
self.slice4.add_module(str(x), vgg_pretrained_features[x]) + for x in range(21, 30): + self.slice5.add_module(str(x), vgg_pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h_relu1 = self.slice1(X) + h_relu2 = self.slice2(h_relu1) + h_relu3 = self.slice3(h_relu2) + h_relu4 = self.slice4(h_relu3) + h_relu5 = self.slice5(h_relu4) + out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] + return out + +# Define the MaskVAE +class VAE(nn.Module): + def __init__(self, nc, ngf, ndf, latent_variable_size): + super(VAE, self).__init__() + #self.cuda = True + self.nc = nc + self.ngf = ngf + self.ndf = ndf + self.latent_variable_size = latent_variable_size + + # encoder + self.e1 = nn.Conv2d(nc, ndf, 4, 2, 1) + self.bn1 = nn.BatchNorm2d(ndf) + + self.e2 = nn.Conv2d(ndf, ndf*2, 4, 2, 1) + self.bn2 = nn.BatchNorm2d(ndf*2) + + self.e3 = nn.Conv2d(ndf*2, ndf*4, 4, 2, 1) + self.bn3 = nn.BatchNorm2d(ndf*4) + + self.e4 = nn.Conv2d(ndf*4, ndf*8, 4, 2, 1) + self.bn4 = nn.BatchNorm2d(ndf*8) + + self.e5 = nn.Conv2d(ndf*8, ndf*16, 4, 2, 1) + self.bn5 = nn.BatchNorm2d(ndf*16) + + self.e6 = nn.Conv2d(ndf*16, ndf*32, 4, 2, 1) + self.bn6 = nn.BatchNorm2d(ndf*32) + + self.e7 = nn.Conv2d(ndf*32, ndf*64, 4, 2, 1) + self.bn7 = nn.BatchNorm2d(ndf*64) + + self.fc1 = nn.Linear(ndf*64*4*4, latent_variable_size) + self.fc2 = nn.Linear(ndf*64*4*4, latent_variable_size) + + # decoder + self.d1 = nn.Linear(latent_variable_size, ngf*64*4*4) + + self.up1 = nn.UpsamplingNearest2d(scale_factor=2) + self.pd1 = nn.ReplicationPad2d(1) + self.d2 = nn.Conv2d(ngf*64, ngf*32, 3, 1) + self.bn8 = nn.BatchNorm2d(ngf*32, 1.e-3) + + self.up2 = nn.UpsamplingNearest2d(scale_factor=2) + self.pd2 = nn.ReplicationPad2d(1) + self.d3 = nn.Conv2d(ngf*32, ngf*16, 3, 1) + self.bn9 = nn.BatchNorm2d(ngf*16, 1.e-3) + + self.up3 = nn.UpsamplingNearest2d(scale_factor=2) + self.pd3 = nn.ReplicationPad2d(1) + self.d4 = nn.Conv2d(ngf*16, ngf*8, 3, 1) + self.bn10 = nn.BatchNorm2d(ngf*8, 1.e-3) + + self.up4 = nn.UpsamplingNearest2d(scale_factor=2) + self.pd4 = nn.ReplicationPad2d(1) + self.d5 = nn.Conv2d(ngf*8, ngf*4, 3, 1) + self.bn11 = nn.BatchNorm2d(ngf*4, 1.e-3) + + self.up5 = nn.UpsamplingNearest2d(scale_factor=2) + self.pd5 = nn.ReplicationPad2d(1) + self.d6 = nn.Conv2d(ngf*4, ngf*2, 3, 1) + self.bn12 = nn.BatchNorm2d(ngf*2, 1.e-3) + + self.up6 = nn.UpsamplingNearest2d(scale_factor=2) + self.pd6 = nn.ReplicationPad2d(1) + self.d7 = nn.Conv2d(ngf*2, ngf, 3, 1) + self.bn13 = nn.BatchNorm2d(ngf, 1.e-3) + + self.up7 = nn.UpsamplingNearest2d(scale_factor=2) + self.pd7 = nn.ReplicationPad2d(1) + self.d8 = nn.Conv2d(ngf, nc, 3, 1) + + self.leakyrelu = nn.LeakyReLU(0.2) + self.relu = nn.ReLU() + #self.sigmoid = nn.Sigmoid() + self.maxpool = nn.MaxPool2d((2, 2), (2, 2)) + + def encode(self, x): + h1 = self.leakyrelu(self.bn1(self.e1(x))) + h2 = self.leakyrelu(self.bn2(self.e2(h1))) + h3 = self.leakyrelu(self.bn3(self.e3(h2))) + h4 = self.leakyrelu(self.bn4(self.e4(h3))) + h5 = self.leakyrelu(self.bn5(self.e5(h4))) + h6 = self.leakyrelu(self.bn6(self.e6(h5))) + h7 = self.leakyrelu(self.bn7(self.e7(h6))) + h7 = h7.view(-1, self.ndf*64*4*4) + return self.fc1(h7), self.fc2(h7) + + def reparametrize(self, mu, logvar): + std = logvar.mul(0.5).exp_() + #if self.cuda: + eps = torch.cuda.FloatTensor(std.size()).normal_() + #else: + # eps = torch.FloatTensor(std.size()).normal_() + eps = Variable(eps) + return eps.mul(std).add_(mu) + + def decode(self, z): + h1 = self.relu(self.d1(z)) + h1 
= h1.view(-1, self.ngf*64, 4, 4) + h2 = self.leakyrelu(self.bn8(self.d2(self.pd1(self.up1(h1))))) + h3 = self.leakyrelu(self.bn9(self.d3(self.pd2(self.up2(h2))))) + h4 = self.leakyrelu(self.bn10(self.d4(self.pd3(self.up3(h3))))) + h5 = self.leakyrelu(self.bn11(self.d5(self.pd4(self.up4(h4))))) + h6 = self.leakyrelu(self.bn12(self.d6(self.pd5(self.up5(h5))))) + h7 = self.leakyrelu(self.bn13(self.d7(self.pd6(self.up6(h6))))) + return self.d8(self.pd7(self.up7(h7))) + + def get_latent_var(self, x): + mu, logvar = self.encode(x) + z = self.reparametrize(mu, logvar) + return z, mu, logvar.mul(0.5).exp_() + + def forward(self, x): + mu, logvar = self.encode(x) + z = self.reparametrize(mu, logvar) + res = self.decode(z) + + return res, x, mu, logvar + +# style encode part +class StyleEncoder(nn.Module): + def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, pad_type): + super(StyleEncoder, self).__init__() + self.model = [] + self.model_middle = [] + self.model_last = [] + self.model += [ConvBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)] + for i in range(2): + self.model += [ConvBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] + dim *= 2 + for i in range(n_downsample - 2): + self.model_middle += [ConvBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] + self.model_last += [nn.AdaptiveAvgPool2d(1)] # global average pooling + self.model_last += [nn.Conv2d(dim, style_dim, 1, 1, 0)] + + self.model = nn.Sequential(*self.model) + self.model_middle = nn.Sequential(*self.model_middle) + self.model_last = nn.Sequential(*self.model_last) + + self.output_dim = dim + + self.sft1 = SFTLayer() + self.sft2 = SFTLayer() + + def forward(self, x): + fea = self.model(x[0]) + fea = self.sft1((fea, x[1])) + fea = self.model_middle(fea) + fea = self.sft2((fea, x[2])) + return self.model_last(fea) + +# label encode part +class LabelEncoder(nn.Module): + def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, pad_type): + super(LabelEncoder, self).__init__() + self.model = [] + self.model_last = [nn.ReLU()] + self.model += [ConvBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)] + self.model += [ConvBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] + dim *= 2 + self.model += [ConvBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation='none', pad_type=pad_type)] + dim *= 2 + for i in range(n_downsample - 3): + self.model_last += [ConvBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)] + self.model_last += [ConvBlock(dim, dim, 4, 2, 1, norm=norm, activation='none', pad_type=pad_type)] + self.model = nn.Sequential(*self.model) + self.model_last = nn.Sequential(*self.model_last) + self.output_dim = dim + + def forward(self, x): + fea = self.model(x) + return fea, self.model_last(fea) + +# Define the basic block +class ConvBlock(nn.Module): + def __init__(self, input_dim ,output_dim, kernel_size, stride, + padding=0, norm='none', activation='relu', pad_type='zero'): + super(ConvBlock, self).__init__() + self.use_bias = True + # initialize padding + if pad_type == 'reflect': + self.pad = nn.ReflectionPad2d(padding) + elif pad_type == 'replicate': + self.pad = nn.ReplicationPad2d(padding) + elif pad_type == 'zero': + self.pad = nn.ZeroPad2d(padding) + else: + assert 0, "Unsupported padding type: {}".format(pad_type) + + # initialize normalization + norm_dim = output_dim + if norm == 'bn': + self.norm = nn.BatchNorm2d(norm_dim) + elif 
norm == 'in': + #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) + self.norm = nn.InstanceNorm2d(norm_dim) + elif norm == 'ln': + self.norm = LayerNorm(norm_dim) + elif norm == 'adain': + self.norm = AdaptiveInstanceNorm2d(norm_dim) + elif norm == 'none' or norm == 'sn': + self.norm = None + else: + assert 0, "Unsupported normalization: {}".format(norm) + + # initialize activation + if activation == 'relu': + self.activation = nn.ReLU(inplace=True) + elif activation == 'lrelu': + self.activation = nn.LeakyReLU(0.2, inplace=True) + elif activation == 'prelu': + self.activation = nn.PReLU() + elif activation == 'selu': + self.activation = nn.SELU(inplace=True) + elif activation == 'tanh': + self.activation = nn.Tanh() + elif activation == 'none': + self.activation = None + else: + assert 0, "Unsupported activation: {}".format(activation) + + # initialize convolution + if norm == 'sn': + self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) + else: + self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) + + def forward(self, x): + x = self.conv(self.pad(x)) + if self.norm: + x = self.norm(x) + if self.activation: + x = self.activation(x) + return x + +class LinearBlock(nn.Module): + def __init__(self, input_dim, output_dim, norm='none', activation='relu'): + super(LinearBlock, self).__init__() + use_bias = True + # initialize fully connected layer + if norm == 'sn': + self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias)) + else: + self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) + + # initialize normalization + norm_dim = output_dim + if norm == 'bn': + self.norm = nn.BatchNorm1d(norm_dim) + elif norm == 'in': + self.norm = nn.InstanceNorm1d(norm_dim) + elif norm == 'ln': + self.norm = LayerNorm(norm_dim) + elif norm == 'none' or norm == 'sn': + self.norm = None + else: + assert 0, "Unsupported normalization: {}".format(norm) + + # initialize activation + if activation == 'relu': + self.activation = nn.ReLU(inplace=True) + elif activation == 'lrelu': + self.activation = nn.LeakyReLU(0.2, inplace=True) + elif activation == 'prelu': + self.activation = nn.PReLU() + elif activation == 'selu': + self.activation = nn.SELU(inplace=True) + elif activation == 'tanh': + self.activation = nn.Tanh() + elif activation == 'none': + self.activation = None + else: + assert 0, "Unsupported activation: {}".format(activation) + + def forward(self, x): + out = self.fc(x) + if self.norm: + out = self.norm(out) + if self.activation: + out = self.activation(out) + return out + +# Define a resnet block +class ResnetBlock(nn.Module): + def __init__(self, dim, norm_type, padding_type, use_dropout=False): + super(ResnetBlock, self).__init__() + self.conv_block = self.build_conv_block(dim, norm_type, padding_type, use_dropout) + + def build_conv_block(self, dim, norm_type, padding_type, use_dropout): + conv_block = [] + conv_block += [ConvBlock(dim ,dim, 3, 1, 1, norm=norm_type, activation='relu', pad_type=padding_type)] + conv_block += [ConvBlock(dim ,dim, 3, 1, 1, norm=norm_type, activation='none', pad_type=padding_type)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + out = x + self.conv_block(x) + return out + +class SFTLayer(nn.Module): + def __init__(self): + super(SFTLayer, self).__init__() + self.SFT_scale_conv1 = nn.Conv2d(64, 64, 1) + self.SFT_scale_conv2 = nn.Conv2d(64, 64, 1) + self.SFT_shift_conv1 = nn.Conv2d(64, 64, 1) + self.SFT_shift_conv2 = nn.Conv2d(64, 64, 1) + 
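ConvBlock and LinearBlock above are small factories: the pad_type, norm and activation strings select the padding module, normalization layer and non-linearity, and the special value norm='sn' wraps the convolution (or linear layer) in SpectralNorm instead of appending a normalization module. ResnetBlock then chains two such ConvBlocks behind an identity skip connection, and the SFTLayer begun here is completed by the forward method that follows, which maps its condition input to per-pixel scale and shift tensors. A short usage sketch, assuming this networks.py is importable as models.networks:

import torch
from models.networks import ConvBlock, ResnetBlock

x = torch.randn(1, 64, 32, 32)
down = ConvBlock(64, 128, 4, 2, 1, norm='in', activation='lrelu', pad_type='reflect')
res = ResnetBlock(128, norm_type='in', padding_type='reflect')
y = res(down(x))    # ConvBlock halves the spatial size; the residual block preserves it
print(y.shape)      # torch.Size([1, 128, 16, 16])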
+ def forward(self, x): + scale = self.SFT_scale_conv2(F.leaky_relu(self.SFT_scale_conv1(x[1]), 0.1, inplace=True)) + shift = self.SFT_shift_conv2(F.leaky_relu(self.SFT_shift_conv1(x[1]), 0.1, inplace=True)) + return x[0] * scale + shift + +class ConvBlock_SFT(nn.Module): + def __init__(self, dim, norm_type, padding_type, use_dropout=False): + super(ResnetBlock_SFT, self).__init__() + self.sft1 = SFTLayer() + self.conv1 = ConvBlock(dim ,dim, 4, 2, 1, norm=norm_type, activation='none', pad_type=padding_type) + + def forward(self, x): + fea = self.sft1((x[0], x[1])) + fea = F.relu(self.conv1(fea), inplace=True) + return (x[0] + fea, x[1]) + +class ConvBlock_SFT_last(nn.Module): + def __init__(self, dim, norm_type, padding_type, use_dropout=False): + super(ResnetBlock_SFT_last, self).__init__() + self.sft1 = SFTLayer() + self.conv1 = ConvBlock(dim ,dim, 4, 2, 1, norm=norm_type, activation='none', pad_type=padding_type) + + def forward(self, x): + fea = self.sft1((x[0], x[1])) + fea = F.relu(self.conv1(fea), inplace=True) + return x[0] + fea + +# Definition of normalization layer +class AdaptiveInstanceNorm2d(nn.Module): + def __init__(self, num_features, eps=1e-5, momentum=0.1): + super(AdaptiveInstanceNorm2d, self).__init__() + self.num_features = num_features + self.eps = eps + self.momentum = momentum + # weight and bias are dynamically assigned + self.weight = None + self.bias = None + # just dummy buffers, not used + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + + def forward(self, x): + assert self.weight is not None and self.bias is not None, "Please assign weight and bias before calling AdaIN!" + b, c = x.size(0), x.size(1) + running_mean = self.running_mean.repeat(b) + running_var = self.running_var.repeat(b) + + # Apply instance norm + x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) + + out = F.batch_norm( + x_reshaped, running_mean, running_var, self.weight, self.bias, + True, self.momentum, self.eps) + + return out.view(b, c, *x.size()[2:]) + + def __repr__(self): + return self.__class__.__name__ + '(' + str(self.num_features) + ')' + +class LayerNorm(nn.Module): + def __init__(self, num_features, eps=1e-5, affine=True): + super(LayerNorm, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + + if self.affine: + self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) + self.beta = nn.Parameter(torch.zeros(num_features)) + + def forward(self, x): + shape = [-1] + [1] * (x.dim() - 1) + # print(x.size()) + if x.size(0) == 1: + # These two lines run much faster in pytorch 0.4 than the two lines listed below. 
+ mean = x.view(-1).mean().view(*shape) + std = x.view(-1).std().view(*shape) + else: + mean = x.view(x.size(0), -1).mean(1).view(*shape) + std = x.view(x.size(0), -1).std(1).view(*shape) + + x = (x - mean) / (std + self.eps) + + if self.affine: + shape = [1, -1] + [1] * (x.dim() - 2) + x = x * self.gamma.view(*shape) + self.beta.view(*shape) + return x + +def l2normalize(v, eps=1e-12): + return v / (v.norm() + eps) + +class SpectralNorm(nn.Module): + """ + Based on the paper "Spectral Normalization for Generative Adversarial Networks" by Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida + and the Pytorch implementation https://github.com/christiancosgrove/pytorch-spectral-normalization-gan + """ + def __init__(self, module, name='weight', power_iterations=1): + super(SpectralNorm, self).__init__() + self.module = module + self.name = name + self.power_iterations = power_iterations + if not self._made_params(): + self._make_params() + + def _update_u_v(self): + u = getattr(self.module, self.name + "_u") + v = getattr(self.module, self.name + "_v") + w = getattr(self.module, self.name + "_bar") + + height = w.data.shape[0] + for _ in range(self.power_iterations): + v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data)) + u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data)) + + # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data)) + sigma = u.dot(w.view(height, -1).mv(v)) + setattr(self.module, self.name, w / sigma.expand_as(w)) + + def _made_params(self): + try: + u = getattr(self.module, self.name + "_u") + v = getattr(self.module, self.name + "_v") + w = getattr(self.module, self.name + "_bar") + return True + except AttributeError: + return False + + + def _make_params(self): + w = getattr(self.module, self.name) + + height = w.data.shape[0] + width = w.view(height, -1).data.shape[1] + + u = nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) + v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) + u.data = l2normalize(u.data) + v.data = l2normalize(v.data) + w_bar = nn.Parameter(w.data) + + del self.module._parameters[self.name] + + self.module.register_parameter(self.name + "_u", u) + self.module.register_parameter(self.name + "_v", v) + self.module.register_parameter(self.name + "_bar", w_bar) + + def forward(self, *args): + self._update_u_v() + return self.module.forward(*args) diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/networks.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/networks.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7574e5834aeefefb35484a9942f01a39b43cbc40 GIT binary patch literal 36361 zcmeI5d30RYec#^<76KG0QY1(aq83n_DOnUnQX(aJkwj7=Wl4zgD4Dh#dNQ0DfFl9} zaArV(vgFt%ouzS_X7x!cdHWB)L*bUfz_{N0T~<$LtwS=BRHxxk3q>m8@~2qHq%$Ab+76o2TlJ~wmk<_;qB5ukfb|m#} zg^2BW#LlEXQHa=)M@%O5orQ?WJYrW;-&Kg1$|I)SGdCymHzy0blb3hXYrB(1;% zekiFw@DH#j^Vt!+HO|Q&Npfbfzp#9u47!C zFT;7Aap13%yirPCH^o!XWr(MooF7Wsk2%3m^2%`X%82N>KAc=GC#BYtz0@Ox)E`)# z+GMKKs$|qP!q0};)o!YAw&U8oBq{Y_7gLSta+K->P4@~^viC=#08z1l9K)}&z zw{~u3vAuAxo~8KB$D2y%Qo7^#DwW!NwbQ9o{OFZRtGdwOW|*kiW~=UsUTrkz&UGDD zuXd~54MesmwK^XO&o--_?uc{D&sX-U%ev~j1=ry6RqS4Z5<8tQ!ngmWPNUt~zfi61 zKfd^CYksj>@9dvzE-dX^nqQu4wmSQdHRc;WziP{R@lr z#(ZagtI>URv3)mE)Pz%G}8y ze#9hmYOnfg#yxgcBjir5&eGqF^hTmOcjH6{H(#rEn~SaK`Rb)cd-{EMcg{>VJJYSj z?)2>9a;x4S$Jympt-H85-|^@yRomTWb)JS(P%HI_=pkQhFE_fX;+d=qf;ilir>paw zMt3`rGmG747MA843yoH{QGcr4UTnvpieS{PO83%Iqr0(p=f(|qbk*p&Mz`YXUEvNw 
z^6}lx81F7`Ej7ifO|`Qr3aqJ(;sk|wgg=UhQDuZ*9|P)3Z){9n_tYn7Z6tYpV{&;kd1EMP9d^nX^JYTdu1z_byuQKd$DH2WtRWvtPMpMnhD<=P_Ylxy zB1m_sJ>urM`Ngx0m_p|lYmCLs3T(6*?P_+$Va;P5jTP zn5SCjMCxr9TwPk4zm$qG_Nr4dW>{LLTWjXk#pP~xr+#eB&6?@qvJ)nZR;3Q7cB*Pe zC2O#1Z&l9H=JTCi*i8=eV^kVh13IR=Jf$O}bO_2sJGZo4Y1TXG6uAdXO=DD1Z;0V4 zCvL;vnZQZPTcD!bxVDwHl}Ae>!<$Prr4$kAzX|ArcrpSSG69XPCYrGx-2eg`k~hYa z*T)%tQHyb0Vq!RHFC^WbjJQno?-dP*RK^ARq$Jr?hWZNIps~_DH>dV$Y-UV3O?=5- z5qFTnB);dD=ex}gL*F>gzjkw>*%Ad&ACv`0i^8}Ec7zDEWguNB+3ZxR7pu*ANj?wN zlO6C`;9~$tZzoNVj=CeA#&MCY`fMOU#a!8`98WcPmOsm#My1o7TUcz?U2SW5p>o`` z2`fJXxU?6ZZdAMJy(&?|-Xpi;Dq?`LLZg|D|MC6!J6ly>X$zx2!szcPO_UgW!KQjm zJ$8(XVWNo!55`A-P{45V24h>+7&CsM3JiOUUlK;Z^o;0bGC!Hn2O47sk`lR)k8!yN z2u%TSb6QTq_{0DUAKcmRe)6gGes0ozavqSQ=BE$hm@GUl?oWdXoJ*Dgcc$ne>r5(p zl)DU;cjTQ}lgc16)qi*82k>NUghB)kL*n#Jj=e)>BUlFPg{8(a9+G5L&x?m%S1L&I1wB50%STjWP#PQ3xt+lLCYa+gAx zxm`|@<7al73wp#&&=Hbc*q@vx&54tY1oXZWPtxmp6Lv?~`{H7^IzJ6GoJGS~oSjB@ zL0JSFI*(1?UEeE^GQc!DJ-y8+G2iTT9o|ph!%f~t5{t_T*tUN=W!V9;5`OA2DBvghsgn4$R8f2 zKcz$XL+g7PM_AT zsI_9vo%x3_rQKkHfjnx=`*yZ^zLDJy6Faj=uc|Zq$to97dll4A>L9`Ai;Y%iv7HKC z-QUljdP@Brq$H1!>F6;BTDNjcr!hZk#YmEDzB5;Cb*0;Mt8M9`{hEuR{mKo&T~|C< zQcF+LsP}Mnw&5h@?WOVjcNATwrs38Ds^dKDzlVqMK&@q@+aES71FT7Mek@r!l-&j` zFuLJkdiw@L4)QKL0eOkCa08T??LrOATJgUzqE1G6hFGn*XhK_I+GOI;+!=Mc?eycl zO20R|y=JAKy>_LL9c7NXQV6tI>3FZwaX+jSZ;AC8Ya#8%h2>_uf!^A#)~yn!;?eXu zIYP{I1}7+?`C22j=JSlgxsr6Joag1d3umu7I8}@(yV*jMHZ^9{f`TQ2am86s(1z8o z8gbnoeM>nvab*c~Z?d``#AI3)d9P|D5R@3M6L4!3{MwDXr?jiI39Y&3A1~#UCXg|u z{bf9oCQug=B$Fs#svC%4!h?7zYh(k4P^J_Jjg=8FL&=3{ z1Pnu(rMG4<)z3pIrcsp|nNC6Bnf`16WaEiNO!`jRnLaM3BIgC1{)nd1{8B8P5n-_K z09Tr(j-_^^o=R>Gmd`VcM7N)>R9>aJ;CJN6DDq?@n(NJ_;quOs%>t>I-N;#~U@lo^ z%2r7y^axTOt)D-Q(WU!hAfq589{nu7{&Uzj2!oAL|1@=V#&AXftSy_yx4CQIed5F_ zVJH>F!cg;FC=p@EKwiOvNG#biB%vqHuF27rQ{b0!vyO;EJwR9mehHqyEKF`_oFTz5 zH=CYz=-*J{f5Y)_#Q)|@rxgR(slAdhGnt&l$TrPwQxS-Skl1|6%H9xOWp8L^WzgI5VgB_V)rtlCc@Z5StDVB!enTzCW=80kT~Ioku0YCzn_?P_bT5qdn$N>ZhinY`vyV>qF=f{>8O4_0#wgzzr2a1kxQa;&5xb3%BxHt%c~KsWg)IUoMxS!5~(qq;t&u$CDLo zfmnHD8b*@6FTtY7lVCR7{>BY5=wyXKzBeQjqGJA$U_u20U)0GHQ#=Nq@=z4tOG1gN zW(Z+7o?Lh>Ijw?+UGT~g*XqTD<7e(wt*Xn`OlJp|Xb3Hk*{M8NiZXsPIlb~_#r9t4 zwHLnHnCvRFnXJ)R^pk$GchIJ*TV7|CPVFD2{Z}Pl{S^0=PG_C6rnjlg7L~e5B6?eW zw^^f=^?~kh)PsI3K$JTB9lCGgU5C>-A=9t||FrcIsFIzPr zn~O?2%4bW`6ZrPZRBjtlYVjjx4Z!epNJXTL=U%dMU~6(3%!RCZp;|v!Plbu;c^p4^ z+UU@Pqq6uqwbr3t1YkQ$i=D>7x`i0e=cc__ZJo0Yatc)&5oFU>ce zYSmcKX?LZb*vijr-M!SCvy1N=ty-m{)Xw*uCWgIyy=c!C_o`<@CQi$@N6sBM*2W7q zY(~o6)-y`m1evZVrNFkT4)*d(`cV~eK@sY#1uU$6m>meho}`*@;_BRkla#k1Yqyji zWn#Cn^kC`sp^e28n2I^`I&4I#1sq# z8y?fD$7U<%o)3PMKsd{575HbRjspzvQZG8<;J5Hh^im8fGliO9gC2Zcq zgzGX92{9lPjNB`%Z8K&WDc!%ClrYIcO4~CG&xqp)xh`zi-)XS@ly#QC(e!I6)?*Ti z%wOTQKLqL5aT^ipPj??cAH%Y$W7VgF?ZWgcsN%=CLIGo4NIkZzR8tRa;L3YSmVlzF z`z6d=(GWtFH8)6k9Z6=xf0%B4}Kgs0@Uc#!%F8Z!NHx0#A zBFGEWA(8xa>hkNR%t{)nGd#YsY;3x9{D$$d^lgOIxcnn7w@Q0IoP~Y+r%3WWHKy{S74S|5sWk*w^*{gwk4<#L|qVpj4(zR1#~Fspikd z#`L0m*1o(?Kx$>PWep|0gnzFvv7p$AWc!L;`a#`FrAog{POinJAJXlI<$MIkV(?Tf z)23GxYIM1l7L|Url6*|glpN_x>DS1~wWL%=zX5~GNvil~Tpek4$@&`7P+vo`l|o3W z%Ysdv6>!buP&f=kmy#~ptlo1kMb8gnHu?lQR+CP%4 zbexCzekqxBz9%*7X6`|mJb=sx-rh0ELrm(<3mTJ}&4Y-;P7pyN%%P-op_81x@U~tF zSTXVv$neSyk>+j5X=X~ypJiZRvW>7p-=@HEQ#lQ|k*DyCnE9_DNqsXDlh+Zv-b7-8 zjlCTvaZlg8)x&dPLsTG^@WqPCy4hT2Sh!j^TfK)xRKX^-YPTEvS(siMy_hp9EO^k9TIuFbUYvV@Epbd>H7~Fl z-`~F!%RN>vr-@Au$oXq>jB1DEQxl_An){`Pacuf~rOzS*;f|A3^k2C;YPVE$FX!=J z3oVnSqvbs@+tVQFGA)aB0?*p1k<0+L@^H`gWu4O0x5uk3p5zJi*oFe-LLSs7NRlq~hv{_RI$O8B6k^vyIg>!)7x!QN%tqck@SSU}&mhdL~v(qdln} zY;W2|!QO#6Hbhl2gX*8qfNYbBx^{{g&{(-9@hXB8?YK*YoYAFBWaeVIlwKW9R({Hu 
zVJUI^%#Zezd}ctGl`il^cFrEd+p3N7oiRLf0;`L=Q2KV!xsx#YB%BqvG6|QzpPAEB z3Am@SZO0~bn zl`WUCOL6P?c=|nri1Ym;zOvA~A5Wt&OUZ?1CNX^$BbS)0oQa8>Pska<@pdhlBX8g` zCp-afbbEviaa~hTLh;(l)mu!18iA2rR3*bPf3mc*84G!3bql&z5{qvcw`~Mh0&Le5 zgCw&`2KY#tLhKupm5VYlBg&w9VvnC`Br>#bOy1a(u(*zg+BB6QGP8r)-#1tXie52IJ^A=+77Bb$Z_nZ zd0Y$B8co`NP+Iv4brk?ey6FDnXZESLFs2AmPyw)))5#!^%Gsub+)sTBZAfayPnJ%% zjv4tjCo6leE^NYGPp3z=BX3}l+g3iEm9@ENrpMaeS1&|rV5ELnC>7><>eE*w=Dzh? z^z%<1HwHDem>ZZdp|mIDi0Xp{EqDw>A0dpTMD&RzTRCct=DZhDJ@K(Un%uw{!h%OJb zN{E4$)bdmgO(p$d1v>$r0h`Po*SLt_5{(L5I;PP3#>!#t6Zfp0x%CCo^^!&6znyBP znloD3y*Exzi3;LmPCARX#^oPT$x*J3Y;6t8B_t9fc7+3e){?FeIJ{~LhXz8Isp=n4 z*E-^d;bCb3k%PRsoRlC2XpIR0>k3Gi%cjH%Ah#w2{96+ONUjOld{xMe$;ts?DYOce z9zS!Z?E{eD#^e>w`2eIQU&s~D6hvIQ@U^VAK9uD_C=pi3cEdHZeKOBR-&mI`WMf8m zjoSV=&qklnA**WJe9deRm3oat3VUl+p|%^Zne9C5xTU1?Bc+vTGyFF7_wh4Zq#iSW zgK;Yp$%Vg)0D--Q{^GzHPr|^wZkr|0M&0mlqgQhi(ajxlZo`Qs%MG)#z|Jzc(nlrAXeab@BfAbbecpA}!a9-w;%Al>`j4vgE=(+`?xwY{_x1ct+1bTCV);XL~P)^in z!p14hSvEiuG&Pxtj{I@F=|7V56LNl1&L?sD^9T#h_rR!0a*jof`Q-xz|G|R)P{IFD z!T+$~oF(=FkAhIRs!<3K-@=+g{O%~lvkvRwYI3eWbPJf@rN*fdqoqwdQ(4&582@NQ z4At+%)96hhuHdocmIqM6=w`qM)Z_s;!szCmEDEvpP2!*?qSB?CV_coz8HQQ-TWfRk zy=-nKHg=o?qYx7N@H%(qM6h!lduPyvlK#Cy*4Y@&xQV z58)FKuj=VRf0jxAN=zu$Cm5 zk|3aICt6$9;xT=af3stD2C`JQRtm3Xh6^hHNxO=@S?zyUpw-l<- zI8ue^r&tBkupodeF0bO=^{Tku{d~D6^9B{kY804p8%~Z@1(qFOrcj7efX-P*;z+2Qxf8D zRk4pa>QU{h%NXD>hrA8CG`YXsx9>=_A&!k}L0<0+1xV6h##@0y?Ge<*_FygJHK*k~ zf#dB>w!K$os~qZX+a@#jN8k5bKlA+A7aq^2$KLxJMM;9Mz4cypYfu7sYfv!*t;Ojy z&0)RaYMz4!*y}kwMpw6TP;=e=9r!R9iS$gGE{_Zt3)gO``$eu;2L_a5ZYEIRI&>hWOphdjHd7aq*w z<4Ko@aQ26L@zL_-rFitEdOUjla4$ZpU0#a5SR%stkM!cB-sPp}oTd7ah<_yg1P@_* z#=Ptq(u88E7rc$|=c_BR@Btw+Bde|~u?$ksSN|oUeIiJIhT8$FL?@|YURuXFJlGq` zUePFx58cM`q&rGG%lE8v?J4aU+EbPadna~>H}n7Y(hjHF&3f>bFzCyekH!(Y$6Yjm zUcx#d*Wx}_e&Oc$%T(P+UEV0QwOAxuD z1Po^+KqgsP&Js{y-bhTF__1Aj$uUb1#YPEO-$=mjS8ep|Jq0x~2WGMw&&W1dfvoJJ zT)>jf)RK!ys}~;S$M|Qz7q``8Ghse?0&g5(@XV@A7pDK3u=HQa`B^#tM$W&J^Ka$+ zyquqt^Y7*Sf}H;-=Re5#B{{z+=NshMl=ds~eF`Vm8B*P9&SZ)|7i@bj*z;Vl;kn?p zbHP;Sf_Kgx87vtao!ExTUe&)RV%|m*6J}Jae`^9Rsk~z6V_cn=aG)V*g7ILGpuEdo zP2%~OH;KY;nx;bjbUSD zsZuyBUMI+#71J^KxMD}5X1aZRaIY^c`)TJF=MW6(H!GzIZY;&z(j7gB#n0?uUUsUt z&GzaaS68dOY#}S(Hnb@wtm5l@PVtp%`8sC=B(PU2GDbkR4Mu=utip>d zOQ1$%+2Le6?M~W7M=u~qmL(oDvP?PI&31R&<=l4xQ?e`p9Je~bZFX zoZpr6-{t&{oZpu7Kji#@oZpx8dvg9n&L7M9BRPL4rz9{-QMbdUK{hAsYj{Wm+D8?Xe7g^t;w zr&f)zkYMBA>OM(-l-R*`jMun-L0vIMUC|X=%4=7(PpXyuAyvy$8TPN+?pw`G&a0KO z|8kV!qih72${ zWgfpGP$352VI(smFt`KOKyV;5n2tjP%=HqbjBBYXIWvTY3V*TZ{?jkT?pOj$$d4fPg!>9IU;xEDzC^1zW^ zAhj5dD%jwB!b|;*O)d9lThcZ%elxK`AR350XX2dLa{7kCgnu!w#*=M|J%P_86iWvm z<<|3RFHcn#n)N#8h*Wq4@D9ALr&K!3!~yrk9K z;-Z4+nzKesJ^B&W-1@gr3n8RqacxyP3oz346c05rb`FRxLN;mm(_49lREyr}ZE|+u z#5P-Ov0qqy7Hb11g}-n~%zl6t;ea(+!zPet?c~kjuQ3e_BG;cAY~NfNvI?$6 z|EN=dEwDyUHnD1ZUlFY1EhTRntVfdztn0c_pEKvx4I(++8Z(q4K<9y@JuD94LBF%{ zNGr8}Cd^G=gEV3@tua?*)5qMfBYsd};Xd8mI z9@%K8SoZ0jae_Dp`F$~Fyj-1)(P9$B1a>=3#>4m z8?s(%@m94Z-uT(5^D;fZkYHLb?0szd18>;0cea&&J4M+)-%4M_Zw7Hdr{!H*#X|JX z-RgYyDq@uqoV0;_Xsh#8wl<18=d}`%uTD9Ly)e9*K=8tw)WXA?m(Tg#w#6D)=E{!R zyLhqY-rYE{+tWhhq;4m0tY7PWY-f4xbjTMSZ?n40PsrJt$@c#R z-Zu)PXM$x%FR`_Cy$#tXljAjD7F%DVw4YEiJ>v`7J?aPI{c$g)5M4|wPAxTR?E0B^ z34(X0;n4WScOiMP2yfqtML&mux>GXa;zvmczf50L@y?V1i_g#1roissz-{BW# z^S`{C{hz)^yVq-Rb?KE~yRpLZAH6?9gt&gTZh%L`{k(5JUuMD9Z5!*0r#cE>5TeP z^SXro8K+NleL=;lm}g3@5*Ln^cO3FNJYtE}6Wy9xs~?TkQLfH3&Ke1OB|hvhlw5(Y zEL&AvU#!-|90CN1To3rKQt$fGRCYAf^AM%ah*N}sNK<1a^FRs3at|Vrtn2_;FM*KJ z^MiOl91}xHqpx;iM6U&p((nqv54uKP%S(bEZ~_b)#RNz`nl$1B|5WD=R>dOuh#ggI z=T{1i|8?PVRt2(QL(&;9VZlJYYCTR8OMj9Lpnr(feHKUJ$if?u-JMY(2gybVlh}cg!4e!N?r=SFQ 
zkXS!y`=@(yo$z?N&-m%K`ssR07CarDaBA;O4E#(ghK%=O+#{dFO!^l2G)o8(bt}Fi z_OCBp(tDNbKAc>lq;FNw{c`r$cE*g^V;vIyzoF!aN^-LBhT#<9?wt(hzG?>EJ?V$=)VTbk zaXHFWm?f>d(;k$)0+Ra;73yx|w=i7SF%@tSXC3_3SX&N3 zbhi)5C))GAoU_Z#`FcfaQG7ALrqljyVIj!|fL+SGqRcnoNaw4!7ncUVfmiMQie4v+ zSKK`kZJW8=Jq|r85#=5h?j`1$8O0DkT^qpe2ci;?|tKLo(U#+ekqd9t) z?19pOP!~mpy4=0mOs_62Aj_l-#(j_wp+$q0sj$*^ouJ|Ci}bPftqPi{g~ZYGUN}Ln)~qhhV&L1P~+A=>a?R=mYt2yvy7(1x`Z?y6ipP@fdurJIE53`e;LvT z58#S*!!b*)8_sFZb;B{b2v;=7GtDP(P4!_kJi)CWF#6hLt`_@19ZN8k*hMLTIj~OH z!G74e=4{uvR7jVH9W<($#0JbA0-ld+0A-jG@aU~vylaXe4@0h9&l3>Lk%2=GmKg&r zt_X!NgoDmjq_eda#w4JG)YiAJk=g(ccd&O~0KZ?* zIhuI%o7En%8m7+;-UM~5w-i}{lyepqDIDc`vuEjgLS2(mbMXs!*5~5OrYR&j10nFu z@sfs1<=D-vqw3+VY)547h*9Qs39_pIiBoPP9>-^MyL=^fb@? zU20VD)uxf|Hd?cauh=)#>HgoxT%v+C%;hItkHbtiUvTF7p+51f&s+kH|2@q0e-KN3 zqTefjF<9yuSn3ChEM#J0Wbm`c5fvySWnEhUSLsq!GKdP-sD3sHNy)#BtyWkIiT)x@+Hz(#4m)h}kgUrS7|+tJIRa{;OBa$=_8_X;Z+l@IsYt(30&Mwc+@?o5P#99Dq#WLPJ%j=&z1?4F(JAHoe zATJ^KBN}`iSc;W#y*EPFREWbd!pFHhTA#ub97_+LA5Yq`Ul`TYx1IZ>@-q;KLD@)A zO_?v-B$WA?HS3E3`PS920*BGaRtNTJOOcHbD=qAezl?V4vtWcn2NO{kp-|lIAEPj) zATji0r5N(XG!vJ>XXNqpwrOAHTaM^E)WhPd%UfGqrW*DK~5qU* z#FtI3)0Fxji?k%6lhS?capv6m(7n8FMBB=0YNxItu47!C+i_yt*joV+O=8H^y=i6= zGGfxA#4~=ly>{;n#!;|e^&H3g#X{F>hGzt*(6dYJO66zuyj(olT3tz;X0t*19-g|d zyV*C0V;6?;A6A1zT^JTOjc>87yQcQ(N4<8Gt22$0L%Xbte1QdMhn0KB!g-;TQs;=C z1Gz$vadk8y_?$o<`0ZH-qZ&JMfrmCPfAUva=tyy-!Gq&dmwj?ZDLM6>C&)oOS2Cf!>NZXLiL^iYDwH-E{}$c%8Vkr?-)Yu!2^ovPy%^( z*Mtpkgm|(7AQPx{w6>FZ_z4u>89xEZkNIqq3dv(JL=abDS&T8y7LVptxK1~($WlDP=`O+`JDJbDuuQp8?g3~

zEuGE=D%d#9&rvj$`ZV7b@1AS0Pl)d@PT$en#xkvaEcuonzBS&S&c24qhZRswDj>)!NStwt4sv+%`dmQ z;n?_+y{=XkGV1oOkPZAl z#?`r#5V}=;6^LwF_AB3*kgbV8heI5o{;!R|BCQB?1eOe9)c|8PCuUb(6KG~5p?%j) zt+#qWtSrB0{`zUKJnded^tJ5Qu`iVoOkO*~>=(R+3hv(Y5qEV6^O6K%T4J}2j`36gDGZa&0y}R{r>A=uk zL46VtlF+6;t(sgqbrm^f?;QN|5FovG>U3*cG7RsY-VYEHe!!>HP4(uR6} zRt~yVwg$Uq&?ZRe-6l`+8tdlSz3P<dQ@pOwX6sh!h$CwZxcqcnqj|8_hJU2Pz;eEC@m8C6Z6%W1Itas0) zISodb5NW`Kh_;j5CPLJH5WNIoYv~rLq_Pn3`jD;Vn|Vp5NmA&hOSWRKacf+x_w_~y zv0c&xrKbX_r_u{l5C|&$x4&!D0-0|;5BQY_R-7Ny0}3w^&X}w@ri&;7Vk`B<8lP}I zi9cGbM^4o_*G==5*765ENk2M_?maes`Aba6zt zkI8w5oTqRstDg54aQ)5M`24E2A^ICX@kxU3Q9>DWUHL^mHow?Pe@tm+a;$G*f)?_g*yK2p-H||WZgNAvx#Fpn|Pr) z`?E*J$2N_#A9?Gpjhn`HY#iS-b^p{&Q;$qdPu(|l2$tG2wSs@=)H|jgoZ33IdFqC# NO;fi|-MVS$e*(nG&WZp4 literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/pix2pixHD_model.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/pix2pixHD_model.py new file mode 100755 index 0000000..3f2a11e --- /dev/null +++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/pix2pixHD_model.py @@ -0,0 +1,326 @@ +### Copyright (C) 2017 NVIDIA Corporation. All rights reserved. +### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). +import numpy as np +import torch +import os +from torch.autograd import Variable +from util.image_pool import ImagePool +from .base_model import BaseModel +from . import networks + +def generate_discrete_label(inputs, label_nc): + pred_batch = [] + size = inputs.size() + for input in inputs: + input = input.view(1, label_nc, size[2], size[3]) + pred = np.squeeze(input.data.max(1)[1].cpu().numpy(), axis=0) + pred_batch.append(pred) + + pred_batch = np.array(pred_batch) + pred_batch = torch.from_numpy(pred_batch) + label_map = [] + for p in pred_batch: + p = p.view(1, 512, 512) + label_map.append(p) + label_map = torch.stack(label_map, 0) + size = label_map.size() + oneHot_size = (size[0], label_nc, size[2], size[3]) + if torch.cuda.is_available(): + input_label = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_() + input_label = input_label.scatter_(1, label_map.data.long().cuda(), 1.0) + else: + input_label = torch.FloatTensor(torch.Size(oneHot_size)).zero_() + input_label = input_label.scatter_(1, label_map.data.long(), 1.0) + + return input_label + +class Pix2PixHDModel(BaseModel): + def name(self): + return 'Pix2PixHDModel' + + def init_loss_filter(self, use_gan_feat_loss, use_vgg_loss): + flags = (True, use_gan_feat_loss, use_vgg_loss, True, use_gan_feat_loss, use_vgg_loss, True, True, True, True) + def loss_filter(g_gan, g_gan_feat, g_vgg, gb_gan, gb_gan_feat, gb_vgg, d_real, d_fake, d_blend): + return [l for (l,f) in zip((g_gan,g_gan_feat,g_vgg,gb_gan,gb_gan_feat,gb_vgg,d_real,d_fake,d_blend),flags) if f] + return loss_filter + + def initialize(self, opt): + BaseModel.initialize(self, opt) + if opt.resize_or_crop != 'none' or not opt.isTrain: # when training at full res this causes OOM + torch.backends.cudnn.benchmark = True + self.isTrain = opt.isTrain + input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc + + ##### define networks + # Generator network + netG_input_nc = input_nc + # Main Generator + self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG, + opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers, + opt.n_blocks_local, opt.norm, gpu_ids=self.gpu_ids) + + # Discriminator network + if self.isTrain: + use_sigmoid = opt.no_lsgan + netD_input_nc = input_nc + opt.output_nc + netB_input_nc = opt.output_nc * 
2 + self.netD = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm, use_sigmoid, + opt.num_D, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids) + self.netB = networks.define_B(netB_input_nc, opt.output_nc, 32, 3, 3, opt.norm, gpu_ids=self.gpu_ids) + + if self.opt.verbose: + print('---------- Networks initialized -------------') + + # load networks + if not self.isTrain or opt.continue_train or opt.load_pretrain: + pretrained_path = '' if not self.isTrain else opt.load_pretrain + print (pretrained_path) + self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path) + if self.isTrain: + self.load_network(self.netB, 'B', opt.which_epoch, pretrained_path) + self.load_network(self.netD, 'D', opt.which_epoch, pretrained_path) + + # set loss functions and optimizers + if self.isTrain: + if opt.pool_size > 0 and (len(self.gpu_ids)) > 1: + raise NotImplementedError("Fake Pool Not Implemented for MultiGPU") + self.fake_pool = ImagePool(opt.pool_size) + self.old_lr = opt.lr + + # define loss functions + self.loss_filter = self.init_loss_filter(not opt.no_ganFeat_loss, not opt.no_vgg_loss) + + self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor) + self.criterionFeat = torch.nn.L1Loss() + if not opt.no_vgg_loss: + self.criterionVGG = networks.VGGLoss(self.gpu_ids) + + # Names so we can breakout loss + self.loss_names = self.loss_filter('G_GAN','G_GAN_Feat','G_VGG','GB_GAN','GB_GAN_Feat','GB_VGG','D_real','D_fake','D_blend') + # initialize optimizers + # optimizer G + if opt.niter_fix_global > 0: + import sys + if sys.version_info >= (3,0): + finetune_list = set() + else: + from sets import Set + finetune_list = Set() + + params_dict = dict(self.netG.named_parameters()) + params = [] + for key, value in params_dict.items(): + if key.startswith('model' + str(opt.n_local_enhancers)): + params += [value] + finetune_list.add(key.split('.')[0]) + print('------------- Only training the local enhancer network (for %d epochs) ------------' % opt.niter_fix_global) + print('The layers that are finetuned are ', sorted(finetune_list)) + else: + params = list(self.netG.parameters()) + + self.optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999)) + + # optimizer D + params = list(self.netD.parameters()) + self.optimizer_D = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999)) + + # optimizer G + B + params = list(self.netG.parameters()) + list(self.netB.parameters()) + self.optimizer_GB = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999)) + + def encode_input(self, inter_label_map_1, label_map, inter_label_map_2, real_image, label_map_ref, real_image_ref, infer=False): + + if self.opt.label_nc == 0: + if torch.cuda.is_available(): + input_label = label_map.data.cuda() + inter_label_1 = inter_label_map_1.data.cuda() + inter_label_2 = inter_label_map_2.data.cuda() + input_label_ref = label_map_ref.data.cuda() + else: + input_label = label_map.data + inter_label_1 = inter_label_map_1.data + inter_label_2 = inter_label_map_2.data + input_label_ref = label_map_ref.data + + else: + # create one-hot vector for label map + size = label_map.size() + oneHot_size = (size[0], self.opt.label_nc, size[2], size[3]) + if torch.cuda.is_available(): + input_label = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_() + input_label = input_label.scatter_(1, label_map.data.long().cuda(), 1.0) + inter_label_1 = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_() + inter_label_1 = inter_label_1.scatter_(1, 
inter_label_map_1.data.long().cuda(), 1.0) + inter_label_2 = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_() + inter_label_2 = inter_label_2.scatter_(1, inter_label_map_2.data.long().cuda(), 1.0) + input_label_ref = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_() + input_label_ref = input_label_ref.scatter_(1, label_map_ref.data.long().cuda(), 1.0) + else: + input_label = torch.FloatTensor(torch.Size(oneHot_size)).zero_() + input_label = input_label.scatter_(1, label_map.data.long(), 1.0) + inter_label_1 = torch.FloatTensor(torch.Size(oneHot_size)).zero_() + inter_label_1 = inter_label_1.scatter_(1, inter_label_map_1.data.long(), 1.0) + inter_label_2 = torch.FloatTensor(torch.Size(oneHot_size)).zero_() + inter_label_2 = inter_label_2.scatter_(1, inter_label_map_2.data.long(), 1.0) + input_label_ref = torch.FloatTensor(torch.Size(oneHot_size)).zero_() + input_label_ref = input_label_ref.scatter_(1, label_map_ref.data.long(), 1.0) + + if self.opt.data_type == 16: + input_label = input_label.half() + inter_label_1 = inter_label_1.half() + inter_label_2 = inter_label_2.half() + input_label_ref = input_label_ref.half() + + input_label = Variable(input_label, volatile=infer) + inter_label_1 = Variable(inter_label_1, volatile=infer) + inter_label_2 = Variable(inter_label_2, volatile=infer) + input_label_ref = Variable(input_label_ref, volatile=infer) + if torch.cuda.is_available(): + real_image = Variable(real_image.data.cuda()) + real_image_ref = Variable(real_image_ref.data.cuda()) + else: + real_image = Variable(real_image.data) + real_image_ref = Variable(real_image_ref.data) + + return inter_label_1, input_label, inter_label_2, real_image, input_label_ref, real_image_ref + + def encode_input_test(self, label_map, label_map_ref, real_image_ref, infer=False): + + if self.opt.label_nc == 0: + if torch.cuda.is_available(): + input_label = label_map.data.cuda() + input_label_ref = label_map_ref.data.cuda() + else: + input_label = label_map.data + input_label_ref = label_map_ref.data + + else: + # create one-hot vector for label map + size = label_map.size() + oneHot_size = (size[0], self.opt.label_nc, size[2], size[3]) + if torch.cuda.is_available(): + input_label = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_() + input_label = input_label.scatter_(1, label_map.data.long().cuda(), 1.0) + input_label_ref = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_() + input_label_ref = input_label_ref.scatter_(1, label_map_ref.data.long().cuda(), 1.0) + real_image_ref = Variable(real_image_ref.data.cuda()) + + else: + input_label = torch.FloatTensor(torch.Size(oneHot_size)).zero_() + input_label = input_label.scatter_(1, label_map.data.long(), 1.0) + input_label_ref = torch.FloatTensor(torch.Size(oneHot_size)).zero_() + input_label_ref = input_label_ref.scatter_(1, label_map_ref.data.long(), 1.0) + real_image_ref = Variable(real_image_ref.data) + + + if self.opt.data_type == 16: + input_label = input_label.half() + input_label_ref = input_label_ref.half() + + input_label = Variable(input_label, volatile=infer) + input_label_ref = Variable(input_label_ref, volatile=infer) + + return input_label, input_label_ref, real_image_ref + + def discriminate(self, input_label, test_image, use_pool=False): + input_concat = torch.cat((input_label, test_image.detach()), dim=1) + if use_pool: + fake_query = self.fake_pool.query(input_concat) + return self.netD.forward(fake_query) + else: + return self.netD.forward(input_concat) + + def forward(self, inter_label_1, label, inter_label_2, image, 
label_ref, image_ref, infer=False): + + # Encode Inputs + inter_label_1, input_label, inter_label_2, real_image, input_label_ref, real_image_ref = self.encode_input(inter_label_1, label, inter_label_2, image, label_ref, image_ref) + + fake_inter_1 = self.netG.forward(inter_label_1, input_label, real_image) + fake_image = self.netG.forward(input_label, input_label, real_image) + fake_inter_2 = self.netG.forward(inter_label_2, input_label, real_image) + + blend_image, alpha = self.netB.forward(fake_inter_1, fake_inter_2) + + # Fake Detection and Loss + pred_fake_pool = self.discriminate(input_label, fake_image, use_pool=True) + loss_D_fake = self.criterionGAN(pred_fake_pool, False) + pred_blend_pool = self.discriminate(input_label, blend_image, use_pool=True) + loss_D_blend = self.criterionGAN(pred_blend_pool, False) + + # Real Detection and Loss + pred_real = self.discriminate(input_label, real_image) + loss_D_real = self.criterionGAN(pred_real, True) + + # GAN loss (Fake Passability Loss) + pred_fake = self.netD.forward(torch.cat((input_label, fake_image), dim=1)) + loss_G_GAN = self.criterionGAN(pred_fake, True) + pred_blend = self.netD.forward(torch.cat((input_label, blend_image), dim=1)) + loss_GB_GAN = self.criterionGAN(pred_blend, True) + + # GAN feature matching loss + loss_G_GAN_Feat = 0 + loss_GB_GAN_Feat = 0 + if not self.opt.no_ganFeat_loss: + feat_weights = 4.0 / (self.opt.n_layers_D + 1) + D_weights = 1.0 / self.opt.num_D + for i in range(self.opt.num_D): + for j in range(len(pred_fake[i])-1): + loss_G_GAN_Feat += D_weights * feat_weights * \ + self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach()) * self.opt.lambda_feat + loss_GB_GAN_Feat += D_weights * feat_weights * \ + self.criterionFeat(pred_blend[i][j], pred_real[i][j].detach()) * self.opt.lambda_feat + + # VGG feature matching loss + loss_G_VGG = 0 + loss_GB_VGG = 0 + if not self.opt.no_vgg_loss: + loss_G_VGG += self.criterionVGG(fake_image, real_image) * self.opt.lambda_feat + loss_GB_VGG += self.criterionVGG(blend_image, real_image) * self.opt.lambda_feat + + # Only return the fake_B image if necessary to save BW + return [ self.loss_filter( loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_GB_GAN, loss_GB_GAN_Feat, loss_GB_VGG, loss_D_real, loss_D_fake, loss_D_blend ), None if not infer else fake_inter_1, fake_image, fake_inter_2, blend_image, alpha, real_image, inter_label_1, input_label, inter_label_2 ] + + def inference(self, label, label_ref, image_ref): + + # Encode Inputs + image_ref = Variable(image_ref) + input_label, input_label_ref, real_image_ref = self.encode_input_test(Variable(label), Variable(label_ref), image_ref, infer=True) + + if torch.__version__.startswith('0.4'): + with torch.no_grad(): + fake_image = self.netG.forward(input_label, input_label_ref, real_image_ref) + else: + fake_image = self.netG.forward(input_label, input_label_ref, real_image_ref) + return fake_image + + def save(self, which_epoch): + self.save_network(self.netG, 'G', which_epoch, self.gpu_ids) + self.save_network(self.netD, 'D', which_epoch, self.gpu_ids) + self.save_network(self.netB, 'B', which_epoch, self.gpu_ids) + + def update_fixed_params(self): + # after fixing the global generator for a number of iterations, also start finetuning it + params = list(self.netG.parameters()) + if self.gen_features: + params += list(self.netE.parameters()) + self.optimizer_G = torch.optim.Adam(params, lr=self.opt.lr, betas=(self.opt.beta1, 0.999)) + if self.opt.verbose: + print('------------ Now also finetuning global generator 
-----------') + + def update_learning_rate(self): + lrd = self.opt.lr / self.opt.niter_decay + lr = self.old_lr - lrd + for param_group in self.optimizer_D.param_groups: + param_group['lr'] = lr + for param_group in self.optimizer_G.param_groups: + param_group['lr'] = lr + if self.opt.verbose: + print('update learning rate: %f -> %f' % (self.old_lr, lr)) + self.old_lr = lr + +class InferenceModel(Pix2PixHDModel): + def forward(self, inp): + label = inp + return self.inference(label) + + diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/pix2pixHD_model.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/models/pix2pixHD_model.pyc new file mode 100755 index 0000000000000000000000000000000000000000..c03aae8d3f2b160f2e46a03b727d28c3038e565f GIT binary patch literal 12701 zcmcgyU2q&%RzBS=jWqJe@{eT8v8{~l{3r?jP1t0U>?W2K%Z5ZGEoZ&B;xw)4o>5CZ zJ)`cHEeGj=Rd&O&3;V!NQN>RI3!8<7q6!`;eu^g^D2k$rDxhi~_}Mq8f+AHs@VI>6 zx!p4(8*esHC29Kh{WdDOV&OllDHjwGZVH zc^?DRBp8*jDou=3WzCMrkKP)WU`)bst=-qF-A}5PW^14@`Y!$!XDwv&Q6!)9)5u?r zLsL^l&Gok53g7H>;)p=~`Z+%f-{=Hk-0z%(`Q1*smNjfZj^P=>a|KWKE%a@PY}>LW zvZ};wJF*$GIDb2k?IGD3l2y=d$rgTKi!I0aa^%>~5ZiNGRvm=`;4lGq6#Up~g>zjU zVQQGQV_Dn7vV{(#vNcRNYyFtqB!()hQ2|QwFVDFmu31Ed;)K>8 zkgZV;g5T;v0&Ii$s;nMTCSeHh;jm79T&E6-Kwwppm$myRL;^7~-7&<{EJa~a7d+xx zMh=pE2#LQLWf9&I2^zpN{(bby>?B?akVWr>nz<8&cfoOyq?=O?vh|HHEZPD;_jBTG z`fh5`-bouPV3J5P?X*1u&3;+#H`a96#s-k9 zWM`g#$B$x4bG{ESSL2SK-w2belPcsADP`yP!nEV5^k)q}&%@Ny_PCR@W;vG96D1%q zQz)PeV=rkaA>A|#yk$RcthmIb2o+Xwur;R^nLcg5n^TLrN>C>WuXS=yIYt7to2QVo zD2gW8yU3k?I}6k7{8}32(OTAN`^owF&fNqP3bOO9sNFr=jW=3RlAXU2#^LhiH~ehv z?6tSf^Sy9+(F?+M=X{&Wnw{@Po0srhoAt`>B^o) zB38w!S>smSx@aA=C#(sS$E_*rpmhlKGbo?3p0sDI89Uz(=r=J6o@?`JA{s=%Kgz=# zp6o7?u(XS`UoeW68ZuRV)(lAqHFxBFOKw#(Kcx8)%@1oHCJ!(b%~z!`g<~{@#IUb0 zg*1+1S~D*0(k$oH;4+lY6;ed$1_?{Eq;b(bgVd}7p>#2`FmCSc!jPuKZ--CdosllF z8dMwBQ!<&)o=1|4noStBfeiBI*Bp~Ci+ljBQH%K{m=ai0r<`kp0!pUvIgBS;Kmy$+ zvJxM_VWDA7VccLBVVas%`AJxETFYs92ZpaI+hejdCh0S@Q}5cc{tSj7`-%;Q(L|tJLIqwL`muHhyr#bvs>y+3 z#xx=%A=b2MHY*O%jQv)&XDUYe<4%@&%_xS2c?QK|K@CM*(HrSTILnOt7?S)Tl8r3n z^mWc2`>SbVEoQD2?y~^v3#xkw#l5f@2QW^QM7a`C%<3tQ&6N@BF>AtBH(&xKn&T^Y zvcJUaAXM1rja)%^EbNS{s{Ct?WKU6yvCY@gm#A})1?=wF+PzUpR1)a|hpiF5>pmYcJIAJ^9XnoeknJr?i>H9e&1VNEA#creiFl`~Pg)J|*Lu>vu*2$hd$dR%J#fkt$o6WaNt z>X|{Q77j*^-3AG(DEy3a2DCjYt93~uI)n??$StL}mVHqPJ|#uww4}qfY0G}jf~72& zcDnF*7Ot`JjCP-(0p+ldQRm@@aYQn~U@$;4G_DT-nYI%Wz{MIwus&?}XiNWMh;5JS zG7d?4+0LIRxeiNJlQmQMqy%trEct@Px#qKq@p0CG9M8zi)0&6n0f9fD0N^dLpOqTK zjDY8qv*#r+5^w~(H>-L9{u5c^T~vv?B&*M&M*BRcsLo3Ew>H=G2Nger!VLYXlK1B% zI3U463E-R^mH?|XCBYF1j!G~M2~t$gE2)_2A3SmwlsSN7Hv@-g9M&jgp%m4)9P+` zQIHwOxsF=(p0l&ao$dYA7mKq}okleBV>qKhz1PeP3?(pf?^@7wPSbhy$}@TdSFz94 zc`~ancJli5b~g^&VUhz+vy;}}*ogCJ;mx{Jid`VZ9ll`$ht0v7`r;I91JE$wDD?Gah zd@fxvWma<7UG_^-A|*3l9O}_$r?pXLEF5cCTJ>? 
zfLNLD&Fge=YC}$`=+L>L$Qj=T-hp6M<_D)aS59b7ae}L=WOu@Jxs!!TVxyDfQL+(w zDlGXpu<&H$;qYZ@=znNUAu)RTy1NoJRy>s%YBYAT(=0v0h8lPU`A` zfIc%FO^^ND*HKbs>BNB-r>^?s)EIDQm#3!0L*-tlFGm$B4x}Wpf#Y46MxBHzNfCoo z5ajiX3e9=-KvYk8tcLz+>n;RTN}dnsF?u{eWI6pcSQ;E|o6%;u?3`(K&p4IfXoj)9 zC~0<-)!?r39`DS(4oEEpQ6o3VfEGOVQ@nxJh%yFfXF4=ak@4+3`4zLo(Msj$Xa^SanN_tV(Q5&IkDKvr{@0aJ*S?-q?=4L5% z>Rr1y-iUhxiOLHEV*xWSREcP4F|(-E?nFTeG2e&aTAbHbXohOHB*KVh9lJG_7PaZcK&@vd6a&eP5W!UTBi16I|Z zLh#@iV8; z;Oro@hdtb3k=_&>lU@podO-r)6`NohY@~k# z2gX8NucVA+hAi&>JX zTSRmZ5yS6|1eC*ZGe`^*9z;_8I3$=g0+9xZVcmmB#y$=S{C;?#28m(#gGj*Dj}*@* zJgo0SSW)yfOn6w|hj5_iYv%s2z7Lsr(HE^oWFRONp)f=IuDWHgu5XasL>EuQG*Gh2K?{AfNaztDAudlS1zK_qhD|c zpy72Rbr4QQ-cgBDc&Re1tVjZvzXZMy%m9EGPzVBe;Ki&f(yYL1rhYG~E7gxIt4m9R zjK7awiMUm%3vj0s`+0=R2sPVkvFR778|5Bijy{gdtyqJv?j&Z*@1i?H1ay$@Z@Q#=6$4LR#7h$6d1!E$ zJ=3rW4#7P20JMr;H}=>tbx#p7calNG7ayp(q`uNXaF}S!eCz1Er<;TBsBlsdN;X>` zgj2H5BN6)$%)9wNWOIwCTD75?ZUv{1_8*NBWAo_!qi0S3Cy+D$6NoHNGse>;JAXkzwgIQKEldD~*9y71?dzM~s2T zt1$3AV{EkNg$E3L@A}3Vc*KZ6!}qq$!{tPJ*5y7V2m?VE4U&iL5{N%xgBsZMsF98& zAMCcZ_hEhrd4=6FgndG)ioOH%7)pxe!|kLofmjZ3MsLBmBAB4bV^N?Ym$YAEc%r>9dSP1@>|r4*nFC86(G1XfZYTe(jxhN{yXxsIjW8c7u*Poi)By zKW`(0*ad^U_(4Vy(!p6tW4v&YxP;gd5JbGLY78+&kxQ%Eh>j2iT~X95QBq741k){Agd0foVk=0WJt{~HUE^wGg4dMoO{WO>*s5up= zN#@CkTq&$NrHQ8j9`w}y8B>f?Cx;=ho&y#4-mWFje0p66E+n|S`wq$(2@;sw3QXw% zTm9)XsA2ULJlSJlkkLQ0JXi_t(5gT?6?vy3+gOQF8Vqxz1K*eM@Fq3f%}pKl46V?@ zUOg&%^$4@~>Vdph4|f6eSa0A#U#PgPfyoKkMjReZAOpa#3Ko}Z$F{Z+nMXaO;UHHG z@Y{&e_aG6T?m;4MZy*nskdp(DAQ*k8N*F{Z&0YNwt%3W6n=|#B4FMEZ{BWGGEXpN9 z`3CvXvLj;psJXA>gi85fM+E&%$75y3N9f;gAJ_DRw!*QiN%o!%V@Y83^Dy+$sqK@r zk44>nncAL_%|91-1HCHHHh=p=*?v?uf0Ye5m~VmBM>S9j9McN$1=+64)^XXIk*$+5 z$==|rf!nI%%mTik3$ysRU`Oxt6bhsqmdWQq?G{Mg)f*Elx_oT!6SDpv*3I?5wr;|h zbMmJ&JzY*?QnJ6dw@zshlt6lPwTek90cbSfXvg#)Xg^)X;jx}r8PJO!xM2a+=!Z&3 z&7{WRMq%{UBLzA-T{ytLow~?5yA&@D*dN}ZE z&t;vZlj(lQguambdrX>4{(yA{@v2O_tQW~d$_Df2&-p3YpmE`zw7Xev% z$@(2dFQ8_o$mmrOOfJGJ#f@&?7Hfp7h%%cn9>2%X%V6b_L7`>Nt2c^LtDXB1aBVa< z#=X8p@SDT;p-ljs#}rei0diBRvt~f^ZWy&z@WBX2o$r+_lp>9IuMSecc-`QTDDUY` zq5}JGpe=h9i8vSRX+(6U?c>fgb`d<-@DDjpB9F(Z=%*l)8W}oeIo3;ft`@a*>x`o= z0e4M4SMX%Nhy*SHG>g_0ss|GU%Z(cSyonk8DhmL#=*TaR|yQNBMSjz4DdCrs#L^!3R7GZgo9v5kWB z1PVVeJPwh}sDR?stP61e5R=2|H*m<(uXU)Ux`s4tTsM?-fnP+1lCH8(rkN{S!wa~>FOn|>0AesD!1Hj& zVEXg@Iyooc{NcG}_Ft3~iD>B`n zu$-@%{qHM;{wfpQ1Yc+Frq|?rx9iIg(?%D;qmVZz<&{Tvmdj0RFNkI{ z?)@x|M7rn{2tK;#KZ^|4M;E=w*l^{_RmW;h<6G2CI9CAC(A)g&18O7TC964ndHsUi zEC4hN_aMMqa%icC4T&zb-eSNSb~d`%z#5Vzs-t>vi_^ed znsM73=&`qJJBh)IwJoRaaq6Gq;vPbySVv$G4_o{7sUa$*;X;j0dU~_wdAgB%{F-tD zm#m)qIJ)Ttpx&gof5_s8OuorP9YXaF|Bl6fVDfDy|AeGJd(G{E`O@zkNpZ%AX~n8K z)k^h7?SUedyT2=q-dbwfj{-BdqZjRkGuCV)qPJC*~X5C6CZ?uN06jUh) zKIGm=t=aqsKNWPz96jgvMFz)ims@F{NqfN`F4Y{xf%40bU=3COjDq_wObE~?Y0uW} zI~evd=^|G_72Ok;_Q>UiV?rTp>X>VM{c`%8zma#;O+f?Ci;#ZA*UuSmUoWz`U@Fkd z_`KhIA*^3JxG$nXzd>+MGk1mwY1sRztMa{U)a|pBtBh)FBFG2el;Yp6K62!{a`Jxw D*|u?y literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__init__.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__init__.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..ca2575e2ee59b26ab685e2cec5c295951e794318 GIT binary patch literal 175 zcmZSn%*(|kSRI$l00oRd+5w1*S%5?e14FO|NW@PANHCxg#SuU;{m|mnqGJ8*qRf)a z?Be{~#5{eM{PMh<{KS-E{q)S-0^Nd~()7%{Vtwb-oYW*o-^Aig`k0xrSoI1v39M8E(ekl_Ht#VkM~g&~+hlhJP_LlHWHa^HWN5Qtd!Cegg`k0xrSoI1v39L?8o3AjbiSi&=m~3PUi1CZpdH!>k+6AqTL<+ 
s;!{#{^Y!x!N;30-O5)=)^D;}~yB1=3a$JBi)Y7sYnkT9I#Y51SMR;x0*8AOJ6x zx}emP+`ReJKS6%%U&3q8={fofdFnR{QX@x1~+}9mEFyjiFeDl`c}&_Gpx`mJT!QL7vC7XxNo(J+wo1GTd9`%dJ29%2=dxcV&0b7lB%* zSDBVIEYoQ(@RaBJULrVY9t*$oFGEg2?oZ!0Uw}1kZc4Ch#$n$LnycY<;D;`cn_aIT z*^xi!dO_U0FMP3a_puvq+K-+z>HhHUn!`muY=)Ws%?;S%j9wbiZd!F5THA4Gt+

R%#t&{0#0rGI@~ZcUKy`UZYqmgIqVZR0TJ6_LWSAbpR%Q?)}SR;ouVa+vueM0jUAfND0b6lT6;~P2q zZUXr{hkU`mg#26l_JmcxR|Qq%U+tUx4>@^vzV1MF&&S8%=wwA-b1$LFM=ddTh7=N0>o&kH#Ut}2n4vACJiSu$TDcg47_C@Q2 z?`{ZxrQ^1R^V#*4C|M<#g&n4Pf{C3-NRLvg!M;(9i?+u_%(i<%3Z_C9s}K*d+mC#} zZopYTa=!Mz_JPF?MbF z`tr&$ZYvEohy}B+vrZ`4{q+~gN!xDsM0+y|y+Fkd_hd5LXih-M>1=K0YL&JGXQXCQ zv29IgO-0_$3a&@1$-;KeYxgo`$quZgJrc@(T1BoaH)xB*9zU@F1gak{l_>^|He7}I zIXIcMZOC!B45M(M(wcUTqvSv&=g625L0d%5?TSE17apuzY1wU)XgPrVU>n?qFsJAh@wy`SVugJc$n7^4fgpmyMAMteX_zfJXJ5sS+L6)3~8m^3%$08 zGwIlaEg?6;SUgBhsIc3mts5crf?S|!dhS>kZR;}>&qFH9$-Eat12ytM3^=LTHkC_a z@u+MkXXCc(L#@8sg>3j&^}Lvkc{^Sph&Ne+7XsFXzy60uN2L?A4^PpOXD?7dX6tGS zYxmqBK%mju8Yf;lfO5QAfn#3y6&nNP!JK@kWoGZ0rpJsBO3syGsFI~dqq$1E5lIor z5N?dUpxdll+PB(M$-He#LFPH3bjY68wDy3r?Qzvh+Vp}+IzfmW(F<9WImlIv{KphB zjs0~b3CW~I4Qaw~LfquDGv@k%bihG|Q_j)*CF10hAVjo7Uc@)50Nb-3f zgls28yv#!EhuroMs@Vj*_L;;IVc*}S+zmpf<9ktZ3zFjq#h~K}zRY-tOi+wSx@wFT zqRiDXiUV37%Q|~#$AafQDR90X(iu-Xp|W?K8srM{4^2fp9))ohy$i@@cgG=OER;h| zPYYhqN&fNJ-SPT^J_`o@4Oj>F9@Qn^54VvpsAXCzCZe2W6>cPmp`M5~a&ecn!$7&n zd{hT2atJiIDZe6I75KZ%W$|DG!We>^v=+QUWr}-o$wF8@w5-3%^FSxE=LOo2&T|eA# z{p1{)92i7apdgQGoc3=3wxonj=yne8kpD00Oh7ea9nV*2h9DM?GFIe^Ee2bJOip8l z1NDQw@Y@iQ-w07L`QUb-dF&NgOIsPx0ADN4V@f+@i1<)4o zUo_7EUzntsi&nBU*|HuBf8M4Prr$I!>T-40`W@UrU28ly)Q!9D7_EcCduri}ppAdj(Y;7WU1(;;_iA&62!Bbns6Ni|ck_jd$kj#g~Sr*XaEvzQ&#yPmRB5-{7NXdhI83ISaG% z_Qa2kzM( zqWoI@td3cE0dbN=cujwzQH;lF0jsAKFMdwx(W;Ca#S*Hir{u?&gElFOM8MPH7QXP( zQY0xG%BYp-htd*qu^&rDj8+(@#mH5?wD<}QW9!63qe#7DTBJ5tQt-CQ{Y@ZoYmO?v z7Yqb?HfWF*L#&mSw((^s>eD*S@a^bsiB<)fNKI_3oIRwL2c5JL=4NVcwW?^GrCw(W zHB~se)vm%mlnP(`JXuTH2{)ljyn?|n%VyP_F>BTw?loLhbD>fJJaGT5*d?BGH=GLTK@%S Cq0tWj literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__pycache__/base_options.cpython-38.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__pycache__/base_options.cpython-38.pyc new file mode 100755 index 0000000000000000000000000000000000000000..bb57622a34f3a4eb634fd2390485b5119c0cb332 GIT binary patch literal 4290 zcmai1ZFAek5eC3lQLmO|#dhQL;mPsr z?}$FcxdnfFTOE&anEWNx%u3v zmkzPkSgV_9v9CP8MYHkG(H@t93WMY}EpBq_jnOP*(jaMqmwDxl*(~uYWY*HjFWp#t z7b!0c;+8okkDe|3?&B9PU{J=sd0-3-2SPCxxB&sy+JkyAt-7)kxiS{%%stuZ_e7wc z&{U>n4a;=W3q0kyzLyC86D){@-~P#vZUuhm@_4!9 z^&&g+`yDTcm+uQ-tlxX=#vAsdr_1#J@ZOrkMK4?qGaHuIVUIJMG@_lf>NvEw<4lkt zMGS^nGn0ieyBi~xUpb%xZtZ|4aK*rQWxO)EsVr`hKi?M35mo{Qzvi$iunAtvVUxh7 z_;e1N0XEC$a@Ywz&rkA&w}qn}PVq%m;2-eQe6dsDXZYE-r6byJ!2KaV$3Gg==(pfz z{NtR|^T00fi#hC)#xCcu-vRqQujjB&fL-BBIn35rBZn=+nydWUnC2@$KIMPNab1VT zH*)sf1oBx9`J8_N`M3D(F{}Ql3aZHO?3w&eIeB-%d4b={VP8Vx{hY*AXz~?*kmGuY zm5=z>IqVx?f9Buju*Vu(%VFOEyTYI3u&2Nnf0o0Z1AD<=W*GkqlBcAT=liv!Y}kyf~9yM14!#jfz9WWu&PQQz@+oSIjYD|lvj{(^}{r@^i= zc6I66(#jJ4RvN4y3ua$q?NGA&PhKV`Y`fJJt&J%30u?*llgV^rc??QUWqUJMtF$RN zBQ=wXZEHevBJ#FZ@O!kH%x`tQRyR|Y?7&{yBcbf2Rph&JgO*6_(Gv?mpc>**xnj_0 z-BnnhgOh37h8%~>FbexBt!d{tO7=x^j*J-+v_<6HjtGQw;la9*5b;QGnhj|UrV4p3qYd4nIrz>pTQ}v>p0lS>WkXBmV z&})e}la4dk6mmU`#e-y4g`Ex^-4Ll4sLHFDxIKpNa`eKE>J*b z`)Uenb=@F9pwZqMCtkXUa=coBV_x_b2Lt87oSfA%vw5cJF(ZVMb7dH+WU1uz$q zeBJ{gJ4q2Qu@L7Ww>*SuwgA%}lh`8c`8$-mLFlx7FG_AfavY%;v^~L>7!Q#NiV;ay zjp0U=xjII1N$aawXAhlN@S-aP?$|@R=jkL=HrJ^^t|0%=QpDp?7-!Ktk8F0g9U{g; zIpp-T;05jEUyt2wuh;Lfpx;}Eb#U)dUGn{K3kic-rln#b%2`(7hJqOCiD)4gcUUV7 zl#9$qb)X`LK!cm|JHl0gzr$P>_tzneA-G9v!5dVjxEDvhyMx0IbjY6^0t^5Tx3WB_ z+O}#te%MJ?^UkD7p+pdeNKsdE2=1T*L3w;8UiYCS@<~(Mun8%FQsw)C>*^)c!?fJ- z!*$nB&Y{tPL1YCA^03Bf{{~=7N;rg0`|vLL|Dw(WR3p~*e1(PxV(}yprOc_g$Z59HfM< zlG>i*V#y$CE0h})@JG@h6E@%czrr*h69Epjg`4o;k`(JIE#dH8C8RaQPfo|V*&uHxz4ychYf>Oe- 
z=;-lPdB4KV19R8v(3+xC9$2f!kLPy_syZm_nY+b7ky{%jd55Uro){DlOw)LMN7V=c zM@da`aGHji8kF{@4~#t%64(#cZh5x?T2ajm%6lfp%AVP_7D31Qe}Yodt7c!#?Vr%n z=k<8-vee}B7M8`SnsbG;^~ z$;C_)O`Z6qF0{it^L4hiMzimY>&8>#nekWc54`S7uKjE-Wx;jc9{aH28x$MEY&$7m zLQ{1qPA=qNn|*~2Ef0aYf|r(t+E%HxrSdD@FTIvjGV!(KVqVyL<=5(GvPiv%*vR6# zreDq|vg5RX-P4K}zo4XORz~e%2{qJH@;ug{If^0?@U*yzce}I{Ny>pTDkQp~w1f=o z#qwifw8A(oMy~3n#aCz(o3mr>A$5#tk(ygcLE9|%Hh{#<8LIbQ&==^`pg~#;u~%B! z!aJd;Pf7}CyfwO;qFF)yQR~_)XAi06K__j5xsjTi%_`busrQ*cB^8ctu&b~S1;Q7< zOwy8e!cEAJS1=f6*{qsVX3d(xfA*a#*Q_dX@~l}cEaK&H&YU3*ARpnEoEXnaEe$W} zC?53cNqaj^T5}v+0DUOqq*IRb8uc}!$SG_o^^s6@7*md3jV0x>q<)IZOH9QG8WpRb oXslk6RQBan8pxSx2{&h}tBLQElvZzNTHGO#G07wYr_8GLKY}{WDF6Tf literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__pycache__/test_options.cpython-37.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/__pycache__/test_options.cpython-37.pyc new file mode 100755 index 0000000000000000000000000000000000000000..c056286dd960cec07b0325ff5d31914eb93a7325 GIT binary patch literal 1483 zcmY*ZOOxC*5SAbFV0Jc;oz0SL2vNvss@CJckxIymB3v>lU{aNuqEIPTd#tR-Hj=y# zv$y1${{Tn+lCNB#iUVg(wCveoMlQ?hPu*X)KFNBryE`D*23LRi>3NrsKdf=td>lN& zS5xc+5l%{$&fIaxGa_8!y(7W{=dCw)h5rK?cm8tFZS2%LPq>B`g^_utBZSt?#nB^t z^*(k$=8hm3>j?K9nR~)R4!_=+fi{=Hme4l)#p1|QeD&Yh8S<-hL4LzhF321AjVB!A zn~rebI>HnFdw-30OjmS8Zwu?A=Lcf2h3%luLH zn#a`I6wdgvNI_4=UhSLn0_vWCn3t)k{RGmY+6@8)8g0#lRMlV-uFS9+X?_Blu56ls zf=P8H2)HgFGE8yY!3Bzq?VpF$trn&YZ1cBqR4(}fw5_m6aNn^CtgE_a6qnf%J>lt5 zQ|S=}6WMA`6B#88iabi@-IO=p4;qj*AT>Enb1oiI4W=;~A3G{F&^CfbJW4>&WiBAC z20@VIr)^bdI9n3IrbrVhgU)&TgsXk*FUjk$G zfH6FSWopMm#(pk&+Rij-n&oVCBTT69fD*e0+RD~<8hbkS=EK+c`&<^qp!{8A< znqnu2a8i;vOUEHkiRcOU4G}Iluia%&c;Az0|1SsArcSf-CD-u0G_uHbgwR&&Ve|+e zy^kG`r6UN|JEHf7EM4IukJs!hK$}YfOM2PtI|?OF@X>!`XUMP41^EpnU65D3SFUi3 zXZoV|+7Yhs-g;Mf-web+47acmX1*i*Eo=w##$tC1yN1}i;=L{Gy79!Gm~3JDn0G@Q zY+>(Z)%T#btg(Pk4IN zDm|iLBCF;!kx{~+ETUvNNO_z6c?;4Oq^6*0!No(W!L&h}W8W$bw5y;Ij}j0xD+Hvq z9|TEp$}*mx%RgO0>f0Dn8+Imj5-o$Oa=XGPt+WBfN^X++Fa~ZaTn$^pAlQSCAI$Rd zy#B-_Ky4O{3q`NSuRKE=AdgUlpqDK5BnVI_TjQ*qyz#Xu6fmPIs6Ul4wHC`*fzLGiAx6l~$TS=bE8W*$pZh^Ymp+OL|+y^#ggZM;poA&i| zr0lod_%fHKt8WG+SIE|kxe$!2Rh2>Bo?X-QW87SP*G4UeQZMW=!MnrLkjB3gwTD#y z{usW%Y8@UcWIWad?qv8?ahls>p~IET%AiauRHwrykiyHy&$vDgzWFw^|4$#!nSiVa zi|!^0?FnP+U{;>19h|~mCi)ON;`q)@XMf|j=XAgNV1b3}+i3Rx;^uf!jTp;$28=a6 zV|WIu)V_}y`?=z2H_}eiE@z{g;DibfD6xB>t!*QxjptK$Iev+s&!y1T({^a1+?t~- RM>G1YoAnDz))I|-{{g5IsNetq literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/base_options.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/base_options.py new file mode 100755 index 0000000..06fb555 --- /dev/null +++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/base_options.py @@ -0,0 +1,89 @@ +### Copyright (C) 2017 NVIDIA Corporation. All rights reserved. +### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). +import argparse +import os +from util import util +import torch + +class BaseOptions(): + def __init__(self): + self.parser = argparse.ArgumentParser() + self.initialized = False + + def initialize(self): + # experiment specifics + self.parser.add_argument('--name', type=str, default='label2face_512p', help='name of the experiment. It decides where to store samples and models') + self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. 
use -1 for CPU') + self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') + self.parser.add_argument('--model', type=str, default='pix2pixHD', help='which model to use') + self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization') + self.parser.add_argument('--use_dropout', action='store_true', help='use dropout for the generator') + self.parser.add_argument('--data_type', default=32, type=int, choices=[8, 16, 32], help="Supported data type i.e. 8, 16, 32 bit") + self.parser.add_argument('--verbose', action='store_true', default=False, help='toggles verbose') + + # input/output sizes + self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size') + self.parser.add_argument('--loadSize', type=int, default=512, help='scale images to this size') + self.parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size') + self.parser.add_argument('--label_nc', type=int, default=19, help='# of input label channels') + self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels') + self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels') + + # for setting inputs + self.parser.add_argument('--dataroot', type=str, default='../Data_preprocessing/') + self.parser.add_argument('--resize_or_crop', type=str, default='scale_width', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]') + self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') + self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation') + self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data') + self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') + + # for displays + self.parser.add_argument('--display_winsize', type=int, default=512, help='display window size') + self.parser.add_argument('--tf_log', action='store_true', help='if specified, use tensorboard logging. 
Requires tensorflow installed') + + # for generator + self.parser.add_argument('--netG', type=str, default='global', help='selects model to use for netG') + self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer') + self.parser.add_argument('--n_downsample_global', type=int, default=4, help='number of downsampling layers in netG') + self.parser.add_argument('--n_blocks_global', type=int, default=4, help='number of residual blocks in the global generator network') + self.parser.add_argument('--n_blocks_local', type=int, default=3, help='number of residual blocks in the local enhancer network') + self.parser.add_argument('--n_local_enhancers', type=int, default=1, help='number of local enhancers to use') + self.parser.add_argument('--niter_fix_global', type=int, default=0, help='number of epochs that we only train the outmost local enhancer') + + self.initialized = True + + def parse(self, save=True): + if not self.initialized: + self.initialize() + self.opt = self.parser.parse_args() + self.opt.isTrain = self.isTrain # train or test + + str_ids = self.opt.gpu_ids.split(',') + self.opt.gpu_ids = [] + for str_id in str_ids: + id = int(str_id) + if id >= 0: + self.opt.gpu_ids.append(id) + + # set gpu ids + # if len(self.opt.gpu_ids) > 0: + # torch.cuda.set_device(self.opt.gpu_ids[0]) + + args = vars(self.opt) + + print('------------ Options -------------') + for k, v in sorted(args.items()): + print('%s: %s' % (str(k), str(v))) + print('-------------- End ----------------') + + # save to the disk + expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name) + util.mkdirs(expr_dir) + if save and not self.opt.continue_train: + file_name = os.path.join(expr_dir, 'opt.txt') + with open(file_name, 'wt') as opt_file: + opt_file.write('------------ Options -------------\n') + for k, v in sorted(args.items()): + opt_file.write('%s: %s\n' % (str(k), str(v))) + opt_file.write('-------------- End ----------------\n') + return self.opt diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/base_options.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/base_options.pyc new file mode 100755 index 0000000000000000000000000000000000000000..163999fdbbeef713182af3718cdb6cce9f41dc29 GIT binary patch literal 5455 zcmcgw+j`r^5nhm#Y$;OYL+m(?(rn~7Ruh4=l(X%+cBDi~Q!6&@d~V&O_#k3QE(HQG zb}34&bd~fvZ`9uPG5QjHfaIS6AVQEYdQ*$Sa>PGxXIcO6{OtJl-~ZYX`ClF1 z_wbl+&;_O$8GLjS9QVm=xTY@6l_02R5d)n*zRT z;-Me4HsWy<#(rR0Jv~g^Bpmg0WLoPgR6F+{_-5DLdeoxt&HLM4poVcP&TQ4%f!)0P zrI{SC^qxnec-{;aGh}7qEI7wTnZ-LB1~E>sW~Mev#kF7e!D%+*RVzRvPrVIENA z4G~-w!J8tuB6``bTm(U*MDSL@tWFz})c#Fnxwi{uU?3UsP6e@4LR@3SyA{OsQ;7E} zh#MuuGV}Ot1@V3f@c|=NDhT%!qFF(-7?C;oCg#efTP@A4G5-%M)E^bLf|Wk5P=8XI z`?NCm89U|kLOM|2s&Kwt%)twozJqC&;CB_=-3czpEdBcm;$BIP`b>_cO zq5iVO|0_mpRuEey#Mg}YLk028Da0QuhzBJ^=E>~};@c7;+j9>qh({$vo8@@S2&5;< zZ-`Y&YfljC+lDfh>qh=ian7{|iLz%f7O1X23awp0KlFE0xYqSM%KLbAEn#3^g^Ag~ zB$3LvD{Ws%wV$X|Q$B3UudNJJM+eHtabKlM+E^MJqhtJG5@Oho0y&HW6&gd?$aQdM**zSf#@WR!GQWj9Xc`or(cdFIjStIlo` z>&O}}(5acnXtPxsHJ31wEh~MNW={o@Bs8SA>++l>i4wiPhG%QTT)^A7uRHxrVA2Jv z7siXzA?H@4W$Q;BWn93E7a3uiGQUp;n1rx9z6GNb!z`xjf~OaxaT1R#A0>W=Xe?!A zHl1hFkuq=Lm2}UMYyh%;Pem&AVPZokj_U@#^*wT^mUw5ccKWe~e45TNeGUyDpN~gL z5~mhsAQDL=Qa4poezqc4KUt9yammv20z|?=g-#O^!6Q zjE<6#%@sG8F{F~~(y_)sI*rX6gzoqu79aXOu!WUvUmIDX=;}xjg}Dqcl#XNv_IeSL zH?0?S^hIJzysyzV%_QMT+3EXHMB6WuhNp-EXWuCb7HL9}X$n6XCX}L9k`Ku?}pXs~~y@1xFEq z5_%y8Gu!!Al69p;ILg1|{19=>@Q?@&OB~C6uR#5f3xkRTrHjXIg_sYZsz$tHE3QcE 
z@4`{}nwqwEpi*+j5EGfhm06K7T$PSBHf=_L*P%?YP&^C|@{JY6UN_Wxih0D{Zyf7(p2Z=?y=br2Mig9EJh4Cyb<{R zzQ?c1dSrf7fwXhG<{$We;P30%GGdN$*hz^tcpyGfcDb>i}X-gBG0zf^AQ9_0P<`X`XO$R z+rm%@W&=G@_uR-TQtthj(m#wKA#OFGiiBpb2{YVC07~0}(B!R#tM7VTs{5PIvPNIpzt@u*IkU3rmJd6=4T-uyYEA+1c&P9PQL?g<7ei93DJ^^qsWIKOGb*%TX-)kP8; z#_+<)WtM3_waMsq4vilKUJ-d5D#oUH$k;YHo3B9FJig_M;RzB(o5~Ho0JZ6`UB>gW zE4VUR*L8JUzv2LwH4JFf9hC&p_sgqnhYtgaAy7{X62YCN_Tf zNF1ROJ}VBv3m`DU-J~XVr$p-I?1;+@jRW93_XPf*qKY7ac zMF%%C@Hs2&MKO3y9G+!a4jqyHi*HyBF}Nh=@G@b5dn1+t(TuRL-i!#aU`_no6GxDJ zRt#`~%h>F%pO(8dkvqqN%?qVxqwG0PbJ#3P_B0UU(VuchHBcUtU$?mwUD4)|+c~9W+!gVK3`KH~sxMZW!w5Ld4yx1(0W~^vSKw! zR1|Qi@8Q=F9>dv!vXT=4lG4z$Unex_i1b41Ccf=+XFw%8)s&@DoeFrKq&r3%bJ;?} zT`KJP=NCFnbu>~aj7dV`7()tPaE#wj6uXgg4Nn?>FT36#SMgPY@=?}jsZd=J4>kc6 zue-bf_PDyoW4TW#%*T06UCM8?%N+I5WtNjWes4EHp literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/test_options.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/test_options.py new file mode 100755 index 0000000..27c5e77 --- /dev/null +++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/test_options.py @@ -0,0 +1,19 @@ +### Copyright (C) 2017 NVIDIA Corporation. All rights reserved. +### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). +from .base_options import BaseOptions + +class TestOptions(BaseOptions): + def initialize(self): + BaseOptions.initialize(self) + self.parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.') + self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') + self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images') + self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') + self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? 
set to latest to use latest cached model') + self.parser.add_argument('--how_many', type=int, default=1000, help='how many test images to run') + self.parser.add_argument('--cluster_path', type=str, default='features_clustered_010.npy', help='the path for clustered results of encoded features') + self.parser.add_argument('--use_encoded_image', action='store_true', help='if specified, encode the real image to get the feature map') + self.parser.add_argument("--export_onnx", type=str, help="export ONNX model to a given file") + self.parser.add_argument("--engine", type=str, help="run serialized TRT engine") + self.parser.add_argument("--onnx", type=str, help="run ONNX model via TRT") + self.isTrain = False diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/test_options.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/test_options.pyc new file mode 100755 index 0000000000000000000000000000000000000000..55de9f3c4242e75c8f3533f2c5591e1bc96b42f5 GIT binary patch literal 1912 zcmcgt+m75s5N*#SyU8XSvV;V3Bc0@Bgx2H0BLYGa1iUa2n?-^M(dxL{Zzx|S{;WNhbOAPu5lfy4jl~RjD zy-rl(M#?9uNR%6^zm)n@sh?htl)Io_O)df+5=;}G;uTEKE%}#i3{6e4-}e-U{rPbW zevUyqn7rasa43Ak@7^o%l{1FNC5#ss^aplPse?rQ4ieyn$H7P)jF~Ms#tTd^USwua zJ1jP^7_ZY2)(GUoHQj?1onF&DT+uxeUEJhs8zf7i z+>cR*8Es-t(>kKC$T^B~+Iry7HNLRRD#kI^p)N#O`l_WH5IojRq1i_J6T4_DPgB~4 zJWV@Kup;9^M_Z6gvq6%HA=x8OdJyT-cYaFO#L!af^JqGYZkt1b&p%J;J|4YBi-ynb zt#w$~xu+q3wnV4)0e}_VgjwutsCV^|t#*a#uJ$n>SRR)lFO6@Tyo?*bR+fXI&#@Gh z7L|^Tt{Ur}YWS9oz<9MJuX`h%+r0EnFB<172{}#6=GZK3eH#8o_UJCWLZVsHC#|Q> ze!68n#GhA7ioP?gjV0ZIN#Si=B33hKyfaTeelo4wQ~CtzSbEJ^T{N9Ohn%MtdGd7* zPEMaWk^-bDsGC7$gotFo=6uT|myBZ5c@w*(r%$mBh33+PB6x=!ic`&}civWg6*!VP zrwG#FG(_7D$@V90(?!$N^$9(M!+x({&SpRMSz~jn=i$iLx(H|{eodsQujirmbQ>F_ z2{q_~t-`N}Xn$`XL`2KgOH=WEYXGh|J_?q>lgm);VJHbO)FJk+B=pvH@Exy0ZJjf= zn=co>KI{C5mk^49KIkPBJCS^CD|7|lltBC3YW-W2?-2Q_nBb{oqMxzKe!vlw9d
gcMHN$4w zp6+qcI1kb`D%nmZ$)lC;y`=w6_$o)vtueJ-cw;0R#-MeV6}IB&f8uY6J^wFoifM#t jBboIyxf*Y9r_bV)>g_T1Z4;D?eb(pb3r?XBy*~O6P^H(! literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/train_options.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/train_options.py new file mode 100755 index 0000000..9976ade --- /dev/null +++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/options/train_options.py @@ -0,0 +1,36 @@ +### Copyright (C) 2017 NVIDIA Corporation. All rights reserved. +### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). +from .base_options import BaseOptions + +class TrainOptions(BaseOptions): + def initialize(self): + BaseOptions.initialize(self) + # for displays + self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen') + self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') + self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results') + self.parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs') + self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') + self.parser.add_argument('--debug', action='store_true', help='only do one epoch and displays at each iteration') + + # for training + self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') + self.parser.add_argument('--load_pretrain', type=str, default='./checkpoints/label2face_512p', help='load the pretrained model from the specified location') + self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? 
set to latest to use latest cached model') + self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') + self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') + self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') + self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') + self.parser.add_argument('--lr', type=float, default=0.00005, help='initial learning rate for adam') + + # for discriminators + self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use') + self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers') + self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') + self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') + self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss') + self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss') + self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN') + self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images') + + self.isTrain = True diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__init__.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__init__.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__init__.pyc new file mode 100755 index 0000000000000000000000000000000000000000..fbff9c6ee08348ee0015343a680d255f69f0e892 GIT binary patch literal 172 zcmZSn%*(|kSRI$l00oRd+5w1*S%5?e14FO|NW@PANHCxg#i2kk{m|mnqGJ8*qRf)a z?Be{~#5{eM{PMh<{KS-E{q)S-0^Nd~()7%{Vtwb-oYW*o-^Aig`k0xrSoI1v39M8E(ekl_Ht#VkM~g&~+hlhJP_LlH-spDW@3 literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/__init__.cpython-38.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/__init__.cpython-38.pyc new file mode 100755 index 0000000000000000000000000000000000000000..41a1292e536fe257734fb2ba7820b6d52f0cdc91 GIT binary patch literal 176 zcmWIL<>g`k0xrSoI1v39L?8o3AjbiSi&=m~3PUi1CZpdH!>k+6AqTL<+ q;!{#{^Yu$hGIR9f<1_OzOXB183My}L*yQG?l;)(`fvo!s#0&uArz`aU literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/image_pool.cpython-37.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/image_pool.cpython-37.pyc new file mode 100755 index 0000000000000000000000000000000000000000..d35fc3b79b28fd8b5e4a4eaa6062918714bae70b GIT binary patch literal 1078 zcmYjQOK;Oa5Z+xs;-pC^y-=ziIB@Zy@gJz5FR5@K0wJhm307{_HE#UMt}SSAPVG0_;#ot>Tec3wUh40;4s@8Q>v2PPqZ@NlyX5cYu72$&#( zCd6-d>Qgwggr$aWP;yL!A{9K#4`dviCgB7%H;aL=2c))v zg#__@Ca7Rn#5YwB5QVSvJei;yF5Hn0fz%UVHLVFJH93bcr>5K%c%~6( zI1>b5*jQ4qa}C`s0Ew%39%?76QXZ#~(sqn*D3@tdg~^$OT9BVaI^S24&9(bxaqSYU8nO0cinBg$F7CLkOgVTx$beE=Auea z?sGn?f~38X_^--MWH>DM7)B!Tf5o{~$Q0wj9QBak6Bm>cuqOrP% V)j&6?IlFBf&#)s3iwvlc*FVNe`NseN literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/image_pool.cpython-38.pyc 
b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/image_pool.cpython-38.pyc new file mode 100755 index 0000000000000000000000000000000000000000..2590683aa133cdc29268206d978b7a85edb1df96 GIT binary patch literal 1090 zcmY*Y&2G~`5Z+z?#7UDo&9EfHW>5>jNbh(AD@|o{KCcNFko>AqDG(u z5i}uwJ5!&+nk6hXe1noxA`D^P5Mf?3-xTx>ajjc8L&h%E&f7r7!Fdu+(6c!hEDk}` z9;lEYrq2Ww?1uP8iwI~IpxL_nA`PPORh}mkJhmQaqa1^%N1$q26HaP!3F%HvxhHT< zBk*u02tcv1q!8yCx?2DmSMfa5PFAHnP9vr581GRo)20fO3kkfC??gH<{eI`2sZc6! zD&sPqsyq!c?8>3 zm-K{u__{Dkwlr!(&=p%45NBfSl*spJ3-$m>LZ7v?Yhy(zsoC0^4a?5b!P>xCvo?hR zyH%!URT$1giPTACLfb)6gqhH`3^I|Yx>IHGMJ~ZW zN1T<~nk0D^N^Bf$OoGzwNaV3O?hj;GRx*Qjz(&wkGizG--4>6>LK|gTG(FL(E&^GQ zh`ibYQ#}SnpnfKNJ=$M?tWO=fOSjPn_70uj`(NGW^w_nq8L}V^IoCbT(_B;u+I`Mv zRgknh68~E{M1{?=p)eDP|1I{dLgi4$Zal6fcW`$AN^dv(je@GoBN>Q>iY96UtATEw S<{Y$nJi&@6BpFcQReu0A1o&G3 literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/util.cpython-37.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/__pycache__/util.cpython-37.pyc new file mode 100755 index 0000000000000000000000000000000000000000..6398800be48cf2f84b6b0048377b494a7b7c64cc GIT binary patch literal 3751 zcmZu!&2J<}6|bu9p6MBnyH&lu!GRzlMF=8<>;p(}M4UPEH{`bCpeU`Dm(#P>*16jwLegq(yr5QtV>{&5EqHWk$R0 zV6>x3zuK;j=iIg%I9oO8bcL+Q>H%-p>ATCND{J_gl65(Cz}gMjkkc5a<%~RsaYkN} z=j9wY&dCe%A}F)>nf#2LKVWh`IKOYT=cM&b)>b%kSd%?qwMO=6syj8q+@ut55$XL!YzAw~TkBzL|mokxAIos#^{2qI>G_rH+(9WEk z<@O@Wc`o*?BV2oD`5l(y_?#V9j@V%puX}{`xh44}CdFmoS?#cnZ%~dbn%SC?*hBJd zkseIGoNZfu3@#}012KkH*IG1t;d+&gf>Al!61mGu4O@-ChFC&udU8tsGV+<_+X8P{lT6V@j%<1C>&@fee4f{ zmeX#8q&Tl!rYm7T+ZzPB7ALCjN8yt|Pn8Vi20Z*NQwtF2>4Vj`(?F%G+bYb$?KJ88 z@#?+g{TRo|bhQ`u2QLqzVGkm&-VLH)^Y%CVbo=GkAFR@N|MrF_gMP9)%)-ck8-qPH zjeQ$<(|G{K>)hsZdU~m991i6LbhRKbo9Wpby0nB6!qsYA;!tJWtK`@YE zzqH$5lJ!>Ah^S`3pw0nkJ0&ezeScTGcatbl_*95L*0S4;$lna2vGUv1*fZbiB4*oD z<($#GQF#d#Y0t*k%7*0`P|`U7#$70_!DpeihH$wn)KySBk8BQ2AODa&plMV*s&iE1sJ>Bc!*7-<8&x%`XQ*=v za#1eH%W_#>kyquV-Zktcw&tIjSOaRgksZ+=3s9+?CHQ!WZSUd3AxUvD=it?D5RCGc$JXjAyQ&nZcg2 zd5euIJ0DXSq_QYK+Z8uaBn|$&!I3jTe}V9w?}CrK?Zu747~3k-B?XSbT2_$&FTQ?? 
z@ZcC%6#L-#K89i)Kz#)OgnttLMS$Y(CE+RIzawy168=W`&j|duBzz5nBHI~&J-US7 z11Nq80Ky*${{>O+mxMnM;nzfX-4N7mB1j^TxX65luuIf-NuZTx`FIcYRU)hr2W4KI z@@FD!5Fj0Zu#KVk2@&W*ApD#N{~`h@0;iDF#Se%umV=fTR1=Du09f(~O`g)}39jBU z9H$7h8#XoEZ_~Q*T_(%bo2NL4TEr)Wg7*90CF_QxcppPCMW9IpbYYUZ2SwSbAfP@^ z*nbe7#lHbcDJXHM3P7u(&STI{SzjCzl-fagMB1zq7e!>NruhcCmcBnwmx!G!&3l?} zY7t7!w{!(r-V5cKRH~;+WgA%(`J#5RM0FnPIu&NmbO)6B(je-V32uRoHNd1g z*Ymkzj_ z17YoMAsd+ndJB1IUm#~>%h@n)W(t7uCgWUC`(;S1A^=^#vzG-AO+So2IE*5%^eXaLqf67J!3)XZ z+w~rroU&Ijd$gyF@01kn7;n;cAKmL&PlJk4mm<&eu}?~^9XZ*un1>V1w1f$G(c{DtaOM6J^w7deJfvt+(EI)3kl+MLLPDQ+BiD@1|!n{gCNNOl%sr u)Rkm&E9hjU!~Y-gw0pC>)f%Ojlo}_~ojDftkO__QC$l z*|gQi*abm;C??SATc->yJg>G^a&S06qiaHyT^zCR2)PPY}3 z;JoUXu7z>FJq&asNmcAe;ln`BR2wP+JbcX5G8lStd*!VxP}$0+3iEI?OJhG-xskq~ z;5wPD48nN$;xHNw!1BuVAPUxB`>vmDzWD9iE7ae7ZOxNGoUV-WFf!osa9hoSZVN42 z0${w!Z9dPJ2|6u4$ItR5VfGwSGFdJ)qvUrnC^(5;Es(OIL}8T2p*R7>ks_@ zP|0e+Hf+hEJo_V9I@G~n0x$_O46_?*H|lms)np1#l~Il&^STSut8WIuP=;}(yT7KZ zow{LD&0&K&2B7VXBx%L|1MOZPBBd#Y*|xc#QvE06b|4R%y&<=;!yljmbESj`G^1Bc55lgi{%j zBn7MU=zC=9(~tPv=j|0t_I>uE89|}h#e%_OTVoFYg{t7A%ZXe0a>chIadd3uqEih3 zZDqOAPP{2Y)e+hub~9~Lca+td*_6G6QyHCSd=VmT33UdYEBxf5O;)5Ww zC5RNkWh%%CLt3PndI39iEqDMA&a@l*n*o^0I@~NA;YK}=&0j;CNUX&TrXoU4DuWlKf9uGCtMVC%Fn-M$chF zeHHMK=E%BnjBB|q zpEE1#<2s^Q4Ux<(ELfx{T=~UQuuAz{QD0_P*=rQj?8(acBP({{Oja%&SpiMizQ)G2 zEu=I`mGb$%xPruK#$PgHL{Z>hpz&5wkhNW0E6+n*e0^(PM_=FI548m|-{)iA0KInK!IikE`c9fse;=W)H3Hf_w$J0eaZMx$J zbZ+vN{o`uw$dNL6iYN{1pE*L#3D)07SI!V<69Sc)q>4gObSep`uh8hfaLn>Q02Nyl zG*k_sQ&Y#$X{X9Hj&VrsAYCGKHfa}mW2d3{8Y-FCAF7kIoh!`;ny+gSO3gQP4e{O! z<&kizXHBx~Q3l4*8#tN013*c0feQqc{onsT`?qlHPu%(K$OIL)-K+Ua>PO; zGbQ;3V%UyAtouI7bVL~{ur~#jUU*FDyyQacMWgSM5SJ@=lyOE`-`C!9CAiB)~%B8C6 zF*Ld}#_}qRc8o1*J4QYHIU+_ri#*Qu#fIyUYIrCdVp@ literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/image_pool.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/image_pool.py new file mode 100755 index 0000000..63e1877 --- /dev/null +++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/image_pool.py @@ -0,0 +1,31 @@ +import random +import torch +from torch.autograd import Variable +class ImagePool(): + def __init__(self, pool_size): + self.pool_size = pool_size + if self.pool_size > 0: + self.num_imgs = 0 + self.images = [] + + def query(self, images): + if self.pool_size == 0: + return images + return_images = [] + for image in images.data: + image = torch.unsqueeze(image, 0) + if self.num_imgs < self.pool_size: + self.num_imgs = self.num_imgs + 1 + self.images.append(image) + return_images.append(image) + else: + p = random.uniform(0, 1) + if p > 0.5: + random_id = random.randint(0, self.pool_size-1) + tmp = self.images[random_id].clone() + self.images[random_id] = image + return_images.append(tmp) + else: + return_images.append(image) + return_images = Variable(torch.cat(return_images, 0)) + return return_images diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/image_pool.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/image_pool.pyc new file mode 100755 index 0000000000000000000000000000000000000000..0efab8651d625c740d8489412e531cf5acde1c2f GIT binary patch literal 1487 zcmcgr&2G~`5T3P@v`Jg&j}R?aIQY_9+$&0HCE!2>j-VnEthVts-NatI-nF15IhE(* z&W!_azyrX{IJ6RXYJ11CKQr^qH?!fdm0oY{%f}(ipAi2~akxDcDSiSnfWfscU?6xs zfGmJKgh2=*4Iynq+JdhDR{-w^EwbxJ9rR&-<8u@VRQ)1~cd9b#B-6f&lb5*~>DNV( zUB{<$&v3X0C^SaqXy|tkkWW%VNcvHL+d~q6#{n5VMllsI1(2u!j5&cx3#x6hrgZ@$ z-jV@W&Z_ZsWY82MN}rLH2ueD0sTs7bb7}IC^PCZZc8tKc@VwL6X+kg}9F1_tsKsxc zu3S8+j5iZk-t9cQ65{X7iOafcwN}v)6`khs>@7h$;HGo}51fzX0P! 
za)>766D_bmLZGtz$XYnv*sJW=V3YIw$-QCoLjYkjE`YL~EMARwsA>Jh>SvAJOrIVl_SqNTq#US!_M( zIJRTe6XcT7G@+zP*^ST3TWUhWi8NFfOQ^U-yb%xV~E?9C8#Z4Zp{m&bf5S@IJNpwWF(*);QP=3iG3V(+L hwZV?6eKD$3%JpOGTSUwB?KXrxA>@uYTSlf=#UJ89IBfs` literal 0 HcmV?d00001 diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/util.py b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/util.py new file mode 100755 index 0000000..008fef3 --- /dev/null +++ b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/util.py @@ -0,0 +1,107 @@ +from __future__ import print_function + +print ('?') +import torch +import numpy as np +from PIL import Image +# import numpy as np +import os + +# Converts a Tensor into a Numpy array +# |imtype|: the desired type of the converted numpy array +def tensor2im(image_tensor, imtype=np.uint8, normalize=True): + if isinstance(image_tensor, list): + image_numpy = [] + for i in range(len(image_tensor)): + image_numpy.append(tensor2im(image_tensor[i], imtype, normalize)) + return image_numpy + image_numpy = image_tensor.cpu().float().numpy() + #if normalize: + # image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 + #else: + # image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0 + image_numpy = (image_numpy + 1) / 2.0 + image_numpy = np.clip(image_numpy, 0, 1) + if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3: + image_numpy = image_numpy[:,:,0] + + return image_numpy + +# Converts a one-hot tensor into a colorful label map +def tensor2label(label_tensor, n_label, imtype=np.uint8): + if n_label == 0: + return tensor2im(label_tensor, imtype) + label_tensor = label_tensor.cpu().float() + if label_tensor.size()[0] > 1: + label_tensor = label_tensor.max(0, keepdim=True)[1] + label_tensor = Colorize(n_label)(label_tensor) + #label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0)) + label_numpy = label_tensor.numpy() + label_numpy = label_numpy / 255.0 + + return label_numpy + +def save_image(image_numpy, image_path): + image_pil = Image.fromarray(image_numpy) + image_pil.save(image_path) + +def mkdirs(paths): + if isinstance(paths, list) and not isinstance(paths, str): + for path in paths: + mkdir(path) + else: + mkdir(paths) + +def mkdir(path): + if not os.path.exists(path): + os.makedirs(path) + +############################################################################### +# Code from +# https://github.com/ycszen/pytorch-seg/blob/master/transform.py +# Modified so it complies with the Citscape label map colors +############################################################################### +def uint82bin(n, count=8): + """returns the binary of integer n, count refers to amount of bits""" + return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)]) + +def labelcolormap(N): + if N == 35: # cityscape + cmap = np.array([( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), (111, 74, 0), ( 81, 0, 81), + (128, 64,128), (244, 35,232), (250,170,160), (230,150,140), ( 70, 70, 70), (102,102,156), (190,153,153), + (180,165,180), (150,100,100), (150,120, 90), (153,153,153), (153,153,153), (250,170, 30), (220,220, 0), + (107,142, 35), (152,251,152), ( 70,130,180), (220, 20, 60), (255, 0, 0), ( 0, 0,142), ( 0, 0, 70), + ( 0, 60,100), ( 0, 0, 90), ( 0, 0,110), ( 0, 80,100), ( 0, 0,230), (119, 11, 32), ( 0, 0,142)], + dtype=np.uint8) + else: + cmap = np.zeros((N, 3), dtype=np.uint8) + for i in range(N): + r, g, b = 0, 0, 0 + id = i + for j in range(7): + str_id = uint82bin(id) + r = r ^ (np.uint8(str_id[-1]) << (7-j)) + g = g ^ (np.uint8(str_id[-2]) << (7-j)) + b = b ^ 
(np.uint8(str_id[-3]) << (7-j)) + id = id >> 3 + cmap[i, 0] = r + cmap[i, 1] = g + cmap[i, 2] = b + return cmap + +class Colorize(object): + def __init__(self, n=35): + self.cmap = labelcolormap(n) + self.cmap = torch.from_numpy(self.cmap[:n]) + + def __call__(self, gray_image): + size = gray_image.size() + color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0) + + for label in range(0, len(self.cmap)): + mask = (label == gray_image[0]).cpu() + color_image[0][mask] = self.cmap[label][0] + color_image[1][mask] = self.cmap[label][1] + color_image[2][mask] = self.cmap[label][2] + + return color_image diff --git a/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/util.pyc b/gimp-plugins/CelebAMask-HQ/MaskGAN_demo/util/util.pyc new file mode 100755 index 0000000000000000000000000000000000000000..60ddcf1af58eaf5ce7497f1475ba2c2b2e54905a GIT binary patch literal 5383 zcmcgw&u?5;6+ZXPc>E*dAGMR#(>BdCQO!`2x&%rJII)wMrfFJ}Jg2I#>W-f0zH#1o z=FRxMH`K0dQHT*jLWmV=r7qa8VciWN@n66Ku|TR+v0?)&5R~sbcRXWPVd;82-`scZ z&vVXq?>YD7{xLrG_gg=BrzX9>9DbL?Ncvk8EdGr&MOsGIjF`|9>&ZiuSd)`xPS$eT z3gx`44QV+q&4RRsWo@|MZba5bBpi~)sDxu;hvdMMwNpKbEyw}JFNz(O1Ch0Hu_IcT z5Id@cNwMg4N}ElIEvoP_vEvd>OWlZ_pyZ**_ts{(uGPvURv&$azluRAn*el@D9)<& zZd}WvcAQbZxZ53pQOHF0fJPpAlB~+6$bL?Cu1MxdmSYtx zg8$|93$mY=y__`ik`1w9FE6D%BYQAfPVKf`k+;xb6?KgQSKJs@93f&<)WPCUz{+Rv zl#NPbOl2y@7_LdEBYspU5N78kHg-8{=ad#c3AAwJ^)$u^FWqv=m8#cr%0{APVD*BL-^2I8E#yEBS^O{y?Sq+zqhWhRBhO5&v&z^sqjK)*B`;CTx0q;K+L2m zn38$c3y>C5#VZI)?VfP`>q;mRFW35oqTX=X%{S!+72b!;Xzw zjv0S#FIA|gD(lm=K1rcIOKEx%pcU+>;C|8#Gq%3jZnhKD`gBZ%x~nsr!A96TbS9^Y zt19xR*n(|c3w5G;PT^$Srz`65RHr?M`O|5Dn4%ds)8=vWw3+sbX3VRRsnnFe>c%n( zI3OB*Gke_baGSF|Ps&w2WmYQe=d+`dww{1`NfPWj69(T8T?0<+_M0-k6J!sxz1skt zsCnY{pkGt0r>aw4XESW2@*&xO{OjgpF*k))-4QZ*qPTEwrQ!CgCp?#`^ z(iSJJuzvw7xPotw0@Eg<&i#GB1*)#sP{29V&`5$<$)nAaS)5DwN90;we^T}lFx3#) zA&aP8!r}CeC);OvO0DYgkS=1JB+R-=oR+f(VR<8pgJiedu9p$M!_6=$#}~`Bb~nz- zNm$20Ue4O(prryd+K6B(3?UlSTna$;C_@_UDE3Jj|2#mqo8|$r5=#fw=iQUGs&42m z6ngnG^jpGWQhHq+hj_S>I%?Msz$j3s7bbqutEcTmX zza{qDVi&|-6?;wWqSz&|*TvotdsFPP*zbsaMeHrHuZq1b_BFA0#J(=}4Y4a?zbp2x z*n47qu}F3Q`U- zTs5X2I#T_@=c=)Sf?(~7SjF}ynvsF7SQR)4eHJiw?|I%mql2q|QJ=Sek3MAVoAA;~ z#4X}NXPc$h2uw7h_X$gce-i#i_!r?fgkKRpCH#``Gr}u`I^iS29|#{3J|_H{fa5}< z9l{#n8Ny!(TZEqzJ|O&?@FT)$!UEwl0*)Ms?h@W5Fxp0CfNlcIq)HRNh;dmrQ1o9U z($RkiJl-@n)Xn3RhRCUZGO~e9Igy(~t*i60h3=Lf7w6}QoyxS9wicQG-EmSM>8s;CYJIHGn~+~bNw$J@GIzj^$`dTVo!M1ujF7J`Z4O`xNdb{5)kQ zy=Tmf-dkoMKc}AwGlkD-GcPv`kqJ?c_ET!jVa8ejgXaM1S$JPx0jRg?QL|ZfTZkr6H|kx|Z9nFI+HDipB6s`nIiB7KmBLP_V=1rI%Pm~Y zzaR(K0w_=ou%?t(RqmoW?;I|OC4A0$?uP5Q5C^TWT2*zb)mGbfn|Sn>(8<3A7%W&z zCv&ycz56z1Q#uw*F<;?n-dj|?i!_^yg2?SAeMhF=(+j&h54mga-hN$guKt(s;9n*@ zL7=Ms(}ZUUa|CXApC_blRXy(78;!7*>1w_Zz4fxR;EaBaEKQ#TOnGDQSJ6y)#r*Wl Q-?}9~a;rwg3PC literal 0 HcmV?d00001 diff --git a/gimp-plugins/DeblurGANv2/README.md b/gimp-plugins/DeblurGANv2/README.md new file mode 100644 index 0000000..5cb8cac --- /dev/null +++ b/gimp-plugins/DeblurGANv2/README.md @@ -0,0 +1,131 @@ +# DeblurGAN-v2: Deblurring (Orders-of-Magnitude) Faster and Better + +Code for this paper [DeblurGAN-v2: Deblurring (Orders-of-Magnitude) Faster and Better](https://arxiv.org/abs/1908.03826) + +Orest Kupyn, Tetiana Martyniuk, Junru Wu, Zhangyang Wang + +In ICCV 2019 + +## Overview + +We present a new end-to-end generative adversarial network (GAN) for single image motion deblurring, named +DeblurGAN-v2, which considerably boosts state-of-the-art deblurring efficiency, quality, and flexibility. DeblurGAN-v2 +is based on a relativistic conditional GAN with a double-scale discriminator. 
For the first time, we introduce the +Feature Pyramid Network into deblurring, as a core building block in the generator of DeblurGAN-v2. It can flexibly +work with a wide range of backbones, to navigate the balance between performance and efficiency. The plug-in of +sophisticated backbones (e.g., Inception-ResNet-v2) can lead to solid state-of-the-art deblurring. Meanwhile, +with light-weight backbones (e.g., MobileNet and its variants), DeblurGAN-v2 reaches 10-100 times faster than +the nearest competitors, while maintaining close to state-of-the-art results, implying the option of real-time +video deblurring. We demonstrate that DeblurGAN-v2 obtains very competitive performance on several popular +benchmarks, in terms of deblurring quality (both objective and subjective), as well as efficiency. Besides, +we show the architecture to be effective for general image restoration tasks too. + + + +![](./doc_images/kohler_visual.png) +![](./doc_images/restore_visual.png) +![](./doc_images/gopro_table.png) +![](./doc_images/lai_table.png) + + + +## DeblurGAN-v2 Architecture + +![](./doc_images/pipeline.jpg) + + + + + +## Datasets + +The datasets for training can be downloaded via the links below: +- [DVD](https://drive.google.com/file/d/1bpj9pCcZR_6-AHb5aNnev5lILQbH8GMZ/view) +- [GoPro](https://drive.google.com/file/d/1KStHiZn5TNm2mo3OLZLjnRvd0vVFCI0W/view) +- [NFS](https://drive.google.com/file/d/1Ut7qbQOrsTZCUJA_mJLptRMipD8sJzjy/view) + +## Training + +#### Command + +```python train.py``` + +training script will load config under config/config.yaml + +#### Tensorboard visualization + +![](./doc_images/tensorboard2.png) + +## Testing + +To test on a single image, + +```python predict.py IMAGE_NAME.jpg``` + +By default, the name of the pretrained model used by Predictor is 'best_fpn.h5'. One can change it in the code ('weights_path' argument). It assumes that the fpn_inception backbone is used. If you want to try it with different backbone pretrain, please specify it also under ['model']['g_name'] in config/config.yaml. + +## Pre-trained models + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Dataset | G Model | D Model | Loss Type | PSNR / SSIM | Link |
| --- | --- | --- | --- | --- | --- |
| GoPro Test Dataset | InceptionResNet-v2 | double_gan | ragan-ls | 29.55 / 0.934 | https://drive.google.com/open?id=1UXcsRVW-6KF23_TNzxw-xC0SzaMfXOaR |
| GoPro Test Dataset | MobileNet | double_gan | ragan-ls | 28.17 / 0.925 | https://drive.google.com/open?id=1JhnT4BBeKBBSLqTo6UsJ13HeBXevarrU |
| GoPro Test Dataset | MobileNet-DSC | double_gan | ragan-ls | 28.03 / 0.922 | |
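
For example, to try the MobileNet checkpoint from the table above, one would point the Predictor's 'weights_path' argument at the downloaded file and, as noted in the Testing section, change `['model']['g_name']` in config/config.yaml before running `python predict.py IMAGE_NAME.jpg`. A minimal sketch of that config change follows; the value `fpn_mobilenet` is an assumption inferred from `models/fpn_mobilenet.py` shipped in this plugin (the default in the bundled config is `fpn_inception`):

```yaml
# config/config.yaml (sketch) -- backbone switch described in the Testing section.
# 'fpn_mobilenet' is assumed from models/fpn_mobilenet.py; default here is 'fpn_inception'.
model:
  g_name: fpn_mobilenet   # generator backbone the Predictor will build
  d_name: double_gan      # matches the 'D Model' column in the table above
```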
+ +## Parent Repository + +The code was taken from https://github.com/KupynOrest/RestoreGAN . This repository contains flexible pipelines for different Image Restoration tasks. + +## Citation + +If you use this code for your research, please cite our paper. + +``` +​``` +@InProceedings{Kupyn_2019_ICCV, +author = {Orest Kupyn and Tetiana Martyniuk and Junru Wu and Zhangyang Wang}, +title = {DeblurGAN-v2: Deblurring (Orders-of-Magnitude) Faster and Better}, +booktitle = {The IEEE International Conference on Computer Vision (ICCV)}, +month = {Oct}, +year = {2019} +} +​``` +``` + diff --git a/gimp-plugins/DeblurGANv2/adversarial_trainer.py b/gimp-plugins/DeblurGANv2/adversarial_trainer.py new file mode 100644 index 0000000..ccc6fbf --- /dev/null +++ b/gimp-plugins/DeblurGANv2/adversarial_trainer.py @@ -0,0 +1,99 @@ +import torch +import copy + + +class GANFactory: + factories = {} + + def __init__(self): + pass + + def add_factory(gan_id, model_factory): + GANFactory.factories.put[gan_id] = model_factory + + add_factory = staticmethod(add_factory) + + # A Template Method: + + def create_model(gan_id, net_d=None, criterion=None): + if gan_id not in GANFactory.factories: + GANFactory.factories[gan_id] = \ + eval(gan_id + '.Factory()') + return GANFactory.factories[gan_id].create(net_d, criterion) + + create_model = staticmethod(create_model) + + +class GANTrainer(object): + def __init__(self, net_d, criterion): + self.net_d = net_d + self.criterion = criterion + + def loss_d(self, pred, gt): + pass + + def loss_g(self, pred, gt): + pass + + def get_params(self): + pass + + +class NoGAN(GANTrainer): + def __init__(self, net_d, criterion): + GANTrainer.__init__(self, net_d, criterion) + + def loss_d(self, pred, gt): + return [0] + + def loss_g(self, pred, gt): + return 0 + + def get_params(self): + return [torch.nn.Parameter(torch.Tensor(1))] + + class Factory: + @staticmethod + def create(net_d, criterion): return NoGAN(net_d, criterion) + + +class SingleGAN(GANTrainer): + def __init__(self, net_d, criterion): + GANTrainer.__init__(self, net_d, criterion) + self.net_d = self.net_d.cuda() + + def loss_d(self, pred, gt): + return self.criterion(self.net_d, pred, gt) + + def loss_g(self, pred, gt): + return self.criterion.get_g_loss(self.net_d, pred, gt) + + def get_params(self): + return self.net_d.parameters() + + class Factory: + @staticmethod + def create(net_d, criterion): return SingleGAN(net_d, criterion) + + +class DoubleGAN(GANTrainer): + def __init__(self, net_d, criterion): + GANTrainer.__init__(self, net_d, criterion) + self.patch_d = net_d['patch'].cuda() + self.full_d = net_d['full'].cuda() + self.full_criterion = copy.deepcopy(criterion) + + def loss_d(self, pred, gt): + return (self.criterion(self.patch_d, pred, gt) + self.full_criterion(self.full_d, pred, gt)) / 2 + + def loss_g(self, pred, gt): + return (self.criterion.get_g_loss(self.patch_d, pred, gt) + self.full_criterion.get_g_loss(self.full_d, pred, + gt)) / 2 + + def get_params(self): + return list(self.patch_d.parameters()) + list(self.full_d.parameters()) + + class Factory: + @staticmethod + def create(net_d, criterion): return DoubleGAN(net_d, criterion) + diff --git a/gimp-plugins/DeblurGANv2/aug.py b/gimp-plugins/DeblurGANv2/aug.py new file mode 100644 index 0000000..a57cdd0 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/aug.py @@ -0,0 +1,93 @@ +from typing import List + +import albumentations as albu + + +def get_transforms(size, scope = 'geometric', crop='random'): + augs = {'strong': albu.Compose([albu.HorizontalFlip(), 
+ albu.ShiftScaleRotate(shift_limit=0.0, scale_limit=0.2, rotate_limit=20, p=.4), + albu.ElasticTransform(), + albu.OpticalDistortion(), + albu.OneOf([ + albu.CLAHE(clip_limit=2), + albu.IAASharpen(), + albu.IAAEmboss(), + albu.RandomBrightnessContrast(), + albu.RandomGamma() + ], p=0.5), + albu.OneOf([ + albu.RGBShift(), + albu.HueSaturationValue(), + ], p=0.5), + ]), + 'weak': albu.Compose([albu.HorizontalFlip(), + ]), + 'geometric': albu.OneOf([albu.HorizontalFlip(always_apply=True), + albu.ShiftScaleRotate(always_apply=True), + albu.Transpose(always_apply=True), + albu.OpticalDistortion(always_apply=True), + albu.ElasticTransform(always_apply=True), + ]) + } + + aug_fn = augs[scope] + crop_fn = {'random': albu.RandomCrop(size, size, always_apply=True), + 'center': albu.CenterCrop(size, size, always_apply=True)}[crop] + pad = albu.PadIfNeeded(size, size) + + pipeline = albu.Compose([aug_fn, crop_fn, pad], additional_targets={'target': 'image'}) + + def process(a, b): + r = pipeline(image=a, target=b) + return r['image'], r['target'] + + return process + + +def get_normalize(): + normalize = albu.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + normalize = albu.Compose([normalize], additional_targets={'target': 'image'}) + + def process(a, b): + r = normalize(image=a, target=b) + return r['image'], r['target'] + + return process + + +def _resolve_aug_fn(name): + d = { + 'cutout': albu.Cutout, + 'rgb_shift': albu.RGBShift, + 'hsv_shift': albu.HueSaturationValue, + 'motion_blur': albu.MotionBlur, + 'median_blur': albu.MedianBlur, + 'snow': albu.RandomSnow, + 'shadow': albu.RandomShadow, + 'fog': albu.RandomFog, + 'brightness_contrast': albu.RandomBrightnessContrast, + 'gamma': albu.RandomGamma, + 'sun_flare': albu.RandomSunFlare, + 'sharpen': albu.IAASharpen, + 'jpeg': albu.JpegCompression, + 'gray': albu.ToGray, + # ToDo: pixelize + # ToDo: partial gray + } + return d[name] + + +def get_corrupt_function(config): + augs = [] + for aug_params in config: + name = aug_params.pop('name') + cls = _resolve_aug_fn(name) + prob = aug_params.pop('prob') if 'prob' in aug_params else .5 + augs.append(cls(p=prob, **aug_params)) + + augs = albu.OneOf(augs) + + def process(x): + return augs(image=x)['image'] + + return process diff --git a/gimp-plugins/DeblurGANv2/aug.pyc b/gimp-plugins/DeblurGANv2/aug.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0b6552684b5359ca02d762232ec42352542856e GIT binary patch literal 3636 zcmcIm%WfP+6usT^aO~KzIoo)$Ee)h)9$2Sbk!s= zvc)0@$O87T;8$4l3GDa>J^;?SHRC`KJ2TT=eQ!N(-Ku-)-0J+XwD8sApFfXf`s?8P z9$tGLMS>@gLZl4jP9OpGP>N9QbR@ykm82(MLxs?nuSM?kB$-p8FUcVl<~U@yHVkX#qV2f1|v$A&De%ha`b?E27JQzdm7)00DZ7x7ZIzhGqE5hrpt&@z-qFa;H^ z1e|8yCwK!t;K3v5vO{;)vo{m^IuL#4C0Rgaz;5iX_46}ipr?WsCmVARZgKmuPb?|%UF?J;WO^Vvh_O8#xH@fr6w03WH;~<~?Dk~0 zFBzO&l^oL^ljIrM!1g>A`Br3%s^jvF$ljdn&H)A|5XgZ$%y4e!V>c@DGItqN{rQI2 z6zR0#ryu$I^_(LJ+S(a6ZONZOg(H-j%y;_joLhhz7kT3cW5tLVUi^MPr^vFdt*cDI z_aHU*)T${Sm>oMZO;hYdG#GK5c8}A_rL9|qZjvN-d0m-e5T&bl6|^U^>jXnk$W zu^qf@Jcif4jN&^zM%?v~;dmtB9t6Z2q2UL+JmwkUz(KTxGCG1{4X7F zn}1nX&J>ps2DgH$A;UamNFxoOR$eNMb$NUnC*6+gwsc3)^Jar8Q(Ocq)GhZ~clc(N z-W;p6`N8?Cmo&!L&YvG*ISs-?_JDe++^Vg0&q9loJub9)w(TnTzX?Mvrc-SB#w(^Q zjpOD;S1zc78sn>zbZFe9HJtn-Q%q7_>FpK*=TUcctpu7kwb|B<`n0K7QI+VvWs+;- zL7FCMvc`$jlFh$NbtyKk8*e=3cPOVp#UVbILBtX+z!Vfo^A+ zn(E2f*V$bXkwu=Ium9<=M$4Rq*pCQne^QB`3P?8vFrYC!pvWmG-h}lhXOydrlpi_k zU^Z)X9GXYYQ$Tv1!w-l#$X*1}{$kf@oJT-{SA)~U1xa?slbdY)2}O?lg@7Z-flo+^ z2hE#`50#MSy1o@i=*^NRF~PCOx^rGS#MG~K$nNuAT40{ny2H%QDlP8k<|Rp{$@mC! 
zvQC*MxtZ!b>$a*N zs6D%F5{L}gxXv8Ivb9fKqj<(#OeUFf76WQ0)o5IpHdT&*2_-_>z1yTdxn-@{QMWw| zc$7EQ_2>-5=tUGtS~t|u3-ptec0>K#S3ScG>8u_cB~}nGU8Wy=K_NjWSoqKXk_Rmv=Bsq_0|YS4W08k3fpYP)PsF3$7G!wJN^!u{ zy}q>92rhQhrb+>hI~uqSjE6fLP|mwmAm2-XvB4Kh@-a|?Yb?5;0+W90FIPRKKU~6k zxIfUhLG(UK-Ag3!t$hxR*cfwX`EmtBn+4ZRt?Q`X6(NZ)1X?E;FG OaIv%e%KYK^6Mq4by9eO_ literal 0 HcmV?d00001 diff --git a/gimp-plugins/DeblurGANv2/config/config.yaml b/gimp-plugins/DeblurGANv2/config/config.yaml new file mode 100644 index 0000000..3824dbc --- /dev/null +++ b/gimp-plugins/DeblurGANv2/config/config.yaml @@ -0,0 +1,68 @@ +--- +project: deblur_gan +experiment_desc: fpn + +train: + files_a: &FILES_A /datasets/my_dataset/**/*.jpg + files_b: *FILES_A + size: &SIZE 256 + crop: random + preload: &PRELOAD false + preload_size: &PRELOAD_SIZE 0 + bounds: [0, .9] + scope: geometric + corrupt: &CORRUPT + - name: cutout + prob: 0.5 + num_holes: 3 + max_h_size: 25 + max_w_size: 25 + - name: jpeg + quality_lower: 70 + quality_upper: 90 + - name: motion_blur + - name: median_blur + - name: gamma + - name: rgb_shift + - name: hsv_shift + - name: sharpen + +val: + files_a: *FILES_A + files_b: *FILES_A + size: *SIZE + scope: geometric + crop: center + preload: *PRELOAD + preload_size: *PRELOAD_SIZE + bounds: [.9, 1] + corrupt: *CORRUPT + +phase: train +warmup_num: 3 +model: + g_name: fpn_inception + blocks: 9 + d_name: double_gan # may be no_gan, patch_gan, double_gan, multi_scale + d_layers: 3 + content_loss: perceptual + adv_lambda: 0.001 + disc_loss: wgan-gp + learn_residual: True + norm_layer: instance + dropout: True + +num_epochs: 200 +train_batches_per_epoch: 1000 +val_batches_per_epoch: 100 +batch_size: 1 +image_size: [256, 256] + +optimizer: + name: adam + lr: 0.0001 +scheduler: + name: linear + start_epoch: 50 + min_lr: 0.0000001 + diff --git a/gimp-plugins/DeblurGANv2/dataset.py b/gimp-plugins/DeblurGANv2/dataset.py new file mode 100644 index 0000000..b667921 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/dataset.py @@ -0,0 +1,142 @@ +import os +from copy import deepcopy +from functools import partial +from glob import glob +from hashlib import sha1 +from typing import Callable, Iterable, Optional, Tuple + +import cv2 +import numpy as np +from glog import logger +from joblib import Parallel, cpu_count, delayed +from skimage.io import imread +from torch.utils.data import Dataset +from tqdm import tqdm + +import aug + + +def subsample(data: Iterable, bounds: Tuple[float, float], hash_fn: Callable, n_buckets=100, salt='', verbose=True): + data = list(data) + buckets = split_into_buckets(data, n_buckets=n_buckets, salt=salt, hash_fn=hash_fn) + + lower_bound, upper_bound = [x * n_buckets for x in bounds] + msg = f'Subsampling buckets from {lower_bound} to {upper_bound}, total buckets number is {n_buckets}' + if salt: + msg += f'; salt is {salt}' + if verbose: + logger.info(msg) + return np.array([sample for bucket, sample in zip(buckets, data) if lower_bound <= bucket < upper_bound]) + + +def hash_from_paths(x: Tuple[str, str], salt: str = '') -> str: + path_a, path_b = x + names = ''.join(map(os.path.basename, (path_a, path_b))) + return sha1(f'{names}_{salt}'.encode()).hexdigest() + + +def split_into_buckets(data: Iterable, n_buckets: int, hash_fn: Callable, salt=''): + hashes = map(partial(hash_fn, salt=salt), data) + return np.array([int(x, 16) % n_buckets for x in hashes]) + + +def _read_img(x: str): + img = cv2.imread(x) + if img is None: + logger.warning(f'Can not read image {x} with OpenCV, switching to scikit-image') + img = imread(x) + return 
img + + +class PairedDataset(Dataset): + def __init__(self, + files_a: Tuple[str], + files_b: Tuple[str], + transform_fn: Callable, + normalize_fn: Callable, + corrupt_fn: Optional[Callable] = None, + preload: bool = True, + preload_size: Optional[int] = 0, + verbose=True): + + assert len(files_a) == len(files_b) + + self.preload = preload + self.data_a = files_a + self.data_b = files_b + self.verbose = verbose + self.corrupt_fn = corrupt_fn + self.transform_fn = transform_fn + self.normalize_fn = normalize_fn + logger.info(f'Dataset has been created with {len(self.data_a)} samples') + + if preload: + preload_fn = partial(self._bulk_preload, preload_size=preload_size) + if files_a == files_b: + self.data_a = self.data_b = preload_fn(self.data_a) + else: + self.data_a, self.data_b = map(preload_fn, (self.data_a, self.data_b)) + self.preload = True + + def _bulk_preload(self, data: Iterable[str], preload_size: int): + jobs = [delayed(self._preload)(x, preload_size=preload_size) for x in data] + jobs = tqdm(jobs, desc='preloading images', disable=not self.verbose) + return Parallel(n_jobs=cpu_count(), backend='threading')(jobs) + + @staticmethod + def _preload(x: str, preload_size: int): + img = _read_img(x) + if preload_size: + h, w, *_ = img.shape + h_scale = preload_size / h + w_scale = preload_size / w + scale = max(h_scale, w_scale) + img = cv2.resize(img, fx=scale, fy=scale, dsize=None) + assert min(img.shape[:2]) >= preload_size, f'weird img shape: {img.shape}' + return img + + def _preprocess(self, img, res): + def transpose(x): + return np.transpose(x, (2, 0, 1)) + + return map(transpose, self.normalize_fn(img, res)) + + def __len__(self): + return len(self.data_a) + + def __getitem__(self, idx): + a, b = self.data_a[idx], self.data_b[idx] + if not self.preload: + a, b = map(_read_img, (a, b)) + a, b = self.transform_fn(a, b) + if self.corrupt_fn is not None: + a = self.corrupt_fn(a) + a, b = self._preprocess(a, b) + return {'a': a, 'b': b} + + @staticmethod + def from_config(config): + config = deepcopy(config) + files_a, files_b = map(lambda x: sorted(glob(config[x], recursive=True)), ('files_a', 'files_b')) + transform_fn = aug.get_transforms(size=config['size'], scope=config['scope'], crop=config['crop']) + normalize_fn = aug.get_normalize() + corrupt_fn = aug.get_corrupt_function(config['corrupt']) + + hash_fn = hash_from_paths + # ToDo: add more hash functions + verbose = config.get('verbose', True) + data = subsample(data=zip(files_a, files_b), + bounds=config.get('bounds', (0, 1)), + hash_fn=hash_fn, + verbose=verbose) + + files_a, files_b = map(list, zip(*data)) + + return PairedDataset(files_a=files_a, + files_b=files_b, + preload=config['preload'], + preload_size=config['preload_size'], + corrupt_fn=corrupt_fn, + normalize_fn=normalize_fn, + transform_fn=transform_fn, + verbose=verbose) diff --git a/gimp-plugins/DeblurGANv2/metric_counter.py b/gimp-plugins/DeblurGANv2/metric_counter.py new file mode 100644 index 0000000..f7d189a --- /dev/null +++ b/gimp-plugins/DeblurGANv2/metric_counter.py @@ -0,0 +1,56 @@ +import logging +from collections import defaultdict + +import numpy as np +from tensorboardX import SummaryWriter + +WINDOW_SIZE = 100 + + +class MetricCounter: + def __init__(self, exp_name): + self.writer = SummaryWriter(exp_name) + logging.basicConfig(filename='{}.log'.format(exp_name), level=logging.DEBUG) + self.metrics = defaultdict(list) + self.images = defaultdict(list) + self.best_metric = 0 + + def add_image(self, x: np.ndarray, tag: str): + 
self.images[tag].append(x) + + def clear(self): + self.metrics = defaultdict(list) + self.images = defaultdict(list) + + def add_losses(self, l_G, l_content, l_D=0): + for name, value in zip(('G_loss', 'G_loss_content', 'G_loss_adv', 'D_loss'), + (l_G, l_content, l_G - l_content, l_D)): + self.metrics[name].append(value) + + def add_metrics(self, psnr, ssim): + for name, value in zip(('PSNR', 'SSIM'), + (psnr, ssim)): + self.metrics[name].append(value) + + def loss_message(self): + metrics = ((k, np.mean(self.metrics[k][-WINDOW_SIZE:])) for k in ('G_loss', 'PSNR', 'SSIM')) + return '; '.join(map(lambda x: f'{x[0]}={x[1]:.4f}', metrics)) + + def write_to_tensorboard(self, epoch_num, validation=False): + scalar_prefix = 'Validation' if validation else 'Train' + for tag in ('G_loss', 'D_loss', 'G_loss_adv', 'G_loss_content', 'SSIM', 'PSNR'): + self.writer.add_scalar(f'{scalar_prefix}_{tag}', np.mean(self.metrics[tag]), global_step=epoch_num) + for tag in self.images: + imgs = self.images[tag] + if imgs: + imgs = np.array(imgs) + self.writer.add_images(tag, imgs[:, :, :, ::-1].astype('float32') / 255, dataformats='NHWC', + global_step=epoch_num) + self.images[tag] = [] + + def update_best_model(self): + cur_metric = np.mean(self.metrics['PSNR']) + if self.best_metric < cur_metric: + self.best_metric = cur_metric + return True + return False diff --git a/gimp-plugins/DeblurGANv2/models/__init__.py b/gimp-plugins/DeblurGANv2/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gimp-plugins/DeblurGANv2/models/__init__.pyc b/gimp-plugins/DeblurGANv2/models/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..083a1b6f503d1b419379ea507be5c1cdc61aa382 GIT binary patch literal 165 zcmZSn%**w>VP0G^0~9aBN{ieb{mP8=bMsSD jbBguj<1_OzOXB183MxxDfJWK?#Y=Ni?LZcn05JmqV8AG} literal 0 HcmV?d00001 diff --git a/gimp-plugins/DeblurGANv2/models/fpn_densenet.py b/gimp-plugins/DeblurGANv2/models/fpn_densenet.py new file mode 100644 index 0000000..2416383 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/models/fpn_densenet.py @@ -0,0 +1,135 @@ +import torch +import torch.nn as nn + +from torchvision.models import resnet50, densenet121, densenet201 + + +class FPNSegHead(nn.Module): + def __init__(self, num_in, num_mid, num_out): + super().__init__() + + self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1, bias=False) + self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1, bias=False) + + def forward(self, x): + x = nn.functional.relu(self.block0(x), inplace=True) + x = nn.functional.relu(self.block1(x), inplace=True) + return x + + +class FPNDense(nn.Module): + + def __init__(self, output_ch=3, num_filters=128, num_filters_fpn=256, pretrained=True): + super().__init__() + + # Feature Pyramid Network (FPN) with four feature maps of resolutions + # 1/4, 1/8, 1/16, 1/32 and `num_filters` filters for all feature maps. 
+ + self.fpn = FPN(num_filters=num_filters_fpn, pretrained=pretrained) + + # The segmentation heads on top of the FPN + + self.head1 = FPNSegHead(num_filters_fpn, num_filters, num_filters) + self.head2 = FPNSegHead(num_filters_fpn, num_filters, num_filters) + self.head3 = FPNSegHead(num_filters_fpn, num_filters, num_filters) + self.head4 = FPNSegHead(num_filters_fpn, num_filters, num_filters) + + self.smooth = nn.Sequential( + nn.Conv2d(4 * num_filters, num_filters, kernel_size=3, padding=1), + nn.BatchNorm2d(num_filters), + nn.ReLU(), + ) + + self.smooth2 = nn.Sequential( + nn.Conv2d(num_filters, num_filters // 2, kernel_size=3, padding=1), + nn.BatchNorm2d(num_filters // 2), + nn.ReLU(), + ) + + self.final = nn.Conv2d(num_filters // 2, output_ch, kernel_size=3, padding=1) + + def forward(self, x): + map0, map1, map2, map3, map4 = self.fpn(x) + + map4 = nn.functional.upsample(self.head4(map4), scale_factor=8, mode="nearest") + map3 = nn.functional.upsample(self.head3(map3), scale_factor=4, mode="nearest") + map2 = nn.functional.upsample(self.head2(map2), scale_factor=2, mode="nearest") + map1 = nn.functional.upsample(self.head1(map1), scale_factor=1, mode="nearest") + + smoothed = self.smooth(torch.cat([map4, map3, map2, map1], dim=1)) + smoothed = nn.functional.upsample(smoothed, scale_factor=2, mode="nearest") + smoothed = self.smooth2(smoothed + map0) + smoothed = nn.functional.upsample(smoothed, scale_factor=2, mode="nearest") + + final = self.final(smoothed) + + nn.Tanh(final) + + +class FPN(nn.Module): + + def __init__(self, num_filters=256, pretrained=True): + """Creates an `FPN` instance for feature extraction. + Args: + num_filters: the number of filters in each output pyramid level + pretrained: use ImageNet pre-trained backbone feature extractor + """ + + super().__init__() + + self.features = densenet121(pretrained=pretrained).features + + self.enc0 = nn.Sequential(self.features.conv0, + self.features.norm0, + self.features.relu0) + self.pool0 = self.features.pool0 + self.enc1 = self.features.denseblock1 # 256 + self.enc2 = self.features.denseblock2 # 512 + self.enc3 = self.features.denseblock3 # 1024 + self.enc4 = self.features.denseblock4 # 2048 + self.norm = self.features.norm5 # 2048 + + self.tr1 = self.features.transition1 # 256 + self.tr2 = self.features.transition2 # 512 + self.tr3 = self.features.transition3 # 1024 + + self.lateral4 = nn.Conv2d(1024, num_filters, kernel_size=1, bias=False) + self.lateral3 = nn.Conv2d(1024, num_filters, kernel_size=1, bias=False) + self.lateral2 = nn.Conv2d(512, num_filters, kernel_size=1, bias=False) + self.lateral1 = nn.Conv2d(256, num_filters, kernel_size=1, bias=False) + self.lateral0 = nn.Conv2d(64, num_filters // 2, kernel_size=1, bias=False) + + def forward(self, x): + # Bottom-up pathway, from ResNet + enc0 = self.enc0(x) + + pooled = self.pool0(enc0) + + enc1 = self.enc1(pooled) # 256 + tr1 = self.tr1(enc1) + + enc2 = self.enc2(tr1) # 512 + tr2 = self.tr2(enc2) + + enc3 = self.enc3(tr2) # 1024 + tr3 = self.tr3(enc3) + + enc4 = self.enc4(tr3) # 2048 + enc4 = self.norm(enc4) + + # Lateral connections + + lateral4 = self.lateral4(enc4) + lateral3 = self.lateral3(enc3) + lateral2 = self.lateral2(enc2) + lateral1 = self.lateral1(enc1) + lateral0 = self.lateral0(enc0) + + # Top-down pathway + + map4 = lateral4 + map3 = lateral3 + nn.functional.upsample(map4, scale_factor=2, mode="nearest") + map2 = lateral2 + nn.functional.upsample(map3, scale_factor=2, mode="nearest") + map1 = lateral1 + nn.functional.upsample(map2, scale_factor=2, 
mode="nearest") + + return lateral0, map1, map2, map3, map4 diff --git a/gimp-plugins/DeblurGANv2/models/fpn_densenet.pyc b/gimp-plugins/DeblurGANv2/models/fpn_densenet.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a51bfded8de494b2b7efe533e68988cf15f5ac95 GIT binary patch literal 5309 zcmd5=ZExGi5nfU+wiP>0ocQu`d10@^+9H;FU2bH{|nc3y;?C#7nLs$N}*7(Ppzx^>( z(@zDzkI~FuK_dJ)Dp6{G#ye`@s(n|wT$NPRu&VZ}YFJbIH8rfO{kn3ZiqftcEUBKO zqN;kU)QM8R-(OZyZK0z+>sV3I(n81btfRr@_nIp(CH@yaohHpImO*lDQe8aUE-uhc zof-|X)o#5bx3^nG1O3lmboTV&H#&%B`*fgwf@W@lXzUQCDlW{uIl@$wzt?op9acfu zXvkAE(*-GHTLXuia|-OYqK+MPERRQ^74987E~@IptsE=W1#gGB>KMIP@yua$TC&%y zV~vh%=Sb(NPP!)ki;m?0me4f{q9{%eV{QaXD0L7Arpcb724cn|oy$peyKx#9-7eNv zDoqQ{`!q{mwIdn$JV=Zd@bdId^RqcP%9zE z2e<{8aXLzZP|LOWbOiCN-Z%}5I7@>>&MDW)*k5HQjrm9w; z+Q7vz0?vPG0`B&bMJ9T`MJ8fEFHQ=Cxp>hi*F_%0sg7b^rcWa0@5@cl`D2D^^D=vR z>Edk#T>uv@zX*AsvY=BRnBAIj?HRW-<96r4xTpU#)@cz3iBFdn?0gawVZW2*L%^Mw zq`&=6kjxCTtmsRiSk_M7ixJD>H#T0mS0LMHTy(>}|0$U0ZXPb@6)AMt zPteRA5TIY}dUey0uUmKSI;+k`g};~ZE2v201@8IG9!?*ZH~bt|17+@BM z;ra7n)}%68U>FufWMc3kA_av7!o+d#-~?edTNg86#l__?Kel#39fNVzwg2@2#2yz= z6Ig)R*VOScMJHmOw$VV)H(>w(?FC_x2~X@}CNGwcSJ7`W8zSDM9}Yg1Jc~#bRA9w?i%E}5-~Mj{ z?vw+t*)wOjTMob>oH@WN2Q15g6=cr$?YO6+211?h8s_i3l3WLcHeQf=FOj!idMICu zAPu#LqRZ={bDV2We~)A+<>7;dH~oB-ADTxq$@3Nx$s@1mYbp+O?qxl1npiQnr-QKX z*)-&hUgrVoZZFZVbh0qhJoi5G#zuR;90rHFqYDom_orFeI|#y~gDi!e@7kERyE49WZ~UTUQH}SVaUQuo^)8qLPvZ4_9q<{!qBmEs$*BM4IR5PQyz+nM#)$l zSZeUzlBF#{)grg6&h_o7-Yz=V?@aYOQiS;Q899&y9+L-&MboLUv)I#K6k3bIw!HUR z^kxb25zCFQv7A)gYH7{gbl05~XTw>af1$zG<(mb+Qg#pzXTbFm9zi%M%MR2W6y;5s zDb=QSC?)ZuAYd<)sJqc?!3;_%GEjW57fLBjknAZ!S!QR7P)c^5Sdc?lugFvu5x1p`G({-BriyQOTxE#e~*OKj?^ap7Puy_dR|q?LQ-8beo-qt zX7_(y?t?4+l+XTz>uwvz6`oyElqgG4a2EX=pLRYC^ufdS&y62e5;$nP zhmxHKFyI~RD9%By9YJbJZ?s%=2Xz3JCdz%333+z^5inOx($$?@UzV`V?#6hhda# zMHD9|i<^<3wPskG0?38~lWHI;XIrNeB~jk#Pe*6;br%EK4I zwx9MMru`igzus3$9SV~c|Gag~XX*P1P zI%w%VDrLZae*Px|rlg`4)JdHBfgC)b1CxMi5Kc1S1J{ME!m6P-q=6rUNjEUwIY2vh zY}{Nmc@rDe)I~8)y#P!MQ&TXzua)X-m}f(i2)OZ=r~WDQCf2fj*5UeTIMn9;wcs#ex*R)cNV0$BU5ldFk&0)a66v zxrdTn2QX#as-notngp2zH}5N1W4C zuSo2idj5(lh-f0M69Co~Tp-3%KwcoHF24wOHgL0N8>!2x_yJ;T6}DbswVbCCuAl?lD73Wmw-w_G)%gw zG2a>Mju@1WK(-%YnNLwN>VdMJlTnujD*Pv4b=-4)J*D>Fb+r#1CMM|&a~U{T z7$72%WHePOdEmIOlEEsGgIY4gFEJ^SA&`yF)}TcfWU#i148}fAHeJ{sJr81EMP^xo zX*bo*EE&V1)se#@53il6JiF;Grk<=hiJ#t9uQZZ{ci`%aR7-e2lHxg zhE=$Wv-@x@VCuNq!_@(QxF;O5?6?~(`rEd6-7N5t_;mds@23&{q6A~+pfiTrlc+BsySKjTl_*VHI?_;m0SkI4l0-M$r3=lP zrjH6xTw?>`y=LiZ9pEGdl`_e zKR|v8gg4D;wKG#ViJfMNz%IpYz`Tl}J$INAW@8CpEEZWPDHjh@o+t^HO5|iodFn9b z>5||KFU(}`Y(YRKmuck;wD3$4$=I7NY>3-`6&v4-f&+xX)aFM=^ zjs|jFl9pw9m_j1BJ!CYVNCbk*B>}vZ75No{D@g-BEh>Q!GaJhA_2nA zzOu|!wd<9q@*cKTefG@)!wfh@>eIQVp6$4g!V`#{yoO}|piZX7&rRm}iT4Eg=@So^ zbvU8Gc(`DCU33mpTcfBHu!rY$m zgKf`dB;Mdf>f@Eci_LQrkIghl4VH@%6W;cQnejgF`CF!Kat|FJ7CqjkA3WXcC*boi zkNvcoNnTmV7My98nJoGztWmC4qZPzWMqD5<`D^cakq7(LwB_gytU3C-U<4ikT|~P@ zziJae`VX4`GIOckIN*D+$n!PH%BohW1@dUqS@p$#n?Q*8D@YO_8SO?+*I? 
zxU*i3ULB`e-yE;q@(+#qs2Z~-ThWVlP1yNpQ!>re46wOl?H^av>(XioI;&$OpHw8b zMtQ5f_HzGnq`mfct@vG+j8vO$j8`LQ*hA#Qq4eP`Ow(`5>@n0d^W5?ZaHh>RQD(|I;2<9C5j=hU2CHNbxYqPZ zXBi<9k0nfuksyEncS$bk%Xso>Iwf2}&3>=CXPf{@r#zi;yYHi9*8mC!fgm86v^WTP z0zpRAsi|j+>QOjKt{J)X9fGe3=r!~=0AqLjZ&^io!yClB;7=&U9UtP+=*SQ?M8q3H zV7Tki`-DU#$O(_i4AN$@Xsk&USvH8#DA_TxW>(QCIW=-^R&672$;UZ~O=E34a3N;9 z4a$IaHXE8P8E{#VWHwSu9AOpt>WtdQwYaJF5gMk|z75bwlacSv@g~gt_PE?v%V3He zU(#np0P>WiSzvW%&DQ|0Dlq ztmF|C`S;vvv)+<}SVg0Fez`B*ND?{2TrNVqL80Zw=!IVM5~iPUOatIo^4!gnSDKG; n`T_yh8l8h+Z|YH1IRB}Ab^nx#mhnb9ha*^W&&{2iKX>atf5&w3 literal 0 HcmV?d00001 diff --git a/gimp-plugins/DeblurGANv2/models/fpn_inception_simple.py b/gimp-plugins/DeblurGANv2/models/fpn_inception_simple.py new file mode 100644 index 0000000..b95a4e4 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/models/fpn_inception_simple.py @@ -0,0 +1,160 @@ +import torch +import torch.nn as nn +from pretrainedmodels import inceptionresnetv2 +from torchsummary import summary +import torch.nn.functional as F + +class FPNHead(nn.Module): + def __init__(self, num_in, num_mid, num_out): + super().__init__() + + self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1, bias=False) + self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1, bias=False) + + def forward(self, x): + x = nn.functional.relu(self.block0(x), inplace=True) + x = nn.functional.relu(self.block1(x), inplace=True) + return x + +class ConvBlock(nn.Module): + def __init__(self, num_in, num_out, norm_layer): + super().__init__() + + self.block = nn.Sequential(nn.Conv2d(num_in, num_out, kernel_size=3, padding=1), + norm_layer(num_out), + nn.ReLU(inplace=True)) + + def forward(self, x): + x = self.block(x) + return x + + +class FPNInceptionSimple(nn.Module): + + def __init__(self, norm_layer, output_ch=3, num_filters=128, num_filters_fpn=256): + super().__init__() + + # Feature Pyramid Network (FPN) with four feature maps of resolutions + # 1/4, 1/8, 1/16, 1/32 and `num_filters` filters for all feature maps. 
+ self.fpn = FPN(num_filters=num_filters_fpn, norm_layer=norm_layer) + + # The segmentation heads on top of the FPN + + self.head1 = FPNHead(num_filters_fpn, num_filters, num_filters) + self.head2 = FPNHead(num_filters_fpn, num_filters, num_filters) + self.head3 = FPNHead(num_filters_fpn, num_filters, num_filters) + self.head4 = FPNHead(num_filters_fpn, num_filters, num_filters) + + self.smooth = nn.Sequential( + nn.Conv2d(4 * num_filters, num_filters, kernel_size=3, padding=1), + norm_layer(num_filters), + nn.ReLU(), + ) + + self.smooth2 = nn.Sequential( + nn.Conv2d(num_filters, num_filters // 2, kernel_size=3, padding=1), + norm_layer(num_filters // 2), + nn.ReLU(), + ) + + self.final = nn.Conv2d(num_filters // 2, output_ch, kernel_size=3, padding=1) + + def unfreeze(self): + self.fpn.unfreeze() + + def forward(self, x): + + map0, map1, map2, map3, map4 = self.fpn(x) + + map4 = nn.functional.upsample(self.head4(map4), scale_factor=8, mode="nearest") + map3 = nn.functional.upsample(self.head3(map3), scale_factor=4, mode="nearest") + map2 = nn.functional.upsample(self.head2(map2), scale_factor=2, mode="nearest") + map1 = nn.functional.upsample(self.head1(map1), scale_factor=1, mode="nearest") + + smoothed = self.smooth(torch.cat([map4, map3, map2, map1], dim=1)) + smoothed = nn.functional.upsample(smoothed, scale_factor=2, mode="nearest") + smoothed = self.smooth2(smoothed + map0) + smoothed = nn.functional.upsample(smoothed, scale_factor=2, mode="nearest") + + final = self.final(smoothed) + res = torch.tanh(final) + x + + return torch.clamp(res, min = -1,max = 1) + + +class FPN(nn.Module): + + def __init__(self, norm_layer, num_filters=256): + """Creates an `FPN` instance for feature extraction. + Args: + num_filters: the number of filters in each output pyramid level + pretrained: use ImageNet pre-trained backbone feature extractor + """ + + super().__init__() + self.inception = inceptionresnetv2(num_classes=1000, pretrained='imagenet') + + self.enc0 = self.inception.conv2d_1a + self.enc1 = nn.Sequential( + self.inception.conv2d_2a, + self.inception.conv2d_2b, + self.inception.maxpool_3a, + ) # 64 + self.enc2 = nn.Sequential( + self.inception.conv2d_3b, + self.inception.conv2d_4a, + self.inception.maxpool_5a, + ) # 192 + self.enc3 = nn.Sequential( + self.inception.mixed_5b, + self.inception.repeat, + self.inception.mixed_6a, + ) # 1088 + self.enc4 = nn.Sequential( + self.inception.repeat_1, + self.inception.mixed_7a, + ) #2080 + + self.pad = nn.ReflectionPad2d(1) + self.lateral4 = nn.Conv2d(2080, num_filters, kernel_size=1, bias=False) + self.lateral3 = nn.Conv2d(1088, num_filters, kernel_size=1, bias=False) + self.lateral2 = nn.Conv2d(192, num_filters, kernel_size=1, bias=False) + self.lateral1 = nn.Conv2d(64, num_filters, kernel_size=1, bias=False) + self.lateral0 = nn.Conv2d(32, num_filters // 2, kernel_size=1, bias=False) + + for param in self.inception.parameters(): + param.requires_grad = False + + def unfreeze(self): + for param in self.inception.parameters(): + param.requires_grad = True + + def forward(self, x): + + # Bottom-up pathway, from ResNet + enc0 = self.enc0(x) + + enc1 = self.enc1(enc0) # 256 + + enc2 = self.enc2(enc1) # 512 + + enc3 = self.enc3(enc2) # 1024 + + enc4 = self.enc4(enc3) # 2048 + + # Lateral connections + + lateral4 = self.pad(self.lateral4(enc4)) + lateral3 = self.pad(self.lateral3(enc3)) + lateral2 = self.lateral2(enc2) + lateral1 = self.pad(self.lateral1(enc1)) + lateral0 = self.lateral0(enc0) + + # Top-down pathway + pad = (1, 2, 1, 2) # pad last dim 
by 1 on each side + pad1 = (0, 1, 0, 1) + map4 = lateral4 + map3 = lateral3 + nn.functional.upsample(map4, scale_factor=2, mode="nearest") + map2 = F.pad(lateral2, pad, "reflect") + nn.functional.upsample(map3, scale_factor=2, mode="nearest") + map1 = lateral1 + nn.functional.upsample(map2, scale_factor=2, mode="nearest") + return F.pad(lateral0, pad1, "reflect"), map1, map2, map3, map4 diff --git a/gimp-plugins/DeblurGANv2/models/fpn_inception_simple.pyc b/gimp-plugins/DeblurGANv2/models/fpn_inception_simple.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dd026d3f291b99e9821ffefa6d7115e8cd6458a GIT binary patch literal 6926 zcmd5>>v9yw74Df`t?m+tn}jh8PHb;3AS6L9B;_*T!f_>0sFn$_;&?oo>6J#z&Mq^v zM7Xq7REg*CTljw-B>$8Dd4S~mPVdaF#8Q=o{=ydm)zMb=(bGoVhYkuY*FTedN zRD+)y{(g*-e+LrbpQ93`wkzII`>xt{rOQ=GO||Q4yRICH)>JZ~+6}ecuyuSoN%8$D zwLPt(x-#HrRLfD(gnFgaky2l5&#I^~(lJ?e%&BN1Y&u|zv#h6~=@xM(%}kz};`zGmnnYLLYqx{!MM3`Iqs`x#AgWG--`}UqWe|gN zz|s}x;|Vyz)Rf+8y66t;AS}};4^VO+q>ydyJKU^OVE;9B=%_>aJOZupTyPvkT^+f# zL#2H1R?Jn0=*5g<#Ts16R`O$(0upp!mdk3{fckmN=hcal!{^wyAFYjRjDAETM1 zB^_z6jp1YgWZQ96@tt1Lgs2jd!>-ioeDPP;-`719i*#|UW9u=)_#q}>1HA-QIct3pZNudX_qKIRT~g0E720s78-?HxrhA{38lqKx z{wFAN!(t>`V8%e1NUe_n^pWcVyFfXtgfbHX@dliLbA)yr+~f#+HQ_3l)!c-DRnu!V zIlrdy+T&$a&V9=~>zTBOgG6(;Vn5pa?y)}48j&q96|IRSY3fs-VK4QTcCxmg1TRb` zIui1>6-6a+c{z7MFkW}2hj64C<)qJnAqqhSWB{LFi~6$;U0(q^1c|=DD$42j6q@)u zC}MBQS+vm5m(Zk_Nx0AN0|+aamxosZVX|044H50L(lc&}W2_KCTf5H>ehN|rw<_Sg zs!5cq52Bn6*$DLrceF^c*Z3tl1>J-+Iu>eI6!`&KPeKNRA0phr&oUso1VWA0i$>z5 zrUvMcczCk0F-~%FT;r626F{axiu^)GKF?q(p%(c@G>K^IViBAY5QW?v7F13@f9!j^ zG9)9H$9~U_F=H$YX6Lk!V8lN$J2t;)y#O(6gE+%wD^3bTMEx#WV=8H!e~1Z~CILP4 z4@C`CC|v#`CThxvH`qf`vsQ8I6}M4ww>-4}%EWU@5Uj>2BsV~^uX2=4br!W2^U2+t@pVOR+6V)+RBXi#z?0e$LVeHCs@QsC^?m_tV5-X zCq`u(DONn{P`{i|eOL8ss$W-q@PL)1KFoXIcY#F2Dan4S#Pi5;Pl_Q2nkdB(-2__{ z19C@4YY^Il(Ez(jtbSZfGZuzPty|ayn5G2Bj)U?0@B`Jn7y;vq zPKWWawOMrt##NX8+YjdW0fp@f=GEa8tMD6_`qP5%1$Bs<4gSrj2%d)0uDyY57R=@H zFhF+H3c{k3#dM7ls+qRhjMxOYkQ9P)L9|WOiQ_143y#~!%XyixmEfRBKxvwuMJ%1X zHNtKlF!9yzv+su_KOmv~>kmK#>sUv)Cu4;{p+96FZB>?41nHg#g$YJ#(uyp$8DZ_h zgYK=8Su2_KlG!Mk+p-|*Elgx5;GVG8(*p(r^*xlFQCYcHk^IZA`(x*-bI!TyE;{F2 z>%dL~Z$j$(h~RWm9*X4M4R`6vbWO7!qR0;+ejg>LI!$>Ug|WYKBv!j+tgZVEd_805ST|U@%lQhsROUvkH>W6lfNv|QDddZ zBK+&>*|*LU+|0Jb2jqXJ9y1dZCijBW`x=q|YY#VMz&JD>?r~lVoxRL>=Ag*nf^hS^ zIpYm}?q$3A-Kyq!BQg4}SL_)!?3m2!w7fw=g0VdlgnOQi@Lu;t7T{{{CFZ$FMn>vp z2GhlXv^!=z2w!Po?*dP$IUCX`1I ze=QK_X43H1FjE>8SPzcXc4VRL;Go;-B>qPA_1dV^#?EN%c5v*QJJmPqvdeaSU?Ts{ zjwEH7>Ehf*+J9C_Z%C^Z@YhB}eqM>(maI&hR)XghSjD3t!aEQB1Kx{l`bXz4-zKoH zEJ&VC;8%IREn83$jY46eG^C4s|) zqY5E-5EYLFYH#cI2@Q@{-ag>G;DPzBCig64oUEczl0qbltfEnTHBwAgZ6kO|Lz#Wq zSYzA4i!j@5=mE8(*#Kn4z{`vjgVV;a;<{Nynl`2ScwL@WeMF*3)khMCa4^d|;67UL zj4XFlh6%30z0^m1T9B1tTzrW%4_>+Y0Jpf4A0UF^`dLYwd0pacIV3^`UbJP%L&Q6L zxyUuNCgA6Z!cFVec=R!f(!8$g8zeVDB#+E2lm$*B;xtNoQKW6j*R~fm`^;psNgAyF zl;pQ0e<0B$pOZW$`6CG-qW?tl1qmFj(liIX4kC}051Uda(B)l5zj5d{j>0fcAyClCP`31=70e~swE0mm}Le&=C!Eb%pS;or`3a*d*ea~4#`M*jgJ*=2#JV++z zel%UT%HBYucy_r8+)Pu^W5K6{VO+Hj>$CwS_ZX2K{7;FdSrvS0Do40EeAu@3@D)25 zT7NIk!}1WJS5Vi?aV4o8YEihi{Q4*x{K+?BbN`IS5W0omAy Az5oCK literal 0 HcmV?d00001 diff --git a/gimp-plugins/DeblurGANv2/models/fpn_mobilenet.py b/gimp-plugins/DeblurGANv2/models/fpn_mobilenet.py new file mode 100644 index 0000000..ddbf3b3 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/models/fpn_mobilenet.py @@ -0,0 +1,147 @@ +import torch +import torch.nn as nn +from mobilenet_v2 import MobileNetV2 + +class FPNHead(nn.Module): + def __init__(self, num_in, num_mid, num_out): + super().__init__() + + self.block0 = 
nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1, bias=False) + self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1, bias=False) + + def forward(self, x): + x = nn.functional.relu(self.block0(x), inplace=True) + x = nn.functional.relu(self.block1(x), inplace=True) + return x + + +class FPNMobileNet(nn.Module): + + def __init__(self, norm_layer, output_ch=3, num_filters=64, num_filters_fpn=128, pretrained=True): + super().__init__() + + # Feature Pyramid Network (FPN) with four feature maps of resolutions + # 1/4, 1/8, 1/16, 1/32 and `num_filters` filters for all feature maps. + + self.fpn = FPN(num_filters=num_filters_fpn, norm_layer = norm_layer, pretrained=pretrained) + + # The segmentation heads on top of the FPN + + self.head1 = FPNHead(num_filters_fpn, num_filters, num_filters) + self.head2 = FPNHead(num_filters_fpn, num_filters, num_filters) + self.head3 = FPNHead(num_filters_fpn, num_filters, num_filters) + self.head4 = FPNHead(num_filters_fpn, num_filters, num_filters) + + self.smooth = nn.Sequential( + nn.Conv2d(4 * num_filters, num_filters, kernel_size=3, padding=1), + norm_layer(num_filters), + nn.ReLU(), + ) + + self.smooth2 = nn.Sequential( + nn.Conv2d(num_filters, num_filters // 2, kernel_size=3, padding=1), + norm_layer(num_filters // 2), + nn.ReLU(), + ) + + self.final = nn.Conv2d(num_filters // 2, output_ch, kernel_size=3, padding=1) + + def unfreeze(self): + self.fpn.unfreeze() + + def forward(self, x): + + map0, map1, map2, map3, map4 = self.fpn(x) + + map4 = nn.functional.upsample(self.head4(map4), scale_factor=8, mode="nearest") + map3 = nn.functional.upsample(self.head3(map3), scale_factor=4, mode="nearest") + map2 = nn.functional.upsample(self.head2(map2), scale_factor=2, mode="nearest") + map1 = nn.functional.upsample(self.head1(map1), scale_factor=1, mode="nearest") + + smoothed = self.smooth(torch.cat([map4, map3, map2, map1], dim=1)) + smoothed = nn.functional.upsample(smoothed, scale_factor=2, mode="nearest") + smoothed = self.smooth2(smoothed + map0) + smoothed = nn.functional.upsample(smoothed, scale_factor=2, mode="nearest") + + final = self.final(smoothed) + res = torch.tanh(final) + x + + return torch.clamp(res, min=-1, max=1) + + +class FPN(nn.Module): + + def __init__(self, norm_layer, num_filters=128, pretrained=True): + """Creates an `FPN` instance for feature extraction. 
+ Args: + num_filters: the number of filters in each output pyramid level + pretrained: use ImageNet pre-trained backbone feature extractor + """ + + super().__init__() + net = MobileNetV2(n_class=1000) + + if pretrained: + #Load weights into the project directory + state_dict = torch.load('mobilenetv2.pth.tar') # add map_location='cpu' if no gpu + net.load_state_dict(state_dict) + self.features = net.features + + self.enc0 = nn.Sequential(*self.features[0:2]) + self.enc1 = nn.Sequential(*self.features[2:4]) + self.enc2 = nn.Sequential(*self.features[4:7]) + self.enc3 = nn.Sequential(*self.features[7:11]) + self.enc4 = nn.Sequential(*self.features[11:16]) + + self.td1 = nn.Sequential(nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1), + norm_layer(num_filters), + nn.ReLU(inplace=True)) + self.td2 = nn.Sequential(nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1), + norm_layer(num_filters), + nn.ReLU(inplace=True)) + self.td3 = nn.Sequential(nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1), + norm_layer(num_filters), + nn.ReLU(inplace=True)) + + self.lateral4 = nn.Conv2d(160, num_filters, kernel_size=1, bias=False) + self.lateral3 = nn.Conv2d(64, num_filters, kernel_size=1, bias=False) + self.lateral2 = nn.Conv2d(32, num_filters, kernel_size=1, bias=False) + self.lateral1 = nn.Conv2d(24, num_filters, kernel_size=1, bias=False) + self.lateral0 = nn.Conv2d(16, num_filters // 2, kernel_size=1, bias=False) + + for param in self.features.parameters(): + param.requires_grad = False + + def unfreeze(self): + for param in self.features.parameters(): + param.requires_grad = True + + + def forward(self, x): + + # Bottom-up pathway, from ResNet + enc0 = self.enc0(x) + + enc1 = self.enc1(enc0) # 256 + + enc2 = self.enc2(enc1) # 512 + + enc3 = self.enc3(enc2) # 1024 + + enc4 = self.enc4(enc3) # 2048 + + # Lateral connections + + lateral4 = self.lateral4(enc4) + lateral3 = self.lateral3(enc3) + lateral2 = self.lateral2(enc2) + lateral1 = self.lateral1(enc1) + lateral0 = self.lateral0(enc0) + + # Top-down pathway + map4 = lateral4 + map3 = self.td1(lateral3 + nn.functional.upsample(map4, scale_factor=2, mode="nearest")) + map2 = self.td2(lateral2 + nn.functional.upsample(map3, scale_factor=2, mode="nearest")) + map1 = self.td3(lateral1 + nn.functional.upsample(map2, scale_factor=2, mode="nearest")) + return lateral0, map1, map2, map3, map4 + diff --git a/gimp-plugins/DeblurGANv2/models/fpn_mobilenet.pyc b/gimp-plugins/DeblurGANv2/models/fpn_mobilenet.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bf97c393e9037bd3f73c4348ad717cca39c4d00 GIT binary patch literal 5963 zcmd5=+j1O95v}gINF!-1*^;m8)wULvoxSO1#{v!lj_h0S#SWot?nABrJkN~ zcaM=BQUnq5BQN{_!H4kyyz>EYPS#9oB;*K)4Jpv5bF!+kv$Co(Pi9T!pUtW4FMs_* zsD^(Pd~e{%Ux7sUcT}R(vyykzuB)E8GUTeHqPkV}tg4)-qKvC{YpUa@sH$Eob)?j9 zpVd`V8yl#X0~0Em7#o-@2PQe?W~+heFhx-@?RA8yD81QoF&tJw_;8|!cyb@45VLn38t)WXPemO#>OfwP zKr37uR#a5gky|-X$_H=7Ty=m^%y_04_CaA)b% zj%CF(ZYvJ*7DtL2NIvMBte6Dn`*9iyZdP4wBqR=T_1S_g0VD zwHDRFd}uRCM+VYC7wW_SvfVf;`QD&tK~xFJH^HrZotrFQ*~{W0-phO4AYFO-GTaHW z?$fNdYr-O5*^ayYPx{GVJ5KYJM`kM-WS`w{zgS=C_9BzyE1iDocjYKkQ!MviYFf`H zCs9t*ecuVWFg~sQ&pXnLc#k)B~c&ujnuH%d(I^s+w&S;TpV z`?cgOIrdnsYotNfz%^*GkByE737WFW`YQU4?L6O8<)(<&@(Alrqh8Qv#B3SHA=>gs zu?Kka6_7G;Z7=~<5O%MDG7@~R5zP{UWNM6NI*o)-yg}|yAR+tsa6`m5^XRgJQ>n6FlaP=^Thj*;kylW>&*8#aE6Xc!`X+g64p-5j&;sjq@wp&q#`DC z;-o;>i%+G!tm`MiOF$8Y`k5)RAWlsb^Pn|@R*pkg$k7X8_O<3fG*4PwvEV42LB4?q znt}peJAm}Hl3OpiJ0*AbI8-;y^MOf=I7l?lS|U`Nuf7&w%e%c^u_IAqS*xrQBeHGC 
z(<3#r1-4r$pk04Z_~DMe$*x(DvBmqe@-6fo2hh19;sU117eRoCwZ-aHM?P-ddBc zg|bLbM`FB+{V7bf_FMe^T}>Uj>ad~?tLhLuf=-eEGXU0zP^u4MzRbNxj(b`R*#H49 z>4IJ_U_vn@@NsQ*2L4bCaaoF0Pl`*e%7ZNuywgVVKk+$u^({wao;SJJ5d! zlqlzg2Iv`|4*IdQX>|a`RoDO97iRbZg{=imbpTWWq&VvOVMC(-k~(M*IHD;P0f#8< zZnhW)ZNcX}43I85L0I&%xC|*~Zd(+XATxO(p)O%*qnYpBTNwF^}XO68gOUK8OT7>PZN6eiOb5{(qAYmdq0oR1JUO+@JE8oSF-vCkWBI+OcxF0%; z&b+he&N}n11*~fj(AP=kNf$2HYq;{A zHOz~KODdWj+22&roGKQ?er);Gk-+6Q2wWQpTzP}Q^^w5*8w75Q1R5exLv@6GJY~mC zweJnjefJhNQ9b(otmKph<`Mrb_53&PH_-wzCc`rSJ3TNnL1A()NWE`SvV7~|Vgu+2 zjfY~~>tJw@8PDva4w8Gr@{~9HyPs|6_sW*%jg=Gkykf_&W6NY-uj36%Ea>)35bk)k z81ecqvj8_TFEKAnGN$Rc?7HU-a^ro`4YqkT_Av0tupslcf^cuEm%`4IHukbo6Fdr4 zjt>CM^Y|YK+?>EtUe8{vFZYX`*D2Xj|SMg2X_$Hhu=DNu7_^{R;0hu?Q}gKa`kg4wemu~}l+xHCSoKHgXxZ>-uP zIlw6zDWRLtm!WEnkz4%jEQsWOZ_^elQK>zp&J;UvMHUTXJHQ&cPlXqdnwVg|#FKvt zq9&TotXoGqnsa8{+s^RgE}4wlEI(bMTDvb@z( z^(%^3W{$bk3sCcaf8kKBo9cq&-xL` z=OkZ{&`tF(NWLQZISD~Z3QoOB?lH+{AXe!Nn=BRe2Fd@V{>OadM$+~}XkuUYb2fo2ksvh#S~g*&fHxuE(_?C)tmC8p)PZ_Hrh=iHmko1gw0L>HVw literal 0 HcmV?d00001 diff --git a/gimp-plugins/DeblurGANv2/models/fpn_mobilenet.py~HEAD b/gimp-plugins/DeblurGANv2/models/fpn_mobilenet.py~HEAD new file mode 100644 index 0000000..dec6b52 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/models/fpn_mobilenet.py~HEAD @@ -0,0 +1,147 @@ +import torch +import torch.nn as nn +from models.mobilenet_v2 import MobileNetV2 + +class FPNHead(nn.Module): + def __init__(self, num_in, num_mid, num_out): + super().__init__() + + self.block0 = nn.Conv2d(num_in, num_mid, kernel_size=3, padding=1, bias=False) + self.block1 = nn.Conv2d(num_mid, num_out, kernel_size=3, padding=1, bias=False) + + def forward(self, x): + x = nn.functional.relu(self.block0(x), inplace=True) + x = nn.functional.relu(self.block1(x), inplace=True) + return x + + +class FPNMobileNet(nn.Module): + + def __init__(self, norm_layer, output_ch=3, num_filters=128, num_filters_fpn=128, pretrained=True): + super().__init__() + + # Feature Pyramid Network (FPN) with four feature maps of resolutions + # 1/4, 1/8, 1/16, 1/32 and `num_filters` filters for all feature maps. 
+ + self.fpn = FPN(num_filters=num_filters_fpn, pretrained=pretrained) + + # The segmentation heads on top of the FPN + + self.head1 = FPNHead(num_filters_fpn, num_filters, num_filters) + self.head2 = FPNHead(num_filters_fpn, num_filters, num_filters) + self.head3 = FPNHead(num_filters_fpn, num_filters, num_filters) + self.head4 = FPNHead(num_filters_fpn, num_filters, num_filters) + + self.smooth = nn.Sequential( + nn.Conv2d(4 * num_filters, num_filters, kernel_size=3, padding=1), + norm_layer(num_filters), + nn.ReLU(), + ) + + self.smooth2 = nn.Sequential( + nn.Conv2d(num_filters, num_filters // 2, kernel_size=3, padding=1), + norm_layer(num_filters // 2), + nn.ReLU(), + ) + + self.final = nn.Conv2d(num_filters // 2, output_ch, kernel_size=3, padding=1) + + def unfreeze(self): + self.fpn.unfreeze() + + def forward(self, x): + + map0, map1, map2, map3, map4 = self.fpn(x) + + map4 = nn.functional.upsample(self.head4(map4), scale_factor=8, mode="nearest") + map3 = nn.functional.upsample(self.head3(map3), scale_factor=4, mode="nearest") + map2 = nn.functional.upsample(self.head2(map2), scale_factor=2, mode="nearest") + map1 = nn.functional.upsample(self.head1(map1), scale_factor=1, mode="nearest") + + smoothed = self.smooth(torch.cat([map4, map3, map2, map1], dim=1)) + smoothed = nn.functional.upsample(smoothed, scale_factor=2, mode="nearest") + smoothed = self.smooth2(smoothed + map0) + smoothed = nn.functional.upsample(smoothed, scale_factor=2, mode="nearest") + + final = self.final(smoothed) + res = torch.tanh(final) + x + + return torch.clamp(res, min=-1, max=1) + + +class FPN(nn.Module): + + def __init__(self, num_filters=128, pretrained=True): + """Creates an `FPN` instance for feature extraction. + Args: + num_filters: the number of filters in each output pyramid level + pretrained: use ImageNet pre-trained backbone feature extractor + """ + + super().__init__() + net = MobileNetV2(n_class=1000) + + if pretrained: + #Load weights into the project directory + state_dict = torch.load('mobilenetv2.pth.tar') # add map_location='cpu' if no gpu + net.load_state_dict(state_dict) + self.features = net.features + + self.enc0 = nn.Sequential(*self.features[0:2]) + self.enc1 = nn.Sequential(*self.features[2:4]) + self.enc2 = nn.Sequential(*self.features[4:7]) + self.enc3 = nn.Sequential(*self.features[7:11]) + self.enc4 = nn.Sequential(*self.features[11:16]) + + self.td1 = nn.Sequential(nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1), + norm_layer(num_filters), + nn.ReLU(inplace=True)) + self.td2 = nn.Sequential(nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1), + norm_layer(num_filters), + nn.ReLU(inplace=True)) + self.td3 = nn.Sequential(nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1), + norm_layer(num_filters), + nn.ReLU(inplace=True)) + + self.lateral4 = nn.Conv2d(160, num_filters, kernel_size=1, bias=False) + self.lateral3 = nn.Conv2d(64, num_filters, kernel_size=1, bias=False) + self.lateral2 = nn.Conv2d(32, num_filters, kernel_size=1, bias=False) + self.lateral1 = nn.Conv2d(24, num_filters, kernel_size=1, bias=False) + self.lateral0 = nn.Conv2d(16, num_filters // 2, kernel_size=1, bias=False) + + for param in self.features.parameters(): + param.requires_grad = False + + def unfreeze(self): + for param in self.features.parameters(): + param.requires_grad = True + + + def forward(self, x): + + # Bottom-up pathway, from ResNet + enc0 = self.enc0(x) + + enc1 = self.enc1(enc0) # 256 + + enc2 = self.enc2(enc1) # 512 + + enc3 = self.enc3(enc2) # 1024 + + 
enc4 = self.enc4(enc3) # 2048 + + # Lateral connections + + lateral4 = self.lateral4(enc4) + lateral3 = self.lateral3(enc3) + lateral2 = self.lateral2(enc2) + lateral1 = self.lateral1(enc1) + lateral0 = self.lateral0(enc0) + + # Top-down pathway + map4 = lateral4 + map3 = self.td1(lateral3 + nn.functional.upsample(map4, scale_factor=2, mode="nearest")) + map2 = self.td2(lateral2 + nn.functional.upsample(map3, scale_factor=2, mode="nearest")) + map1 = self.td3(lateral1 + nn.functional.upsample(map2, scale_factor=2, mode="nearest")) + return lateral0, map1, map2, map3, map4 + diff --git a/gimp-plugins/DeblurGANv2/models/losses.py b/gimp-plugins/DeblurGANv2/models/losses.py new file mode 100644 index 0000000..e9193b4 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/models/losses.py @@ -0,0 +1,300 @@ +import torch +import torch.autograd as autograd +import torch.nn as nn +import torchvision.models as models +import torchvision.transforms as transforms +from torch.autograd import Variable + +from util.image_pool import ImagePool + + +############################################################################### +# Functions +############################################################################### + +class ContentLoss(): + def initialize(self, loss): + self.criterion = loss + + def get_loss(self, fakeIm, realIm): + return self.criterion(fakeIm, realIm) + + def __call__(self, fakeIm, realIm): + return self.get_loss(fakeIm, realIm) + + +class PerceptualLoss(): + + def contentFunc(self): + conv_3_3_layer = 14 + cnn = models.vgg19(pretrained=True).features + cnn = cnn.cuda() + model = nn.Sequential() + model = model.cuda() + model = model.eval() + for i, layer in enumerate(list(cnn)): + model.add_module(str(i), layer) + if i == conv_3_3_layer: + break + return model + + def initialize(self, loss): + with torch.no_grad(): + self.criterion = loss + self.contentFunc = self.contentFunc() + self.transform = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + + def get_loss(self, fakeIm, realIm): + fakeIm = (fakeIm + 1) / 2.0 + realIm = (realIm + 1) / 2.0 + fakeIm[0, :, :, :] = self.transform(fakeIm[0, :, :, :]) + realIm[0, :, :, :] = self.transform(realIm[0, :, :, :]) + f_fake = self.contentFunc.forward(fakeIm) + f_real = self.contentFunc.forward(realIm) + f_real_no_grad = f_real.detach() + loss = self.criterion(f_fake, f_real_no_grad) + return 0.006 * torch.mean(loss) + 0.5 * nn.MSELoss()(fakeIm, realIm) + + def __call__(self, fakeIm, realIm): + return self.get_loss(fakeIm, realIm) + + +class GANLoss(nn.Module): + def __init__(self, use_l1=True, target_real_label=1.0, target_fake_label=0.0, + tensor=torch.FloatTensor): + super(GANLoss, self).__init__() + self.real_label = target_real_label + self.fake_label = target_fake_label + self.real_label_var = None + self.fake_label_var = None + self.Tensor = tensor + if use_l1: + self.loss = nn.L1Loss() + else: + self.loss = nn.BCEWithLogitsLoss() + + def get_target_tensor(self, input, target_is_real): + if target_is_real: + create_label = ((self.real_label_var is None) or + (self.real_label_var.numel() != input.numel())) + if create_label: + real_tensor = self.Tensor(input.size()).fill_(self.real_label) + self.real_label_var = Variable(real_tensor, requires_grad=False) + target_tensor = self.real_label_var + else: + create_label = ((self.fake_label_var is None) or + (self.fake_label_var.numel() != input.numel())) + if create_label: + fake_tensor = self.Tensor(input.size()).fill_(self.fake_label) + self.fake_label_var = 
Variable(fake_tensor, requires_grad=False) + target_tensor = self.fake_label_var + return target_tensor.cuda() + + def __call__(self, input, target_is_real): + target_tensor = self.get_target_tensor(input, target_is_real) + return self.loss(input, target_tensor) + + +class DiscLoss(nn.Module): + def name(self): + return 'DiscLoss' + + def __init__(self): + super(DiscLoss, self).__init__() + + self.criterionGAN = GANLoss(use_l1=False) + self.fake_AB_pool = ImagePool(50) + + def get_g_loss(self, net, fakeB, realB): + # First, G(A) should fake the discriminator + pred_fake = net.forward(fakeB) + return self.criterionGAN(pred_fake, 1) + + def get_loss(self, net, fakeB, realB): + # Fake + # stop backprop to the generator by detaching fake_B + # Generated Image Disc Output should be close to zero + self.pred_fake = net.forward(fakeB.detach()) + self.loss_D_fake = self.criterionGAN(self.pred_fake, 0) + + # Real + self.pred_real = net.forward(realB) + self.loss_D_real = self.criterionGAN(self.pred_real, 1) + + # Combined loss + self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 + return self.loss_D + + def __call__(self, net, fakeB, realB): + return self.get_loss(net, fakeB, realB) + + +class RelativisticDiscLoss(nn.Module): + def name(self): + return 'RelativisticDiscLoss' + + def __init__(self): + super(RelativisticDiscLoss, self).__init__() + + self.criterionGAN = GANLoss(use_l1=False) + self.fake_pool = ImagePool(50) # create image buffer to store previously generated images + self.real_pool = ImagePool(50) + + def get_g_loss(self, net, fakeB, realB): + # First, G(A) should fake the discriminator + self.pred_fake = net.forward(fakeB) + + # Real + self.pred_real = net.forward(realB) + errG = (self.criterionGAN(self.pred_real - torch.mean(self.fake_pool.query()), 0) + + self.criterionGAN(self.pred_fake - torch.mean(self.real_pool.query()), 1)) / 2 + return errG + + def get_loss(self, net, fakeB, realB): + # Fake + # stop backprop to the generator by detaching fake_B + # Generated Image Disc Output should be close to zero + self.fake_B = fakeB.detach() + self.real_B = realB + self.pred_fake = net.forward(fakeB.detach()) + self.fake_pool.add(self.pred_fake) + + # Real + self.pred_real = net.forward(realB) + self.real_pool.add(self.pred_real) + + # Combined loss + self.loss_D = (self.criterionGAN(self.pred_real - torch.mean(self.fake_pool.query()), 1) + + self.criterionGAN(self.pred_fake - torch.mean(self.real_pool.query()), 0)) / 2 + return self.loss_D + + def __call__(self, net, fakeB, realB): + return self.get_loss(net, fakeB, realB) + + +class RelativisticDiscLossLS(nn.Module): + def name(self): + return 'RelativisticDiscLossLS' + + def __init__(self): + super(RelativisticDiscLossLS, self).__init__() + + self.criterionGAN = GANLoss(use_l1=True) + self.fake_pool = ImagePool(50) # create image buffer to store previously generated images + self.real_pool = ImagePool(50) + + def get_g_loss(self, net, fakeB, realB): + # First, G(A) should fake the discriminator + self.pred_fake = net.forward(fakeB) + + # Real + self.pred_real = net.forward(realB) + errG = (torch.mean((self.pred_real - torch.mean(self.fake_pool.query()) + 1) ** 2) + + torch.mean((self.pred_fake - torch.mean(self.real_pool.query()) - 1) ** 2)) / 2 + return errG + + def get_loss(self, net, fakeB, realB): + # Fake + # stop backprop to the generator by detaching fake_B + # Generated Image Disc Output should be close to zero + self.fake_B = fakeB.detach() + self.real_B = realB + self.pred_fake = net.forward(fakeB.detach()) + 
self.fake_pool.add(self.pred_fake) + + # Real + self.pred_real = net.forward(realB) + self.real_pool.add(self.pred_real) + + # Combined loss + self.loss_D = (torch.mean((self.pred_real - torch.mean(self.fake_pool.query()) - 1) ** 2) + + torch.mean((self.pred_fake - torch.mean(self.real_pool.query()) + 1) ** 2)) / 2 + return self.loss_D + + def __call__(self, net, fakeB, realB): + return self.get_loss(net, fakeB, realB) + + +class DiscLossLS(DiscLoss): + def name(self): + return 'DiscLossLS' + + def __init__(self): + super(DiscLossLS, self).__init__() + self.criterionGAN = GANLoss(use_l1=True) + + def get_g_loss(self, net, fakeB, realB): + return DiscLoss.get_g_loss(self, net, fakeB) + + def get_loss(self, net, fakeB, realB): + return DiscLoss.get_loss(self, net, fakeB, realB) + + +class DiscLossWGANGP(DiscLossLS): + def name(self): + return 'DiscLossWGAN-GP' + + def __init__(self): + super(DiscLossWGANGP, self).__init__() + self.LAMBDA = 10 + + def get_g_loss(self, net, fakeB, realB): + # First, G(A) should fake the discriminator + self.D_fake = net.forward(fakeB) + return -self.D_fake.mean() + + def calc_gradient_penalty(self, netD, real_data, fake_data): + alpha = torch.rand(1, 1) + alpha = alpha.expand(real_data.size()) + alpha = alpha.cuda() + + interpolates = alpha * real_data + ((1 - alpha) * fake_data) + + interpolates = interpolates.cuda() + interpolates = Variable(interpolates, requires_grad=True) + + disc_interpolates = netD.forward(interpolates) + + gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates, + grad_outputs=torch.ones(disc_interpolates.size()).cuda(), + create_graph=True, retain_graph=True, only_inputs=True)[0] + + gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.LAMBDA + return gradient_penalty + + def get_loss(self, net, fakeB, realB): + self.D_fake = net.forward(fakeB.detach()) + self.D_fake = self.D_fake.mean() + + # Real + self.D_real = net.forward(realB) + self.D_real = self.D_real.mean() + # Combined loss + self.loss_D = self.D_fake - self.D_real + gradient_penalty = self.calc_gradient_penalty(net, realB.data, fakeB.data) + return self.loss_D + gradient_penalty + + +def get_loss(model): + if model['content_loss'] == 'perceptual': + content_loss = PerceptualLoss() + content_loss.initialize(nn.MSELoss()) + elif model['content_loss'] == 'l1': + content_loss = ContentLoss() + content_loss.initialize(nn.L1Loss()) + else: + raise ValueError("ContentLoss [%s] not recognized." % model['content_loss']) + + if model['disc_loss'] == 'wgan-gp': + disc_loss = DiscLossWGANGP() + elif model['disc_loss'] == 'lsgan': + disc_loss = DiscLossLS() + elif model['disc_loss'] == 'gan': + disc_loss = DiscLoss() + elif model['disc_loss'] == 'ragan': + disc_loss = RelativisticDiscLoss() + elif model['disc_loss'] == 'ragan-ls': + disc_loss = RelativisticDiscLossLS() + else: + raise ValueError("GAN Loss [%s] not recognized." 
% model['disc_loss']) + return content_loss, disc_loss diff --git a/gimp-plugins/DeblurGANv2/models/mobilenet_v2.py b/gimp-plugins/DeblurGANv2/models/mobilenet_v2.py new file mode 100755 index 0000000..0e6ec71 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/models/mobilenet_v2.py @@ -0,0 +1,126 @@ +import torch.nn as nn +import math + + +def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU6(inplace=True) + ) + + +def conv_1x1_bn(inp, oup): + return nn.Sequential( + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU6(inplace=True) + ) + + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, expand_ratio): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + hidden_dim = round(inp * expand_ratio) + self.use_res_connect = self.stride == 1 and inp == oup + + if expand_ratio == 1: + self.conv = nn.Sequential( + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + nn.BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ) + else: + self.conv = nn.Sequential( + # pw + nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), + nn.BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + nn.BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +class MobileNetV2(nn.Module): + def __init__(self, n_class=1000, input_size=224, width_mult=1.): + super(MobileNetV2, self).__init__() + block = InvertedResidual + input_channel = 32 + last_channel = 1280 + interverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + [6, 96, 3, 1], + [6, 160, 3, 2], + [6, 320, 1, 1], + ] + + # building first layer + assert input_size % 32 == 0 + input_channel = int(input_channel * width_mult) + self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel + self.features = [conv_bn(3, input_channel, 2)] + # building inverted residual blocks + for t, c, n, s in interverted_residual_setting: + output_channel = int(c * width_mult) + for i in range(n): + if i == 0: + self.features.append(block(input_channel, output_channel, s, expand_ratio=t)) + else: + self.features.append(block(input_channel, output_channel, 1, expand_ratio=t)) + input_channel = output_channel + # building last several layers + self.features.append(conv_1x1_bn(input_channel, self.last_channel)) + # make it nn.Sequential + self.features = nn.Sequential(*self.features) + + # building classifier + self.classifier = nn.Sequential( + nn.Dropout(0.2), + nn.Linear(self.last_channel, n_class), + ) + + self._initialize_weights() + + def forward(self, x): + x = self.features(x) + x = x.mean(3).mean(2) + x = self.classifier(x) + return x + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + n = m.weight.size(1) + m.weight.data.normal_(0, 0.01) + m.bias.data.zero_() + diff --git a/gimp-plugins/DeblurGANv2/models/mobilenet_v2.pyc b/gimp-plugins/DeblurGANv2/models/mobilenet_v2.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4abf1d443dce7be8de6fe37cab91598cc7380d25 GIT binary patch literal 4623 [binary data omitted]
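Illustrative note (not part of the patch): the MobileNetV2 added above is a plain ImageNet-style classifier; a minimal smoke test, assuming the DeblurGANv2 directory is on the path so the file imports as models.mobilenet_v2, might look like this:

import torch
from models.mobilenet_v2 import MobileNetV2

# Width multiplier 1.0, 224x224 input (input_size must be divisible by 32).
net = MobileNetV2(n_class=1000, input_size=224, width_mult=1.)
net.eval()
with torch.no_grad():
    logits = net(torch.randn(1, 3, 224, 224))  # features -> global mean pool -> classifier
print(logits.shape)  # torch.Size([1, 1000])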
diff --git a/gimp-plugins/DeblurGANv2/models/models.py b/gimp-plugins/DeblurGANv2/models/models.py new file mode 100755 index 0000000..6f85fc2 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/models/models.py @@ -0,0 +1,35 @@ +import numpy as np +import torch.nn as nn +from skimage.measure import compare_ssim as SSIM + +from util.metrics import PSNR + + +class DeblurModel(nn.Module): + def __init__(self): + super(DeblurModel, self).__init__() + + def get_input(self, data): + img = data['a'] + inputs = img + targets = data['b'] + inputs, targets = inputs.cuda(), targets.cuda() + return inputs, targets + + def tensor2im(self, image_tensor, imtype=np.uint8): + image_numpy = image_tensor[0].cpu().float().numpy() + image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 + return image_numpy.astype(imtype) + + def get_images_and_metrics(self, inp, output, target): + inp = self.tensor2im(inp) + fake = self.tensor2im(output.data) + real = self.tensor2im(target.data) + psnr = PSNR(fake, real) + ssim = SSIM(fake, real, multichannel=True) + vis_img = np.hstack((inp, fake, real)) + return psnr, ssim, vis_img + + +def get_model(model_config): + return DeblurModel() diff --git a/gimp-plugins/DeblurGANv2/models/models.pyc b/gimp-plugins/DeblurGANv2/models/models.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d38a301ddcc2b1144fced023d35fdf70b326c538 GIT binary patch literal 2209 [binary data omitted]
= 0) + super(ResnetGenerator, self).__init__() + self.input_nc = input_nc + self.output_nc = output_nc + self.ngf = ngf + self.use_parallel = use_parallel + self.learn_residual = learn_residual + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, + bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): + mult = 2**i + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, + stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + + mult = 2**n_downsampling + for i in range(n_blocks): + model += [ResnetBlock(ngf * mult,
padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): + mult = 2**(n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input): + output = self.model(input) + if self.learn_residual: + output = input + output + output = torch.clamp(output,min = -1,max = 1) + return output + + +# Define a resnet block +class ResnetBlock(nn.Module): + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): + super(ResnetBlock, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) + + def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), + norm_layer(dim), + nn.ReLU(True)] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), + norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + out = x + self.conv_block(x) + return out + + +class DicsriminatorTail(nn.Module): + def __init__(self, nf_mult, n_layers, ndf=64, norm_layer=nn.BatchNorm2d, use_parallel=True): + super(DicsriminatorTail, self).__init__() + self.use_parallel = use_parallel + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + kw = 4 + padw = int(np.ceil((kw-1)/2)) + + nf_mult_prev = nf_mult + nf_mult = min(2**n_layers, 8) + sequence = [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, + kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] + + self.model = nn.Sequential(*sequence) + + def forward(self, input): + return self.model(input) + + +class MultiScaleDiscriminator(nn.Module): + def __init__(self, input_nc=3, ndf=64, norm_layer=nn.BatchNorm2d, use_parallel=True): + super(MultiScaleDiscriminator, self).__init__() + self.use_parallel = use_parallel + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + kw = 4 + padw = int(np.ceil((kw-1)/2)) + sequence = [ + nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), + nn.LeakyReLU(0.2, True) + ] + + nf_mult = 1 + for n in range(1, 3): + nf_mult_prev = nf_mult + nf_mult = min(2**n, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, + kernel_size=kw, stride=2, padding=padw, 
bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + self.scale_one = nn.Sequential(*sequence) + self.first_tail = DicsriminatorTail(nf_mult=nf_mult, n_layers=3) + nf_mult_prev = 4 + nf_mult = 8 + + self.scale_two = nn.Sequential( + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, + kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True)) + nf_mult_prev = nf_mult + self.second_tail = DicsriminatorTail(nf_mult=nf_mult, n_layers=4) + self.scale_three = nn.Sequential( + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True)) + self.third_tail = DicsriminatorTail(nf_mult=nf_mult, n_layers=5) + + def forward(self, input): + x = self.scale_one(input) + x_1 = self.first_tail(x) + x = self.scale_two(x) + x_2 = self.second_tail(x) + x = self.scale_three(x) + x = self.third_tail(x) + return [x_1, x_2, x] + + +# Defines the PatchGAN discriminator with the specified arguments. +class NLayerDiscriminator(nn.Module): + def __init__(self, input_nc=3, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_parallel=True): + super(NLayerDiscriminator, self).__init__() + self.use_parallel = use_parallel + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + kw = 4 + padw = int(np.ceil((kw-1)/2)) + sequence = [ + nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), + nn.LeakyReLU(0.2, True) + ] + + nf_mult = 1 + for n in range(1, n_layers): + nf_mult_prev = nf_mult + nf_mult = min(2**n, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, + kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + nf_mult_prev = nf_mult + nf_mult = min(2**n_layers, 8) + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, + kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] + + if use_sigmoid: + sequence += [nn.Sigmoid()] + + self.model = nn.Sequential(*sequence) + + def forward(self, input): + return self.model(input) + + +def get_fullD(model_config): + model_d = NLayerDiscriminator(n_layers=5, + norm_layer=get_norm_layer(norm_type=model_config['norm_layer']), + use_sigmoid=False) + return model_d + + +def get_generator(model_config): + generator_name = model_config['g_name'] + if generator_name == 'resnet': + model_g = ResnetGenerator(norm_layer=get_norm_layer(norm_type=model_config['norm_layer']), + use_dropout=model_config['dropout'], + n_blocks=model_config['blocks'], + learn_residual=model_config['learn_residual']) + elif generator_name == 'fpn_mobilenet': + model_g = FPNMobileNet(norm_layer=get_norm_layer(norm_type=model_config['norm_layer'])) + elif generator_name == 'fpn_inception': + # model_g = FPNInception(norm_layer=get_norm_layer(norm_type=model_config['norm_layer'])) + # torch.save(model_g, 'mymodel.pth') + model_g = torch.load('mymodel.pth') + elif generator_name == 'fpn_inception_simple': + model_g = FPNInceptionSimple(norm_layer=get_norm_layer(norm_type=model_config['norm_layer'])) + elif generator_name == 'fpn_dense': + model_g = FPNDense() + elif generator_name == 'unet_seresnext': + model_g = UNetSEResNext(norm_layer=get_norm_layer(norm_type=model_config['norm_layer']), + 
pretrained=model_config['pretrained']) + else: + raise ValueError("Generator Network [%s] not recognized." % generator_name) + + return nn.DataParallel(model_g) + +def get_generator_new(weights_path): + + model_g = torch.load(weights_path+'mymodel.pth') + + return nn.DataParallel(model_g) + +def get_discriminator(model_config): + discriminator_name = model_config['d_name'] + if discriminator_name == 'no_gan': + model_d = None + elif discriminator_name == 'patch_gan': + model_d = NLayerDiscriminator(n_layers=model_config['d_layers'], + norm_layer=get_norm_layer(norm_type=model_config['norm_layer']), + use_sigmoid=False) + model_d = nn.DataParallel(model_d) + elif discriminator_name == 'double_gan': + patch_gan = NLayerDiscriminator(n_layers=model_config['d_layers'], + norm_layer=get_norm_layer(norm_type=model_config['norm_layer']), + use_sigmoid=False) + patch_gan = nn.DataParallel(patch_gan) + full_gan = get_fullD(model_config) + full_gan = nn.DataParallel(full_gan) + model_d = {'patch': patch_gan, + 'full': full_gan} + elif discriminator_name == 'multi_scale': + model_d = MultiScaleDiscriminator(norm_layer=get_norm_layer(norm_type=model_config['norm_layer'])) + model_d = nn.DataParallel(model_d) + else: + raise ValueError("Discriminator Network [%s] not recognized." % discriminator_name) + + return model_d + + +def get_nets(model_config): + return get_generator(model_config), get_discriminator(model_config) diff --git a/gimp-plugins/DeblurGANv2/models/networks.pyc b/gimp-plugins/DeblurGANv2/models/networks.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8841f021c86be08018f50f50e9bc917407e034d1 GIT binary patch literal 11391 [binary data omitted]
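Illustrative note (not part of the patch): get_nets above builds both networks from a config dictionary; a minimal sketch, assuming the file imports as models.networks and that get_norm_layer (its definition is not shown in this hunk) accepts 'instance' as a norm type:

from models.networks import get_nets

# Hypothetical config; the key names follow the lookups in get_generator /
# get_discriminator above, the values are only illustrative.
model_config = {
    'g_name': 'resnet', 'norm_layer': 'instance', 'dropout': False,
    'blocks': 9, 'learn_residual': True,
    'd_name': 'patch_gan', 'd_layers': 3,
}
netG, netD = get_nets(model_config)  # both come back wrapped in nn.DataParallel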
diff --git a/gimp-plugins/DeblurGANv2/models/senet.py b/gimp-plugins/DeblurGANv2/models/senet.py new file mode 100755 index 0000000..ceb420d --- /dev/null +++ b/gimp-plugins/DeblurGANv2/models/senet.py @@ -0,0 +1,430 @@ +from __future__ import print_function, division, absolute_import +from collections import OrderedDict +import math + +import torch.nn as nn +from torch.utils import model_zoo + +__all__ = ['SENet', 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', + 'se_resnext50_32x4d', 'se_resnext101_32x4d'] + +pretrained_settings = { + 'senet154': { + 'imagenet': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, + 'se_resnet50': { + 'imagenet': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, + 'se_resnet101': { + 'imagenet': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], +
'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, + 'se_resnet152': { + 'imagenet': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, + 'se_resnext50_32x4d': { + 'imagenet': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, + 'se_resnext101_32x4d': { + 'imagenet': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth', + 'input_space': 'RGB', + 'input_size': [3, 224, 224], + 'input_range': [0, 1], + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + 'num_classes': 1000 + } + }, +} + + +class SEModule(nn.Module): + + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, + padding=0) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, + padding=0) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + module_input = x + x = self.avg_pool(x) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class Bottleneck(nn.Module): + """ + Base class for bottlenecks that implements `forward()` method. + """ + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out = self.se_module(out) + residual + out = self.relu(out) + + return out + + +class SEBottleneck(Bottleneck): + """ + Bottleneck for SENet154. + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None): + super(SEBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1) + self.bn1 = nn.InstanceNorm2d(planes * 2, affine=False) + self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3, + stride=stride, padding=1, groups=groups) + self.bn2 = nn.InstanceNorm2d(planes * 4, affine=False) + self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1) + self.bn3 = nn.InstanceNorm2d(planes * 4, affine=False) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBottleneck(Bottleneck): + """ + ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe + implementation and uses `stride=stride` in `conv1` and not in `conv2` + (the latter is used in the torchvision implementation of ResNet). 
+ """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None): + super(SEResNetBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, + stride=stride) + self.bn1 = nn.InstanceNorm2d(planes, affine=False) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, + groups=groups) + self.bn2 = nn.InstanceNorm2d(planes, affine=False) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1) + self.bn3 = nn.InstanceNorm2d(planes * 4, affine=False) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNeXtBottleneck(Bottleneck): + """ + ResNeXt bottleneck type C with a Squeeze-and-Excitation module. + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None, base_width=4): + super(SEResNeXtBottleneck, self).__init__() + width = math.floor(planes * (base_width / 64)) * groups + self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, + stride=1) + self.bn1 = nn.InstanceNorm2d(width, affine=False) + self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, + padding=1, groups=groups) + self.bn2 = nn.InstanceNorm2d(width, affine=False) + self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1) + self.bn3 = nn.InstanceNorm2d(planes * 4, affine=False) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SENet(nn.Module): + + def __init__(self, block, layers, groups, reduction, dropout_p=0.2, + inplanes=128, input_3x3=True, downsample_kernel_size=3, + downsample_padding=1, num_classes=1000): + """ + Parameters + ---------- + block (nn.Module): Bottleneck class. + - For SENet154: SEBottleneck + - For SE-ResNet models: SEResNetBottleneck + - For SE-ResNeXt models: SEResNeXtBottleneck + layers (list of ints): Number of residual blocks for 4 layers of the + network (layer1...layer4). + groups (int): Number of groups for the 3x3 convolution in each + bottleneck block. + - For SENet154: 64 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 32 + reduction (int): Reduction ratio for Squeeze-and-Excitation modules. + - For all models: 16 + dropout_p (float or None): Drop probability for the Dropout layer. + If `None` the Dropout layer is not used. + - For SENet154: 0.2 + - For SE-ResNet models: None + - For SE-ResNeXt models: None + inplanes (int): Number of input channels for layer1. + - For SENet154: 128 + - For SE-ResNet models: 64 + - For SE-ResNeXt models: 64 + input_3x3 (bool): If `True`, use three 3x3 convolutions instead of + a single 7x7 convolution in layer0. + - For SENet154: True + - For SE-ResNet models: False + - For SE-ResNeXt models: False + downsample_kernel_size (int): Kernel size for downsampling convolutions + in layer2, layer3 and layer4. + - For SENet154: 3 + - For SE-ResNet models: 1 + - For SE-ResNeXt models: 1 + downsample_padding (int): Padding for downsampling convolutions in + layer2, layer3 and layer4. + - For SENet154: 1 + - For SE-ResNet models: 0 + - For SE-ResNeXt models: 0 + num_classes (int): Number of outputs in `last_linear` layer. 
+ - For all models: 1000 + """ + super(SENet, self).__init__() + self.inplanes = inplanes + if input_3x3: + layer0_modules = [ + ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1)), + ('bn1', nn.InstanceNorm2d(64, affine=False)), + ('relu1', nn.ReLU(inplace=True)), + ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1)), + ('bn2', nn.InstanceNorm2d(64, affine=False)), + ('relu2', nn.ReLU(inplace=True)), + ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1)), + ('bn3', nn.InstanceNorm2d(inplanes, affine=False)), + ('relu3', nn.ReLU(inplace=True)), + ] + else: + layer0_modules = [ + ('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2, + padding=3)), + ('bn1', nn.InstanceNorm2d(inplanes, affine=False)), + ('relu1', nn.ReLU(inplace=True)), + ] + # To preserve compatibility with Caffe weights `ceil_mode=True` + # is used instead of `padding=1`. + layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, + ceil_mode=True))) + self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) + self.layer1 = self._make_layer( + block, + planes=64, + blocks=layers[0], + groups=groups, + reduction=reduction, + downsample_kernel_size=1, + downsample_padding=0 + ) + self.layer2 = self._make_layer( + block, + planes=128, + blocks=layers[1], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.layer3 = self._make_layer( + block, + planes=256, + blocks=layers[2], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.layer4 = self._make_layer( + block, + planes=512, + blocks=layers[3], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.avg_pool = nn.AvgPool2d(7, stride=1) + self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None + self.last_linear = nn.Linear(512 * block.expansion, num_classes) + + def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, + downsample_kernel_size=1, downsample_padding=0): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=downsample_kernel_size, stride=stride, + padding=downsample_padding), + nn.InstanceNorm2d(planes * block.expansion, affine=False), + ) + + layers = [] + layers.append(block(self.inplanes, planes, groups, reduction, stride, + downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, groups, reduction)) + + return nn.Sequential(*layers) + + def features(self, x): + x = self.layer0(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def logits(self, x): + x = self.avg_pool(x) + if self.dropout is not None: + x = self.dropout(x) + x = x.view(x.size(0), -1) + x = self.last_linear(x) + return x + + def forward(self, x): + x = self.features(x) + x = self.logits(x) + return x + + +def initialize_pretrained_model(model, num_classes, settings): + assert num_classes == settings['num_classes'], \ + 'num_classes should be {}, but is {}'.format( + settings['num_classes'], num_classes) + model.load_state_dict(model_zoo.load_url(settings['url'])) + model.input_space = settings['input_space'] + model.input_size = settings['input_size'] + model.input_range = settings['input_range'] + model.mean = 
settings['mean'] + model.std = settings['std'] + + +def senet154(num_classes=1000, pretrained='imagenet'): + model = SENet(SEBottleneck, [3, 8, 36, 3], groups=64, reduction=16, + dropout_p=0.2, num_classes=num_classes) + if pretrained is not None: + settings = pretrained_settings['senet154'][pretrained] + initialize_pretrained_model(model, num_classes, settings) + return model + + +def se_resnet50(num_classes=1000, pretrained='imagenet'): + model = SENet(SEResNetBottleneck, [3, 4, 6, 3], groups=1, reduction=16, + dropout_p=None, inplanes=64, input_3x3=False, + downsample_kernel_size=1, downsample_padding=0, + num_classes=num_classes) + if pretrained is not None: + settings = pretrained_settings['se_resnet50'][pretrained] + initialize_pretrained_model(model, num_classes, settings) + return model + + +def se_resnet101(num_classes=1000, pretrained='imagenet'): + model = SENet(SEResNetBottleneck, [3, 4, 23, 3], groups=1, reduction=16, + dropout_p=None, inplanes=64, input_3x3=False, + downsample_kernel_size=1, downsample_padding=0, + num_classes=num_classes) + if pretrained is not None: + settings = pretrained_settings['se_resnet101'][pretrained] + initialize_pretrained_model(model, num_classes, settings) + return model + + +def se_resnet152(num_classes=1000, pretrained='imagenet'): + model = SENet(SEResNetBottleneck, [3, 8, 36, 3], groups=1, reduction=16, + dropout_p=None, inplanes=64, input_3x3=False, + downsample_kernel_size=1, downsample_padding=0, + num_classes=num_classes) + if pretrained is not None: + settings = pretrained_settings['se_resnet152'][pretrained] + initialize_pretrained_model(model, num_classes, settings) + return model + + +def se_resnext50_32x4d(num_classes=1000, pretrained='imagenet'): + model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16, + dropout_p=None, inplanes=64, input_3x3=False, + downsample_kernel_size=1, downsample_padding=0, + num_classes=num_classes) + if pretrained is not None: + settings = pretrained_settings['se_resnext50_32x4d'][pretrained] + initialize_pretrained_model(model, num_classes, settings) + return model + + +def se_resnext101_32x4d(num_classes=1000, pretrained='imagenet'): + model = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], groups=32, reduction=16, + dropout_p=None, inplanes=64, input_3x3=False, + downsample_kernel_size=1, downsample_padding=0, + num_classes=num_classes) + if pretrained is not None: + settings = pretrained_settings['se_resnext101_32x4d'][pretrained] + initialize_pretrained_model(model, num_classes, settings) + return model \ No newline at end of file diff --git a/gimp-plugins/DeblurGANv2/models/senet.pyc b/gimp-plugins/DeblurGANv2/models/senet.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64236f7f8fdb41c1fcd6c1436e3aade65cc456d3 GIT binary patch literal 15348 [binary data omitted]
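Illustrative note (not part of the patch): the SE-ResNeXt constructors above download weights from the URLs in pretrained_settings when pretrained='imagenet'; passing pretrained=None skips the download. A minimal shape check, assuming the file imports as models.senet:

import torch
from models.senet import se_resnext50_32x4d

encoder = se_resnext50_32x4d(num_classes=1000, pretrained=None)
encoder.eval()
with torch.no_grad():
    feats = encoder.features(torch.randn(1, 3, 224, 224))  # backbone only, no classifier head
print(feats.shape)  # torch.Size([1, 2048, 7, 7]) for a 224x224 input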
zJlyylLaO;Ll2&tKut{Qef5<^0nIaUaKO!Xc$4Hv`lVG2U zIrV1&J{NH6FNjDzRZ=wiFNt~Mhe`QO^4Aas_y$P_m=^3?vF45byXZMxJw5XG0e%qh zC-7r{p9K5~{7l5P{UXWpr|N3mas8@u=IbW;H+nLO-w`&+48%q_~Bl!bi8~%x; zPCuK2)albNFy=r>H&>0R)&CXnZ^5#ga6xp;sj1YF7`11u%rQZ7wIDO*Z%OUX}?E0R_^C(HO+)4&xjgHX9;5m&BF z;I;^rYll#|_DHVPSJ&zN6dmZ2IZj6!7^f4$IGquX(~7r%VtXzh9l8s6 zd-fn=&OIgBR$pDu8$Xj3bVk;USEPX}>4k75y%DCi5|U%}{CzV1m9a7%GSl^;fpn`N zq+1msU0)=}>Z|=H6;`8z(lxRA)q$j110mI#h>KC7zRg;FB|S=3UqxIryL;B;FA*iyjh`Ultwu{Uo`klrM3!$Be0Rw%r4J3?l@J(A9RhhRI3 z**kX%u(N=ty$cZwY*$IqJ9i`IogXIUH_3ZI7+_B%9bm6udyAEO=e!U5>=QlrRZow+ zUx57uT%iL394O!l9Yn;n9W2S)@ce2D-;f=mbL5@>^{~&ObYu<#5Ek8Gh&SfIz=tDj z%p(w%^^r(wokwwyT4(7d`Dh^7j!|3coW}-C1j}xckK?%HoW~RMZV?l&+7k&$bP|%5 z_T*rvh?PI=b87I@#9heK0~7>Y$TI?*Dd0j5BBGFINiqw`tL3U|8u_2l**b@vI?uyC z=g`0vo{Lbp^AH#A{J<9=6z)QV!VN~Uom1n~>zSB~=#n|k#WXO^B?#kOig=vM0$+|W z&Je_AF_a)S&#>rlg@8BDl|;<>DoKWO>Pi~)C+4r#8Cf&Fh6b)=IKq`&i!in8kSynP zI^ETdphIT5>uDg}4G8IOL`ZiNlI5IEr?1Fwri0Qoan4(Sq`DO$)oqB2al869>zsGc zqvV`-5*L!YOuPn15|VWklCHtKgWV%ma?X2$-6!VtcYlBf1ibzp4DgVE*Wc&>4-44p z9tki;z@OIG0FNR?=X^|UPWO1UJt5$9PX-t#;B?~yJSE_CPX~Agv2#94fOE#fKF@K$ zZ%Yb=b3RYV%wIs#nZFq9B{4haga9uKc-pTJvA|xH6rJ-mV&3^-Qtq5@Kp5anBpu+b zU~h|+JLkL)`@9o9-&IeKJTbr|0axg~0Fwn=q4$ZnwhttE8@{GWk4b*0bL5==^{~$r zIx>fk5Ek9Xh&SdZfj>pqn4cjm>(7zYIltiGU*{}7>@yWewlCF|I_FmbzZNWW&Tlv_ zIp;KD{tml|SM7I%B$|$-rTsqG4`SsH`}`RECvg|@=K#M5xRAdF_)Wlt{GEtG&XD95 zQtvNJX6hVv>O2qo%%XuS`~#tIe6$oadmyPgAf)Pu zxEP((w^`@xOplUtb|Ee#%b9o$b|oZhHzZwy%LnT&R&vfB!Fq~${jCsSMFFqBUIBUw zc>S#uU}XVYU7rA}2>8=lH9%ja=$xym&FNN;wlxHtZp{D{0!~*Mpi02$ssq#@#dNi5 zbGo`{s~2#(h5(HMPPbNowFR7RodD}1HsbXN${zMvpTk1ZPq4|uJ{v%&=!QsI(T##_ zELLWiSr7Yc5?wb{R~LG-0GkVVDq95DQovK`PsF0%N|LwHYpZJ5=v(WY`FhxA8+tN} zZ4p-6c8GW6_JMam*pWLTtmmDO+?~&F$1(8EbYbAEhkbUTiIH|i7-=`eBkdk|4}_8S zM7-ShBKX&l2gOIXcl6yyZK)&g8*o3tvTp(Q=eV>R4j?Wh2by>{97M>q9E_x&-66pa z6)WwA0l^Lv^M^Drz~KV^kd6p&q<}x9qXHZ);PrM)fMW%`-jV>vA;sl;yxN@ZglIca z!0Ap3a59qhu+J&ce5(38`Dp=8N4%H|1l(2Bl^*svgJWLoCmJ^vg9s_bSx8!nvxA)@ z<{OK11Dq$|I-F0$QoKO&@54S9a#%R{K1m2JX|hkb@e&ui7wBVQL_gn&Pf>jT^%;Lqbm zBChQwN$wtOsH>_;Zq_;T^msfdswXeyRt`uj}dn% z#|C&*z@>aFz~cfgrO$9aJU#(5E8oR<)fGa>NH2;;nh*t=dOh`s+>ba-9B+w%<~ z=KQ84`@SW9G43s$ku~GDY2ZrULAa845vDc~$+>Ug7vs2to|VV}8>Lh_e*80g=GO#dGwoqmfhW(y!; zpbG|ANWgPnn21%jh@|v$j72#pB#Q|}>ct63y#$h`UNYEHV)o{x1GE%y>Q+RgZY?Q# z^D@M|@x!Fto7+Gbpe>RP&@NbevF5$GL-g#Zo*ubVfX)K`1iA!RPQaf)S0b*hngx4G#PT|4zP!0RJ()xggjL-W@y=f%@QMgKzZb&3>5Zh`yb=fVdh^Oq()Cee>dmVJ zTvf2loBMKH^5)fudAEp(_r)57Bw7xWRe4Uk-KuCLH*95$p&<~SSC zz&INtjI#;iaW)OS8NxW5BetV02x5C~86ElyczbR|#GJR5WP5Wx59d@Q+vtp}8E;Di zSF#<#m28hNwH=TgZ?5Mtrn+QDI%KBXi3ZZ`jF4^@gmk+iIo@1fr9aiZ8y%Fci8t>K zB-I`WsrE$TCR=@*_2#|lQS#<}hzrTSCSHU45t4O(Bwd3C1Upcy3d&28%WCofk#Vi`CO3UlQO_0axg<0GA86LPLnSwxN=|4PReZ z&BJNKbdJ39zaI9vf{x7LN`yss72=I~b>M3dHs)}IWqmD@TIY2fq}EybImQSe*{)Yx z>YO(Oyiu_1+qj!JE;;AT#D(M*6R+A^2}yJtl9u-NV0Va>JLjFj?-F+*M+O)r;6mOV z;2r@N@?Ihed7mV=koEjdTynq8$?}m0Xy6JTL@3-thzmD5@WTj&djz3yV~~93Y^>*} z2V?1yInJXrFwSEL<2;UdoF@W5i7?JM#AY#`AU4la(cx(UZ=Pp}nDeueZ0BsK(AN^r z>5Qxy|BnW)!T+FCsb4*-)WBDEAT_GSf|obz=csop?H^(NwCyrsU)I_KN;C^_dl#D(Nt6R*LEgk+tBq-*fKV3Wm4&iQ_@ z55&CwJ`6BL!0Yd$03Qo@{e2SPQvqAuX8}GJ@Tc`ffT>8)Ilok!(|r|fUkffh+is^n(o74RmZ9fS(-OmAj5pcR+1NZR1Xj?3Ylx~t2;GmE!C={s|A|&;~NSb<) zV2g@5^wtD_-8BfG-p);~(yd@1>Nq>YZ*$QE5TO&DE--tDH&rOHSblcKEy6q6s zZI6&{2PDVp`2&!A(X%5Rl&*=@?*t^(&IqY?L0pVo)wfxz-;Ew6tKXftknCaNHMl1s zS@%NHHMn=MeZ)#uzi+Vp#Jv9Y4{(5h*WZBw4ifPCJ2=200yfh_0}K%Gr*&9>fk@Gt z4_BMh9T9Cu3OLC8_F zcB+`Y^Rxh`3wYWEA{N*glA?E>NnA(kiKJ0T|^gLfZ zJ@N$sE);Nu1_!uEz!kcfh-=+=Fq^y z5H{u&2+R6PB(=_~I7qFtbd!8FkZjkeEp^V}0k0J-yGg!|3wK}O`wtv#R7d zI%KB%9}T2?9wFTe2QW}WjjdX${= 
zb>c$uhKbkUn}lS23rW}D+ri!uD>>)8!6u4%{Y?t+o`BciR44;6njh-IM?y z3Ha0cIKU@J(K$a=o6~(3ZJ!G`-4_9-3OL=D0lpG&x~~I#gV;Hz5#XHhu+O&~@Y|9? z;hfV6nfdofI`bcb{U~PV{3*cC0-p9SL@cmhB}M1_jhJ_Sn3Ox`3Q5z&JZQEe&#L6G`X&=0UxC_}a zKqmngvU7kg0xsloL=>{CB)5>YHT;5iH=V;yo#$bntD;Z{T_ zTrVWgIr#&K6+Fn^n=YB-tV9Fjtc);DAH?IV5_naFarz=Qi`58X^Q;~n))4UKS(Au4 zS4c9QlPl4Kh?P1cYsOVHa3$3US5kv8wOSDEH> zoU>7XI(BV3C|wihTn9+1brDjnhqxH)t8cT;*^eG2=iGp}kZfq;HMkKWSvN+~HMmKz zO~p#ixmmEy#k~Hu2(YDq*I)ktTM2mmZ5?160bAX+0k#wHr?q{69gw1P?x;4W+bP<1 z7I3;<0_-Z_bh`!EUBKz~2(TwoOt+WXoNn)E+eg6Z_6@L~fYa?C-~a)qJ21dOh>iGQ zg0hEw4&ktn94gr4VV?mIDtZ`_R&-#n!^O%BGwWfWBckh(>gqxt72s$APvw{Z#|n5V z2@#9_I7!|{ujE%3>XPGi&U`)Wa{@h?#fb>3?IgrI^5np$AneFf5!Ul*Nbb(B)N_8x z>2zV>tcQIHG%?Z{2qT?|c%(sr&q5gKY{bj`9D;uxc~E?G=SJW2)RsE(`2jBwEc+JV zLXJzjVK8waxyZ!3;bKCr@E#+{dt_pCq zfY;kK0fq~By675BuC4&9|t(liwQPHpGkhb^`7y z>Prv%+`%y~_7ja8i@OLZ#YiMA#i(F+i}}Xlo&fg>xDNLbu@vu@{QI!a0~{8T2L&Vf zLxdzBjikvR4)%zclaC27R=~+0B_jD_lHy&<Tstg7IJgvm=fXTH8Gn?O${@iM~B zcm?s!cs1~A2s`6-ggy8MlHRtw$w9hF((lUNf|BlSHKx1doq+EOmhF;>9GC7}CK2;m z5fkr{$%G_&A4!Y*L9h?S%D*d{68s}^m-6EPp9r{=p9c6$z@_}0h*Ewb$t`6ie~BQO zs&n}NEhpe~T?2FzaJuCKbQf^C z9sznH_U07`nm+8aBBYS?5)T9IO~~|DLelB49ITI+4RnJyaUH2Z{CrZcZ-;KU+hds zqFs=*xVr}1O|0CTcMraYxJ$WbfV~7<%Dn^ZBj8f*OGGL6ljN1Mnt$q5B>U@}ET=kv z2CncxgvuR+xN-*vJ_Mn1hayyN0Fvv?)m3^q*2}XK9L5lMUryx1rT+{>t_pB9QuO9))aG=?PsF>c|YklZR1 z-gz4#Grt{4XMRVpJH_mscLf+J;AxK{Vu9T)DSGEU#Juyvr2N~s`ydQ(KavjcK(GhJ zn)l9!qUUJ!^vDkfctpSz8WUiwfGhMU5!d#ZBzMEt>ra_Ku5;v_|MjrX6Le$_Pa-V3 zafmnO_`pvgY|N(-mi048YMsw=kXmQy=NQic$@V|BrOx?$z!wC|zKwg4PLcU8xAty>Q3t3rL%bx>e zr%uv2Sw8X}4P4=5gu=a#xNsi?{t%&XQxFRG5t8kk8mCTw)B9t(WRCL*4UF?C!Z@EH z9_RDGUm%P#6|q@-Nf4XotLX5xfH%)KM9g`bB*Qs%CH0m3%==rNku~G*Xy8hwBV5V% z2vhq3$#PDo({J+qNQcaHKhZ$CpApjif{^Z4B+EISPH!^#jSfoJ#5sQll4=G*s+ow3 zF-v`$by~B zRur=v_X^Nkz@=P?h()`yq;!+qhl4`0icq9pm5|hZku>#c!B!V@>NNtaDd5x?+_= z?iOHo0he+QB1*ZZB)63OZePTe+b{6`2$eekp>hWzxmM3} z_uPyhM3>BQ4yJ)|4nY{_P{iX52z(gAI0F&8+u;PUJ&%YEM+$g*9!12QkCtRxeQmuS zr#VJvWX2N4&NvrN1O&n6`6IY_z&&kc5-SjpJA0k6MH0$eI!GrcUp8=WJ zwSd!I6JWT2(_I_jI>g>Qf*`#~zMg|Za)VHK=Z%ES{3axw`OU#@5wmyR8sIhoPy2Qv z7T6ts4&eeR8(_o=5xzCXYN0!`Z4XQGHhdkge{Dz}(K+(Y|9aSG3>}%nScFCQDB_LzSm4JIHs%uu%lb(qwa#%I zq}ExwNgfX*+f!;wo%88{&j^;?BtOe>$vK}R=G`JDUbW8?lIR5_E$xfJUJ@&R*k?lU zm&IMkR|32$;6lC@;B^5P@(m&i`KBbdkp9Q^TRMlGI?uyCZ_~gPzJpM>cM%tEV&F*# zg?kU7aFdZ-=j7&;5A1!qWRCLz4UF?4!Z=e9kMmLBj}gZC1hH9sN)Vgpv*_@-fH%(< zM9g`rB-=Ub>hw6xmpUVB#$VCEm3)nGCEp-SZ5oo}oOR^mo|_Jt>As_Zbkh;ieUFgt z2PDTi>-e?XisVN+C|wih{0T^^pAl01g18vJs&BK-`5Qe-&iOlWA(>&~H8_)yth11G z4gL}APqC77&JH$5%(SqIg1bbR07FXrMA>Ls{_^umN{oF$0g^iBj)e0n|ReW5R#}7NlUv{ zu(id?ANE-%_`2dQGD1L&Z1O`P*UAgK;QNOdscVjQBr%{u3y^e8#!0OCS& zn2FcmKti$}j-+ewh+s#Gm7MdaU`LC2{T&nFSOKrUB*1Y3UVq02I6=TxcVd8(1pH~8 z9N-kB=$xmj&FM~yw$lZit`Oi10jE1Nz#svqJ1fB1NHN_xYIC}CqwPEar#nBu1p-cY zVSvE`PIpm&ixC^~B?M&;`&`OlA-PPj$-_REL#XHwB(3PsV8g`93^VItpDUv4mFnt3 zUlrhL0Z-+c0K)}5m1~Ju^w&x9HafpT$M=XMbk2M|>~lRmnZ*qVtL;X_JMyN$HzVxG zTM*Xstw`?9Z{TOt70GRMVc@KXeQu|Tk?uek=}yEW-4%Ew!bqbKFZa6%{&nO*@zLEA zeeYFU>d5y6ykD^FTYv{RF71W~i3`a?Cf*IB3AvVsk@T~BB-j|S(ry?V>`^g)NRI`0 zT)-dF69JwS@P{-mz<2?#x2FO;E#UR`On_&R;_`h?ZBF;UXnS73>0Su%B9is6&r8uf zLH(Wl-&nj8;9UXNVImPr zagyZUhkf4Tu#ij^jO6bVlKcZCP5xo9DPm6kQGkyHoct3al7A{G-nD#2%o{&U%D*f7 z0>S`Ok#vABgMB4dwh1#H_W3$`exsfqd0K#P1^jt@7ht-8KacN;xV9f8nR~FlhA+0O zbz7`+=Igt%pXkXXen!|CzaZWjzXtvdVQ2h~um@)#>21qQ4$@7MepfaNO1eMPnC_B4 z1I`vK+a+^2F5S1xCFZpvCf+4~6O!m3BrR@>9%c(5O};BzF!(~^F6F`j77=hM7Y(qO zfJ?bJ5v5#0l3PmtH(8M^sdM=MSP+R!C)oVGMDPCJBg+9MvPL*R}G<8(soU7ZPH?{|p~%L#aUb|qrY-6R?JExHol 
zw=AzSvS!?!2Ck$B!j<$ynA!?R)_sf9>Fp6K(jhZlFB(YK8zJ3F2jYR=z@OH70oF%~PTfy!PPaj{Z7AS$8wJ={ z!09##u&IF4Z5Cj2#NNCGLDPqQwuBUt{^DVvTM;t-t&w#4+XUNI%m%t$fb9i5_Z^5> zWjjhrKgZaKgF>>iP^8|4kkq>(Y3kjA?Jnljdj!~1z^V5lBK6*qqBrkD%o{&U%Ds6% z2m|boqyrof>_D;Rz4@T%d9ZqVmfax!%k#ZSZRwL+Fw@&QKZ{XBfgb zS0Ene%D`74jB_<&JGzD-w&(EZaIJv1=XFHPd4wd}n;UBMev0dLM%IjPpn)s75#dU1 zLYUgkNRBr*_)p5+LWj(Bx6(km+Yr*-j*#vSB*&W@_#N_!l?x z@cMf!z~cfo(0Su%B4TfT zi6H$s#sm%u$;(3Fov#ow^H-5{=C1{NUCiG3Mu0a3JngrLSYU5Uir)DSG4K2^DgQQZ zB7^}ZA?X0`1)D6^ym!7IJwH%SkNjbPDFUw0M*%(-aD_f0;@UoyVOf8Jq}Dl&gVZ`pKgakMNVf0PmOAJ3fZq$2 zeH-@!$0g_dk+_ijWa3r(Ga-q7LDJIx8tgZ*a_9U#_zZCua%O;80xskq0sa(lA!id& z$T^a{LN-=Z@~p~Sos;Dwf6>4d{*6$$e-Ib0MNi`e5DK>-Lg5xda-EZ3P{W}Yrc35H zi_pM0iz1A(7~*jj54;4zI7=cni=_x+^DG@5S_*jcv?5~8ttHvc*~tGYDw1V%M%Ij% zrGYDHgK#Bn5vJA-$#Kp`|3SI-bjVECfd>TP?uq0{_R{TZda!ePP>_0d}`ygA%7hMX>`>P*6T|Q&obF4Af9Ba;T*4cBPyBDz!OWb+pw8I9zRjIsu2P56~dsaE$>rK#JiuRGY(X6m2~O9B$(Pn+P~u&j6bW zINW9dHb-2Dx4>xrW}ht~g=8!7sK#4klFe<9G@IK7+fK~YxL1Jf1)R#>SWMa-B&CPs z9XTi@eS{)(Ura*pgruQ&4%Sc1q5B8eMZlqV#Uk_oNwLuHhRr8_n3O*x?*XBMJ(0A7 zy@Ks6Hupk5Fgypz(>)IkFhsz2U}%700=@%HSX|q1Np5{=)DzbTo%26EB#*?CL5xCJ zB%=`*k}-kDA{3Hw2+MFhl2-Z&9Hf;#eF0-4lyLi~F)j5;z)6B-56P1`E|vNz*nGB# ziI>@am;{=Nq{*EYY=5!xQhz}31I3-nLV$w=oXUd(Oc!t}55Xdphe~o%SzA$2O=&qy z=VTSW!)f3Ok3h)W48)l`GVoCdnL8RGbH^ZgrM|YpAM1E5E}8utM+5yFkI>Hvi2FG) z@JR^$oQ$}-&BTc1c}h5(D&XaL8Wv+dU6NVqYb&btM$9vGM%IYWq=73r3*k!6Mi|;T zNOq|woZc9FE)JRD&ZB{F=Ocu>03qBgB)in>5t&E$3vp1mW~KfjAfYZs2z3eKWLzrW zxt99N@FSNyD`8`0-k?2 z2e?JR#q`zyw+Z-e-5%f$q*$BpRGY)y6>WD5INUt}?iFyj`vTlA;Bd18Jb<`1KZub& zBtOJKA$eFRYUd-EjQmj~9rOgwA9!X(hwNSfMjf_*Di{$`)=f`2dWME(%qM*%1DrvN_-IFY|# zk;q>qxruD7biv7H{_Xod&M(4}`@1i8yh81^yc$asMDB?q4Lga#s59N&mwo zv!7OLxS!St{j@>cPusxl5c-)9aj}>mBNopB;jo~97f*XE#@s=YT{$bO^xpi1bVk;Q z7p8#`FM@C-iy{ndF(jvQR#xeo9T&$TGu#q15N=6?a7!VCTN=r!oR!u3((z?*P`GB5 zb6FsvmO}`&JmO@mAm6!G&K2<}RnC>L3(3kRo`b7k5_MH1or9|d>nK*LoT~?0L(KDU z%>Zi&c>b*&U>yO^zjXt25^$;O9AG^G->vlnbU})hv#Z)1u3NNq7jU?W0F?p`R~4XI zz~O2F)FQ5&br@96c(YGE2mH09P*l!FOh&!|l8$`CU>k|Ka`p(Yv4DrY2^JHqr=(ap zH^t^NKTOIi=jISP*aAsA*fQ8wV&#>y<(qxB4$p1m>7KU@u$_Q2)GNUD0?trxEUs+_ zN#265tgqs4jI0q)r-3Uu1mQ{!MHt#)NKWOfs?zH-hvSeL?g$zPHv=KukqF_ALUJl+ zRaG5tFh3dxg=8ck6-xvyfutyijcpcTu!m zEZ}gL1h`bd;Vuhsxq!o65#UOs815>yIo#FJc8!3;T^ry!0f)OjzzqTpcVmE?5EtT` zG0NWTa|?%s#(v02_>`t+=f|>PZpS!~KZn-+q_XN0Cz(ct&!2JRq z%4{qq{R5J`guc%w zNnGeS>&-q-(L_&ABlPqP;+~!j{2W40&m*4hFJR26$kXH2y%@ePsV%L@F9&=@uxZ_)<=J{0gx`Y6E1 z0-kT51o%|I^X;<$pCiTT`-R#Z?#pQVO2Fa14)6_<^=6-Mqxn1eJNWkjen32#f5hOa zqOtU5pPxA9gZ)I~!QvN8Qt>O2rsB6?zl-_7;*S7-3OEmcVKEi|mdtsx&p#X%l79sw z_%_n}Cl)qNi9zw7VNP6`P1zT9G zY!PO>*=LdPTvVR!d9eVC3;2F45nxFH-;brRxVEJwd26tm&pc>ImeD!?(`#kR;>jSE zLs%KhBVHLR1YQwgWvqm-23JPX$Cgz%NEb=^TG^^l!mXypbd_`rxVm83Dp`Z$(sRq2 z*nF&riC4+mm;_n}Nt3&7uufv-ua$KUzMi;Kxqg5y0#0Su0Nn(f%I;XCvOon? 
zxSt+@H%91Z6U4QvCq}IOO~YX`0WZ(Zu^96flI-V}>U#Z-yQR*^8u3;%a3xzKT*)>F zL)#X~d2Xq$*GD>TheKw#UNjJHdxUVk5yI_&R+hZ`AfqXZmobbv7e4mUQyI01(nA7BFF+B^}X z`I~+AffSNNJPPO}Ool%hNryir*uG*ep!)@wD&VnC!(x{0FDdO~9Kb;#IZ!A<7cdEZ z5R!&IIM{SChdw00p#lzl7#5)qmlSLB5!ig&DJ`zF)Mbh04E6e4xAX^Bmv)nld-tAnUcJMQd6mYy;F3~|5Te##gjpthA^v7N4)aS2z(~O z%0CNX-JFf2wfP(lTGr-sp@ch6jcIK@Ki~y|Wwm)0$EDhQAvT{aV&ZjiF(!d7LDJ-2 z8tgK$^4ffP@GHcf$}0n0CE!$E9pD-Pr}A1XQhA*uHTyYdq`(wjrd_2xROT@uH;dKp*@D=)Mh?-gO|@9$00M^ z6EqO+NrZ4uA%uGx$*IjXb#?rS;xjlXT(jEzERayoA%uD!iHB_Y&b2nbh)1b5zl2># zUN-R@dG-#Y=`74ZCfFTnc(E~Xy@_)x%i z>!Sc4BgNYMiP{|Q(`frlz~Md*@P&ZGeHq{@0f+lKz&D6%^S2o3KE`((6q4_SqIUj( z$;f|1(vklZ>}N68&R+ujD&S%NhQ$Q?T~e%_e_->OA139m#{C7MgTIlqgMWhkD>nDq z`CoXpTC3D^Yb?UG5pagu252YX49$ndwaqWdTky3NeA;%hfX-3voY$Lu7Q~TG+apZ6 z4u}`#LV*`XSeT0-OzTCFv~(`UL0URX`xuJ@iME8=(#pAHz@-GsUX5Ft<5J~Z2D^|f zYvNhE943L5N7B@;5Nt)U^2)hV@Rh}V3s(uSs(=%@T7ZrMPUPxXBytT&ZX)^YgNkHL zos(5Z)}nzcTpJ;A>mW|tx`8_(B(5_;;?_fQD`#!BzT0JeTr&IVLIeGDMd+s+;(odZ zu0ZIg5^=Gp!idFF9S$`DUOcr}jJZydT{&xA!|QcM)`%Nu;7S@1u4Ds*p>2rdR8GF` zfGyjNaL5eTg9gHFj1X=Ugm68PoXS~STgTJ?rZ^~Ev&y*{kWiZ=gxUgeGPacOTr1~R zc$6yV*4Tw)8xzmLZ83?u9g@z$Uct5(D^kf+@1mU5^%V^0}K>!xIqC1BgJq-)aGzQqivXg z!!-pMF5qw@0*n-JxKRN{BQC^aFq*&FXDpn5%ITV3L4S zIT?#dJ4I4@NZyx&Lb9Jwgr15?=xInAdjDVth&l9u0SW>ReGnF*50(@Q{d8QkLx#Xmvk z{7(TEv;VF7WjTnY#fYb2lQnrM|AZkqw@kaLMfF zW*X?{7KDCoMcmJAfp16X=MKcx?M{qXo_B@A-2z^o_h2#RdnMVWzOI%pRY>mB8CfH~ zp9ZdEHo}!WfH1TNk(^RrrypX;LpWrHdzc2oJ%SMKQG{@hAvvYKjt?HjG~!SBKT5G${pE#K_(XL$Z4Pxt(HfPVy>p??GXC*TaVTHDvw zT9Uiq8};#*ZE#_N&HK$hZE<7_?GPs2e25q3{DBuhSeOeUOzZYYS~@#$ke1HUL-ImE zqAjepv~n&Ia8bdshvdaLE>+IOvH5Hf6VKWuF$uI3lBRa)V9SV=zu9Nm;LC|Sk;?~I zLBNSzF~CX!PUOm1+~idxd5PrPyZL2%Rh`31ZSiKG)o9=fJ0c`*b;OBVBk-CCiCYUH zacd*Fm6H#3>0$w~7Sd6(+ zl3h9L{bF~O&d3^ZH4R)z4Z@YwA`Gn#$*G+DZ6*V&$00LZ0}X_0LC&%f;g^b+v=+de>V0nfi30_-T@Qr9OyUjg5(odWEP6f0*xwK-h>Xxl}= z;dTu$K)~U43$VL@!|f4ZPsEjTFAOSYyxC`O4)|+Hp{SgLFd6w^BpvyXU_-@RIfn&k z67aBxV==)-NQ#wnBsQPM?>gf43c&*HrP0^^2*ur%|7G9bAmkG^TYuA2slGY zfJp+*&}1yGZHgp!!8homqxRJ~s+{wBv(J7wGKQ%LlWrQ~g}Hy=0}vMGfe6#OfTWf4 zAP(kK&f=SW4h9l!y4uppc}T!R1u;9f_o=Ju2AIV&!l4 zIVSkA;!fmo0ge}NB2Nf#qJR^55*CR(S(2AXzFmujKU3$hQd_*)=M);a!c!3vcN*fv zogVlMgv6bRkhrsu+{)QdrH>9f8<)&}&Y^*R&PC|wJjDH+ANT@SstNKWN!sMe?RU5!I#xNB%2 z+_ebdu0sfSJ(5#78*221{~K^nxMr2}Mj)YXLI`y;;$+++-?>)KTk$AW&fBmH$?Ybd zgLhyO^-d(6gLehHTdY($?+JFVnCIVp0qz&@{F@!%0Rhjy2Ln7L;8OQ+fJX#;w;m1f z7*ed9kE_k$o`|+51sv|F08a}z+%o~56>zxc0z8it!@Zz3hkG&FUJ`J)mjk>a;Bc=7 zcum0JUJvjF;zIl;M%kNv-r}&3ye-)5%|7oy$mqLBn$h=yy)RZ)Ftgt5^Fg?NC|4)? zqW~WZcqpF)_*B3{`3#Fm|G6Z4q4OOuwfYmfFLciT^k$zg@njTVAdcfqzBl={LmF{dbHx6?uBx zxC+3?p ze}Dx9e3KRo&|bhdsY8H;1U%ms4zP%T=i8zI7DI~DcX72j+!E2Yq=3UM6<}#3>&-sP zMDw!pcktx`ERPg}ub?&uUoqNN5^(U91FRz8Yh5+KY61?|F~I7G*Txzco%%00Zq(GK zVFT+1Cu{0>Q>)<(x}NzT_+A6I%e>Qrb*rZ@2lw^SJS=4 z)pVOUEm@n(=r^fNt5!qD4xTirW9j-jjUV5w_k>|n#x`}{phJg_{2SIZyyL)uqsEV# zJaAyANljyi(;%*Ylcr2;N;;L!?b6ZB-BRz)5y76E*lkEMc>K_j6&-tY+-&fqQA0PM zFn-^vVV$b0D!O#!&HY_Ej%iB9H;o-QY1IBrJ*tT|IT!4XTpTBN?UsQr3C3t4WE$gH#iyQ8`Y)b)UtauRe9G9D&2-oQ#+@lo~mq? 
zHC8rjRNrNdRppHnrc9=F@`PmQ$ZkUiPwv!dDtB`#S(#eZxl6~&IbE<+rtncA_;*Ow zVe(Dx+xd`WU5rA~iT@WL_&Z}#F4sfS619G?E@FP*?;4<+fa_CtEPg~(Nanl=uad(; zQY9F_O;lqNyaq{w*9NN-bMX2A4FV3{h(+)XB*ot*HpDI@8<~{9hTQ`~2OA@42b%=z zDOUc&aeC{KgL<{@)q2Lr!6S!6*PDjxW^#4cn+MoJz_o1409y&TmTis2#cd#nl5KU4emK6r_zsi%_G;Cs_w?5MgznY0SKG;*diPwYS9{FXll{|aJA4^OFN7aj z+at!k1Mh(FLu*HbACG;I^f!vW9Psn2O|LdHMjn*DjJ*?-a679p{Q=f5V1L1~UnzFs zxb!QvE!0y3J&nU9r>F5W(bEKko+cvhX`jFeLQj(rKU+-3h@W>;!eL(l|Ge7|i*Zkt zM@IwO13`_sg=9Dr~w2Oao~Dk*=1WmcJQU0ZtY0JUlJH=>mR2IU~TC0={Es1vnciKJc8QHitVm z+RhVjxbp*CAmDJb0$eEIa2Ex*81VzoB^b@$kasDBFG3ZMVtP3y!@mMahks?TtHfMP zuMTjHfX99<7BlTSNolX~dJgy+RG|ocBPOA5LekJT2fIbgp>GXvn}9>#jz#D@B*i*? zCpKS$YEoXO?}pI9JxJQYy}|AiE3eb7TGr|N!*#Y?-SqPCv&%`g=xNr=N!s?gcfbb^67CFA0|YqVY1vr8@lzHs6G5;-&E#CV^f@()_*= z>`k$mj47?tZ$-n~YH&8+3Gl9fv-w_t_XV8I53tDQhmzcE`jfFf(m8q&tyrg9&8gEL z)5Jx7f{?sV5hw4nz@H-|?+b+FeTn4O>Bd^NHj}S#$?559n&{~pgr2@d+|ze~zenil z2gGIRM~qmeKZV230$!%SU@`7rCE0blk?;SgNq*BA*`xlQCa&cVglqW|VR(NbId!_x zpY8lN4mkn;p^1S1A_V*oAz-U@6|m)3qVgW>Mt{jxYa9sJyiT_P5~?jisCI}GGM{|s zTBqm7qg1CCz~&DmOgtIeV-mFkl1|2jf-Njos?&=ETU5;RaIpZ33wRzb5nxFH&%>nx zEG^)|x=es&1$@Vr3$Q#=`o&aj4!2^ott8-ZD+gFbAbv3ou$q9wbqugN62F*Yr2CF* za=@Q02t^%T8 z4l09HiOr*qR)=eiT-|kTfI0!Es6If0fK$|n#l>wP$yxb)vri45GO(e}Q5|j7zWBUL z9c@*5-fh*s{CRgHd>Ka%go(E?;sv@%;GPHzbW?<>y&00$(akxSQ%5OkzHM7T3Ad#h z(>l6Uz^w(#o_Dw5xKu~C#V#b9g88jR4>5X3zV4Ll5?rzXT@V>m|Kj}hT8Qoze-6c*ziEy<{( z9<}P|7@d(l>ajF&E#nZbWjw<0CLo!0l<$GA)L*kq#33i(J~R<9K?pbrA>d>rvyLjD z{>*R+4g_poNB0F1YCnWfQxPX*ntbP4NB760R7VfM=FcQdJQ)j^L_G*eC*#4vri+#8 z=pn%l74tkiEWqIco`**Sm?7YKcw~U11iXTd4seWs@7S>cjzfxd^mw&7+zHWkqJYDl z6yRh5hnpGT6aj}jHNa_z>*(ni=`Q3M92Al>g`$q0g~`azM$(a=6YN|u*U|F=oG;*E zUx39Vn(~79j1;3@&9 z=;{F12slO8VsUZTNitV{Ju|Gfj`tm0uX9vKr!O++b%UHbdIP?U<3@yucN5|TdUN1g z5EkgI2vhquB(0;jb1>(1gHj#614_6%)tJ`Ny8_-VSXM{x;kZ;s@5L@8_nCO=-j7M3 z*+`n-2ZB8)*6ekIhoa$OH8`7(1b9@y*?cU(;{wj+6If*PNlA7#`Gbi{{lfc{&Y_}Y zyl(I`O$@l{%QGZ0z$@o*SpT$ab^p{}2ig_OX7T|XQ&%-|g{3+mh z_*a0x1zh<43GlCg@7R9SQ2qrUJ9f5mgkpRw`#=;#!EvA$ui>FmW=wm zEQeh9a!8ud<%6w&jDDMQk5<);O1)w8-h4X~PkFTG=c)dhHYGcJFP zfNLV7KXVrz#9D#ZmM;aWn3r{Q=-}%{dnW-0?;Kz~0q14?09^zevTJ~Di1X4NgZ-$a zX=2mh$pb5s3XVsQ<58CkRbun1w>%;{)tF?b21&D18>~*uKfUS$Gzd5+jaV%04J5o! 
z^Z&Nnwb|GSL&sFscSttm^#31hc5TFE^s6L4vDsC;y#HaZi#?X1gU2@YsO(lzwv`gw zUd1T5Xj3eI*UJ&KgQ7_P(~g+-l-WCJtjgc8%8c{>uq9UPqKbaXciq^xDek&e&Sg)G z56e%QhKetn5mwL8*O6Skn7wc5frMmOn!YNXV(RQlu!Zl=ttL{?KdCtDl|Gix{ z)Vqr0o6W(}4x=^~W5nebTt3)zC(Qd?Nz>RV5n-pM?fZ8cHGbmQ!9$yRY&Cf7q^4qj z|6gsk&g}ZwiIw(*$RugvbAK+R^2un7LsiRqiAi1N$_4s z8hra;y~P}ShX6YYICvi{g7=jaAGvqJE+jjfls|I!gU~^LB<*0AV7rQyJ#zP&-QtOR zKzQyZPxrigfIS3U<@XG*mw>DM-dJ4QKuO*+Vr6v&?;{(ebM(Yr(KepAd(H0Mvz=e{ z4AGZb|INy;%A(Rm;~AfNwb>-n$Y!&f&>(b}Of{*X(qgu0RWyco0JB4n~~1>46VHNZp|b zsXGkGecYxH@@vxJxMX*91TAzl1EHfM5qES{;G+>bItKB><*^uXGmZ;~;|2T_Z~_*i zK2ehOw5{LR8tVBr2%V8R=96jRQf4At$|(qgI~B=#*skNbtJb#({XvpS8~c66#!pQ0E~|$NBP|>$CO+c$A*CXJHqT3r#!^ zFTy11#Yj32F9~+3Sm{~&vS62sc@AC?;7S3{!K(sXE#NtLO@M0!{M2?`fa?W(zitR{ zBT{?_yh&{icXPDeBH(bh2DnYY;cgFbhk(P~8Q?C&4}o`MG~c7U2U1Ax6^|-m!Fp`Sp~&`$M3iMYTmkRXP*oEX96EBQ!F$we?l4ke& zU_Xe>>@__t&_4$MN!+>oIlwOh&gHKGeiLvmf5#%1e@Jq3$)A2#B!B80y);lP(9_cb z{TD4<;@=3V`v-CA{tf&eLh4#|cIsLq`32e?wZSF3qqelrQ9Fc==0n`k{DBuh=x9O2 z6{$T&tk4eOu#kXP=)zcxdJ#!hfv%~ms?et$E~+y!$GjLVT*~4Im$C%HyjT*+D$u;V zQdLpU8>*MWAv@mEv=DC@gm}v$#9I!@D$t5oslSt79tYwzFVHIh3AG|ZsFe_>V`cfy zwLq_eN2x%sid{%nGx0p^h)LAdk#rue5o}GdQh{D8*xF*AgX;uXSHN?yQ-IC_o`dTJ zSYN;uwM&4m0={3}0(3`;1-e3Q4p$j%RRRuI9iT?Q;c5fa2{>GRfCj_`x)CGYoZEnd zLb9Py6wr+@8F>#R9r?z=HW70H?HOQG0S|jKET-7zl41ef0=tlGX`%w!c1E%l2hq#c zSoE@uz+y*@Tzurhd`MQX?ZnEf=&Tl1v{!g;FHd*hJHQSCPScJ7`Up5peX+Q}og|qn zU$e~LP_nbm@ndeOiq7gaou}Ldc$>rYw!IeMzv9~*`r*kS`XfxeT@WwOT>}q5SfINh zOzz#0w21D(L0UwqXufZI0*SVl+R_@jcff&ys-X+aNCsj1J`Tnx71ANtg=DCSC-5*# zqBS9Df`6{?ROlM_XZG(%zM4dxX znN=*Lv(iGk4=r3`f{?yRh|@PY@Dzmf?Te7U{gB*3T2-qrHJge{c1P1_p`-l~IywMx zM+XKjAaryP;>vL_M%Li3vCnHYBO!>~W zke-4^sgRzET}Vzd@jN^olc;AP={!6$*jZwwLV9+vbHqFc&kb;%fal=(0WJ{m9Gn&4 zLIJO!ivnCM;QMt+fJ>2LA-zm(4tII9T_NCbR|dFBz~QbAaE*Y&T^ry!#0&6xjC6PI z1`hbXVxcH;H(@gJn~`+nw*O75D8$Akkh@TUtn84)}`T4D~%Dc@^9D@imN6A$=XYki22y3H&A|(cVJR z1iu~Z9kKG44Bid?p19Nbet-`IoX!sed?es>evCyrKau37lY}-TpXwZnO3RlFKBI+8 z{2U>DUm#B3mw~@RNZ;28>H7xBE~L6|4XR$>;*#CbceK#a_Xr*RfViU{1OJ53(a(r0 z$1fOhM}7^5-vqppe#c_ee@HS4DVI`Rp(*sI&d40|U$k&3ep5O)gm`Tb;7;bH~IovwYwyuD~ zbqdf~z~R;lu)cu9bqUZF@p|rtQT8W;-8n2I6@tzFWUvxKMyrrCqt(G`#L7x1zecxs z2(1m*I=MR0^#K|LJe0-&8whwP8)7l-Hkb^=Hi&!LHt==`J@rC7;kU;~i?jX$!gsHCIPajwv^wt?u#aHbO9g#7E-i?i zuzA7U#0#PyCKuBmNpIgS!FClZEr}F|7!5gv-I!m2tF#BN6X*A#{?LQcs`HA&@XJG zH{`_ywo=D)%*)!M@enW(lT_@3q^U@PO%n5?#^eA~1e}L`v6zkfN#<;MOy#hUOcRXY z`(qOP03;25V6cLigC7*&U;zi8jz#c8B*o{LL$M3VVJ79Ts2vWWgCmf%gBih&6f0YY z8CxDlh3C=obkD~GI99;-_4C-gd~V`-_#!4zUqaG(_;RpU#7gD*)nKoQc@DlF z;0*!K!8ZfECEz*uc7S&TT&~{@@ScG0*ZToJK#Jx1L$x{FN744NfWv(f;8OvI`z*lc z0uJ{@fG-gj=&vxEZ+UzT;e~DSsG{FuGW_q5bok!~`$5cA^v3`{33%*3V=>Quk(9PP ze&wK${3aBkf5#;BA4nSd&tQLvIrQHF{tMAv)eJ)>SEcNk9zV^JZEzr7^8&pskWkwp zgzAOF^R|5FTA+L5Q7X_oU>A}dO*{|#U=p=2lFq}Og6%9;D$xCc^%wIT+$F%S0-l2d z0_-N>IkN-SQaCK_M9<6a{oFCLFV;7PsCId!q zr578V+I=B(upg3kFg4gTvGM{stHm#x`-kTN@^sG!1}F$PLk9&oSil*Yj>WYdBFS6y zHGVtxp*qJ;xbxcWI1En)aX7-PI|A|AoDuj)gtd7T!n{5jN$ck^9HjNLwA*nkkZ8xL zEiIqN2RuP=#^^!h&v)-cj!Wh9Bl{i+%Z-i;XyFoPA!P1C#F@J&@Wlw3y96O~ zmm<04lZQZR%w@P_cXT-|baVwmM^_^5=&Hb1BXo2P;(Bo{My#Lf!r^)Wub&&R81;>k z?DENvUmmq@(ixd!zL^#-w;?&@lP}#S-t9PK$Gd|T;@yc5?=FORcOyCF zlP`_tLkjM}fq2c!=enBe#GgRE#JA8&j;`*mCpyU3&}$!o`(-(67>-zorjMG zdrYiUJ|7SEgqY{xlL4L*@Em+Pz%v4#gU<$dPQdl<`2a5n_`$?nVxLKh<@0mwLh^;lfYEdIIKG6?!BZXDqJm7fIft=d(-Lnfq1esC>?IkK;Ey8N}}hv+fVXYxB>*e<7^R zzY*s3KS)|W|K(s#`J5HE>^~sUT6Li*}%&oWNvwc%&marme1OHszS0NF4-NeL<=3QjL^|4h&x&}@M;Jh zbwpe*R>z2&u|_zoDd6?978avkTasNq>-^a_>*$QkF|SJtm(mI0QaU3HZapNYeAZR! 
z^A*>}Av<0dT8P&bAzn9xc-@hl@>y4@4_K_gfq2c!XC;tORS2P~5vQX@zH=>~wRn`u zXB~DSsW&9*o8f{@X@ku;+NgAEcZtD0Hc8-v4j zh+LiMp#g>ocqmN)h6{KoBe0nEBPDr@z0Tj`H%jNIZ2k}18>8`M9AgmX+gQY_a$Ml? z2&-}e!i=7XGsC7 zaNb{yX< zDd78bRDh!ee4ma9aIAo*+;IVp7x0ujA;5`9aR#5HHitVo+GYwk+$jN0MY6UxPK)N# z}IjDb(pceaZ7mKDo^)(TY%dId_V38aHoLp$6Z)l+uf49WmsRO zPrSWH=lGYud2Vmqiz}nJ4`FTGk9cj&4*USZ+ISFQ6+VQdk1r2%FlU*Rwl^Mu67Eqo zrpx59fR787Et4lWEy^HTroIw$jI`5UxwiEkpL?k&Wrdpqzu2&sD)A$9K|xdp7A zcQI5Y@8go)(Fe59(T4~feT2B9j{|>#(9x%e>(^%(vHU*|hc5)YLchdf)L%)mpI_=5 z^v!r*>x|4Xe?tqG@-4!pe1|Z&?~$D67rv5#56b-khwOMi(n7qS5aRue5bqZx=lP|+ zv5wDL`4tD^HGh8j4M?cp5kmceI30h=cdpMbf8kLo*MDOdl7CD*5C6p^>VHT&4_kFL zYmKDkx=paQVxEKT0?a4iIXHiS1q3_?7Yxu|z~#C_fQ1BnzZMR#2vRK9i>l4x7K^sU z1sraP080uu+)@FS7I3&_0xXNTKre^Ue0yVgNFiB4Jc`_km<)dXOp-#u^+Hk~M`Q^jesNUK>e6uM=!tF^BFHptFEOuZKnG^(Dmu-37al zbTuh2(A^+(&>cxTs0damR$ic|xA^t4Dm<&@>7Hu>)C%|x)CH&)@EvHt;@TP|c}1mx z4d}XL1D&G+J@4&}4RK`@8zD^V9*7tJ#(_6MSo}Q^7S5(fTA(-Mpk;yH97?z?)R-3N zEdy>PSXQ97=D1X#x54HwCQZCBw!qWP62in za4!2{k<0#)++6yLF?P{86swlo8@tlNB@RGH-EN3ew|n3{5K^}%LhANHatm}ry|y>@ z#wEL>fwa)kAcT$vBkpKO;GqZ|4MSX!nlNI84iASB0$!mbu^9CzNp^v5;9GPnlF>RN zbIfCC;ZnvTT*^3v!Hq|93UouG-WxOlhwOM0X(8S|2=Nkxc$1Kv0^L}_Uu!3maUfpv z0zCytsC^Ma?T5tkwtVMWpr_$cD$x657m@=^JP!}VBx(Uk=ix!Y4i+mtPfrhah?wW# zp#csP@Eklmz!3tTgEImgDd38FRDh!ee7}wfa4b?R(8sCG;f{~C69gRY!~iD=INZqr zW(qjmDFIGJT%b?GNVhjm=b(_BAruAlOiV_87Ltzq>|p1JxqzM<;5-2j`+O{>*aebe z0iA`-pD~)0Z*N=#p@WN&w1Z26T`E>yKxefmpqGW`XUVDqPvCZ4wUVG`(mB+c#YU=N6uZ*M#p{2_5? z^5FoF2so3E26#-snS30JOg6`$ANgw%jXwBLVbx4>MO+Q_*%YmEuY`uQ7WI`Vi%I{Ogs<2$0X_x zNIDOH4EB>)seJw%>=!Z5!CwRXCg3^vdw@R#JO}>_@Rxw=-QNNJ5%B%`H^6^Lv3$1b zRtnb|iwkQb;BajNv=eZ+`2x%@;BX5BSP*gfY>$y{Z*<^*H(m=x`CJ&24i-VukuMr- zF)^3V#RDuM;9)O`#S~jgQY@cKWAhn7Cgs~3%R=a2IVA01`Cu!Em6y+!+Z!u}=SuQ) z&npL5MZgJPHNa{D&QM1zu5EQm&Z4ic;BSf>k~MUW%I7?{H`c_HL9B%^>()lRHrENf zF2dUEgfOo=BWd|ukApeob5`84^?^j|qPDbrb`97~uxxvyJIAH+S%FVnscJCh9o8U>um4FYT^;7o3WMJ9Vlax=+yFjXWQ>l{i+%k7O# zXyFojB4ln;#F^VH@a71a+X5kTTOxVoldt2`PrR*g$?j-tTIgsSgpRgF+|hP{dm(hR zJ>q)N8zXMU4&kt)fY(nSEJoc|l370aP)WTxYbTwNIp&>d;Zph`TuOh0!R>=m28`JO{@H7$@L4I6lAx0oS{U z0rnB_{YnB%LW<>cvf3POO0?}O;Bfl|m@43K(*o=-;BW^7I1nj@E2z!k4vMye1sraA zfI|cv?$7{-2{_#00ggaijb~t#ZEqaOVIet6u-W#;(GW6v43cK_*kH$rl~v8G?TzEZ z^#r*((I*BtNx(xnIlxQ-59Jgrrv0gsyv1J0kNJw^G@YZe`9ExLoQ^N!I0Iq6or!o= zo)!3PgjIPC!i+u_$zA@Hd|qHhavm;PBN^Kp=hH+_7a;UB3vo{u2EGWPr;8C!_)9R- zx}0urTpG@osWC0gmj}E;u*JLh^w#*$<-!a5Z7RGG1nsL?Yl16^~3?%rs-Pr%{s4=@|a z+TM5|nje(EgFh7DVZ`(K5e%L#s!Q7&k8;dMmWaj!#^acz;t3>8#goCF67vJb(*d3l za2}q;Vm3Y}nX|p|JcqpPS}=mYh)M95kTm$q!Cnz_@K*!8Cg9+&V-frfN%86BP3%JQ zmPz@mXKzF3;2k9G;N4*FiIuIxjO~s0!}9}qy5|oAd?eue@o|7p1bja}#p2pNljJSK zs;Wlbk@dOG@h^Y#+}`*CS4QzA!rJ%>@!I%0@HYr+<6DGP_#KiyzI@NYoMlql-uMAZ zxF6M+E|Z@E{w!FwOn%|G^!)NGHt+2=@iO@xlR$qUX?Fh%_Lo@s^UL4C{}FdC{|)e; zfOFZZyQ8&6BA0Ej$YoneZZ2!|WtHu8PUg?@`Do!1=SN800*F($VBq!$sq27{x`mM3 z0#?N*q*o*h*7%=*PXBnNoNz!!}TzUx;~Q5!!E(P zij~TBw_x4HJO?WRR0?mKqFEt*BhwK;Wmu6jRYL7 zM}Un59Bz{UJp~+Y(*TVd^h({IO3X|b)jikfhCfK%OuA zZ;!=1>n$m5Z|uMUZ@U(X(0woo-4{ti?-XojF^BFKpud1a?}A0>T_wc=Jpj9q>}FD4 zpm&GR!5&E3!JfhP5-TszEw?xJ4$pz|bkBnV3>NSm7!qKpfbYOCEUv9dl2=sNG3DKJ z!*z}d^t`t>M&QaQMj}k>QHU4+=)hwT7XMg;g)?A$3P1xdpnqL0>0)3@+In z9ZL%x9f#1-@rXM*A@GR^9i4=@BAtv8D|BW!oFd>AdMXy9K24HcpsO4CR`}#}osl`_ zGic#b&P2GBvk(S%Hj-1I`A|lF_MC%5cD!?GA>Mfi@y=hwcYu2Y9PZu# z_X#-M{Q+hpF3=BPq}v-0a=;s}g`$8yjLFC!LDG>w8tgGK7tqH8JR#s=KZ(T@drDF) zpig5Ll4nfHw>O@J(7|&^+QIX|UJxrUpe?sIUJTEdkp8$etyV7T0cwM z8y^9Q_OaU1^7%=?PX)`iH$LOIR6ak)<{jQ9p0;0N66h-=&F$C0z7d;Q_Ayo82LDdn znfyM$4+75Qj{$xXa3+7oB9p&Jax+=a!(Q^M&Y_gF+}`+&7B2C3gv|YcICFmn{tF>< 
zeA0)SY>N}S=%`hNJ8F%%qc#}ewg?@yLtHQB!-(}We>f~4;Pta0 z7Nc%2$u6I@6}5bxUI(3#Ip&3Eq0fa8#=Ho^;1)%4%4cn5HSre1Av@mUv=DCzgm_CL z#9Io_W1ViRak7Eubqdf~!1rst0P7>g^4Udg z4%ao>x(PU3_W%_F4p$kVO2FZ&1JoccpS2k2_C_5CyzyEn%4Y*6BX2~~k#7)eLot`n zjRN!#@US<=Vv21dDVEQk*o9EiIqB1>9Y*YLn&#wy)lLsE^#bE z=Efn;-1xu~5HdFrA#?j6x#hFAK|k>lT(UcwL<=2FM(Ah?;*RzWydOeGQxVsTX&7-c z_78^x1iXF@#A4J1Np|_HtJM2^4$>K!V?LM`E@e8xr5u7VxI>Yg@>y5az;8o`;gB8g za9W6W1VX$S2=R_Ya>^$kBE-||Q8*BNti@E8A<2i%wVU8mCEO-!A=wN96UY183LYzX9hS+z;p2I0Ots}-klrZJOSUY z^8;Lf6wBu^0uFagfNKRD z?z#Zi3pm^j0d7QGjc>vz+updD!$NY4V6*LwTOnlhHYCmH?ZNI4E32AW+Z%U=>s@kn zqVEoHkAR1AZ-Dy*Je2#fnD(C;ammX+=9dD#ELirkz$+Y=7R0OAh2%98FNoJMxtKSQ^!B|O>@Bg03QhWK7APABLPpjj{|%n;3@ZMfX|TP4E|hg4);a0eJS8@Uj_IY$=crdCYryM zzk`1l;CsaL`3DT1E^12K8$WVfNPZHH2aKOFNyRTnnu=e8{U+uIjNb$NA>cgxiN$RE zOEPDB<8Kam+qGZ>{}+?s{~>AcR+VP0kqF)^$pR+jubwRk zp@a5F+Chh43yGDj!;I~Xg~M|ZdAjFC11u)s`>}X{B?Npwmc-)PmXhQx!}{tv-YB)S z&hamQ^W5H823JP0EW+AY4)NMpKJW?%Yhy)(Rk#w8KEABX!JK7M+TK_NO1M?km@bpm z0(KNETPCY>TzY<41Dp4Dn|PV5g-M{bkuksA?a`8dAJKEQFlesc{m{0Zepc!y?d}d#5@Q0 z46v7g=iuG}1`2o%4hk?>z~y>KfT04uU&8`4A;oe%Tx||FBHBg@INYcJqXispOn|Wh z4mU2qc*F&I0!H)gjfs##vX6LF(FBv>PeRh+PYyOk%q4o?0Q(7e>{GFrXVWC5?T!68 z;BD7J5&A$(LKl!U^g+Q67IWz70S*yx=tHpxeVC+Jpby6`BuAK(7w8!fIye$ZJ2)!X z(PHHVy5;u9G2wZvJl*qg0ge~&9XKJti2}X@Ct-1ICrk2*3g5`VpGVEqIV#Ze-rhI` zS4MFv!n8gO@!~%{@EHh;|4f91a~6^o=(9O!S)k8>67F0zrUm-EfaeRA73d2%E*0om z*u2Br#0%phOafhuq}jbB*rj4K%eFTz3x2t{b9qI8D+Qd(s{&jt;9OpVMJ}(E;5rOL-XK zQXWAV+@nZNfo}BMg&)HqJKp285bp_ucuyk4dkV=Z(2bS)B!#DOAYStV{S1&$&mx3+ z4vFV&`OdXKzko-nK);AxNM17WJbW3GsIMUDJbX3SYhtAW{d%xB#5@Pz4DgnK=iu7` z-VyK|d^f;*0 z3-mV_>GsCA9Pq|#p(vo=V>0p|kaXlf2K!0O1@z|tzX*8PzhW`Pev=dn=h}UL^zzZR)&4m%>^&&`GKNsa7t)Hdsjm3aOTU>2v z`CKC4l7eO18%uFqDxXVZ^A2wlPupcN3A7xN=63mDD~OeEZ>$)6C2?nRIL;trK`%gv@n9$XsV6w|q9% z=^=1ET(Ub_pB6gmg3wV{#2s}D+#R8#3dHrI5+l}6RX9`&c>UC1G3r`LX8CO3Z_9Z5 zah=Y{9CJM_TuKANr8FW8ZUZE{d^S{6={x&2#34K0Mzj#G2SU7!5#nuvWS39k>D>=K zaUfpv^0_IHP@5ry+8l8@wvg{!%jcGOl*;E;*o9@DDM0|N{~Ts{Y5q}v-qIN*)fLQy`4VKVY2BpvziU?ap_K1T)^CE#I?#$t+% zkrd14SnNVF&ZK;MV?2ZoCLn1C6NBv|R$e|^Zf_*vIZ2-Gd2)a$0?yFB0rnGchNfb1 zZPO$l~HOd2VkUfG2}E5MkC85UqC&Vd>+cdobov< zZrNc#q8+Zbw0s^BaE4&n_QsJMm&)f+*oEY16HnV?FbQ-llIHffV8@Ghvb#dhKff0#5*6C?2ay=g^p$+baWx&jxGv(F+xX|Ag&jeV#Lk3EF3Nu@cOv| zi&0-G$u6IjReGP#RXQVc%vaOGrCft>Dc2$l?m8r=d{$Q1^4rk$IAq7WffnN3h!F24 zgm^b2IpwpmS|3hz3l79jMbt(`77nn8 zfbY|y0TvVRlv_N&5(1ubO9og9DbC=f)#h-^MBB0g4!2x@<&muIjTNGKMfp4UN&!|z zJfBy=;OU~aw7sz^$9z+yXgpwa#3U7~BWWtu2)3q}A28Mmu(p8nunrcpab3xr?Tt0D<#FJmn!T+Qf*TH>RAnh4r-CKgSue# zVrAz##^#9E#ukCML|7YJA*{l!k@WFp8xH0ylhXFawot-tr^a-d^a{AWVA(S1&2j1Z zWe05D+il`y(g%}3eUUV~I|bWWto-? z2*e$Y3_J>sSo&?fk|T;DB$~bQh<|@V!58FHitVU+D;X4xYGihF5qxy1UOT`;m!(hHsS(( z4o36sjdLM|}oNGz9zu60uFs07NM_~6btkX*nEAYNqK?32|@=qBWVY>1iMwNyg;|y z-ncD1ZxBiz9GrYWu3n7?MS^bsv|^f5w5pCIn&)4-o0 zbo4pmiu46Wtk5sR;VS{J(66x=^*5620$o$7FCY3=XJn4~J6gDu?-4HL2ZX`>h~yM# zzHXGCJwM@)9q(sai1!OZyk8OG{f6We=$fiZUNik22jVp^(0>34^(R88zmRy|mhW5( z^gno%3iQ9&h2%dI&%;(Vj@lYY=V6;*ZN*9jx?QmO#5@P*53qoM=iq_?+6#CNb_lSL zfGg_40TvPP{aQ4@Vo0$-FRnI+TO!()6mYnu0xT`yaLWW(R>0ww3$Q%m0=)u8y1lU? 
z2fXoGC<^GxnB;R6Bpvyx!B!J<0qq!Ibpa214J@YEnv!AxT?@OAtZh=hy|E624%S7| z4mt(vELL7XTW)Wx7oO|O(>-?y&{e=0>K358fHPEq#kEyR@)mtfT~&QmQl)eJggdY8 zjcPm@L=D2Mt3|vv>jKv!tjz|5dEJPl^>YIb()wB2-q;XGw2jo3md_pmHx?}0-q?iW zQu*wO%{#nJJZ(3_B+%wan%gacZ7Ej1y|GpBt;L@CU7 zq(3-x2c1JHX}P_zBQ0EFAB4>HMVz^v0`H8Fxqb+l>yPA@&w4!s?t)8pN4wHOM*|Q# z+6{3>y9eF_p`$$!*NeR{V*TtL4g&?eegcNui@>%P*9}m$PnPVPG3zsqs;Zm9q z1~(kZDWA1GyVoTnaLA4~k{05PLWnmSA>J4yr+o4;wY=Up76;-rFQ4Opgc^?!Y69YP zOqB0j%jZ6Ll*(sX9P!RAO zJSf1y0T{# z4$eW+4$cjBo>+PLY`ML0et2FWPxm}4z=Z?Id*gCE8N?L`v+hd7YxAnWS0k*=YY^u3wMbe%uj61&`J5HE?0O*4ZctlVK5q=Jt+YcZ!w2JaAX=yTzT!dji}m;7r~Z;C=yT zayAy3d_a<$$p(E6_=7r!QqppJ;~`qO#D@_w_Xy(5JsS8igv>pTkhv$2-11php`Un9 z;*#CbQ?$_0(+C|sgSew-13!n*(esGw#S0j5GhPgbmjt|iUdCe7uSl}XC*O0-6Zfk+ zBXi8J(ZZ#?j&Lb&APnwJB&U4V>1#)mw{Xag_ckrWdj}!jy9n{#LvqSzT}>@dv+v_T zyyoTe10bP3Lho55-^$R4OhhGN!N~}~qzYg|| znCIZP0lpLP9Q;1O4+5TpKL+?o!1eCu0KW+Me*GHYH>6lTe^;Bs{Sj?{3OL+f0sazaj~+hnYF#KM7S;~S0{R@080xvGs^^6R=`794vT5Oyd-b2 z*EQ6$#khjbQQ7<-wl`M9mvO9wFyB^2yed}-yeh(~Tn%AHcSQ1+zjrrR$3<%-V|!x_ zn&@dwgr3$y+|$~D*ForMUBnZ<6GmE>)9sDU;k=$2)55%dz%GJiFAH?#xU?X;VHc9_ zCSDK~m|RRHlHR_mVAW!!1yK{MR?PRQECJfF1&#avKNO z1Su|vo@#TrO`~ly0f*Z>z!pf>_Qsacyp{YNeCq()AfC_LV(@fPSK8j#j^jeoOEexZ zw#OtDy^%B(I|SQN%nulS0`wJd9(KZFHtsB$v%S%eL*8~R7{PbJB>1jK8hk*o-NYPx z_W*kcIQX7e1m8&iXjge$7!^%4Tjzx>T}dt)4~jAA^(+L(ZNZA=Wj55n3= z5LV$NBz=6D%)y*xQrg~_0wvtOYD|~OegUTnmMxQM9G9M7_Q&SE-6mcp2VxSafTYVb<#6@Tfdb25LHA59CF zcnm`7jzyfh;{qR#kh&8PQgL_LcB|ntmhZU(}xROh6C}MKfhcKB-9lMp{_)nj;rK5*XNh3 z@hFw+Yp@H+wI-g2*I^R%dL*5PHw3#;tW>UV3U;%Y=in^?ZWZtxye+`(0-l3+1h`Yc z<@&AwcMJG_-4ozmq*$)+Q=7xxA8oS*9PWVt4+=QkLjfKZaJWYTJc_tLKZenKd*g9P zA$dYPs_2uL4F4%49sbk7o)L2ueKx>z0v`MGSj@8*B&F?*7dhZ<*Fq8cWlTc9f~28e z4fdLtL%$y24FQLK6N}LQhq1Gcx1#F8u%Z|km|z#EU~xO>7O+9=z{b|Gq|K+|O#v3iK}le-+FX=-)U_ z3-s^Uyu;hX3*%2r0{w+#+5J1%KVtdqjempxC+=L%(BzY6L?V|R0?Z`fT+WO|E;~v} zbLp4nXVEzntM=O)v(myP&W4b>*%7C%Q{XueQa2|;>gGa93v^RM3srG$T#7sDObZ>& zgV52uh&!4u@calJEr7TpEr<~-v`aX274QmO2#ZlKEGaI~`X|06Swv?Pj(Jg9xRk{Z zE@g3q!7YK56lmU9%~vcei9>O`rD!4E(g^XEL5SB4DJjrRP5O4s?l=&yZGm1ENT}ry zLM@NP^R|4aS)hC3krwC`uq(-mCZ2~YVG?y^B%6n;1nVW17U-H_wPK!wbph%Hyq+5Z zGzxeQHU(%Fa7Aqi&??~j)jL2Rq*|c+s?Fj0MO%LXhZ_)Jpn$^-3NTo};f4elinu@z z!^pNbhI7ChuZ5z3j=*H(Bav+6qk^p_<^sBUfHed>>@~5NVrxmN1$1rfO0tegetTmy zgbv0aSqJL|8!MJCpzXIe)(g+|<>{U`2(Y1mGqh2FjRl;cO|ZDOO(kWEp6~yrfNrL9 z{DeEb?TyXxWDw&JX5AKu*XEXiw?bH(TO-WtZIG;fZp%SdKhy1v?SMoZuePjwZXa+5 z!QA%7jvS}ub0=)x;cen+y9*|Pc15z>?iOr!vHbQ%5_}JFXL8Q~dkHv`dk5G@z?s|^ zi%jk(Db1w6_S37T;hQUnX4eq+(Cg4M#$VD2$?$+DJ`GPjSbYS!*D6? z=x|!-=m><4jzrwiQGt&}=;#>4_2O8JSU<;w!|?)MKPO-@>Juf!<+Hg-|NNe$GYZFi zGA&%nDF~NxD#GASLrThLbF+RM_jDYJLo}v4=)XNnOIsrFAsKwnCIY? 
z0j?789K1TfH3FW4*9N#w!1eC>05=Hue%%=0CZt+EZ&sVb-4boL3OL+t0d5y?xH|&e zDd2E-1-Kh=`Md`s+updB1KxNo6y@`NOh*0yl8yYqU=NA8d_ElD5djbTQ7op|W0Gq5 zd>p%yJYkaG-gpv12Tvhc2TuokMl4@G+i!0?8=lX}(>*^Q-~|C^=*0jp2{=P9V{vV- zNXiy{OKmH!)4i&5R6eJ>z402J4B~Z!S@#CwwfSb?w-DCm+X(ae9V9EC?{YA;d`^g4 z_8yRE6V;ZL&q)C%3+A>r-sd!(8oxY+fRahDwcnF;IrVLi#wBF z1o%?Gnfxli*80SPrXLa5G&(=m^Hr&&Jd#Um}B^I=z#`As|z7r-Ry zf=D(Gy9DbhmX^t%#C8Qc|WwklnD$&+Uz~O2F)CxFUU4VK4hieGX zh`1UzVdSHz_i5?JOpn!)mD8OI= z4`m1zi+!l1Y_Yf0Xy0s@&QaO?Keji9%%GD5N^y*0I@^7it zo1fRfMQfyBdt*(S=xHs4p4LX((>j4iBlI)|@q}L&Bdg2V_Qu$7UQdl#VO~Gr27Y|D2JfUchub;Yb`fy6T?6cf6m4(p9?gmT9ej@fdm^6CdtvZ& z(VT8??9Fi{*+(=UF!seH75gDsD)tX{fS4aJ4h&Eca2^iAVm2NunYz7k2#37wS}=ki zhDq?lku3NT!HyJj@S_48E#TnCU=jRSN%iUFIP6Mtyh;AmvlAe6a3Yd*a8j_7#d7Pg zV0+_~@H|zX?)kI;rwjOgoDtwm0pE|au(-CfC8f)d@209r&e1vk<8Qj#8|UK6D9%Gz z8|Ncl8y5t=5Mgaxgs=)PMzY732^>saCh7LZB~Zd$s>W=YTo&+h!Q3*rg5&i0vHbJP^}%ltcP?)XaFc*@d2@hU1f0uTvB>3ZlG0rIPmSAk zPT}**9kg(XcOs#zBQ4i&V^@-QOgsLL0uDDN!1n?U_d|dm5f|v6 zFxqZ!{0ylizlcW_{S}kp|Au74|2^0rVy>cp2KYKrg{T`vScrlyFO_F)PqZ2V6!lSD?FboEGTr*u2Br#0z6N zOad*BWZCTztfyFhdt-&*D~dapD+O3tz`0x{KraF3vIdJ>)=Elq*;>y_^L09hV%2_o zqn;Knu>m1~YemWmbZdNNNN-$(9uA| z6=@JgtkA*XFhsyBbSM_19wsR)(5*G~jr;*OTxS%Hc~x4tlo1G*G7@2MqmbeP&39et z{d23~P#kY{T8Os>LcBE*;;n@g7ii+??|`*&AYR)7y$+C2qY*-lLE?E^zSAtwWAR7} z^m^EpWPKCQ!woQrx*?Ly!;OM%ES47NO@eJI<~g`ofXxLw2ge22Lcnuy%K%#mxT0gD7uWbWthg1vnc(pm)_R+S3fWz$=U?%~G+d0540uHxpfZY%m=-n~0?Tv&3-gqq( z1$0kLM!px4jePH5`-r)K?i*k~0S|kBET-52l4=1x5WA98O!C_s2SMoIU?l6{kYI<3 zuO(g2qUIFpxSk;yA0Wtpty zM}~Q;h|ZyuwBO#iiWV;MYJ|*PgE({72EGm{Z*6Pt1}A6d><`b%KZqJ z@&Ll%9z;sYXDvTvTa!G5Lvg%^X(8St2=N|8i1!#$Qa)?_b5f7vK)kl)^9dlKofr;F|&75^%kHJHR^vzF+SKcn_(T&xvYtxJl7AS-|1m5AcD2!+jXwBLRo|IKU@} z%jc&U+4ja~9Pq|#p(vkUU^4PAk!<8&1^Zge<@1{W-wJrx-(fMurbw#g^Ly+{@`Fi! zd*eq49sGo39sC^Z7qNW#Y`?wnYk2-9Pxt(LfIkGBp+5usCEyJGjm5S7BPm_{=<_&%+TstHzVS;*#QGQ6T;e@8DU;`M6&WZ3kOrn=Y+UrvjRD7HnnBt zbM}Cp1asRPb8wuN&pELx$y_F$wsT_=s56q~cAjAKisfG(m@oMJ;?Cp(0TvW+Cc6aa zD&R~mgheJ7mXu|(u0~%ey@<}Cl(gU8Sd34kh!Ii((+kX zr+@L5!KJvPZnV%*cZ80XMcmPHftN?(PyTDX)NgiEPK7+f7vQa>zbqm z2jaCYpRGVb^+pKQ2XQ+3%6FROvmYL5`RtEfNd}mB9uCAL>L4VWhl7I+5lhSG&|t&F zJO_sdSXIDta72KS0-l4T0<0$BdbfIjH3WRW)(o%~QZ1irtIgroiMG)K4mT#ix&jV2 zHo$rU4!3@Q4UlTM4b|pw8%5j30uHxHfK3G)ZnFTJ3pm`k09zog##>_Kwl}uou##*o z*k*fU8weTQ7RfTYU9jGH4lhV*{8XpIzXZ|qMK zJsp71(}9S4ssug=p{IipPxwPHvbvmYZyXxVhp90u%!da&LNNETz>yrM3*sp3N^-P` z7sN4`T+FdZcKePCcDz`+AWjH&qL}Z~NdZn4@O?TZz^MYhPp1VqUBFZBi~wf}c*>m> z;B2HigU?Z$!<`##=LtC6`2j9Kincc{jOL5v@8A~)n1FabUxLBYMN7K9aVf`@s+4(<(hpIB}k7Hn_aAD$1$(>*^J z;2{CukB0+1BH;V+C>GcDn51+WHaGB3;o~~TfBa2%d*caQ8O4(bYvU=zYvbv_&mgRg zXAxH6b4d31@;nDqmr1(4@dA`^FRC$HCNBkiSunRuUg0=>et8v}_ja3jnY@lkpf`{# zyKe@2ODzBV@^XifxkuQ=sU#q zYYIjz|L?=$2LZ3pAF&wqPm-eN7rvKR4+zQ6I-_vRztF;^{EBcXzab3ncck$7g?RP+ z+E?-i4#n~Qq=k5YA;kL|A>Kbo;q!~)`By4&AYR+&m;Zo-nxVHt&4@T19WbW-{4x_B zX}O*myOMM?@jRRblc=*I**u&r*z96yx$YEf4l&QcIRnfk;5j&VfX)J*gYyKKSHR_Z zz5w$J_uDhts;kriKLIMuAaDYVw9B$D7iwQW~;sKUGT%ec4XuG|!6r_?Y zEgn^L8BB)X4atVzJ=n5huA<8YSYE(m?}5cU>nTaMH&)<)w_OWG=#?-Dy)u%8UL{yB zF^8@RP%Gfjby$S1msAUM19l~8G|3m}CI}rgBUuM6!CJ-g1-kwAM(^7A|E^giF~AVQ_mRMFpB1w$^DoVjmoe z1CXKu?RZVib;*G^5U*{4t^f&j5JISfk$B#g?==6wJQR<#Kp%!( zNe(yhJUjxEs7E5%JUlAc(PC+VJ|@_)VxEJ?1vp;7bMS-!Ckl8Do)q9@0aw&h0-P$~ z`*m7?(~)X{K0|E|cV@JmCE##p2RKK-;m!?ko`A!hAK(JS1^PmaYxyB^Fy>Tss4z5G84z3S&gIKos1 zc$;|IK8Z=7r;seSPX~KOEWf?+Z1CsAoyq3|yddCAz8K&o0cY}MEHe3uq%@QMPSjU* z4yC01_Qq?paEY%YWbO^bnR_$vTL_tZ8zFP=AjRcVHKCC`)Z|@UiaUCb7CM@U(9tBs z9Ze4WK0-$yAg&i5V#NCSC>%Z(@cQ`#i&1|nDJY+{O-;Pvnty&j(;0%xn}>e}`%5e>pMMAYN6d5Z-vIv!cn;3c 
z$B|}4Vh(l)Fq44m-OK?x3iy7_5@1%OT0Un}o5Rf>ZJh)>rRNARr+~xF6<}@whwB_* z9>nEyUW{yeV?GXey9ZcSz!_RD!14mlP!BAwt*4}9(Kprb z^=Ea-3OYySbGq9bE8@u@RzjF{DDYanIiv#F*< z|KhEQOL0eQ(LzUSBXqP5;*LfK9)r-)x`^w=Sd6$C>xIMm0$x8GU@__qCB@~lwzi(n z=5C}j3dg)LEnLbb2$!-c!r(STO3J6IT}`q%4#n}t(L%f}5aMl#5N|7_qZHo|UJH+W2FW+gF&+YL@%jXW*m1IW~&%>QCiMlhA&BI-S?JAa*&)tIU zF6KFy1lU8sb8yc9dkJ_B?j2ws0oS{I1MDZ@`?Y_71CVO@JWy>8SBbWR1RU<*0EY-T z+@S#u6L7f00~~==!yTzMhdV0TjuvpZV*(s2;BdzUI9|ZvP6%)!;%a;nMs9oKWDYCI zDS~abH%^6+(bJGDqo)TuLo8P{i?%n;4A-;d>O`L%;2Z%D<=g=033w>yV=?V7kd!TU z-U-5Mffwo=mCgTSd*dQ}8OOy4^KAm+Re4F^OA%J(We79+a-?+m^L6ZWeFZLBBL&+V zSJFgJS0VItHR7JG34AR=PuC%y@YiExbvfJKxFMWxRAW|{Zwh#`VD4psTR2V^#I4ws zi)))ADO`qpc~880_Px&WAAi%`-uMAmM)4!U+V~0a+W0x} zF9>VnSAhH`U;f4Bz1=2WCNuPP zpc#=YyB&hfB$j`EnK^hzap!WD0J91>m$LdshcP8ya=hA4&L|x7(zI|X%OG4zH-y1;M~a?b z6pybZs7aQ^p*Y@hv=DE3gm^s=;`Ky|o?jG?uTrT=R=|OHZJ%FO1QKc`gitFZPRA

oAF0k7V<(Ay}hWTCSUdHH&!;wghMu@Eq(NppSs(VBY}! z1YEBB2N)pW`!z7YAf#HZ2dmBDhD6&?0f!qFV7P$8tr}p2fWwUpFbZ*jUJaw|_QvXv zO0tG{RM9mt8U9*GHvF}Nts~|tIy%4@0gruMEaur*NxHqU9tXVbS|~zqfJx{Lku3B^ z!8R6i=uHA_D&WwYVG(+BNwq+a!>%M-nB)uemJmAF3duUyI@mU1`2yX3dt=-1+)keE zd3=EF1$+l~2(Y7o@4!x2T-(l)vZB)3s{Q9(bdC!2^tU&5#g$R)hA^#nN4)rxz;8;`eLQybPmO;{r1N3v~Yr#j z6vw-q7UErj5bsKacvm6C1)6xxynJ#s4#aC)psxWE>RNKBiFyl?&BI%R-6ob6=-Y$cA?7)FXMno|JO}R%aF2lJ;JpFv6L3YnKfnV5 zzF!XpcnGN$=!ezjaF0aWqXG{1Sb)a`9PWt#PYO8PQvsevT%e!9$hJ40<$yO{3q=8a z9+Q#3fMg?oG1yCDE}$<5ctya&eie%;_L`(xKwrnMByX7Hw>RE|(7{_s*1_As-Vw_e z(DvIK?}q1l@^sG=156Tdh9(DiU%(mq0E=t;P*S?+`TSc$@{!K*6Yli3H$KLbL41NR z>pn%iHa`peIl|if0%2Z%iDdQjD-N>ynQm`<4J6t(YRk&!w*kKs%x!N>;W#ay-(&L* zZxc`3A2A8^6O!fj=U~5x<+nF}4gQXfKLnh~KLh+F;7tCFMJE4|lx4EEM!#A4 zug;;AwBO$Nj}|U*hJMc6jEFPW0Rub}Lgr>h$XrLHw0zb!)bSDCS#T-tXjWQ?I2*z( znjLXRodVB+(9xWT>&09cv3}+bht2|CKl5NQ>Ukx_<+Ij1E%WJ&!ZFWJ3zxD0!lf*T zFt{#AN%^d8(oYR_#i2OfLbMQXVT5>#AjDe~DJh@cAWjy;fp~4p=i)#@ErAefNyO<` zO1{%9pG)JBmd|CdD@ivK&%^GRL|qoi=HYU|mKRIQXOCb##XJXB2(Y4n=io{KRu=Fa zTqQs+0oS{l0JQ?XUv&ZMk!tyDP@BUwMq87B!!-wJ5pcNH0KEkqu1|ozh|6a`jBI7LgPu#SK;G&;Z-0cU7kEUs;=q-@dG`O8t)(>W@i)7{=! zA5R9c0m7`?5b@gFDDcJzYjYEXdA%u;mCwyMm|8w3#4Xz#NVIWk%gW~#0k;&)ZEtMF zaaumN#;zpWn0VT5i%FpEkSw?3gKaOCe|cbs;5&*tlRE|2S-_dxCBUu%&g5=bWO8>& zVJ4d!cx#PzHWQsgDQUmGu?H<&;+_bZ+Y52#_71!cLgw~G$lQKNarsn7P5M2|{c$Po z=m1*i=s<*yDu_EeDDc4u9UX$WUL1-MH{-ByI9$N%=LjrDeWawIeCks4me1rUol!XE zqiNw%jzPGTV-W^-98y$16|Y4#At_yIzfWzGo z;6?$5yD7lUNHyFoYIC?-qwO{Uhr2z%9Rdz_XMno|9PaJ__aLsu_hRI>H}2!GlH4!Y zW_#lS2pN44$ujy-u!qHRRkLV&*xq;+U&iqq!hCxk@v3|w@QVnm@+E{B{W4Oz{2Lqei17+8S|bJ98?Vwt zPp={L^g806-U$3ALQiiYp73vDWOX^)-gqaR-&JE)nBNOHQ84$iz$A{-1u+@BlDu!? z1@QqU7xN*K-M){4eJqwPh);rjD(3t2S%A+4e4oAu@TGw7(^mn$7Vwn&Ccw7>o^sy> zn1WPi@b_wSxF4eJM*)ZXDZtN2(e}nK(fq6Y9sIWdzayT{e_-?&+o855`4cPMWC{5%*bMz)e$1E=3#SeO&c;kw%*mN0Q#Uy}a#%@b5scuoVshSW zNEUqdV4cJqe2xHf3OM*&SOlM2QhkQ$j9p3QG0DG%HZOz@=0ma$<`1@jSZ*~IY;r6Z zo?YbWp1THENWgbx;Q)&W_^vF9#kDOaDP4@MO-&8S;yTBF0#0|6V+mXt#gYgsWGTce zWa+@mAgqvX2y3xBl0C#M%fZyel5TP=2PNF{YRnc(kAOV|bBkpKj?<@@6|pPHN+w<` zD`OI96(q}UuV6J|`KOrL;C158Wqp7K0q3$YK$C!T*^EUlTO?(?>4mF7xDbG2RKB)V?Pv&d3Km2-Q+l&gGzFQP=r1blh8*YS?HsK9V6z@ z#|Ah~z@d-FBJ>H8YJolxyONw_k}uFFL+IcXBT!Frt#But`CMzVSMey|V3(gOWqu#dz%2R{z*iGb(ervW|_@ErU+z!w6ps9y&7O2GH) z>j2*%)dKyk+8pk?XqzJ7aNh^`LBQdD4DgeH!~GoK7sLhnSBz|v<2Mc}$?rl@K>xsG z>1~pp$?zG)I6r1)QO|u(-ClC1s1grB>h3)>-HH33qy%9P{AG zAm&Awb@L%!oAU==0AXz|h%m3aAX)wF%0X5?(@l0rx<IRR&K`2alxoXMV8WO4;bX(s(` zax3Z_N=f@oj+JQP5?4mZ+$xAO*DG)hLgs1_GFOL`md_TyiKiZy;*J_ zq@;ZE79Qda#i2OfFj|N=93kGS2=PWBCFPS}P;6*PM&dxcw&imakWi~3gjyYOI@XZy zG|T6jc%Bp9&!2H0VCVw z*pY)uvXfAh&z&(D`7TH{@?C@NCg$?Fdw@j1!`=gnDYmDiT0Zx}t|WV#i3`I70^qI7Gl1Iuwg*J4{lx==sbhZwxtH=cs&6 zca!4?JQ>812(#`e#B1~Dz{eo0&0`Vf^>Ij6K9A>MYWbWHx9kKU(N0ubRz6P(c(Pz_ zlj9VQ)AD&Lb|pE@#MAb4Oah&OWVt;v*jZxvmj})cevY^^d2WF71f0q916&~BOkRjZ zCNGkdX0o1l*(4Y197;+1O^yk)aEX^7WbRVLnY%3TUah#i(zU6qnD|7Cupu+@>=M z$9y|2T*@5?mvSe<;O;_7%4ch9BYzFujYDy~duSovy$JE{Lx^`jQc^x!TlE7*58yz& zw&n9dAfX;Y2=y@HbUY&8X_n7N@kq<(W7w7CaTCwOCoqZnB$Ca;r-D5#mX^p{_0xq6aE($t&xIF zj{j()rx^yirx_9V)ByuL6GBfjBcAXbF|xXxZF0;K&aMhE#p(!aaSg;PWX-^9 zA*_(K5!T{5NcIpjnuDo}CEes010~$LYRneP*nsN^<`&EP9H&n)8(>$G4NbgQHo_#( z#z>akO@eJImVb)bEcoW)&gHlOTL?IpTL#!lz`5KSi(GCaDb1xnmAS3XDf}0GJ6gEJ z@d&Bg9&ze+2)rXg>UKg%-Ofm98LQ=I#A=dVa4GI+S6b+3H-wIMN8C{ocn^e*_C#F8 z_QHrFuy;7@Bj6&iFBYTTPg3+0)5xdz^%~m#I-_vR2hhT$9Efl!6@=^!(A5Oa>Ql&3XHa!99Kg4 zIUMn*qE}-w{A-YG_}2!zPRv#G`T#cwc4ZuI(vFSy8F0)fn@|zqJgHIB7E++?gU%F3^0DAe#uk>x{xN|3M3v@+ZQj{Dm;MzmbvxUDw>qzdirp zP#o`HT8Q@_LcAFUIo^y&NrA3w;e*vl2ONmkwm{DWB-G3Zp*kW?$1L)lW`Uj+kF-F~ 
zhRu)en0Ov`!X)Y(NH!1W3^tcoTA=3+)>+JRaGn733V06A7hrw?&%p%(EGXcL+9g0& z0pG8M0xXPF3-lstbGSvLZ7~6dTRgxL0uHxifTaW+Zs`EaATH3|FtSaK?i}z-I6_fC zm&0V_%Olyydj#t#<^sAxfE5Ki?3J*XVk=9k1#}hcO47?DzsXSpp@Ui^>!2=Jy;#10 zw%_Dv2+u}&y62_<%>vF)OMq4ZXQ($8*Vac;w&?5Y^g~8{b&j8Kr?<(`4^IZsA7R!F zK)f~w1|Ed4HU}fDlOae}KZkOV)z5U3V;GQV!_}6R&s77C5X^0IjN~{ipQErV$!aE^ zwyR?jXbmLG?V7>X63cILtQ~wEac6RLfH4BjeT;KI`lC5I7E(;*PeUg^spF=x8g%9c>+W z8-$LwMO-ho!-(}WJ{-0e@cP*Si&5_=DK4M&&8>Vh%1%0?aLhZ?!lmqja4EYY3~o21 zqOBy9<3PN&<#Qh(q4q@xwIAYi>@VMG zmd^w5NXzGe*nA+<#PjeVOrjo)Wb^QlV26sO<@2y$hl_a*9ueS30nfps0vs*iIe1Kf zV+CCAjtg+SfbZ7{0Zv4!XI5SP!hF|tjL zb2#9aaD<|Ko`=cE&quP6Ul8m${MweBRB$)bcqYZrMFRqTQ>utbE=V@P5JE zCdUIDr{(iO>`L;GiKp$um;`zR$#VN>u*bymFAqE({0VVq^2q>C2{@Ba2Y5!nnS2(D zOg<+m&13_=S(!Yqb0{V4H#uIQg-d)9A#*Pw&fLp^UqQ&+s|cBU4Jj?34NdwN?{!>? zJ9>i_I(ieKqqh)u^mgEP5ITAnalLpCBW}jTaF`_E^)nfZQNJ%KE}sppdbiI9I-_vR zAJW35e1vc*A0rIz6QrbkHrCYf*U+ap6vz9F7UF%55bq0wcwZtV<+G8uK-MH*;Xu5$ z<@0MGp}s)~^)2Fbd?(*&md`18q~-H_Y(9`_;(7QZCQ*MvvU&J(uwTT|^7(79-^4rz ze-H47falzxu0?d!N8ZUs6+vHe~!%EUc zu+1h%R|pwh2+1O z;OnB2ZaPP0^Z(f7=#DSrSQcTvEr)njE+4oD!m8|vFr!yMN|%45_c&L?MQfyBlVc^C z=xJqyo>oEJQ?I}^2tCyzp73=TSzXRHIqJi?L5*2qZVcEYn0r~End5Xpv|#fMf+k)N zy)n6%K1g=^`UdMKmM)0?!3K!=J`D^oNWk}LaDX8KzE48~3={B_8y;X)0Z+LR0Y)O# z89YjD4!2sgtuEkjYXn#mDca;%E1K7qzk{z6U^L?SJO)FX9CgXMSm`FmSnNu&p4#Fu zV|`2#vH_AMWW!(^iTN>O;{cloI2)T{F()^ZOx@(zoWn{oPB4ORfl2T!ku3OD!L}B2 z@NEKYE8yVUVG(@1r1}iAJ$5D8!6g40+Kv!9*a^ux*g4oPV!72=u*tD&cp%q^B9I8L8pj>N7cN11rB9F0k!V~{Mn z#|Ar2EdLa9eDD**oy!vgoFw2}o*dv50q62mEOL38q%@bjPq-#IUFQ`3i+%dp>)4npeAMM&LwNNE{kV}ZACosUa#M;FjSM;9V=bP?i?E)F~ap`%L>SFuYm zq6l0T4wnmfonC>(sIQb1KgIA#37*`p(iw$gzM2*;P$B-CvPp>9WL)aG!{M%!}&4)=V37X%#c#Q-k}INZwtUO`-@U&Uy<$?+PblDsY+ zRrC!^hW{p#4galRZ;QE#z7ybG0gwGXEaurnNxI1~iGxZqStvrkk4fkckSz3v!9Egm z=#K+@BH+-UViEc?Nwq+Kj$KK|Ai9nKQ(3rdWIo}Ga~r{-GSq@K+l9-NoF?j!sv)epjnVCyR!zH zO)S63F?;Y%;?Ct90p=8NF6Rm`w}5lm8H-%bBPq?LzeQ?ZokOu|zsWHlEnMRK2&r2D zaq1Qf+yxy8o164eyM=Kn?r0HO=x9-djuu1Q(c*!ZKm#1&~Nj98&d zhr==gUZLHv7UfTj)10+-}LZ~{#>8O|QGz)YC9%+GY#I7VwCZ31Q zm_%(svU%7VthZQNp!)>tE9N=aFF=0*&%pr!1`2o%4hk?>z!h~!fT04uU&8_nN2&#S zRkbxsF5t{-3n0S|jaET-5-l4=3n7`u{eVv^tF*c3tsn;}^Tn+F>wmM@^~H#xQl&n@NY zp0^6HwSY6UO@M6$oT2ToxVG_R1NnH;HeC?)MTIgX-* zOFSAObH^ag+_8a=L&)6m2$?$pDJ`EZ4SEPX5trhQPNId5PDbeH6vQ2!8u&DXj!s8h zFV4V-^>b!8oF(A(b2b*EK1WhqKB-Q;75QA9Q8?!FXyH=MN4S&=5C(T4Qc^zoK`7!~ zghO$>i)kU=1cZ2(AjG>ADJh>V{xY`9a3EgW@_9LsP*)&?x)O0Zu9ELG%jea2q~-G( zY(9`_;(2%-CQ+|PvUzwzup7nF@_AFRo5ef_ZwYX#falf6n;#?0>rarZe16Ko)bcqYZrNu*qJ6HmtbBeE@JqqmCdXGCr{(i& z>`L;DiKp$im<0L`$#Od-*!N=jmj`|b{-d}v`BQ+O1)RxW0{klAO#X&NCV!WdX0oA` zC$T?t4yC01CdZ$&aEX5*WbSXonfoX3zX+N840rxgUbe3PQY_;`2alxJO_IQSV6$`Zp8pA3HW}k9AFisT0VQJ z&EaaItyaL{>H^dYI9x-3MgfOw3eb#H!?mc*;aa1uw}8X-3D8%-;ra#WFW_(k0t`f4 zjR#@mHaP}!SV@Kmw%Ozu3L&GzkSwFagRLr-tC~fd93#SYq+FfoQ2|yH@K9C{u!ew# zvL+VOel1DKVy~@f)VEBpt#ed1|Bp?Mb?{{zqY>uY7{se`-N0iJR^@sKGkSfb?CGht z#(SI_;G#8Bu*tC@P4u)8LQfkb?rD?2nGLi*9CD^HA4t`pI(*+#-3@n15DXBigoQ2Kz@R{UaLpujT2j?PL2j>Ml zUo5v83pP0}2+s@U>7FkNaIt{z%7g%y2>7mCip8~ECMjEtwYAN>Y3g#F<39nXyUB3{ zu8iVJgcWiX;uUgr;A;?8$h8P-@j4`Xh`FAFsf#7uRM%}>{uc(L4$NuWEBEW39GyGtzp6mxg*d&Hf~djs4j;9TAx-~j>W@JgUhoYKfG7iLR`xNs&kWe2W zg!&M1IzE!`G@oKV#v?7-pI}##Pfa`zKf@&I=SVgWzX_=ioO1z7_Bs z{4T&00nfqj1N0p=BO==rb+J-?({pclZdBnz743v?F<9dt#q4i*Zw zuvor8x8LMgBs>?Dr+Z#3z~TbF14{&0Qowg$DJ-sSX-Qd8(QoY4B+KX=73k@2a&*I$ zQFKR`*2^MZ{L2Mi9%1qKKv+0Ek*q+kz(M;0y&{xwE2%Ln&?^UAMKD*OdvTl==o)N( ztj5F(qYjfm^+=Z8hG30i`Av?d;LYOBWlMlo0q3%JfIb4wWnV0E*-uiM%Z9qxlbc~bl zGz;_=c%%h-OYBOrm5Jx!)|f=y2Fd2(w!yX&OAGY)VB3p%4(ti+&%&J6>zxy0_-o~a0di95OIO7U}T#d 
z2XRnI4i<_6dI%;XKNQJEeps->#auv-2ymo;hkXp);_!wlgJVi=I!t@-~OFbdH~J zr?<&*Hl7UP9E4eSF5>9EBCdakGuM>ABuMcp8fHQexfSUxI$(ymr z~?JEI?`#Qik0uJ|WfbRqx zZc2df5tq*&FtSaKA33NbKM6(o{27yx|AJ&A|25ceVlJP*2lzw4!~PSCDfXA7T0Z~A zt|b4MIt%^7uBjh{H@;MTZw0w@j<^!1~o` z9$=h+!)*~@O96-5D!|rAHQY98bGU7zZ94&n8y{eM0f*Zmz>WeAw^M+f5m)0~Fmjt5 zyK-1bb`xx~$+0_xj3!8y(LI9gDVBTaEZXGQD_r-Ms}sFXfPDo#l>GwiFW{jZfW@>w zP*S$o8=Gp`XsqZQmCgTSlj9(K8OOm0^X(ACtMbsmhas%W!x3im5lHFsZ}cALk+^7$ z6l`)FMH4+8jnLCEhaK3==(**%86!3kzD8R)6o^lfc zTq58pcWHpjkm?M+Tx||_MYLTh;BZ$3xEd+ixfs#8-d?MSRrpAti`vH>>=hI4yG=abd%#1l$F3wF zn0T>#h)JN2kSx0&2m3@U{}l6S@Xy4Z%g+ORA>dqo8Q?1c=kjYTa`}y~D-H0{?`=zXD#T|6wue8CKQB+&sShyGCmHqy$fHGvZKuDII9xQf5N9 zl$j9**AXdsifQ7VLv_h4I26a5l@{X7h7fOdgm|5hlBbxaR{iqX95@iK?NiL0Ktj!h z5NdA3>F6xqX+FiwgGXAl=f$oh^O<-a&W}md1(0kWE*PwfSX#8Z23ttXb8z7RiwJlQ zE*fAl0nfq311urnqP=8*r38GxmJYBCQZ3ru)aG#AqitCMhg&Yd@&XRmBS239hg%`Q ziipefN*HZ7IaY>Ll2yc`iuS@}_%%p2{Mul3Vy>d~0U886_C_q`S(7B)aiSx!fkewgS%Ob^*o zFG5HAA+AXKW5fzQARG=9@CvP9G3tXP#RaS;(e4^IzvhFDsl&kS~!nCIZx0nQQd96UF`c>*> z4(qwT{H-fz>mfz%fEBM>u&g44*-W6~r-wQBNz?qzcMJ6Xp zN;B!N5`ACiP)gcwa(qAwm-rz<<~~B4xsL;Xf{?jS5i<7~Qd&M+8ubwPIWEN=eL)Ky zeTmS~SBN|MI`B6L9est{+hd@tbj^8*&6{!vm~K3n`&g-iJb z;ZlA@7~F42N%?H8;iGWL?>H34`-2wZ{fQ9oFNAo1BPHci@8RLw192c;+w%D@kWl|2 zgqmSQH61fzOuKw`z#}c6Ghy?AOcT$;j+jK913APr&zU{s0Rg)$+NZ+8nM+v~?A5xP<~NEZ}g91XxtS;T8+9 zIO6iT1V*;Wu_OnTWGSI2pG#vh@@0^0jXd3RZGbufXQ)0vgMc&Ch{d%vNy-*IAKv2Y z37U0|%I9=9Ia=^!5UmKat~cVf*(Y#cgtgfZVP5w~vhq2AgQ?|nLfo=}K%xy&TUI^? z2OJ`p+vFI^aaulyVONsjCZ4vdViITslI3<}uu)?9mj_l0zPh+Gxki9B1)RyX0<10I zOs<1PCPzz3GufnX1|OqyC?)MTIo73xOB{=kx%CidZvDU;AY^Vsgv@P(l$Oud7X6F2 zF)qa&Z9)qjZHmy*W{5l5Jn%S#j}uGQFB) zJDpKD=JB*}Dcd7l$_@yF+Yu=)pLI2L`pU1Ja43$qGcCm11tH$92=R79ipwYQcmXWg z9S7pIEuRUHP3pFa1-BQ2l%VDo`Y6VJo_Fp0W9lFh>df*mN9md{GC zgTy=s4-RmMfal<$0S*)J96UV05dyAvM+P`b!1wFu0LLKJ@_DS<9PYSiJ6^!yP6%+K zfWw^>;A8=ZJ0-xWNHyGPYIC^LqwNd!Z28yKc#rc|T(m|C zHaTviiJopp=;;o`J>41jE`*-$Mm*u~!N}@zw#jjCINztntT5jn@BzWx%K{H_oGyrm zuq(;KCSDMaU~(~!BH8VGEZE~>>4JD7*pp(uPfrDSTEO?|nE=lU_&z-s;CTU0xfcSw zDBvmgQh=9{>I{BGZ4UQpw7n+aaIXh=11Z|%cr%*clD~t$9pD|r^Z8v2ZE`dw?_s5z z922oC$t1PKW5#4m67oKhCFFx(ABy=g99!^0{CH{{oZX zUm{uXuY!Fo=HTB1_*THdzr!N<6iM|N=6h_uhtDMc8rqK#I`|36I`}!*FJigXSg^_Q zYk2-9Pxt(LfIkF$SN;s}mw@le-&kDRKa#S=SX_EoOMtEd&gDW_Xt@I%UEqwEpOji2AATFy3s;M-4Qxk7I8<* z1zsMZqaKK>SWk>70xN{WiUMAzD`7F}l_kYbF?>>jC%08}M&X!y(ZZ$FAY4i6) z^C_ku9%<3;k6lRyn0Ouz#3bq7Mrvu#bT6z`gilc32kIOZ=;?2ARB&Y!2O&)BgAp(OLjoU) zu=o!{SU86xS%E%+gZ2gbNGRcsQe#%2j}CZ@V6H$P%W+zukHhB2YD~N^PQWD4iAa{+ zlY*Trmfz$!CHSf0&gE$VP8V=4&j@g)fOB~k7P&lIQku)gCiWiA(K!^W_M05%(!wR4 zhmgAS5vT5gz!xHTtM5z$^4xEJl5uq_{xW@qGa`$@My;aLhN*!lm4Za49z-4DM#6q(ImEt;4tA zP#o`8T8MWWLcH4%;@yFi6zKYzRz6C4Cl17GTcGa(66$V*Q1>8C$G!5MW`VvBkF-GF zk6lR~F!4Nm5R<46A=x~9IM^d%X@Pz;*kfXzgO3MzLcnwI$pB9Ycn&@t;28l|)Mo=c zC*b?_e1I2_YJq-HZ4UQRw7o3gaIXY-Rlwn13-G#t!@UvUO~eKIEsSiF<82Nq$vZ+( zK;Ok=fQT? 
zSh-A6#5Nsx~{3gfD!8?jOld}YvRlu2?Ex_yo&SWPnGC7B& zG?V@+(K&SvrKJ5P$6T~9*0~Wf*BNo<<_SD6LgwZ}$lUx$Y5C-ZF&+XJz@@mO1!lkb3Ou1l8E8HHnBniei) z8H7vehA_D9NJ;tRPXyvEi$ig| zp;ktmj#cD4&GOj`kFdYIC?D(Kb}T;f4hmF5qyh1{fjWa3ceZLR>yq z!^k!{R_CCStRWQTb4^S}z7~>=eC=TCh`D@@4lqW*!(JDQDK=J8EuZUQSCaKj@|zqR zKl3&%V+ydj!nXIQ+c}Q%>ryL;0%omu!Vp#v?UhTww0u8(eptLzMf!f zoul$O-A#^d@MI9%BFwt&5U!%P>m8A-eD1`-)bcqYZrRR2qV1x#tbFbo za5ur+Cdcj^r{y!jt|WVyc-roXNua%uEVp|H+ea+_^1!~q_Y-#}_YZJ@fHQeufQo=K zc@P$vJXlhiNj{!dlN_RRC?)MTIS!?TOFRrAbB80&+!29~M9ADx2$?$?DJ`E3t@;=5 z7+i`wI+hkXIu4oZpJC$aH@dU&uLhU`gBQg`E2Cx@0R2Y zol!XEGil*c&O*49vk?Y&4pLG+8|(CyU+3ab9Pd0@h<83hybBQGU5J#F&&K+CUP-wK z2jaCYpBDoOH31>iC5Y2;seGqdJ}<)~EuWWT^MOng&%-M*iFy^1&BLpMT_cv3&ufES zC+0bLeSjMTJO^(KaFc-N;LQPU5pcb`HNb5GzF)ToxC5z{&pXxTaCb%9-2x7GPk?&` z9PYjV_X{}O0|6dHs^K0|o5MXEZI1{z+@k>=6L7f413V$%a8Cw!3UM`l8Y8#K@eGHR zK&xt9gL<~Ust-(Xjg zZ%w=)zQg2VrXbnv`##tYV(EhTG1yOHzE3{~_(j0?>DK_i3HUz!9^elFPq{w>{3YNi z_jiDQkm?NnS8WdWU$o7zS{iOfEUvTzQnbl2Q#8*ke+TavU>3ylc~%TDfOx@&|pTkPBfM5h)5R>3t zkSuuDU<-*k_`(4e5peKDu?W7Hr1}iAI5ywIXOe#nZAl0nEQMqpEFEkavD|7b*yQLI zp55i?o|g@efU`%UBb?Tv3y(g-dZqYtuqU>mYPA8gWNs0opA_R3S1_-v;josai-QA7d-Tl9xXY9S$=RIqo@Bg~K z@A`bN_xFr-&#}f_bF4WB>+HFsFOu^VQ_ns$zd4CRcD#PH5U)Q%ya5RD1|m66G4+*| zymPJz2jVqOlbxMv6tdpf-m)B-#!YaJb0< z4ij*=!vh>4;BZFG#|1cEz+*oF zi+OgUq_oL#5(kB3hERk)8I#bbAZh4RgPkVk(5DADL%^ZW#3J-rl45~A8@rI4W5QhT z-fC)ea4v)n&O_1;&JQ+Ith_)^9#Gqj{%ZnXi?H~wLs&T1BWZ!Y zfrFL>`bH?>Zc<}fpl=R%i(pxSzLn!rfxZo!AFDC(!ngyIKzAZ(cJB&yw^;cm$34OC z6?ZQ03ouK-xx7EX0|L(FgIMJ9AxUm7>-g@PXK10LXAwGj4sl1%2YvyeqZbiZq?a&ag}xjPuLyXB zzKX@DUz21P=!Od3ikZBwGcw2g1}$95n+TWk7Q*1(Msf;tgWo#*4i4Gz-lc_jvk~IG zhY;_5B&R?(uw!4Be1HS-niuE~frR=9A=Jl+)A5OX=USjY#iLZ9Kf^90pPP6det}8U zFOhT}eiiI%u~LElCfK)Po`c^7_+G$s@P`0D3V06w6yRq8SJYnu{3_u4^;>}7kz#@V zLv0TCXSDq#;BbEj_(#CuTJ7LVZ;eE_HUZiqF3|H}q?;V`a!^R-6N>saKPDq@homE4 zAXs}b7tjR*EF|DzFO0<$TSQVUpo?M`lEqBQH#ruE(7_T&+QE{+mJ%y3pfg(((51t3 z8F{+rWdkfH;0!GvU%4A|F+4o3Z4vNRfJjB0rA>g zE%53HYjX{RdA%l**3Y##Nb6^5lVfcl(biF0T0T1l>?ByW$+0fSrSiERb|L9(;%U1+ zCV{#jX>Pj)s}L*Shut}87|o!ZB7dvZGq6ymWVsrD)80_9c_cS zUTljI>u0-g*j~WPu{#!{-a(RGKK12%6-f`BkvZlaY2i|KLb#Nk2!rc|CE^+t%d8?!0)o*tkh}XP)_5l)V4}?&AB2LF%@||n>+#8Ql z`P>Ja4`iBn9`?l~>V8N%5Bmk{FIFm_1A+|{^BimnFi602aBzSj0-l3I0}K;zy&E22 zgn;kY$N-~|V)-1cHisJ%ZTky2ToPccfWwUoFkZmnCImPDarvBxk#2HK;-HWmC=})M zAWTMnFp`eE5bO{!m(N23OcwC455r=L9WE)B&m*u4$&n`In;cUhbTAc3J2)!XG_msX zIjcqaoF1M>%hNp{6W~|@XXv;9#|t<^Ctz`HCra`bePdl6Ur%t7&QbaN*G-NYcru8S z5oX;fh}Y(+flotNo2Mhp>obtFe4fd{obov{ZrNEtqMfa_w0xcu@La*NO^)+8E|t&o zu?xve6HnUA#(zdUeB@Jq#=$;$#yQ5oap`+Un zI=UTkM|T9i6QQHK5Z8;lG2&+26At$Zc>Uam#i(aVGRtRmg}%5oxnE~wj`;yvxReJG zF6AMF!99#*m(S{oYJKI`BRFKodz2R9J%$kPafEnJAlc=!nr$auNqG_n;x#XyPXP(_ zG(xCn5U1l=`OdX`K8HuCd_IrO2Qp1O4`0M2>PtvE4_^-Uidd<9z8dT`G0(x*1H2*N zIrwIPw*))~-wyDOfa~460cH#Me!UmqeWX}EKTw;)eHd*Y2{_!x0X`9MxK9IoCg5FejcH+CCg8GyWiJaX$8l*vERS7CRxt5` zSP_$pSqVvR-^#&O5i2c-RfBa9^L<(^!0H0NPiq8NQ@~ShtpIBac*?C4pre4NT&Do* zBE=cJp4uF)bF{55;BZ|6bVaf@IVz&LQvMEJ6`&gNe6GRJCP#fzi&fg>sKYKK^=ga9 zj0Q{+(ukx9*&x`4Vt&lnD8R-7&c-HK%*jn9b2d4;aac$;6O7=SV-kD|Bn`f0u&u-# zeCq()2srq*SOnirQhbKl9-HssGbw)!Z3hS)^gz-Mb_}+YSlMdK*yQLLp1tJho_7wg zi-7OSt^s-r_^#}R#kK7&$y6A)5& z0OHh53_J-Tbq6A(?jR($j8)dx@b;~PamnteKnoomg3!^Sh&!4b_%MWy4o6(Yj=+c_ zaAY`45%4;lip8jpl4L){RQWZuX*wfw%+qP%QjSKrlw%MEcPx_g6jN2Hulza=hwOOA z(?Yxx5aOMP5bq=;=P9O&ANi8o(>Gx>-{iO%Qb=wQk1BdA zCd0oCNr!)Xusg(DMehu7mw?B9Hx~2k9!Y7F<6aI5$$df*dKM<3??=+m4+MKq%%L9& z@UVbGKY~T*MQGx#VO^#P_WfZR=OzYPXFa9?IzlpH;-$Gb8 zZzE}eeusmW1^Qhm;byBbEzs`;d|$AvK!3n-sX%{-&5zZXcwu~uNuW=VG`pV$`%J8S zljHN?Ux+)GUk3O}z`6W7z&8TU<+oVm@;gaxF6;Sjq~v>@L$PYP$?*d%T;h)isrw0W 
z>V6LV3qtCCMM&LmNN$0yuFyyAe#a%dqd#b&qdyTk`U`PKe+T{tp`%ti7AsO~j98&< zusE-+fLG`|Sd4mJNp^weooZ|%%%?Ll$2>nRglva!DGMMBu04`dpsW4X;RSKXj<*mk z#9J64-XaL`7DaLjbafSfX(o%|K)mJ!dT}73mOu!#B;o?Slzit}pqIv@RG^o^E+osE zcpff?Nz~<$bRMn{Y(=qBfnF)t%3_{_s{~k8z;m!efYk&%2UicUhJY*TngP}l@cmjl zz&c2=KzCG|!*z_W1!N%2{=Pr2iQiy8QK<$Yuiqex9Isv4&LUlz0UCy?!RqvbjOoH?0_)qdLUk# zI|kkfVQuzAnAg3Kw0`c)L0Ugcn;g3UiMFfS((>6m;BJCtn;g4yTq>V^unWl^CZ4u? zViIUCB+c#K!S)d=-{jagcwcd6a=!rm1f0qK0R{*-lLN8HWRoN}lm05vK{|(0(sGky zFfCl-5QNMPMVz@|frlexZUjQ+Mk4v;vtAE@qj1UYXf!Q!GzOug{SkMR1Rjgf(Ky8Q zVmwByp9$e`fPmM}L@Y)xeYa8{Oct_$uyyoR|3Xo7!5keh>I33gEJJ<3#9gkA^ zJQ|x1WSV#$9*arT)jav z&J^(dIxE20NU?mLqc(>-H`>k?hJ63fHQPA7T0!Kv8Nf8FG`4^IX$ z3t`sXk9ci95colawfPXjynYx-%jY8;%qgEU*8uhB!y=Qf zOL8+=qtC3qp>rrDEjKydq=id-3n6oFBhK7Af!{^Q+-!u*y@%wMPu>H`Cdd1@WOwud zEp+rDLPsAV?&#ydpCEMfDdKwZ8AjZU&%@yh0k5Agu^9DNlI-$X*QnpT_*!RVj`j=L;~u zfahSl01F6s4z>@lpn&V$LID;Q@cmjOz@kX8d@iOohg&?_mJo2bB?Bxa;BZR^SVq9% zmJP5RQVh4e+8l0$Xj@Uh;Z_Q;vVg;_5@1yUhwBhvHN@3;b&Rr2jx{(eBx?#b+vHda z!hKpBNi(`mu#RG7RWoaoqf@x9D_19ay#SpBJe2hVbP@1Sx?(ZyD$Jbph>O-p#wN!GG||(B2t93t zxTlQ+Z-UU%ridqeH;l9{r<)v`h4bcWObhcC0k;$^ds$#Bj!O$-YwSX@jfoe;wwPSZ zc1U{rwhz`_th69@2-ZW)_i4ufI|=wc^$gHU!1rnA0J{iy%IzAUw}7YIZUJ^jiZi&6 z+8l0=Xxmf3;r0r!H|?u>r;j zIQV!hf=`eXpJ5Kb=6m=|%3nj91fhcik+g$@f*mYYwi+`wISS!@^a7RwBdOHVN;V;7QBOuSf5#U#*aNSfW#gPkE( z{uFa&@Uz67%d-QVBj8+~8{j+v=kk0kaye6yn@fJywj#Me=VbniejzPf;zbClyBKlm zE(v@oLh3F4 z50P~E9|ikZ%vJQ00G|qY?4MyV&pwxwHaWiFppbki6rsPuB=pxv8v2`H--qj^SfGExE+jvjlo#k~JKE#WE{=n@J7XJbW3#UDj z7U%^zXjz~af)Z|FHKql6k${T|mKEs5I4%|F#j*LZ8WS&!B{2!K6q06l>0rx<&ER#x z(w~{j247Cxxm-TL3IfjMiUC#J-BUWhF zaHtUQ3a!Lq)K!w~0?jAw*yO0z8JT0Qp@mDSMYxnYgu&G#IR(10v4Vel8gR&t*GLQT zHb98CAws;3kemYD*jURZ$Hq7iuX%yq1W2e&5khrCoQ}=pJJ$leIUc0~y#;n5+0w-G za4SrrZjGe#aGPM;ij@lVcEPq6^Bn9RUu%_J_JeY=b;>=^|Q3eF&Rj-!_=0R&%*;AAy~G_aU{p3@;L>&kW4l4v^@%w zK+}*kx6^|iEjFY3%(Q$S6Z}|lXY#lJ#|t=ZAz?nP=i%iatPE!A@FQmvO7A57CJf?p`-H znFt+SfVf^- zB)fdpR95KwTdu|-JKi<45bs)qc-JAsyB^6dpTyH|;@yA)@tT*<8-awn2_e+Yh|_V4 zeCJv|Z^ffjK5xV31DPhChj(BS^-d(6hj#_LTdY(*?+JFVnCIYq0cHt!4&EQ&0Rhj! 
z2Ln7L;ClCPfJX#;za9zxc0z8kne7=B@ zZgRZHK_PibD9Y!{n2h`tBpvyy!Cn({`FuUV8v-8on^;V-w!DljB_o z9n4134&Dp)zF2wrY`MwtL3n;BPxt&$fR6>7p-%#QD&P!#hQ+mgF3DT;yy=WxzAto+ z%ICjsa(sy=gZK(z)_sk5ZGIE@TZFaw9m2f+9!bmR4;;)XpEKi@{Rkx5Pijlc=g$Fu z5iHx}_?6>Q`TPyLko<1qY5NBzf&N6&-2N5pZ?W>12mT4(su$duY>maqZ3LXjwgKi5 za3<%)B9rq;ax=+~5!WU2>l{i+%T11Uw9v}}2$^e-ICBdIUI-y`3nOH15hS;KR@UoZ zyhU-z?r1Su=xA|-j+Q{&(UO6eLg;8|#PwnsjJO%ghQo3KUO&rYG3pg0+2xbpCg6#C zMV*m3=9OsSQdUN|lvNN0ws@1j4Fr6@HVm*4QY@bvtIgpyiMCAz9Ijh{%>*27 z^8i~2INX*2wnB>GwpN?NZ4+(V3OL+$0k#)#xb6XV5OBC20d_=Ojd#K*+vMoUVIk=y z*ld$yX9yYH1xYiyYp~v8WmPk4lVi7V-CeFube{lw2zV%a2G~o$L)jaPX}^yoZ?RX^ zRj|>xug+1~{12NPeeq=+`ytG?eu!6P|G)zfR^>p18Qp~BF8?a;aSp;oYb0ZnV=zth zGz6iip@@4L7I-*9Pa_ad_>mZCT~0SSMuqceHKv7mOu+pG%U%{pI4&)SvDk%VoQW62 zcuX#40+Qao1A!2-Tdg#d>L_&yyPV6uRx++hI@7x0ujBEXSI zaRyIOo5M|wwxa|bZd!opNY*CD(b0U2{2lz*0LLMo&&OkElcO;?0jsphaUymhIZ18t zm@xyBgq)0|2{|R$sbYT2I4!{G0?x)6Sj@>YC37}8&f>6;oGlo^&%q@4xkwuPykO^x zIrz)~7YI1`g;)f?NK$-;xfq-8;WH_J4ee409bAT_9b6vl3bC@)n6b%mWq4jCPxpLv zfNKPNSFR0koq+Gk^;lfn4U)XY$Qvp7WcZCb$A1F;>n6udxH5{H5mv}8h*!w1fp0@t zA-5x}#XFGnA?8jF<}8-dCdXY+!riULbg|qM@Ls{P#d06VrKgx#*oEYN6EBtrFbVV^ zl4kdzU=NFxKgB!}{84e|^05Gq3pkfg1b9-wxqJ$XTs|$y&1JQAZ=caQng60cOAD9y z975`zN1VDB0>6lmx|a}A_cD@O#;R+pdHdEYxMX+qDlK&M8bU{}Bkt&pz;7aS^cLbO z_BKWofp@~;T>-Dt*;tJFJxTUc4DXWU$?biekvZlMXyH;mM7WfX5C-=#lJgW(-KclY zeS$-FyiaK%-e(B$K1Yc61(Nd=Q_XjHRwQ5IK)mKpF<$`*^)*7MZxE;BTlvoQDdsyo zN=5s7>_YN`iRa;um_+>vN$26u!F~}d742Vx{U+u)_QY_l@sLkQ#jkfs&9B%#q?F1ZdfdK6VeESy+un^)hy)Z`eO^!t% zg=A6jsG^HuGW^AnbofgITT;wbbg2MK3wZ3yU@^~@m6SF)mgAt1EH4zHSHL9nibxuI zrC=+IIrJ(4RuypQ4p@XG(=ErJGyfC_95@<6d&F<#G zwh$}d@La8Weva6n(Ux+C{`^uIeO5-CGLoj zx}6ZGu4mw02&vl{A$7YTxdoa}NAfB7U2)0os5dQiv>QT4yCd$XPvAWeI@%L)McNA^ zR_Na0u#bRO=)PEty00X=Kr>`E5%$v=nPcup3zyO#;Zg=53~nHjQ=n`7*5M``vf~Y+ zg?NJz;tfHFHx$V!(6tqnymC4W2jVp^(8Ga*8i5dMB;s_8lJ8s#^k_Uv1$qp2A=%%= z^Dx0A>R2S5hvR~c7b_L$3Be8!^BkNQV3L66;DG@S67U>6I6y(b74?t+hYI+9O%8Av zQY_GitIgq#h_)jI9BxX0sR9mnRDfv$4mUl((TEH5F&OD4$FUp~lH-J;fF6&@$WK7h zk)IgsBrzAz839ff@UTz8Vv3zADHhPvunWoQCgqzPXF%xSOeF2#tYBx0l^4*Kn;hqa z=ehE9&*ud=U%(lf8Q=l|XXrvKuI(a8-lDIq;k_rx#X84Nxc|1vaS5Ib;!=cJcNyZf zd3oR~5Z30E2=n?XB(0xUbCA~0(k90>K%!l%wzPa+7w~$)vQ3T~I4+gX8?g(?O(veU zH)9g$79`E>t-)>+E8pa}J@_5s&g7i|?hNw^@!mp+_coGKKKXS%UiEti2jVp^pYH+*H5(z+dx+EVzI^9e zK0m;tR6ak%<^!1~o`)Y}67>@#orj+W`%J7)Dfur^mgnAfW!Y5DBH!JP6rGj7>xK%%X#wzPb%5pYex zvQ3V)I4+gXwXqAyIwqdB9We>i2}yIiZm{*l%3mJn9DIFoXR=Fxt^&?vMSw~HXR->5 zOjb*BGg+t4tk&opN=eI2j#^r{#5#n`)g#VaL*Pb)%x!>>xebxr@>$oQfAKcLCA*`I zX`!P{5IWivaYx+(Z-&s(=7{UX78r3ewhV`@1iXH>#$wdlNV3akJ-_){m29gsGRM3f zEnLd>2$#|wVQ@PjIpwpScPa7hfjDHx+mROH?Sv4oCqlelNKW~z_qYA+j05qSm(N{* zgxVD$RBy!T*iF83EuXvNQ7WH(u=zlyiRa;-m_*$RN$26-!S)d=mCt>H^%e6R+%G^s z0nfqy0R{+o4h{^^B;a~CD8OI=->)G7h9brCIZSO1H$2)#2sqrx0HXvPZghY#0uHx- zfCMRq8>=>l8y9Wk1srZdfCB^^ZeoB*0uFazfP)ZM@m&qm`hI!9&m zKWuUwi!b9i4q?6>k9bv{5coudRe2J^jGlqyF8_M(ah{Be)=0)C$0;ewW%D{O)MGN5H}F4RD`;uXR>{`vn~C zfdCI8ULFr&bnM-3iX2kfxLr29y)2InuM-1)XbQdiz?9g#d>yPO- zV0gd5O=G+M&#UPgSJSn{)pQv%DS4F3*mY*xR;>n%=r?w3htlKvc{@rjk>V1#_ICM(c{O_I&O3_ zU`Uq%{l<0dIFSTQBxMt;`2XroojX*{xd!UW_+(TE&S{@K#!4O6tJ7i0;~4x>7XMrP zE9ps0>iJVhTF;*j_KcYSN_sZHa{?~r&tvgV>I)L@(GD9)86f_P5Z-Dg9xr0Oj7j`g zkTm|Q!Cn({{MQ4#A>jCLViEr>Noj-OZ4P*^nNWm&7n9Jlku>ys!QK~h=nn#XDB#c^ zVG;UcN%0T7Pp}KgrzYha44*;h;BzGH;EP~iij_aAv})bryQ01d*RSR3uD=QJt$_bo z`7Xfs0{&;^2P`h`M@inJ3ZIQ;3-2eLqeqqE14G9ity=TEq~GKi*Rg0|`bptud>O|t z2oDgyBF4W3{vF`~;tzyJl|Pa6Z@j-anDZ=A+5`R@O1OX2m_AFi+SRZ%QvNK_hU3y7 zd2O)^$vh^0mY5flK=UDKe&-L?PHYBaO5X{*Ks2;hgR{9{fQ1B{&4mLjBH(N;ibXaT zljLU8-?F#3&f)2+RqGwc_3GY=ey4PAJ;hmCf+j9nv(3l 
z^Z15THaynS8QG&=nd0f*ZmKo0?j+cCgSh@U-sVx;>Fy*MZ&I}1e}-3618?~0@&?;UJ6G1t-E z1N0H_u=l`XlIb zaEh7&3=(jP24it?LnL`CzmZ>Cu1JRJ9M#du3w!BzsHpk2B~ZeRRbyI5#|0cO zI8|3M)pu|L$E7-Y05+e>Hu2P*gh`+Sku<*t1v^-5hW-pJeITt64Tq?~**rABWC3UM zumFb(IGaack}6Bl_DLh_~|PTusuM{`$KjIG)A2OX(+LPYort)nlLF5`=;>s{W#bf#xF4s6!)XFuMyF#j?lUAg zb+kc0sC1^z$R71sG;u9wBV5Zl2*W!U$*ZFcHN4sKJREWYo=+12XCef=03qOoNM0Rn z(68cNgaZMa*U^iCgt`PF)TM|Ma+!SRT1PL(qf|$)z%C?Lns_o^g-O(_k#sU%6YN^C zQXRc6*!5zbhc^VcQNZ)?rT{k!cplyo;8p>zpxXl6F5o+MM}Rw#VjaCpZ4P&LwA~}% zaQ6ndPr%`31-M_p;T{O^AW{tXklGyX;b?nAz~LSZ@R)$ZJs#i*0f&1sz*C49_0t&5 z-=Fggq>wx-9<}*7OtSerl4kRTU@wZfHop|$WdW!16)Yz1tCG^=^J^RwlGlYI^c$Fj zeiKPUzZL9lF^7I9z`FttJsXSA?@5Yf|9$L2@_|Wt+5Zqi2OlA62OkIfM6A5*QxpH^ z@5y}{uAj-(U4I_n3jyDPF9Uof;9KxD78m!8B&SgE5lt=GZ*|Upe0=^6U&ir0!aDf@ zaXtAl@J|T!r+3CS+|1gzEbI1U7CUiQ}m5~?#osPz#iq>Fs#TK2o* zQ7Zct*oCCh#FMcKlc?25IvHz%)rytMeqFG7G0($>0F45khZ_XgP{8wWqW~KVxUg;# zU{e9#v2Fo2LyC2JbG13#7SXn)fWvJSU~2(~+a|!a0uHxbfb9|2>FyZmBxHq>m}wox^sYC1U&3rv6y7NCB-_r8+IYt-K4yZ_JPpB9!T23p27AK zE3cy+TGr9M!*w6Iy6b%d^c8T5_6yKYz$xmF#l;Pf#oU+$c4sb#!#VF@j}}&--&+ zs-p>ZAsK7psXGpnK;w}#zY~HTAU2~1SuH(2PmG31YH&6W3~-Quvw3iUf`GGm2o~8q zRFa#`YQCf{nXGfDC>@HA&mHDGJ|9LC7kM~B@{T~9ydwioK}gJspp@Y@C1*%jm>#I7z_EXa*MJK3S4oM=R@ifjv1z zXJn81RGPS!(-5xZbcErZf#lTDO20OCCJs3P&!UNdXCnkW2O;3ONKPH)JEinIjspRk z*U|HVgqn#E>H@?Gxlq1yt)my=QL3XCWAot?6HmrVF^PH^l1|3UgIyt3s-ss1yGqRS z@ah292zVY|8{j$t&%^5j+#uk>cVmE?1boMC4sZ)ntfRN8&Eal~w%Y|9?v4O=3OL+d z0qz!XxO)QJixk7%r#6S16>awmINSpP9u#o6hXOn-;Bb!wcocD2ehj1Iu;KHtzgK+q z`EdxZZi`24eiD;xK82*&d^*@OVy?~426#@useB%biTi@2^!WTD2fVN?6ro?nB=jpt z8v4~>uZcPI>jB;naOgL&2>q6%SoYt>=5=k8^0NOfgbrpSX$S8Gdta=)>}Nbae-N%8 z%GF(e6yRe4--1s9d@A5u@EH~t_qimmP*qiF$$p`8{^R5Gm-sS{uMpPB*NE%MH-W!J zs3+ecti{s!j?uukZosm82m1yExRz|p%RS<@^Dw0$7`G_hn zXLrCMC*W!{5pZ>cfNLNGTocJD`#js|c^n4tXX@Y7%zw20X7qGxXlA>A>eRZ2G|O5o!%NFeSF@AgF>>cP}I@w zFd6ywNILTF!FCXH9qkcdM*$CeCoCpePf4+k_QK|MZIeFn+w@NDE)Y7{6-hhj9c(wT z@;aLF_`G|#_K~Z*-Xp-C0#4Ch0rnPfiuS?c;`Wu~t^Dc={ejk3=lsXV=l$?y9Q_a` zUVp?3bU@&N2n)0cVQLRT(mFbrgE@7Se=2<2hCm57RE=pJ9Tsr7VAaU=7$nW_{=pKlW{=NfqhXvHoXzn8CI~p22LzZX;A~F9BAW+Fa)iss8Byxhz$R72HG;u8_AzaH0gyEfx zzczLX4mkl&rHO#2Ap|@fA>bKEP93eT;agdgGjSkb^E!GKkWgnMggOUtLe7=% zTE7j3Uf?X=+d3afX%LP0SuLy9Zfal>= z0j?JC3c4o1wF16l*9Ev9Db~>&)aGzEM%zsS4tH~aTLc{L)&RE&INa?4?m%2e@5G>v zRt?#TH(K1qK_R(YDC+1vn2h{hBpvyE!DflMj@}>O0Ra#DK`bWOLy}@0eHfeX0W~Rq z9(@!-2ah3X2agAPLae-wc5C@E#FOFrlw95Q(*d3laEhJ{@SK2C^gI?9_ktvMz+?ZF>t!xVP1q*3owYzAIQ(M`v?fs-y2=^L?Wxp1L1k66ixD&F@FSJ{FtN zgRGWbhWI2JK2?LW`B{L^1)R+<0(>dpY<`7BHounSWmCTfT#W*c|Rk$b+o3kkuNg%1(%$jex-??enaT#cf>vY5%^Dp zp8i5yHvYzl`|(dWwAvluWz-srakr6V*U_3PeKAy9osm82d1&HV=0&)c`4EOTKax{N z`3M^y9cYI`PQV3dB4B%jfD0l7TnNdjqc!{>S52}o4g_poM;8GSYEgtxiy=ox)CQ+9`(#g1Nu;s)`b#(b)D~Nd>t{7k?0nfvg1FRz8dAMqT z4gy|5s|8qHz;|qo0Ba(}I=YtH9B%DsTSvg*ItJ(@;Be~(SWm#=ItN%EDTeE!HizpP zZ509zR~ev6z~QO`)Cf3SZGbw&Ww{=shUD-s0Tp+60SAy-74}Dt9NgTY$|3eCeA9*h0XUzGZ-|1bDMST>jPpw?T$| z;x0UhZ3Ay7UkX$)FWc+T!MjKM4gwC|BfyRV&dW{#dI~sXuK+tE&dV+syx}=%8q?Ho zT;Hl>SB^_B)bz$KB)iEYva>rT+3AC%+1Vr5o?`yjYp(!%3pgkHV6n9KmGDB%|L*P7 zn~xYhV0c6Oq%UXxKk#Pi{kV)>&r07+9ar}s-b&4Tkq7h}!P}U-@LteSqjzrFq4&mH z_Zu-*Z@$)hq4i(9$2vm(>pQF+PH#e0*f}BePS*%eTNt@-%`SicH9u3qh4G>B=5N*I zO{?kquQTKPFWx!*AG*kYi?^?gk0~|3ypENf^>*6QCC+(Ma>t6!m7V7Hj&Vh5@osb8 zU0j(RuCjAQr#bK0{hx0lAK>?}E8(NM6yMLzPdU?plUn{(?`HXoyWXM0&_Ud(aq)KI zxMg$Zgy)5BYSJ`feDt8X({bpiF(dj7Xe!R3xHbQ$-U*%ae&*)yV-6?3B|6g45i%!b z(X9VnHwGH>cfAf1)4BU^X(;PK$!H$G{B6|zSfhR$b$^UPGJyXr)~|t>)UPHatzUzJ z4Hk3#8WLcrfa}*VEb7;A$(*mc8o^;987UaWaTF%OM{O(q98Ou+w| z9FE1c9U;kk7OnMeDP%8eO8YS?TT-64x{w;yeYUcim3=sLPsHfVw@Iu 
zI>Hm<(Fo64#~|s0{jnU(d4Z%uyZ8t0I4I$cS7Z8Me?q_$1{rRo2!5uxb9q*Pvjv>Xa{`e;bmS zI)^9RmY-$4fEF(CLWI;^ggA8<2fhR$b(bQf?lL6zv7QHMUS_@=m+X$NpoNaEMCj-$ z#2sB7_!@+cu0{M1dmTpHjO)YU1_3{9+=#`fZ<1s`)z{Y7@?}be2lzz5WB(M3dG?v4v={g} z2ZiJdp$PpYCZWGV($HTA`$o*6zYXx6fJ1+eMd%+S#RB~!b|Lx6q`W}?455QxkhFte zgZ(Bp_X7QUc>W6ROq{g&BFC1_Y!LkCqD95D&y%=^O zS=_`6V+l+GEs3PrT`JhpV&w&Tnc&NcJD1A^SYE)nTp_@U0?y@1Sls88C7HRbYpAQL z)GxfQqH{8f)vB~`i5(D9w;JNqtsZy{gw(Bxkh-;y>;kQh{9P?;k2HY8O#BXi8vv~Vdk2$xce zFt|D-t3WHBKS*7VLw39dT8P((5N`v7cpD;F1={iW#gSwq9EjJvKyM5r)Fud_HbtC{ zZt|ULf!+*{Qi0wayO3;Q;(53wCQ-LS(s{Ubux-Rj1$x_H+lhG&ZXck#fal;20eT2{ z4(=FWCjnQ~o&kCZ_BrVpm$Z9!}X4~-2@zN_W*qa9Bz*QdkQ$*UIF$-T%h;C zNH+oZ<)D!C6^a77A0{L3homF#A8de_3+TWAO#&YFAS|ZXU`ern4#6%YLrqjbTTe}f zaS*)>$D)@J0*g)?ve=M?_=)OZqr}Rq=%j1zF0#$$1T z6C`;npAWI}#B_kpQ5F3)UUTi(eRlU*Jmu;`DBarVb?nkp?nFEp#3Y2NcOc>gdQjkl z5f*3xVR9dWq($^l4$>mZBd71%WFXNFQ(Ia?4-a^RplWE_smYPpzK>HdN`-VPb|E>+ z#1nWLCefxNX@ZXqc8u7J?z7WZu#XLXoVe3@e1H=KoX!&ioFw3M&cGs_CrfhES4?*JM&L6M(svd@`p!mj3u!gKdRvv8gG+Wt z=h8w)=OJ`-KH`pM2EG8HqYDvNj*Bqjj$9lLmk4+zU5dr1FOy^!Qa+&0lj-F;BXi7G z(88r$iEt@bAq?(nB&U#8H`Y*?ufZWZ-nFz4?>dBd*CWKc0m&(()s6aU`WtZ|Uh_hF z6Od3hBZRsIaXN05?_3M%ZFrOl>FwBswI|%vvMO-<{Sb8rrJC37sdvyaG<>d;#Vca6;Q*k4srQd9wTnb3gNJ#fS1!sSd4pRNp>}@splQi$tpS{d(^Aa z#Img1^XZg;xp01Bash)PhE+kz|JQ*u6iCT%Ild&pTwOFa1)+^E|8zP%q$l z*btym!1Hi}02>Op0B#guV*%f>O#*C+6zgd>wK?2o(YCpO!)*~@O96-5D!|qP4!2E! zZINQQ?bPOQ+ece>0f*ZmKo0?j+cCgS0uI+RKrh6LdS{HXZ^7Dy!$PvFV6$(*>J1^I zyCG>tcMsM_th|Eqg5>|KpnHVto^o}f_X@DLfQPbAfPDo#l)hL@{QV?(3%<5WUoFv3 z=cs}SRN4p-gsG8KxjzyzXLi^*(I0hihw}FV)WmDim2MFV( zii;LWhj#9{M~8OlgWE8g=xI1YPa_cbG&1lggq}ttp73KZ(hAKr$KBgMoD(&sC3wt*v>+y6^SZf-7sNzNE@l#v-o68a9VAv-5C;b1m|4ioTw zIy}G;0-kb52ACq?DK|C1QAlwHPg9%2O^>#t1sv{}0LLQ5Z^3%}O#OFO1V1jCkC(rL zpAg_gr1yNWXOx`8!F12=8Cd!&ZdhMlDdJs7Cv(hiSBu7z!Ks)e<}@Tt%;~|-5cBiL znE}oca8}O7Vs4%znX?aaE{BEWJi!QlJ|@9uB5Cjof?X));1>nBSir$A!6NvjlH$Y7 zW!Qz}a+C6x*{*=l!Ienb!BxSo7Asqo8T%mDgy*&LbkElXxL&|_=Y{|`3i$5agvGVp zEXiGzys}Y~+@f>-+skaX;>sv)Ls%)dBVH+Y1ilktrQC(EChtbl$C`UMn6qd~`yls1 z33s0w(?v5Y;QfMSi{=52OOG@UVi%H!OuT3w#w5@qNSfV8gFPlz{xaL+!JiO!E}snW zlz?;jbbx0BoXcmi$mMgA%v@I1)m2sKkL~Am4ppY*KFABSaEUJ>r0ylese3u_D+sB3 z6(M!6A=%|j9aUCv(_Y6VyQ4Q~p`$kuI(iFnM{fsy2ce^P5m&R>7_na83y1dwyk0-R zV$>f>GM;BV<{I7``;pGb9P`Jta4DZ4T*{{igZm80dYq}}Lx27ro6m8`j`syE#QPE< z-d70mzDBa1W*o1fzAE_!2jVqIdSy&PuY#nZR}I!d%%N8cu)2Ul zuYpDAH6_IYy%u&MS=*$%K(7O#gN{hrL8oBrip{-1uNR)3<>{W+570%xcc5#43IX4N zN-VCeN|ISrn820QdU>N-=lr(Mv<@&dh0@NLDN%k2Ve zFW_8u53qxPbJ+unT<$2z%q6e6*EZHspm)+anZ>FnEnH$Rgw*YfICZ-O-W4Hry%AEk z84?t@P!)H_CA*_Ow9wHW2p#Q-xTC!S?~TyWK8P#Qz8JAW`-a1Q0$!p0uo!iJ zNk)OzrPS22_cK6eWR7_tEnG?y!levC7~Ehat3WGWE#GXE48b8g-cVYIHw+=(aD;dx zkgNjjc#UkNkHmp^%?tD>AfZMhgc^f59sA37t_3>5qg0^BV)H|KCZ31mF^M_>N$24K z!6u583iPC42a0(P9u(kU0nfogfI|d42M-M}S-=(bumFb(_dEhdVmJF#-;EY=Gks7wF?L(tVH8J3~?|pl4zilCw&6bk(}DIA#`vKl6G)zu=B*q3uwzNkn_WHraayA z1pzJ;aE2}laIt_hbO{#McBv$9(bxNzq%YGsDxm+m1#&r_4B`reS$8GkwRu(Gs}a`b zH3;+iS|qKX*Kv^6&(api^+2NCptiJp-Wc#E!KuU2-H)3&E|t$)unWnpCeH0`m;|~V zNppKgusg-dcR%h5ez&+Yc~5|Q1)RzI0?ZO{Chx~0lMhI8Gg(>57n3}wb0{S(cRwDY zg-d)GA#;x)&fKGcA4AC8;|Q610?93(^)-#W1ob2?*&RJa3mrX;(9tu9J9;+oa|j(h zkGNjEfD!BG#c+5@!0YE_EJpo`B)fd-U*NjrRh^MJ=GSQ9QeH>6ls6Ct_a>54KI`jw zAvt*qhwON7(?Yy=5aPXy5N|e;Q$FkYX7P&TJsgPFynMb7B-958p*}>Mj*sL!*Yf!> z9;Nd62{zweYT|kL875IbN78xtMX)c$O6BvbU|)-Q4t^8hTLI6(?*e=;;5qn1fFA{1 z?|usKvw-i{F9Ci-Ti3G z0beL86xD5BOh!H*l8$`-VC}?QJ{JhkUckd%5Q{0ckfc~X7sf6mix(O1e~H}11u-t6fKX%#jPO8TlEcGBGtc)=4RzbWvR}I_&VRf#CFtb-j(gL~$2XhMO?6_%b0*SVk+R_5LcEEK6 zrw&W^JUVh*DxjUP3(2}Bp1A8_5~wqhW_SHyUBqT6r=?Fob&ZA!H8_`*0jdO?%jy6% 
z0?uVE7P+jGWahHIij7a+3!c>L9I8pio<{>sTx26c?lwT2yA1)yrD!L~gr7F4?Hs4)p;>oxVCQ=c#;4J~)mA3=DBjCI8E*94|TavpN{hM3w>74)eQri2tGKvonR>+5lSI9?! zKSo#~pCGKoPm%OV<}(iFESAz<$LCPOeWAv5v3wcuE5WkG@-@e$N11Q13(2=8UM$~X z66kv*&F&AueiSQzDeb4=KZ`q;zXbSIz`6V_!0!UiE6 z=x9O2Rcs-QC;|(I!y*D+r;B1S>cu44&oK>E)jY>6t}`;nyaX*=%903|vJ}GLmPT?O zW2n!(#eEqZvg0jF3-Ojih_^gKycLj~r~CX&v>wSuiJRw~--1nVf~IoK({x&oeq>jmg6;5oQ{ zfGz?q+Fb)w2>55|4DYLR{#$|G4_8Lf4`EvON4)q41RjX6_?r+G z&LAW$(1ST>S)hkN2{%-YX@MRVaJXPufgZtesX&j!=39$Qyf8*%5@-yPW_SN!iCB4o z9vgg|xN|u^zyty3@_+yn1)R%CSmg3RNp3D18fqGngLF=2u{xL*F0p`+xXJn4~L|V9%lMpUt2EyP@Msf-?KjOe=G)}=GJKm|Z5brdE zc&8)8I|Ion(EJ(9dq~g3fq2ae^jSbcosAIc93-B)`jTLmig^xR7T|IL&%rAKTq)o=cvXO_1zb_D32?1|@7Hw!u1AUm z`UbT*+>Oz8lYqnB9N-oKhr2bvZ2}H=dw@F-7w9`N(!GwmIN%G5grb1ngUQJ6MbeSq z7i^Z83+Vj;9uV-bAH-scJtQd>(1)@4!XlIMy^cpAbnqCGcJO$xC&bDNXv@8hC&Tk8 zdAjGP13V+(3_Tm*IRR(rc`UB&1xenb=R*Um#TRvs3h2M?b-aWpgLoNX*1dvwZN3`# zHH5YKI>Nku14--Wn;fL|v$WUo7LaIft1T^`?*x2TuxzhmHpiv%`5rc3R%GI7`vE3_ zK19;oeiZCuvGTo+PlA6c?o56b;Bx_I@{0gp3OJKrVUfwNB{`YoYuNbwVe*a6p_H`T z>-d%yF7Z2r%zckIb3X+B5g~IwA!P1nB(HqdRro7Uf59cYqhD#Equ&rZ`WT1-V$^LUndP&NFAd~*w5`s_9P>Q1a4GX5T*`b1gPR}8 zE}wN3b^3Ypb~t3mTYwhgwMU4zAVR!_knHlwuchnf%@@Xjc+Jb_B0xediV$is#OYXE zzH=>~OW;u|pG#u%Jw+y-hf8A;br~d`hsy?APOMZumk+jrnCIY%0ag<599%iTDgvH^ zs|M&G;Ci=OfYk+jzt#w_CQ>Y)YpKoQ){eGy1RSnofKCDqw{C#-1RSn&fb|iV&n_70 zUPo6B3Q2`fl+Q{`MqY)aBd-otBj)m18=y|W!>-3-iZw`z<+BmHkZfR5zSprKgbp@B z(hfEbwux9-`OMnu*fdgT`j zb!>|(!`KdC-ffR~b#@QD1H$U;fiSaoMA8Df69;n&=Vu@&-6PnZV$JqC_KJqR)!7A# z(bGtTo<{N*2tDnOxM(C8aX-d}!#DviqVZUadx9jphF1DJDG$&Y*`uCF6W1~c z;aUzv7~VlhP7$rF(MHa}IOGH@&_uvP5CR^G5O6Y*Q$;Il^-F<=;XuIVRrGKmp^iWZ zbtK}1Op)(gtLRibN>%hI>_Rfl#FKG4CQ*+@(#d#Cuw%taRrI)E$BTI$o)F+f0nfve z0?ZKbJUltTDFQBgrv^Apz<2EQ0B0b@Dte~c9PX@WJ6piv&Ixd?fWw^^;Cumxn;GB& zq!{i(wK?2H(RQ(b!(9^KQUQm%EWqUg4tGU>D-jpvt1!y;I52pPQ& zNi%wVup7k6i)Y4O$Bp57lU$wXn*-b;;Gx_a;5Gpd<#sG4{vDFs1@Cv)-Kld_JpaR9 z$6fd`j=K@&+dYWa<-LLLLs*xy5N7oKNNxqFs?Zn1Jb;T9Nyc8sgEZ08LkK-RjJT&q z0zZn-(_@Gy{NosDaZdL-o(ShB)tFZ2rvg4LSoZ3`GaQ!|#Ix9iBi03i6m=}=r z_PrSFC9%?icsbZBV!ltW26#=t_v!TjZwUB4y&2#w0Z+NN1H2>PDfe!G*+_8)zo#~b zdq3Jf5OBB;1AK&J?R9({&7a8M!9NY~nf&MMb$retj~dmby^b$9E+k)y#?!`Em?Y$D zBu&US!M+vq)5doJz87#de!yZ*{wSHV*YOjFydhgKg8za^@L!QM_;0~}7jy7G0{ki9 z;D2Ee{BKF|QRW})Legqqk@A<)T0`id4U%@yHrPC3Wvek`uVdcuoKK$adHw+H1bkN( z2+&@@cV$5=u5BSn-eRocy{-+(!aC=_y_B{Hu8d+)gcY(F;uW%Z;3W`N$dU+aaVaEy zl3ALAIg6#V*Rc$gaLcMOT`bE5Twbtjv8=#x=}~4yY~J;4;>EHuCV^H#((JAptbB%-Y5U=@D%(g&6ZHEwQd&KGJF5kI6#q5Absc84WE+jjecpmPA zNz|T5IuCmV+gYqsw08-%tC;6t?*O|Acn==U4e6M3Dq>u~~k19GGli`m*(&3K` zHcHG@baa3*0v`MRSj@9TQrhbn%K>l57K+g0F$p~ZNkbnHY@(P$PYQ6LfI}aIMd*Vi z#R6TxE+mJTlo#kjA#^YqNjo?!*x_PxFVIJX=aKSs&r1jfgwCDe%n*9o>SsBHfA+EA+N-xLv?2^bRaWeWxV5K-aJrU6I_S zGcw0~H!WPsJqVX_FT&vNLvjjq4gWPHvvA0ccRww}djKKcg9z~+LUIZ;(fHb!hjAcY z^8)<{kWh~zgnA5#r*8SqwLm|CN2x$RiCsvZGVwfo8k4BcAn81OHrR7wr2_qYuouKU z2VV^El7Q#n%K=^y@Em+Kz-t1osILcjL%{dz%>Zv9#RC1d+8pkkXnR+{;bsSTPr%{c z5AcD2!+jXwBg6&zV~ljK;}Z^eOSVuH(9bX#`R7PF@-KpYDdqzDRe-MrJnV0_YOrN%>yK4-h)|5lK7vDcH|qw0Z_)E+TGrw}I!6WcU-vp%_4OcHBh0!sh}UM@!1Exi&3O^#^?XQLKj-Hlt)Hd6 zj&?v&vVhvs^4UJ%f`Vmx9Sd<>DxV8u^WJY0PuoQ?3A7lJ=63O5ONf>4bu1ZtDRF0V z=>W?JIFri;SWdv1Tpo)|t{}Lb86k74AkN&Xfjb~%ZZ(9= zt&ZfD&sx3?yCPWwm+X$#q=k;wLg;91#2u{@xFbSGoe0K8b8wpg+X{FNZWmyC0nfqi0d^2@z3UNRM*-ijodWbkisiGH+8l1@Xxl}= z;dTwsTfpIV3$VL@!}STU2jcR%Cq}y0u@?ut7h5RG=RTN>d|xCTdEa3BiMf3C3(#M{ z!ybUe6dNchmd_^aLNdste6M3Lgbs!vX$M1t4HGLXpILhy!^3riT;27^0HXw)qR|1y z2slOiV{vhbByZK%Rp=k+u{uZf^WXP6#^K5^#v{zT35Zwc0f8qXtjX(b9Zv! 
zQxI}@Dnjl~Lvl+fpQYf(Urxs*r>8S$qNg(vdO8bnPiF@{2cf5P5f_c~Fyelk9}Y7G zyofHqV%!%>vTJA^&v+HdMLHvU)ECplwOoR5Etet;?=mE(h}Q8P1uFxW)n}w zTQG@wE0Rve+k)LLR;r?R1iMqr^YE?!cMEtP-V@+n0nfwx0?ZO{*}Ff$0|LHd4+eM$ zDOS;k)#h-IMBAeR4)<7q#|0eji2zRuINVbKo<@q{o>7~_JsWM$2{_#I0bUSrxEBMw zB;ar_2Y3Z>QGOMpY_H=r4hzZag3VrCeFH*9-$c@kz7_0kvGU@XvDfiVxV|e_Cwg{( z_XIqY_XB(&;Guko#l-(elDptp?-j|%I!DFxKkRjUf-mFv6k)!7hIn0m9{3A{b@?U2 zjQ$GAtpN4BFjtX$jm!T>+(Jgamg& z6OUp;?7jEid+)vX-W7ZAz2En@*4dMDzMPHxpZh%b`g|Y1wcnY&X3yF)>+CbLlW39@ z*z5R~B3k+mp{4H;Yv~8aKO(gB6Ji7YXN-`X!(PWPUinuQhV1;C!`}sSRtNszIGzxH zVrQbijM#+u81?w0kk1p zlzPoF)wGt&9aIR|U8!_XC17`@8jEYIkt9tC{i>EZS09f2z`p%@JnE7KqJ| z0gkssm?2vs%*BC7c#;{!LHlHh_d2$Q;%*xihRL$6!|eofCd*)s<42k8v3cv;h)tFq zF>$mL5~4fA+0J6QOKC%$4->ao?&4s$fW>lG2fGPaEJt7w%aM}ASXNb4mqnv=4%wyS zUdQf~aEW^$glO6c!+~T1#Ao+=3t(H zCGEo<%onixb%cW>k!;dFN@eEmXfHcPz}y|{;5Y$ucf5lW1kBxu4o*VsvEyWn{JoA- zAerb?anGWsVbcB6k4P4{wTCs&E=*zd^f`6 zzXxIB+>3++eIExM6ZHL1+&!Sekf0xQ_>f>uf_|9eI6*&x&AZ=5Y+^iyiKEAn5Zx!7 zJt>x(pr3O7w7A9c83)e_SS+7&@VtP<@&znn`JyB-mNojb%rEJjg30P-O1Q*V5JLAV zVxfD@@#_eodjlbKZz72ax?hQY5Aa)5DcsQ8l+e&S2o1f9SVQkQUVzZh`-o+v4={X& zeu%|+9|_nD{TPd0eXgP>La|f3Hldm#0mOW z>`e5V5gUiUW8(D>B#gs9o&6;iC+NSO{Uc^$@Lva=>HszdJ7eK#F##KcT^w{3u#CF6 zgKh$Lzm{;YB$7?g-Bo7pdU)AV0_JXM2g?YUyJa0LCt&WDcd!Ct33^40u-CB?2fUIk z}3<1M556 zKrA9Zd; z?|vJxVY@jdjvZEpzM}@-v>L|hAcm#hF5d766h0`bgw2qjj0{-&T=R_c`CLy?L zKr9@Ms=LtiIhh)9`kaEDiJFYqIGl=!*JdP)!xm@L#Nzb1ud_(Z#$c<1HUS%h(;dtZ zurWB(!7KsGce5SrCt&w$j)VP?Z2HWo%-kK|Wd{nFySWYy5-@iMJ2*tZ+#TxRFvQa5 zJdCi{aX1IOi!J2o^9W3Oek2llew4GL#Vmau-KS246)-S+4Ok=b|yN}NbX+8 zNe~)183_%X;_Othob*{}uj4eYdb+Aw(`Pt1Q@}!WmV>heEJWvEadGEL5@tP*pZb74 zPv07dgHdVRl}EFtRU2LIS;vgZ2cvz;D{+K)hX{vXDTp zba<6u&R)mW9LEXt8thDTtq~iz*J0x5dL%^m24^>l$ zUBF^_2NtosQ<4} zhM&_Jg?s%xMO@1Z2-or=Lib)m5|U_XMLFMzUZz5V!&fN6;j0J^Uqf*CI+Bn@OYMi1 z-=G2x^RwuiKwQ0r;OcF}0`iXPE;Nh2ON}^-zK5NO78tR?_&z3HKS07@{LtA)VsRGz z*x4syHV!{^@R@*(!_OUjAzb~r{orLk3Yfc} z9Q-U`?tXFbtAM%t&B59bk*(QCmr_*=^wJKN5wNZ->tHzn>&o(2 z4Ez-&i4(q`mY-JCIZB?3u-CB?b?L{-2;*%P#OCFyj#opNm#ZU;=rxe!4A8HJrq`s3 zCP{(4jhAIr%c_W7#3+Aj2Y{GFo zA$nqGq9P+UA$nnQF`FXc_HE{@w^%$O`Zy~Vv-{N7L5YCfr&0%H0(PIu9aIR|kgIf1 zC14}C+CdGH9l`xnX72iX+2#V~ZVLwkkV1PMTYB+Us&CE*IvAw-?RyiTTy@7*bb9`3`Rmgws*FJm_2Rm=wK%Si^dQv#^lbD_Pvgw9P&c8pgZ3M6X(N` z;CxqSyNQ|e5e`NQnDbFsINx28eU#Y)I}`0`BzGxoF9;3njf4h9I~yaGGaC!+b&U0z z`>3Y1Ti2e?qB9Ei`BX}{l+zF{<#dG3oq;4g#+2Jn z`JPFI!v4;p1b=5E_&W!|-?>P_Q%rf8{%r4gRKQ>UQ_T55TwQ?R>O#cAagpjS^eN_I zYQ#zV66{QLsSz88mto@dawLqyE1X>^7ANhioLwzuWAGXW*9zDeyw1V(0yYM3aB!o5 zCGDFW+$>=C>lO#MBH5&Uo65}H?Ot|=fVsQV!CeC8?rsP72$;Kj9o&c5W5@j%`FkA? 
zKr+#T;+{nx!le5TBcc0`ID1sgvgl(D9v85_KY_(Kdr}hbbv(rZFJue3>t`@={VWn( zKj-XuF?0QbgBJzN^-EZ|ep!-D(63-;qF0UNCg|57H1IkS8hFFmn_>%3&~JIow^h?x ze#gPP0(J-9bFe_b?!fz4T-yhdq@+?_Rn3={4|UF>CFqZ+N-sV}7}lR4Hu*nw{29XJ z{~TfBe1U`n{Urw-6ZBV5+J4b6$<-X zgA)9$iQsQ71b=HI2?@HQf|t0Xb*O;9`~G{@3 z==nCzwiUAkx}Af;0@m&Au^3`INU{lZN9;_rlabuLjv){l*ck~840Se4EH{C6-0RrI zYYtaUYk5}(y9ro?MmQKLU=bRH#kK7&Nt*N(HD%1jJ#>x|=zs2Y>`6^Ju@}Oq+Z(aD zIok0Ugt<8uVO;Nng#1~{LCBx+UdK2f-s)5q(&u=G69jYiI_f!&(`Oo+cfXC;u$_d7 zqXs0zw$a&SvE03mDbAb3Es|3mGz(ZHTO3Riut@HUMI<9hVkG zd^8mb`#Xjb{2hzn?>Gd1$0G^pv$9I-h$m11fBEV2L?EtCLU465V&OPNbr+gGPo+kj zK2O8WM5i0Ead-wMUe83rI6TYQ*VdcKjPp~0SnP%4jvb<5Iup##XTuWn)OvB`hb2)=O};v_g=@- zRHYlwAdI_b5u2UQIes2tcD{fxvR_0(0)2^t_5`}XZ`#X1yuG5bkU(E`_?lqOUdQVk z#|iWe>`e5g5gWK~VdCg*Bt-WeXYY#T?RC881q)PQv3%db2Lcw$4;_3YV6prdi&%al zNsMJ_89SYy>Kw93fxV8;DB>bNM~K}Qh{f(p$6q1D?rVhDeS;*XQ2&c@-%=%^rSB-B zrSB11`T?<)esug3LQ6j*mNb6B@cZ$rSNKi9CeiO$^!pD<;T&33uBYKYbw=S{|3wkk z@;Ab@{DaWFf02YF$`7sZov2fNqQlM<;czhohg}dHc104hC_j0^2hQSD;0p7zXg46P zmOyZ|Bw_*SuDT1&qCKb)XVImwGttsUY%nf^iPvS3Fc_C}w!B!JMOSdPqL_`tl^m=r zVB>HV2dfI$I9$!a>H?Pb)^M<JMzZcUQJJ~x>19O%=B}56O$E%|W)6A_n7cj>iV;i7eKB(OI!ZXqM5TgxdmUvE zB3h1wh*mhO6w6JX1@=0sylS~#$D%DbpAWar@ycNNT89oUWIctVW8<}b|~u?aB>lZ)9M z3Ab+#XM2jp6JjrCdyCn98tq_=fZeCD4)zhS`&8>-oPZ6vItSwgY{*S;P>*Csa9U;N zZlae>5-@iS4jPd{dmWR#c#7(q^CkyVRlj|&qnSe5I5<|o z?#gjkT-))Iq{+yi0pus0PS81vwv=`vRq4e^2s7kl#Ae7Tj!#9HA*UhC#nX}SBy$D_ z?UNo^mNyR%dnCd=6l&k@X-Ea!3@Kgyhk&0F6_Y_eQ{iK7dV5Z#NMT`ZQnly-^p zOT{ggmpQmxz+!oYgDV9rmRDgB%c~`cvE-!G@ z5g~LpA%yN`Br%OuSL?I>7OE6(=vGQ-=r)9gZbz)4I~?DM(9m6oWwE<4JPF+6748+V zd3qlfy}n;k_&KJgRPVh#pfd{g`9Vs!l!p*5L!0e|^VF;4?=^$dcmXAuj>bE>=0r}`*$QE+fzhmP14}tcrvL zy&4A{6ZGm(+^wO)kf7IexRzi}f?k{BI6<$2&AZ=5Y+|g3iK7$}qPxDc4a9O2^oGti z61P}x>|hfCi)Bv-MFJMfURcC(Q%Pbht1H{2_O~4+_#2GiZ+irP zJ0J-O+W(%&j#R*3euCZ!h^rw8u69QJsatgynxKbKBTmq}U}vJ?Mr<7Jiiy|VkT4EM zI2$PzC+Jbmb{DfTxQBy11#ArNY=5D-~ zO%O15^$yYk=5C^cNdo4s!9gQp33@U{*z1_W0k32Wc>+(DN2&)5I)+?&}~D zux__vF~r&=*#tTrI}^CXP-=%e2^M(`g{mG z6FqFi#^EEFczqNJIrK{k?-91S~{9I`~PzLi95h7x#-KVb<4_@Plzx(XTp3`SZW`I)0-n z-S{10-2H*r?EKU5UkJ1FZ-kNk4-yjSzZ|qD&;@?eI!!chossqgx|qW*f;oE~T{(^u z=;GL!sGAWRxJzJiSxX`zy4{`i5X;-^Sjr2QR)NKG83)S>SS*)wu)Ki9as@16xuPT~ zmZc?Z7e_1U9I{D)y^fVB;v!c;h~27)#cnmnt0Tm24TRXOi6o}b(y|);ol2@Cw6r!w zw6qRFOY0)m(t3_l2raFTSkl-4!|%t2UST5vn?xI9(eF(pg>xvcLspkXJ#|LmUKdfs zwe&)`mQ4}5w;7UOMBZn7%X6SYGE10*}u@8iZ z)*>OII7A_K-W7+3s_esI+!G2U1`8#;5SMVCwz_mob6yZ5mhuv3hZ^XQbbE_2rW%Vtfd)_ zXCkyT3$Xz|8zUs=u-CDlSDvH7@OZnw!;E0g>c9aU#}nc}>`XM*h)sxtFu9n6k#PGC zadxO!JRuHqHc!m%)8P*03)p=+!oiUOcAt)NaI}C8xnmq0D_}$JI0wfg*%5q#%FNw~ zUUrgzxjWgxDM+Ecj#It(G}Slfr#m=9_1pJ4&g76sjp}%><1CId(b=N@v~dn50XY{5 z0Xfgv`C|68ae;#i1uPmDVKF8zmbCA6T*4tQWDC0U%P?_%ITD;-;p|E=bAFYBs|C#Y zHCQ;mR+4>`xehxMU2i0JDeVRb4cv%?25xe8vslh-EU?#ci`TqWHLc~_9NaEocjXQT zcM8~DxeJSHyIYbp8OtiF_(`XGbk3qJrQJ(adT}4Z47ne%8S;SR2N7n-LkM&6VI(}s zJin%1al_KlN`s7GEZUi*0&LxEYD!#=vgF0_c>?Ji{&n* zz2N*saf{_k4qg_pSia)mRRN3TYgokcbxC3@E30^G;|-ldcImj+@g^l);#&xzdmFLP zz2o>@gwVZ*5V{3OVj3%})@S|uR4Lrh2b9pzhX@UQgjho#JN^Wrp-&OZVxM7n68PLJ zd?8@-^h+#y{gtHfb4)pZY_Tl*T4xmO^EZ@mDc>So%6ABz`yNSnj48LD^8JAdh5h|V z3I2XU@b@!ndhraB&CS1Z)g0;b2Jt8-v{)^boM5 zy_AEc1?+w;<6v1No3xixnYmlu%T^FDcPlzrNx|hlEbGNF4)ew8^SREsOuVW2J zCR$V6v*=owbaibcbblRZ>xx+xUC%*E!1}&E7UOIKNxavwAqTvWE#$5@#>Dj|NO0ZL zS&^8z?&V-p0du_>7Os0svI)8mb|xw|lAECWLTI1_2@RAwD-&CIf-d)(6{=}1S30N? zuscxgphm#%KtC+5t-mBGsqp(g`O>nv&RMhsy#-b2#Q=n1y(MCke=ElW5hnj2go(2? z5)$+_9CS?3+d^@-oeD#O9_(;?!JGuW1IKZK-VvL3zm3?$7=nqToskgTq0WYhJbi(6tMesl!K#@Y=S;UW#;Z! 
zFFQ`a+#T=W1Oan*qJxtJ%-zWjPC+a|pNbLoI!@z&SF(jXfu4a$&(B0c&(CspwwNW* za~zy2VBJ0siy?NtB%44lz|KS$8p+-3xClZ67bBs8OPpOQmYYC3?sZ(|H7{39YxxQX zR|;5!u5xg-fJNvUEUxWZNz$aRsIFu#UZ-=EK>u^E<9ce+i5n0`-HnLN&6^zGj4(HE zK^WJ!A|Zd?#zDxR@m|O6K)l_dvXDOSba|lkA&Dh z;Os%M+`W#6oIfmXk$l9#qXHJm#~eH^V3B+Ri%32xNsMHTe!u8bI)_x!aj)ZPO1Q*l z5F+<1Vv&2!@$(3gdjTPGFCvNQv$CX8ziW^xg&TUA5*m61p`lk1Yv?t{uOl?{24eZ* zO$?tuZ+V5c1#JGjgGI03l@v~&mHc^)lIT61QLxVoDB)7xN4S&^5IXlEl8`04Fw z5fuvi`R@pJbJxwm5(4IKNeA5#OP@V3!d}Nx9Plo- zkf+aOFlk^}B=meaXUmIO`dq=miUQW{m9Q9MD@(HJa~14Nw5pNZy^hr&G_X1n8d$^G znqoQWv(R40T3&T+Rkfzqaj>p{MSML6DFF-7`dD1t29l&%U&$Zo;g9F&9Ocjd-s{+i zs&r#xgmJeCVzaZS<06FF*$ZK0Z;FHjx)}%U33P$qwBA6x^-)*M-z+yQ7i&$q*u^Uh7qCgRD;E9UO;R|AR`JUps-h7(qj0ZBQpB~4 zLb#UQ5xTbrl8{7M{op&%o>WM1xEDn@+#A8+Xat92kc2E+Wk0MumI^q`&!YPPaaD`p zY8+w#sZ-sBX3_D~h_mPf>`YW|#0Fy;6R#7IFc>E}YY>aGXrr^qVm1z^IA{{EaX8gM zvw)4m76;P=EbZ;i9S1^RdgCkR+qPIPdRfOX|$EC&85lEevLqd#YRs?JgJT!g)j z)2K^7PDdDTXCO8&&vbki!n`~iVML#UBxisc{Uxt+siH|zV6WpmifHM4gqAKqtfdPb zUxd)o#fS~~OE5xm4tpJ!dgaSh7_#%_4zCc*Ssl2N<9I?`g`J76HewUv8cZ(cS|r@Q z>zrLL7Eg#9oZTp9_vt1FHw)N(y2Zh*0(PHnb8x$W4Y@lU+$mr~?k)#+BiRvrkIKy5 zyofVsQh!2?L4y^aUH_#xFd=MOu0MD^SEIv(YaM~#|zuj4U}GtuLs{`nb-chKFJue4^OrDj{xTArzvApwF?0T!gVzPj z`5Ra`e^Ziulz9s~6TNLDcPZ^12o1c8ga+Powm>XrHWt|Hc;9P&pqke5hYmgxu)FfH zgHHtPu6&BcwS6W@nvB)_j*FV;bDgtjOKD$Fm0o;_FhjmVY=(U8_#1>7@-4z#{0<3E zGT(F1K3U?ujvt`7`%#5qvi#)mXThAw@(aiDqs*_^y!CCwCd==bIQjz#(f!leUt+mS zX@5KaN8DohuY*pF0E=a3EW9lyV6p6iMJ&5Y5@T6e&RZLc>m0I6$Gwhjl;Cs;gwQRC zSm?St?tu`xr4T~5G?JLcs;l)`zYJ9hH?%AzG_)K-L(3!9&=0us_sIcVv48{C+%L?nP^iZ zHV!w##A|OPjKe<8ipAoj-Pc)(n2o_w2W0{_2Fo2(2-p~`bWkN=NxRxXjey;+eh&I0 z*`&R>%FNvsUN%6$+->P#D*&b{s{wa=|5GMargo)FPgaqBfLB|9=4T`&c zRTvU<&O`8bK9Z22{qKof zKn48eC+G`-xVi|z)y0TEb*t_|6ZECjh!gZ>*qP{ZBQ_4Nz{Km7NEnA#IlEdcPSDpl zyH?D`;B^kJ7qBsSgM%9dYz*Gy;AR2KsJA${Rlx4oZ4PcnvI+VQm6^Lcz3eUlb9c9c zdj!ngy$v)I*Uda~n1o{XjJ%1DlJ%7yE<6@RTpK$P`fOY#R zEQZ+Al57Hf20IfyYb1BC<2eWoJdcD1UU2rJSZ)ICxYzNL*L+zut>sr7yeeQ3dd

z0v4e+u(-B2B}tRMUv(LC@hzRB1p1$Q9dA>UPP~IK>fS|cZocPu0m9sTA7Na7fQ0<{ zAqOFU#(Nzf0rB>+%0l}5#NnrcIeQ(SaU7@5&#`&;+lUR@FEMfS6%u0mwX<);a`!sE zb^e{WMe=(GKL}VPe{}GZfJO3WEF$@fBr%dTWz}WTuR4cR(s8fjH%hp~-w`7B2V#-? z)A3&jk^37Va{nMn>9b!6zvP-fCr_2a4RxAq4RuCnXfebZ>f*R7LPLuqmM^+t`21PI zD=aBs^QSu&z3w3?m_GZJlD-G!#lHK`G&&$X~K z(b`6A9Ik_j*L9IF4%c&*5{uL4`pz~GvoW}#gN+1i3~uaT69F58JslJYSibA!U{e9R zUz<7TjbzhjAC;NAVlV3}VD3sBlnR);G6&@X=B~m)C1UBb3M1@wRCB<)*g~E@`(e`a z{z&Ng=FYYdv-CN@!IlEn?X9pFVgn`F^f?GS6K!oIcdug`2n}qDga)>AHdrhteHPm5 z*xsw|psLpNjt+Jbun-M#u(N=LXebsJH%yW+>-+IDR{G;PI!F2QzxO(ZQ%r6p`ZL{oGQ z*`&Z;M-xR{NVz)0s>>?yFg_f3;>F-ohC84D@ifCy%LQ69cYiXwA zSqLr7Ml5OUhvD~Qj#t=Uz$Q@!i+&#S)9Qa*XOO zG>aZfjW~-QhnRjbO9TOXE-=hz{cTO z4$c;^w0Dk!a|P^A(913oFn1R_xJ1C*UFzU60dse`gDa4%yDL>@ z?ymB(s|C#6H4d&7Fn8BExL&~A-QeIx#FFw&7&&_#H*=VYZV}Ae>$nv{L~lbvL~nO? zhgfd%EU?#cr&qm8RV~nWJGe){x^l0B`vj~j_hT{eACM$Yc)l!^L=Wm5CC^3J>v)K| z^y6WK@%9K}^YT&0k0H#<#}P*K6G(Cf;CC07L{Cyhlcd02$5Ry1($fenJ%dU_wgIrv`nXJ z7YAJhEX<2zG1$9F+BZ>_;4l*{Dd^6-W8$a>5}YsPY-urbzKnxq1rZwtSMmkVl4-23)sC_2a9W4 zSCTZ#%c}bEZq|A_XVKQ^QdFfE>my9P4G^1p8#>+yVd`y+Fv&MT!Xr{o4%%m7yopi- z#a%BIhFQ3&!_5S9W?^rR<7cEk*u3;@#Aab%OdORUA-bi`%EWTl=*pc}h+8Zx9aITe zEUO*V2v{upVG+yzlEhfr?(pV1hqTpk6J-lZxWoYnq1zI%&~4>-AVTN{A%t#gBr!jh z_3OuT?KV^?+|ag^(9m`W4Gl)Dq3s>-fY8v6h^5D!Fnq2K@d`T&*jycoMX!fR3O^u~ zm+P-O?V>XZ_IWrZT*|Hpm$Dl|=SCn2&qw9uyq^?}q(WhTqbR}O?g;+&K=8LGlJIy` zZkvRAQ2~GXk4Jk0aWxvj)fmLWF;;aK`gpVtHR4Z$TI@_T&WMe}I!wHdN5VLq;H+LO z{xnECnuaFT-t0ULvj4kioO7@XpuNx*XJR0quhcE4I2OhdAt2K%ba+(lm2Dq!x~ z984E5cQYK!6fk$Q9Lz>6Rquz9zlkyj!t2=Lo<%d5bpHS(bpJqSbHyx+9^~L)0qgrA zSd6nnCGjT8VH{+lc|z{`a7$zlsn(5HCKQ&rPiKFz`D0(J+^aB!x8-GQ^PxVEz;NlB%g zHSm(?9G$ah3Hn^B(u?yDhV}V~P5uiUUx+aIFG83&7b77-U&2Ah1brzKcbBO!B zULly1ps(aOPS96j^Gb*jn;6$%;^#8J$tE z&(BiAr96jlDbFKx?gbjp9Rd_FAjc1EJ6Q<5jIhN=O7dPA>{e(PfU9L7ZQ5@x3ho5EP?*(pi>jTy4@Lz zA-0$#n?So@XQHk~ayL;HhtNPbBs8#uvn9oH6X=45aYH%}={&EYu3@~l+}&&TP)%!j zDF;gnScH~wu&jVZXgMsdZFx!3q_5~#%}cr~=o}@`|J+1bk(zX3C4^D8GGcRc700V0 z%+1vh#`Wq*$e(L)5b|fdiLxdTZ)>S6q|db-t|OSUiLx%oar#^jI}@dh*sxt66Gs~$ zA+{Sj+ej>T6J=xPn}}N^dpal*ut@fDu&IDWax*L<*;|qn$;uLbQ)ARe=a5P|ZlV-Z z!X@@ah+GL`kt=mvh7h@OgveDOiRqKSvdm_8B~=PHR7D95RU{9SzbM1^c`;C0xoj2$!-gLg%(a64Ga7bveI= zWiS;A``exp{Oy3?Z$|`wJ0S__lUFSG74}1@fWQ3oxib(~LlImJLo6J-sP013=WuGo z>2p`?OthO38;2t>@j4O-<8YL--NoYcxreho#cT}jb)#2VD2V5m?U8C8XPnVn7hdirXZF+n=ryA%2Wu_(+7=c@)CPJ{k!L^cW7>6X*iJX~zQbcAUyW0zKa034%GBC?|3pC(x6y zGttRLY~Y@PiKA1I5Z%+9oi3KQiE@S)oT&nfseZMKznNS5PIPr7J0-rK=EH zx*D;Tu5o-VLQB^nmNc%%@cVItSGZBYCeck;^!sK>;T&2;J}-%G(HVt%eJe#=%WVkP zayvrz?m!ZfXjP>iukWNng2THg!r|Qr4(~y5crTKWMXUHZo|5Q3D&R0bi{1~!)dL8w z9z-l452@}#v*^Rrh_mP;*qP{2BQ_Wx!^G?3NEnPyID1ko&Z18_ds@uK;WG}N6|iym zoP*~DY#hGe;6(vTdoMY7S-|etD-K>ovRU*sm6^NOz3dGEbN8l$w*<`H+Ya6lFn8}d zcn`_CTc9#?_r8~XAYkr3bnuaYx%=3`Cj#c~QwN_RmXtro$k{~sg2PPorC{DB%2yB~ z`ZW?F`i--1#d4EpflZX}yz2L=YJvX2!H)vgm7g5^EMQ&v1&e|It0ZZ{SM&2CCDCs> zN6B*$Hc@`3F8%leVZ8l`*u4D9@!tsZ@*ji|{V$T30jl{$6D3insn&F%O_a_Q(b8fF zEpQ7+jU5%qM@C-4NWbnFJx^+Sp(C?LkV^1M=8QEDMJjGJFY+&CY1;SvI+?w z=hYmvkC=GDzXpoCeku$jroY3@1;dCL8*RaH{CPeAI}>eb#8T*1m^d1Ug!m3}wzU|a z=iM4kn(sGq8!y;a1s2Wi91Iq)Xm0Od2LX%bj#xxgWI!B*u-7J{2d)Sc9 zhgp<%rihCiiV(bEh~Zrv4@U^zt_Z=~4N0EOm1TS-8bOtWmPS%UOQR54+8wc$_Heu> zLQ8ugHkE*D~u7anKc%Re(xhG{CUn_g5~qPR%aCM^*D;SmO6xM8IRDt2}r`{ zdA0q4-g+t|I80N7!-)tECm}d&KoUOBtNH#|5;al*hxwo9lYzLJg5atNv4Bif-GzRh zH&Y}2Ja56~&-NIx!MHCbULzz7##U!-V)5tsbZ0ZfY#h#XFiXJ3;cN%{3D`KC<6wUQ z`^3yRI6%Pe*ntk_BH0h%gH&ej4)(G`1kBx`4h|DAck>(^E@1BFJ2(Qdq&_*$qr5tu#TMS;4}g2$mtHw5XcN!a`JEZJi@zlrWc*1BJ0xG4$cvn zH)Kic*0~%MjasUyuC=YUsj)d-7e(g@H7q`%ttFb!P~6;HJhi2MdQ-Z!*votDY}I;1 
z=TmP~bKg;|>8Q2ul&G<-aY}2;)Vk)r!)H%usEei!k6I?DC$zQpozysWTAyi6(DIp1a^L3WzS)+ktSc*?Haoh2 zMn|67y;G+NTyks5ucc_HU16{OJyQG^ZLq-xgQiVuYD~AL>Qa7csx7K(Zk^Z?O|5Hd zY-vulwWQh_(y7Mg2`y6_n)Pu2r}|Uc(0|(-Z5aH?{T0)yQZ%p1fNl8tSH{+xyigwWF>-1fpnI@6@KfQ#;hoX>88) z8kd^ckdD%Elr|lgnwlo4v^+c2JbmhTuBl~WPOE-7EzNaJsrtsL>E>1fuiEvo2lq>` z$F!wqwb90y5T?wy;HXb(7YkB>t0H>smsLkN#uI$qR}JU(MuV$Vs*dYd1?~|4sQ0P5 z*6dKwjmW<``45-JLGJYxX9q@GThvIijD*n4%yi?V2BK(JqMK7UzA2q@+NzuzPoVUforzv146qSb3$acU$^XVf)K*Qk%$2cP^kHZv~jni#0{sfkg`)VP5$ z8t)V8n&ynk6!jU?x^L7rXG(?!vQ{s~o89t47dJ(LnaJOEo>Q8cQ{V>X+(94CssD+G zXhJaJ>nj)l`kHq=>L634e;s$C|2TOamuhSc1Kp=gdtf>Qql2?-^#3zB1!ALX$)3Il z@v)Y-n`OzOwSRvfP;#;5mRqK#5?^v}Ho=vZ^iGwR^a|Ii8lGzgkRX(&$#3<|MZBrm zJ5}~S6&XMp+XGTli|m|yH`AiV=C&fQQ>YbN4 zds|u?p!AhZ^a@q$^l9m+$fMMoeAzUyS8?#z-VYTN=eXuVbB;n-ty=DdQZM%~zoO4* zstm8=DKFi|g?PM1lI&U{#WbfcIql^HxN)L&t37#Gr*yS8KNiV%j=MA;69s(nYxf#k zxxU(I;Y#D%G_0jL{XcQyV=dj*UbVHTSNq-SP-Y=-Z<NcuHNPK2>X{H#h1d*KcjH zhpTt0Zh+EGv64(tWzMw|3X8vWoEfaoHeFr2bH!$2T{8owcS>W;8mlFNO_<(P*OspJ zvRY<>nZ(})Rkr2!Udqq-mg#&F4M>%y`li~i$35iNv{v%FqD$SI0_mvWU8Bl#M;amq}fZ#b&^H@dxL_TUOl1>s2w1S5bnq0SyTBl>W&Hbnp@~s)skG zkD};hz}V;&{w*5PW$0#|`CoceS3P$|uUgS-D(cuyAZh!S^SWw{>Y8qzP!hd?-LL~U z<;?&~qqndpPwisg<=)25MDM87zsJ3c$<@Axgps?z+52KP96xaIp@4mN`v{A{_OV27 zbA3*epZG!P2j2+T{r%R# zcZhZSdkpV(S@Z*T+`}KSGto~f^&b9=Nn5`lp@+XZ`%TPx_`8EY1gwXDV$s9DB=m6d zSKg_={kU{;-D1|sfBcwE>R6rp7vi1l)NBoRM!b`YIp`u_o$TshaRKXOHwQ}~*2yI? zyp!cockH;6J+L#;QY!UME{#d!%OIhX%Q{<5%sRQegB1j}(S;>ts&{MFQ5zURZQ;Q;9lRT4bHt%#TYaKQSWP zsopxp@kXo-i zPK99+yw2fx!FlTBJR2($IOaB%7F?KOU5PIs6VlB*Z1&hOdoYZHvicrTJ!g(;`~PH_ zD)&0HSe2&fkuA%_i&gnoR#cmNTwv`gF6JSaWh=hD`_fg*3>I4IQWe`~zW^51rFhtG z>@&WujRh#y6UwqBt@d0tv$3rqyE0@;F)YnBH>Y!!WO``@q(0p`fprYl2O3!%VR80QaoFx25bFgMBD86qQ}*yvg;+q#rCwF z`)yatZN8E)E7d1uU%R8JbGFNjo;5ZzC%YIC)7|SLaaQKWq^GquHnlWoGO0~dVzh(rx@oq> zKz*|Hy_WG>(o~Rgqh1_;>G$gxm%EH)R|KIoTd1?Opzt*;H3N4ok>4t<1@X|68n?~3 zRFSW>$>gh$?udWgSA||FzA9zcJL)D*WNkEW#UmT(t!HyrZPiapAUN@5!|AOo5H)7! 
zNQ$_$@y)PtLTieZP@eJP6(Vm<%c4}a7>sXWAn_H1vBnM8LRng?DjrN$vsfVZz_%~P zPN8Z>nD}wdilmiHwai` zTh5h-+dZQ$YE(kd`c)&dB3=+9j?F0|z%m$jROzC9US+SLy;HuV*0I&t*_8COHdbLZ zz2Zp6cV%h|>z!@Wr;gJlmK2w^4}QMXrWySD!lnO4>c(VmW&25W(WL2epj%(xIB_DY zu9{d{x#Rn=cYq<-GL1m!DLJ^aMR6?va(6Ulv<1wmEWEM^OVe8V=8EE$ic>@RCY~mk zjm(6m*|t^|oK2s`?N1YS7B3a9I1)Ky#@2(gw@TfKm(CjM+UCs4)cUeub9$y$QHx47 zyKSX6Za>otz|wGQYK)(u860X%C4iZ@!mQf1alON(K&kYkL9v}7XqdRv(%2Reu z1%F&(s%1PI88a9fwpN&U$NF20`-iOPA2Zo0pDo*23-l`Rc~8Qg&CH(K*i;v>S}?sW zwA)a}YCW4K{4;%g8*z^B_5@~ADyb<(MV;+$)CWi0SIbXvcIPH%O*&+=xT$eUI8X(jD;I|g%^CjMHra-4lisO4dbRIk zkiz)*|MQf9ZP;#_W@ zHQC*bhB{B<3XcA3{{(c$JZ5B9P8pC|XNz|!bm_K7GqaA{NETGyQxfv0oV?7Pza5aX z)0N#cYcDVGdGvqoS_qB$lkKMY^-t+INrrg0e-tSx=Z7rb)ywml8^8J%a--Hktp$<< zpJh4eS03w{2$AM0H7z)K&T=X;LoXXy*ipus?W?eHK`=3o3^T+dYZTPIo|0eK%djfBN=4fbZBNg@STgR{R zn^D{nP3l`WzO^rJ=v5b&)RdH!S-aZaF~hcKCN!jZkh4D3@{pXcvdvSeRVh}LUa9p5 zq$=85)pX(Ekqp1PHYvA@B6di6hNRm5b!HO7eNHC(9i~^$5LG$o@L#-=ozpHm3eiNq zc-boSB#cbdz`xnGghouhw@yaFH~lHjn#4}jJLM;3-^ZBh1-Z-i%VeIhs z(;S^q_)Gcz6yY<2@TL3!g!T_a3chh9hZQ)SON9i72T_E>gAp7ag5dB_q~IGzI4sqV zWFJNa9Ol1qG!KZY!x3D~M=T&msP04CZ~4)0x6J>r;roV%=QljE{Z@CJq5sabZ~jM8 zgKz$24Ue;ca})=e=x8Bd8#)FPx5pykejexSc(E6_joUl_xnkS-@`PDGp8* zuv>YWgVP1Fn#D} z!pwPsB3gP9p{1t~Yw2mn&mgq)EMmjvIgF0$W6yi#7gQM5$6j>!l3>pI*vlMieQdIp zyO=1iAerb@Q6D6)VRAvQBjFal;p|PZyu|mG7rd3*WQZVtAmDTX$yV2)5qj2K;f+Bo=i4g6t z5N_btNa4g+R#~P$@c9iD5*&U@5e~mYaQHof!yk~siLb2E{u;=SRKQ_=;`<4RtDg~E z{esvn|5bGtn)rUBMo4_WbHIn@h?d%`1IPv}KtWyijZe?dIPFzgD zZeS@U;$Vci}ALe zBu;!O4l>dDLV1aA0|+f_h=dk4a<;M9eV(F;NaXCUdsz4a#l}KXZ;{%?itEiGNbE+w# zr5c2m`XSa*f5)36w6q0cvtR&5$Hcd#SKdm6A@L1#I7l!j@omj_@2{8!?iE#8<}8ctv~YjKYa;Pm1ul7eci6M!12ak%YuoR@M)PW2lhe za4bbQ+y}v7ErP>wNJ8Q(E72chsiOi8^Aq2AAg(4LxT;6&mZw#Bp^0xIHA3Q>!~sw6 zLViCRF>yN?3HNh~vnH`P@lAEsEM~W|#lbWIyOsMohy?6bwmN7Nuv<3W!3+VrWiuVj zLb8c(w#v-ieqJ_5z})TcAR}Pz4sdXwfVrFN;2^{j-@zCpzG-#!(IGMmy@gF-J&WZ% zc1ureVnK=}-Qjih+UPozGe))if3ne)y$0=Z9Y(7oiRE-pKF zXM<%#UYcVe!*&9+bQL+-JzU?53)hVFDl%0QirZSIB^R<>)YdXJGcLO@vxw_J_OdujpV^njL-!L`glgxo z_QC6FoFOBA&)jc{f8)>gjTgpMgUy8OtK;!@K?scc6E+b7jXP4nWwudV`UQ79*<#%h zX9Mdllh&-VFY)HhfwKCdvizkVAJ*|&5ldRUWmVLVU9q;=)6xTc+)iw2scY+1%=-#ibnHaNik>e|FYt-?b|5r%PFJFDXZZBl?mlllvVTpn&Pqi zm)~fI*NSu6t;wF(e!lyuN@%B&`nrzx>xqvGUw0XG)vxyc#m9x~|6l$8|9Jf!;=Ext zImVkrZE>D!wWaZP+}g;3ZWskQE29595VPZ`jQHieh-)YO@2lcD_f+E`|9%#Zb;tWy zxL(KmSGeAy`YUjK7E6mr{=#PdyXV=2k@IS;zh3@7Ve{8ulixGXx*@03?mL~2a(#9b z#lCw)^Z3NH#mU1lGSPhgJ96tuBjqAQ#JKA z)ma#s=xi11hu*?(kUIy$rJt*M@edWB=j?p3|M=2$fmgjyRjpGOIk;HBI(3PIO9iY` zmtpZFeYqszOH+AGbp^k}<_evoFHPCsr^_2uorfm;%PGwq+hf$FACDu%{0YQj{-ool5Mur`Ld>5*Ld>7# zpgraTyZYdM4vM?yRTyIag2NXDb7KAy$L%pMi(Xc_V*UySYiTOZi}`C1oWHJmam?Rv z_NLf>#QZI<`nIZC%-?bFu7Jh-JqHT}EavZH5%Uiui7_wLulxT{=P2gcU%;wyKIDE6 zxoaHpkEls6K1PW6Cx}J-Q^%hnMErAvh<|~Eh=0jJhlfRTxQRB>Lwe}drrXVr@X{)@9;#r`AUzj@W)Rn-Fihl4)_EZ~1R z_*=jN{tp%b|5uV2@N$0neAH&&?D$LDThl7=EQp$j@#p15-qE8#d|r7Otid;^Wwb% z1m`QNUL5b0oUJUD6Yn8&JG>RUiq~9KH7(q$Iapo5!o7xrH3cl(Yhe-YwIzw+X05d> zT1V$7+{=5ohs+(4%0}BXj`q6LqYLXHL_398wAXjM0YbDlM2PlANQm~v9JEI}<YxmpL6 zvLA@Dw=mkj(X_iQ?cg@|ihHLP&97arBEjn`IX@OQE2qb^yrG2yG2ky*T4+?`#LL zg=d@{z2;7;X%{iX!Oj9U5r;Y$CSVhB7c4GwxFjLtlvY-h^V_(0)j9e^otu4%88SEN zHT>PEN-strB%P6nC7n@@cSlG%dmtp8J&}-f_TnHU9leQQxnXZ8?nbLHWSub%#|q{w zhwsC29Pd(o-jhBzAQS7gUL$a+`f58COTZj{_Su+CaoWVg!^`+v!ldpP9E*x z7y-LI$6^uu<0O;ATs)q`Omu=^-q<)1LK`O`p^cNBog%jIv2m)`JWVyN@zWigAz%SJ z)4^E+7O=CixV&>D31g$Yx=b7I=jxnAn~UdBm0p~WFgPwiY;auY_#%YCaWTT+xC9A< z<5CX7;0SZ^GAQmYS78_(S2(;yQxV>z&;o zw(vOL=rwOrO^fr*4sH>!IN$2vHUW$C?O4S54oPC1%PYxwcj}x)nuB*ylTO@?5axRj z3-i5>??VXl{Rm-x0107!kb@BBcn&@U#M{Fv3sHW=;iG~f%9?|ZiC9-3_bN{aSXZBP 
z@RWef!KWQOBVcpzSqIM{HV2=_$j-s?=mieqIrt)WCVEN5J_ldMr1e*jaNk~a_L`W@ z!PgzUAz-)XO)P@{mZW_SzRe*&b1IlOHr|EM#(PL;V}Y~x#TGs`KJc0!s-`vmk%Nx~ zEMT8F_*B3G_8AtJ_qiltY?Sc3cKK!eU+A1gn}c6cm0o;>FgU(OY;b(z_*;a*@g2h8 z_#O#^;|C7H;0SZ@M=0)oQehY!KRf(IFpLgqF#b|V`EbYZES{wHhMejBew9dQS3GQs-`tw z;-FN(0#@drT)+ZWfyL!jN)pFLRSB;|Rq32Xn}gL>^-s9*#wvcam8&8Yz|IzFbT0a*nq(oiPott{M=vE$Wi&P-);Ss)@sDTgYvE$?PiL=~`#t3ls?v)?5yr-0 zh>eYTjt@r|8}kvy#t{f(qoKK>rKqT3TEo7hmMg6&;SFnk?PPfce>Au(I+7~1yx81M z_Wfwc-0%xSj-rHyjz(zc7{nSn*70!&4IPiz3ppoXgptR!`v5u7E1#soF!WA#c#2>c zdi?czj&t9^I*sE@bh@aIkTWp3pfizh|ITuDw%Cab`1qy!bDWge-MYZR zg#vcBF2dq&T`cieur4t(yB%|>D%z;O42$cyT);;C6{8c`6)cu^t9cN-N=U=JS<>0@(~A*3fPT&%)#RVcEg@<@T7p-!mYenlc36M^%xiwG zn%49e4!#tyfPLlQYXJ+`H&|TUw~~akS5{G8URD!*r*o9{Qk}D_GXq(I_uom8zdG|h zHR;3;2&3>v#FEiZj(4oKoDvTZ1cP>w%a{#KHK%Q zK-BHAkRi`!5t0?J+pI_Iccq+p-eU7!@hH_2gQWsds$~Yt1)@|d7*VQ~5_{gN7&bg_ zwF0B(*D&IFtQCl!U#B?k+9S#LJZ@&l2fX!qMBbU+zyujLBAn?>2&LPM;!*BXkaiHDbZ);wA(q722_)4ngjBncDE1!3{@3yL z5|KGxfsK0FC*)G@XGq!uNLI>&W{1R@j(6CM_2wI8JYvu<5M?YGaJjHi#sPytfhgB8 zgX02Gt`i0)k%r@)(p;oF?YT1o(G_P6&Iv@ivcY+QNO!^DA`*^wi2mD$5}6o>#sJXa!32yuquHZ!$X87d4z;fJT`uUU=&Xg z+VB}NmmwAV2leZQ=Y*&cW8JoSmDaLtLrk!r7YO?qMq)oNjcW+|89|~)M;WvXsqE^F zId)ug*_*IehOY%PL((hZ8<9AT6ApPR5U25-!K6U+#e0J(f#{151|JdnqDHz;nu~PF zbJGHm?z6#+K&1O(@KqqveKYuuRC~1O%INs;#MpHD0T@a@`8zXG?QQ-aH%m88rC-2V DjUH$` literal 0 HcmV?d00001 diff --git a/gimp-plugins/DeblurGANv2/predict.py b/gimp-plugins/DeblurGANv2/predict.py new file mode 100644 index 0000000..59b3bfd --- /dev/null +++ b/gimp-plugins/DeblurGANv2/predict.py @@ -0,0 +1,108 @@ +import os +from glob import glob +# from typing import Optional + +import cv2 +import numpy as np +import torch +import yaml +from fire import Fire +from tqdm import tqdm + +from aug import get_normalize +from models.networks import get_generator + + +class Predictor: + def __init__(self, weights_path, model_name=''): + with open('config/config.yaml') as cfg: + config = yaml.load(cfg) + model = get_generator(model_name or config['model']) + model.load_state_dict(torch.load(weights_path, map_location=lambda storage, loc: storage)['model']) + if torch.cuda.is_available(): + self.model = model.cuda() + else: + self.model = model + self.model.train(True) + # GAN inference should be in train mode to use actual stats in norm layers, + # it's not a bug + self.normalize_fn = get_normalize() + + @staticmethod + def _array_to_batch(x): + x = np.transpose(x, (2, 0, 1)) + x = np.expand_dims(x, 0) + return torch.from_numpy(x) + + def _preprocess(self, x, mask): + x, _ = self.normalize_fn(x, x) + if mask is None: + mask = np.ones_like(x, dtype=np.float32) + else: + mask = np.round(mask.astype('float32') / 255) + + h, w, _ = x.shape + block_size = 32 + min_height = (h // block_size + 1) * block_size + min_width = (w // block_size + 1) * block_size + + pad_params = {'mode': 'constant', + 'constant_values': 0, + 'pad_width': ((0, min_height - h), (0, min_width - w), (0, 0)) + } + x = np.pad(x, **pad_params) + mask = np.pad(mask, **pad_params) + + return map(self._array_to_batch, (x, mask)), h, w + + @staticmethod + def _postprocess(x): + x, = x + x = x.detach().cpu().float().numpy() + x = (np.transpose(x, (1, 2, 0)) + 1) / 2.0 * 255.0 + return x.astype('uint8') + + def __call__(self, img, mask, ignore_mask=True): + (img, mask), h, w = self._preprocess(img, mask) + with torch.no_grad(): + if torch.cuda.is_available(): + inputs = [img.cuda()] + else: + inputs = [img] + if not ignore_mask: + inputs += [mask] + pred = self.model(*inputs) + return self._postprocess(pred)[:h, :w, 
:] + +def sorted_glob(pattern): + return sorted(glob(pattern)) + +def main(img_pattern, + mask_pattern = None, + weights_path='best_fpn.h5', + out_dir='submit/', + side_by_side = False): + + + imgs = sorted_glob(img_pattern) + masks = sorted_glob(mask_pattern) if mask_pattern is not None else [None for _ in imgs] + pairs = zip(imgs, masks) + names = sorted([os.path.basename(x) for x in glob(img_pattern)]) + predictor = Predictor(weights_path=weights_path) + + # os.makedirs(out_dir) + for name, pair in tqdm(zip(names, pairs), total=len(names)): + f_img, f_mask = pair + img, mask = map(cv2.imread, (f_img, f_mask)) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + + pred = predictor(img, mask) + if side_by_side: + pred = np.hstack((img, pred)) + pred = cv2.cvtColor(pred, cv2.COLOR_RGB2BGR) + cv2.imwrite(os.path.join(out_dir, name), + pred) + + +if __name__ == '__main__': + Fire(main) diff --git a/gimp-plugins/DeblurGANv2/predictorClass.py b/gimp-plugins/DeblurGANv2/predictorClass.py new file mode 100644 index 0000000..93b628a --- /dev/null +++ b/gimp-plugins/DeblurGANv2/predictorClass.py @@ -0,0 +1,67 @@ +from models.networks import get_generator_new +from aug import get_normalize +import torch +import numpy as np +config={'project': 'deblur_gan', 'warmup_num': 3, 'optimizer': {'lr': 0.0001, 'name': 'adam'}, 'val': {'preload': False, 'bounds': [0.9, 1], 'crop': 'center', 'files_b': '/datasets/my_dataset/**/*.jpg', 'files_a': '/datasets/my_dataset/**/*.jpg', 'scope': 'geometric', 'corrupt': [{'num_holes': 3, 'max_w_size': 25, 'max_h_size': 25, 'name': 'cutout', 'prob': 0.5}, {'quality_lower': 70, 'name': 'jpeg', 'quality_upper': 90}, {'name': 'motion_blur'}, {'name': 'median_blur'}, {'name': 'gamma'}, {'name': 'rgb_shift'}, {'name': 'hsv_shift'}, {'name': 'sharpen'}], 'preload_size': 0, 'size': 256}, 'val_batches_per_epoch': 100, 'num_epochs': 200, 'batch_size': 1, 'experiment_desc': 'fpn', 'train_batches_per_epoch': 1000, 'train': {'preload': False, 'bounds': [0, 0.9], 'crop': 'random', 'files_b': '/datasets/my_dataset/**/*.jpg', 'files_a': '/datasets/my_dataset/**/*.jpg', 'preload_size': 0, 'corrupt': [{'num_holes': 3, 'max_w_size': 25, 'max_h_size': 25, 'name': 'cutout', 'prob': 0.5}, {'quality_lower': 70, 'name': 'jpeg', 'quality_upper': 90}, {'name': 'motion_blur'}, {'name': 'median_blur'}, {'name': 'gamma'}, {'name': 'rgb_shift'}, {'name': 'hsv_shift'}, {'name': 'sharpen'}], 'scope': 'geometric', 'size': 256}, 'scheduler': {'min_lr': 1e-07, 'name': 'linear', 'start_epoch': 50}, 'image_size': [256, 256], 'phase': 'train', 'model': {'d_name': 'double_gan', 'disc_loss': 'wgan-gp', 'blocks': 9, 'content_loss': 'perceptual', 'adv_lambda': 0.001, 'dropout': True, 'g_name': 'fpn_inception', 'd_layers': 3, 'learn_residual': True, 'norm_layer': 'instance'}} + + +class Predictor: + def __init__(self, weights_path, model_name=''): + # model = get_generator(model_name or config['model']) + model = get_generator_new(weights_path[0:-11]) + model.load_state_dict(torch.load(weights_path, map_location=lambda storage, loc: storage)['model']) + if torch.cuda.is_available(): + self.model = model.cuda() + else: + self.model = model + self.model.train(True) + # GAN inference should be in train mode to use actual stats in norm layers, + # it's not a bug + self.normalize_fn = get_normalize() + + @staticmethod + def _array_to_batch(x): + x = np.transpose(x, (2, 0, 1)) + x = np.expand_dims(x, 0) + return torch.from_numpy(x) + + def _preprocess(self, x, mask): + x, _ = self.normalize_fn(x, x) + if mask is None: + 
mask = np.ones_like(x, dtype=np.float32) + else: + mask = np.round(mask.astype('float32') / 255) + + h, w, _ = x.shape + block_size = 32 + min_height = (h // block_size + 1) * block_size + min_width = (w // block_size + 1) * block_size + + pad_params = {'mode': 'constant', + 'constant_values': 0, + 'pad_width': ((0, min_height - h), (0, min_width - w), (0, 0)) + } + x = np.pad(x, **pad_params) + mask = np.pad(mask, **pad_params) + + return map(self._array_to_batch, (x, mask)), h, w + + @staticmethod + def _postprocess(x): + x, = x + x = x.detach().cpu().float().numpy() + x = (np.transpose(x, (1, 2, 0)) + 1) / 2.0 * 255.0 + return x.astype('uint8') + + def __call__(self, img, mask, ignore_mask=True): + (img, mask), h, w = self._preprocess(img, mask) + with torch.no_grad(): + if torch.cuda.is_available(): + inputs = [img.cuda()] + else: + inputs = [img] + if not ignore_mask: + inputs += [mask] + pred = self.model(*inputs) + return self._postprocess(pred)[:h, :w, :] + diff --git a/gimp-plugins/DeblurGANv2/predictorClass.pyc b/gimp-plugins/DeblurGANv2/predictorClass.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96bdb7278fcdc696fe38b0dd3849f47062aa90a1 GIT binary patch literal 4832 zcmc&%OLJUD6+U-HGkW+TN3r5ZoDdU|3?}vv2NV;GWyitJ%Zhwu$3f+yY2Char=|PK z?H_V|;4GUKJ1N;CMuwlbC#fmD}0L6|f^PSTp*%+3n82R?&bl-E&<2zrs z+kZbabMe(*{xG5O?;!fic-${qLEf>tMFFUB2O z?UFrA29gdfm@b(~TCgoLQ)H$gm?3ij-9ZSaDV-#92s7-Tr3{;nVDc21Ido5xIU2fo zbkC4EhF3bo8jnM8g3QU#J&W#C(E1#i=dr6z<||}gfc$ijf0fLOkVj;`CITM4v^RQr zZ}iIEsJA!zx{NZIe+CM_LFQF-XUUvH*T?fsGOwXKPv&)Wvt+)7?%QM*(7i$C!oN9+ z*AMSG`knvgX!yYY-_a28T}R&xQT;BNi*W0E5)yNX%pzu&@gN>&#jh)vyh-LQ>_?`r zl6f0~?~}QP?mC%w(7j9MJ#_Dr`G6Ku#PtSdOJqJohvY5?;Z4k9GArnA(ZcF9ncFk~ z{yw75h#nCAU={k?w9>l+wAzRG^&*V?Q)sf(t1KLT^ikW{s|1=KfVeQ924ll|_u{QJuyum{CmG;7tUG){&|T6*kfeMwW*?l}zlMtkj2 z@A}1-8jt&D&h*cmTdy-wJ+OJ|)Y@ZXEe2$11ydTE_zv{Aq^g5qa6t_#ykk#5F3uJD zzS>j{*6vM*VagUJBVUbth7o>WE{c7K&zil4x#xI(GD0N%mddNmvHk0Hn%UJS*GF}Y z^#c$WmA6%?IObuV8mq^1HaF8n5!`69HRXnO;Kiw-yEmS3mfTP`HG;74Q`1Q~K_J>a|$!dZ3 zfH2NNZp&0R_B`w32s*DDAFPtdI2dn6IpWI}+d^k)FptfA-H`{q^@Z)|MCL zW7^zPxh~d#TmFIpsDUJb41%U?$!dTD`9LP7_$l6qx3<#8`NNpxaJy6u@M`#rylzW) zkC){mT*zU+M6sw$nu`<54M3Hk_!GP$KQ7p}@VM(}QsUzb5Wwm78IHCVr8qKL!Yr8L z=xXzLwIlM0U3pi|tB$-fMe$0HgX8f~!eHRy<%nO!<9>_=OWXJi{tjXc)U%dx>QcHB#hCncWHf+5E*%MdUqeF zkwA@X-;$Jt{RaqAQda01=}I&QKjEnG!<4jw$FUDkyCQ_S)WeK=yg`!a5Kv&tv$IeU zR(XrNfu?`kA#eSS#(KNqszR6j<*fu|S}X@fb^Xj1_1QWfWyoUxZK2)kSC;Ob@1tNt zzOLrlxpVcFiAV{TQzgijT#2<3#t52faLHL&UkNMuFU zDSc1dT%#65UwnpDvE#RzQ5p+7^TxeXLJi6uF9#Ay^MUBuOl>yw;0*1D0!Bff06Cb6 zH%LOXPtj2*BI}jnGId-n)OtF4F`A9$TGP>ycEXPEvj@r7FaaciKV%rNd(fhtAZdxD zwJQNeCs+bZt@QZyCEVW-IXon%SC0|!q>MMpxmX4w0jHI&u0TGVFfb(O4k;>J%q1ux z8#GnH=W4wb4rn&_{=UMB$y({A(Ob$_ftR0w$Z@4mYc86Lq?DO1_u@7Ov$)ixno+cf z+g*e%)HCj^L>p7oEIwA%p~4a2DB5XJc}kW#w2jKf1qX4K%AZGvWoSeR-)+;*1nqQ~ zB#@h(E^Q+RT{;O`IEMb{&LnL^#p9AG;+rls;)cC@y`G|NY@DFu1G#cV&~-RQ0Acekk-4pa)gs>i>YX za!oE(Wg6e)8J7VjPUUtZjX!1)XRzTrmK-wLiJ2_Ko_d)cE}H@a~9if}GA1wO?!ElHg_ zg{d?Y9ghx1&$dnht~C=)Mz1jDTmQ)w%&*A)U0`Du{5XO@9ozy5XxrQ}fyiNn z&43{gg5&NO06Y)e9v#06gmwd&OhN6EVB!&bWRUUiau|GBU6#a+Z0RoqqT$#-+TUZK zNk&cTHF&;_c~Xz$Edij6gjNI^F*_6g2#v6eBz9j@z?GrEe14n*CSJ#`6PWpV{3Q%s z7BKBx#pAv}!_^izMoJBgFs2!hjZg!C7h?ajTev1*B1DZWG#?6!NJcWGEL;;J3S-W1VEUJhRqKKUG3oTNLTv6~;W3D}23f}yZyf_@?dhnf!q2r)qYpV+-3a5mzc zo9k$V1~n`jWKSB8#CiTx)HHBR5qA$}Jx7HU@+w+LlPK zlVaqX!oDVt4OA-8d9Kt2jNKd>icUv|#z$*6IugxCJ?0xBHYI1iQc~nff!c5tS1MlS z4GaaooY(~aAsbdkNG61f!x6vF&3!bFe3H@s{>~6z##h)m$4u3Y4j!8APS4Ly%5_R; zSS~Q`T$%dKs@ZT{H6&TmqfD+vlHvFhhy&#^m0r`iOT`Vz6sUY)Gx^rOdlc@7A>Nmm 
TBjgrz1nD`5zlZdY?zH~}yevG( literal 0 HcmV?d00001 diff --git a/gimp-plugins/DeblurGANv2/requirements.txt b/gimp-plugins/DeblurGANv2/requirements.txt new file mode 100644 index 0000000..615b808 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/requirements.txt @@ -0,0 +1,13 @@ +torch==1.0.1 +torchvision +pretrainedmodels +numpy +opencv-python-headless +joblib +albumentations +scikit-image +tqdm +glog +tensorboardx +fire +# this file is not ready yet diff --git a/gimp-plugins/DeblurGANv2/schedulers.py b/gimp-plugins/DeblurGANv2/schedulers.py new file mode 100644 index 0000000..ca1841f --- /dev/null +++ b/gimp-plugins/DeblurGANv2/schedulers.py @@ -0,0 +1,59 @@ +import math + +from torch.optim import lr_scheduler + + +class WarmRestart(lr_scheduler.CosineAnnealingLR): + """This class implements Stochastic Gradient Descent with Warm Restarts(SGDR): https://arxiv.org/abs/1608.03983. + + Set the learning rate of each parameter group using a cosine annealing schedule, When last_epoch=-1, sets initial lr as lr. + This can't support scheduler.step(epoch). please keep epoch=None. + """ + + def __init__(self, optimizer, T_max=30, T_mult=1, eta_min=0, last_epoch=-1): + """implements SGDR + + Parameters: + ---------- + T_max : int + Maximum number of epochs. + T_mult : int + Multiplicative factor of T_max. + eta_min : int + Minimum learning rate. Default: 0. + last_epoch : int + The index of last epoch. Default: -1. + """ + self.T_mult = T_mult + super().__init__(optimizer, T_max, eta_min, last_epoch) + + def get_lr(self): + if self.last_epoch == self.T_max: + self.last_epoch = 0 + self.T_max *= self.T_mult + return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2 for + base_lr in self.base_lrs] + + +class LinearDecay(lr_scheduler._LRScheduler): + """This class implements LinearDecay + + """ + + def __init__(self, optimizer, num_epochs, start_epoch=0, min_lr=0, last_epoch=-1): + """implements LinearDecay + + Parameters: + ---------- + + """ + self.num_epochs = num_epochs + self.start_epoch = start_epoch + self.min_lr = min_lr + super().__init__(optimizer, last_epoch) + + def get_lr(self): + if self.last_epoch < self.start_epoch: + return self.base_lrs + return [base_lr - ((base_lr - self.min_lr) / self.num_epochs) * (self.last_epoch - self.start_epoch) for + base_lr in self.base_lrs] diff --git a/gimp-plugins/DeblurGANv2/test.sh b/gimp-plugins/DeblurGANv2/test.sh new file mode 100755 index 0000000..c96ece7 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/test.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +python3 -m unittest discover $(pwd) diff --git a/gimp-plugins/DeblurGANv2/test_aug.py b/gimp-plugins/DeblurGANv2/test_aug.py new file mode 100644 index 0000000..3564f28 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/test_aug.py @@ -0,0 +1,20 @@ +import unittest + +import numpy as np + +from aug import get_transforms + + +class AugTest(unittest.TestCase): + @staticmethod + def make_images(): + img = (np.random.rand(100, 100, 3) * 255).astype('uint8') + return img.copy(), img.copy() + + def test_aug(self): + for scope in ('strong', 'weak'): + for crop in ('random', 'center'): + aug_pipeline = get_transforms(80, scope=scope, crop=crop) + a, b = self.make_images() + a, b = aug_pipeline(a, b) + np.testing.assert_allclose(a, b) diff --git a/gimp-plugins/DeblurGANv2/test_dataset.py b/gimp-plugins/DeblurGANv2/test_dataset.py new file mode 100644 index 0000000..784b7f6 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/test_dataset.py @@ -0,0 +1,76 @@ +import os +import unittest 
+from shutil import rmtree +from tempfile import mkdtemp + +import cv2 +import numpy as np +from torch.utils.data import DataLoader + +from dataset import PairedDataset + + +def make_img(): + return (np.random.rand(100, 100, 3) * 255).astype('uint8') + + +class AugTest(unittest.TestCase): + tmp_dir = mkdtemp() + raw = os.path.join(tmp_dir, 'raw') + gt = os.path.join(tmp_dir, 'gt') + + def setUp(self): + for d in (self.raw, self.gt): + os.makedirs(d) + + for i in range(5): + for d in (self.raw, self.gt): + img = make_img() + cv2.imwrite(os.path.join(d, f'{i}.png'), img) + + def tearDown(self): + rmtree(self.tmp_dir) + + def dataset_gen(self, equal=True): + base_config = {'files_a': os.path.join(self.raw, '*.png'), + 'files_b': os.path.join(self.raw if equal else self.gt, '*.png'), + 'size': 32, + } + for b in ([0, 1], [0, 0.9]): + for scope in ('strong', 'weak'): + for crop in ('random', 'center'): + for preload in (0, 1): + for preload_size in (0, 64): + config = base_config.copy() + config['bounds'] = b + config['scope'] = scope + config['crop'] = crop + config['preload'] = preload + config['preload_size'] = preload_size + config['verbose'] = False + dataset = PairedDataset.from_config(config) + yield dataset + + def test_equal_datasets(self): + for dataset in self.dataset_gen(equal=True): + dataloader = DataLoader(dataset=dataset, + batch_size=2, + shuffle=True, + drop_last=True) + dataloader = iter(dataloader) + batch = next(dataloader) + a, b = map(lambda x: x.numpy(), map(batch.get, ('a', 'b'))) + + np.testing.assert_allclose(a, b) + + def test_datasets(self): + for dataset in self.dataset_gen(equal=False): + dataloader = DataLoader(dataset=dataset, + batch_size=2, + shuffle=True, + drop_last=True) + dataloader = iter(dataloader) + batch = next(dataloader) + a, b = map(lambda x: x.numpy(), map(batch.get, ('a', 'b'))) + + assert not np.all(a == b), 'images should not be the same' diff --git a/gimp-plugins/DeblurGANv2/test_metrics.py b/gimp-plugins/DeblurGANv2/test_metrics.py new file mode 100644 index 0000000..d6dc7f7 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/test_metrics.py @@ -0,0 +1,90 @@ +from __future__ import print_function +import argparse +import numpy as np +import torch +import cv2 +import yaml +import os +from torchvision import models, transforms +from torch.autograd import Variable +import shutil +import glob +import tqdm +from util.metrics import PSNR +from albumentations import Compose, CenterCrop, PadIfNeeded +from PIL import Image +from ssim.ssimlib import SSIM +from models.networks import get_generator + + +def get_args(): + parser = argparse.ArgumentParser('Test an image') + parser.add_argument('--img_folder', required=True, help='GoPRO Folder') + parser.add_argument('--weights_path', required=True, help='Weights path') + + return parser.parse_args() + + +def prepare_dirs(path): + if os.path.exists(path): + shutil.rmtree(path) + os.makedirs(path) + + +def get_gt_image(path): + dir, filename = os.path.split(path) + base, seq = os.path.split(dir) + base, _ = os.path.split(base) + img = cv2.cvtColor(cv2.imread(os.path.join(base, 'sharp', seq, filename)), cv2.COLOR_BGR2RGB) + return img + + +def test_image(model, image_path): + img_transforms = transforms.Compose([ + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + ]) + size_transform = Compose([ + PadIfNeeded(736, 1280) + ]) + crop = CenterCrop(720, 1280) + img = cv2.imread(image_path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img_s = size_transform(image=img)['image'] + img_tensor = 
torch.from_numpy(np.transpose(img_s / 255, (2, 0, 1)).astype('float32')) + img_tensor = img_transforms(img_tensor) + with torch.no_grad(): + img_tensor = Variable(img_tensor.unsqueeze(0).cuda()) + result_image = model(img_tensor) + result_image = result_image[0].cpu().float().numpy() + result_image = (np.transpose(result_image, (1, 2, 0)) + 1) / 2.0 * 255.0 + result_image = crop(image=result_image)['image'] + result_image = result_image.astype('uint8') + gt_image = get_gt_image(image_path) + _, filename = os.path.split(image_path) + psnr = PSNR(result_image, gt_image) + pilFake = Image.fromarray(result_image) + pilReal = Image.fromarray(gt_image) + ssim = SSIM(pilFake).cw_ssim_value(pilReal) + return psnr, ssim + + +def test(model, files): + psnr = 0 + ssim = 0 + for file in tqdm.tqdm(files): + cur_psnr, cur_ssim = test_image(model, file) + psnr += cur_psnr + ssim += cur_ssim + print("PSNR = {}".format(psnr / len(files))) + print("SSIM = {}".format(ssim / len(files))) + + +if __name__ == '__main__': + args = get_args() + with open('config/config.yaml') as cfg: + config = yaml.load(cfg) + model = get_generator(config['model']) + model.load_state_dict(torch.load(args.weights_path)['model']) + model = model.cuda() + filenames = sorted(glob.glob(args.img_folder + '/test' + '/blur/**/*.png', recursive=True)) + test(model, filenames) diff --git a/gimp-plugins/DeblurGANv2/testing.py b/gimp-plugins/DeblurGANv2/testing.py new file mode 100644 index 0000000..d9606a2 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/testing.py @@ -0,0 +1,9 @@ +import cv2 +from predictorClass import Predictor + +predictor = Predictor(weights_path='best_fpn.h5') +img = cv2.imread('img/img.jpg') +img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) +pred = predictor(img, None) +pred = cv2.cvtColor(pred, cv2.COLOR_RGB2BGR) +cv2.imwrite('submit/img.jpg',pred) diff --git a/gimp-plugins/DeblurGANv2/train.py b/gimp-plugins/DeblurGANv2/train.py new file mode 100644 index 0000000..323dff7 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/train.py @@ -0,0 +1,181 @@ +import logging +from functools import partial + +import cv2 +import torch +import torch.optim as optim +import tqdm +import yaml +from joblib import cpu_count +from torch.utils.data import DataLoader + +from adversarial_trainer import GANFactory +from dataset import PairedDataset +from metric_counter import MetricCounter +from models.losses import get_loss +from models.models import get_model +from models.networks import get_nets +from schedulers import LinearDecay, WarmRestart + +cv2.setNumThreads(0) + + +class Trainer: + def __init__(self, config, train: DataLoader, val: DataLoader): + self.config = config + self.train_dataset = train + self.val_dataset = val + self.adv_lambda = config['model']['adv_lambda'] + self.metric_counter = MetricCounter(config['experiment_desc']) + self.warmup_epochs = config['warmup_num'] + + def train(self): + self._init_params() + for epoch in range(0, config['num_epochs']): + if (epoch == self.warmup_epochs) and not (self.warmup_epochs == 0): + self.netG.module.unfreeze() + self.optimizer_G = self._get_optim(self.netG.parameters()) + self.scheduler_G = self._get_scheduler(self.optimizer_G) + self._run_epoch(epoch) + self._validate(epoch) + self.scheduler_G.step() + self.scheduler_D.step() + + if self.metric_counter.update_best_model(): + torch.save({ + 'model': self.netG.state_dict() + }, 'best_{}.h5'.format(self.config['experiment_desc'])) + torch.save({ + 'model': self.netG.state_dict() + }, 'last_{}.h5'.format(self.config['experiment_desc'])) + 
print(self.metric_counter.loss_message()) + logging.debug("Experiment Name: %s, Epoch: %d, Loss: %s" % ( + self.config['experiment_desc'], epoch, self.metric_counter.loss_message())) + + def _run_epoch(self, epoch): + self.metric_counter.clear() + for param_group in self.optimizer_G.param_groups: + lr = param_group['lr'] + + epoch_size = config.get('train_batches_per_epoch') or len(self.train_dataset) + tq = tqdm.tqdm(self.train_dataset, total=epoch_size) + tq.set_description('Epoch {}, lr {}'.format(epoch, lr)) + i = 0 + for data in tq: + inputs, targets = self.model.get_input(data) + outputs = self.netG(inputs) + loss_D = self._update_d(outputs, targets) + self.optimizer_G.zero_grad() + loss_content = self.criterionG(outputs, targets) + loss_adv = self.adv_trainer.loss_g(outputs, targets) + loss_G = loss_content + self.adv_lambda * loss_adv + loss_G.backward() + self.optimizer_G.step() + self.metric_counter.add_losses(loss_G.item(), loss_content.item(), loss_D) + curr_psnr, curr_ssim, img_for_vis = self.model.get_images_and_metrics(inputs, outputs, targets) + self.metric_counter.add_metrics(curr_psnr, curr_ssim) + tq.set_postfix(loss=self.metric_counter.loss_message()) + if not i: + self.metric_counter.add_image(img_for_vis, tag='train') + i += 1 + if i > epoch_size: + break + tq.close() + self.metric_counter.write_to_tensorboard(epoch) + + def _validate(self, epoch): + self.metric_counter.clear() + epoch_size = config.get('val_batches_per_epoch') or len(self.val_dataset) + tq = tqdm.tqdm(self.val_dataset, total=epoch_size) + tq.set_description('Validation') + i = 0 + for data in tq: + inputs, targets = self.model.get_input(data) + outputs = self.netG(inputs) + loss_content = self.criterionG(outputs, targets) + loss_adv = self.adv_trainer.loss_g(outputs, targets) + loss_G = loss_content + self.adv_lambda * loss_adv + self.metric_counter.add_losses(loss_G.item(), loss_content.item()) + curr_psnr, curr_ssim, img_for_vis = self.model.get_images_and_metrics(inputs, outputs, targets) + self.metric_counter.add_metrics(curr_psnr, curr_ssim) + if not i: + self.metric_counter.add_image(img_for_vis, tag='val') + i += 1 + if i > epoch_size: + break + tq.close() + self.metric_counter.write_to_tensorboard(epoch, validation=True) + + def _update_d(self, outputs, targets): + if self.config['model']['d_name'] == 'no_gan': + return 0 + self.optimizer_D.zero_grad() + loss_D = self.adv_lambda * self.adv_trainer.loss_d(outputs, targets) + loss_D.backward(retain_graph=True) + self.optimizer_D.step() + return loss_D.item() + + def _get_optim(self, params): + if self.config['optimizer']['name'] == 'adam': + optimizer = optim.Adam(params, lr=self.config['optimizer']['lr']) + elif self.config['optimizer']['name'] == 'sgd': + optimizer = optim.SGD(params, lr=self.config['optimizer']['lr']) + elif self.config['optimizer']['name'] == 'adadelta': + optimizer = optim.Adadelta(params, lr=self.config['optimizer']['lr']) + else: + raise ValueError("Optimizer [%s] not recognized." 
% self.config['optimizer']['name'])
+        return optimizer
+
+    def _get_scheduler(self, optimizer):
+        if self.config['scheduler']['name'] == 'plateau':
+            scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
+                                                             mode='min',
+                                                             patience=self.config['scheduler']['patience'],
+                                                             factor=self.config['scheduler']['factor'],
+                                                             min_lr=self.config['scheduler']['min_lr'])
+        elif self.config['scheduler']['name'] == 'sgdr':
+            scheduler = WarmRestart(optimizer)
+        elif self.config['scheduler']['name'] == 'linear':
+            scheduler = LinearDecay(optimizer,
+                                    min_lr=self.config['scheduler']['min_lr'],
+                                    num_epochs=self.config['num_epochs'],
+                                    start_epoch=self.config['scheduler']['start_epoch'])
+        else:
+            raise ValueError("Scheduler [%s] not recognized." % self.config['scheduler']['name'])
+        return scheduler
+
+    @staticmethod
+    def _get_adversarial_trainer(d_name, net_d, criterion_d):
+        if d_name == 'no_gan':
+            return GANFactory.create_model('NoGAN')
+        elif d_name == 'patch_gan' or d_name == 'multi_scale':
+            return GANFactory.create_model('SingleGAN', net_d, criterion_d)
+        elif d_name == 'double_gan':
+            return GANFactory.create_model('DoubleGAN', net_d, criterion_d)
+        else:
+            raise ValueError("Discriminator Network [%s] not recognized." % d_name)
+
+    def _init_params(self):
+        self.criterionG, criterionD = get_loss(self.config['model'])
+        self.netG, netD = get_nets(self.config['model'])
+        self.netG.cuda()
+        self.adv_trainer = self._get_adversarial_trainer(self.config['model']['d_name'], netD, criterionD)
+        self.model = get_model(self.config['model'])
+        self.optimizer_G = self._get_optim(filter(lambda p: p.requires_grad, self.netG.parameters()))
+        self.optimizer_D = self._get_optim(self.adv_trainer.get_params())
+        self.scheduler_G = self._get_scheduler(self.optimizer_G)
+        self.scheduler_D = self._get_scheduler(self.optimizer_D)
+
+
+if __name__ == '__main__':
+    with open('config/config.yaml', 'r') as f:
+        config = yaml.load(f)
+
+    batch_size = config.pop('batch_size')
+    get_dataloader = partial(DataLoader, batch_size=batch_size, num_workers=cpu_count(), shuffle=True, drop_last=True)
+
+    datasets = map(config.pop, ('train', 'val'))
+    datasets = map(PairedDataset.from_config, datasets)
+    train, val = map(get_dataloader, datasets)
+    trainer = Trainer(config, train=train, val=val)
+    trainer.train()
diff --git a/gimp-plugins/DeblurGANv2/util/__init__.py b/gimp-plugins/DeblurGANv2/util/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/gimp-plugins/DeblurGANv2/util/__init__.pyc b/gimp-plugins/DeblurGANv2/util/__init__.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0ca0fa6cd2196cc5da58bcea12be88ed1a61cf1
GIT binary patch
literal 163
zcmZSn%**w>VP0G^0~9aBN{ieb{mP8=OG`3y
i^yA|*^D;}~*O!6)nh

literal 0
HcmV?d00001

diff --git a/gimp-plugins/DeblurGANv2/util/image_pool.py b/gimp-plugins/DeblurGANv2/util/image_pool.py
new file mode 100644
index 0000000..590bba8
--- /dev/null
+++ b/gimp-plugins/DeblurGANv2/util/image_pool.py
@@ -0,0 +1,33 @@
+import random
+import numpy as np
+import torch
+from torch.autograd import Variable
+from collections import deque
+
+
+class ImagePool():
+    def __init__(self, pool_size):
+        self.pool_size = pool_size
+        self.sample_size = pool_size
+        if self.pool_size > 0:
+            self.num_imgs = 0
+            self.images = deque()
+
+    def add(self, images):
+        if self.pool_size == 0:
+            return images
+        for image in images.data:
+            image = torch.unsqueeze(image, 0)
+            if self.num_imgs < self.pool_size:
+                self.num_imgs = self.num_imgs + 1
+                self.images.append(image)
+            else:
+
self.images.popleft() + self.images.append(image) + + def query(self): + if len(self.images) > self.sample_size: + return_images = list(random.sample(self.images, self.sample_size)) + else: + return_images = list(self.images) + return torch.cat(return_images, 0) diff --git a/gimp-plugins/DeblurGANv2/util/metrics.py b/gimp-plugins/DeblurGANv2/util/metrics.py new file mode 100644 index 0000000..13e4671 --- /dev/null +++ b/gimp-plugins/DeblurGANv2/util/metrics.py @@ -0,0 +1,54 @@ +import math +from math import exp + +import numpy as np +import torch +import torch.nn.functional as F +from torch.autograd import Variable + + +def gaussian(window_size, sigma): + gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)]) + return gauss / gauss.sum() + + +def create_window(window_size, channel): + _1D_window = gaussian(window_size, 1.5).unsqueeze(1) + _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) + window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) + return window + + +def SSIM(img1, img2): + (_, channel, _, _) = img1.size() + window_size = 11 + window = create_window(window_size, channel) + + if img1.is_cuda: + window = window.cuda(img1.get_device()) + window = window.type_as(img1) + + mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) + mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) + + mu1_sq = mu1.pow(2) + mu2_sq = mu2.pow(2) + mu1_mu2 = mu1 * mu2 + + sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq + sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq + sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2 + + C1 = 0.01 ** 2 + C2 = 0.03 ** 2 + + ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) + return ssim_map.mean() + + +def PSNR(img1, img2): + mse = np.mean((img1 / 255. - img2 / 255.) 
** 2) + if mse == 0: + return 100 + PIXEL_MAX = 1 + return 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) diff --git a/gimp-plugins/DeblurGANv2/util/metrics.pyc b/gimp-plugins/DeblurGANv2/util/metrics.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51d491c20377d71d948ba7e1905dfd4c670eaf06 GIT binary patch literal 2473 zcmcIlOLN;)6h2peBz7JSjZHG)o$12EOyb&$t{Boz0;L1-1i3IB1NEqus>rbhB<2{1pv}x|wdV9xCc8Qqw+*y15xHPu=`XFVLV!gE{KXizP?H5`|^zmN7C< zPch{Jxdl-c$t{YqMDCm@%e-u-K94N~|Ke%a6;zXh@E(t{26ukxB!TmA==FKmdAudO zyLk0i5Qb==$cJLlnCM*5-ZxE}SUk~KOju=O9b$FCVB?C$isfn*&>G$c79FqhxCsWx ze^8)vqFSfU3$_E6^&&4-U=b(%L&ME`UZmqhjENKZo()@uc{Ge2BL~rGSm!O91&Gri za^usU4xV^2N(X-E7(Vo4Y5Pv9bsbF-p+AMz_(FS$ZX6|n3669eI#J``tbgbv;Xx7) zyuQ&5KL|%#qhaa?k#6jH4~J=T=XU#~)ksY+Y=oXkg1+7!of#%m)(D)a3|;fiNbM_O zmJi3r5@tgvU>1aes&k7eb}bRqNFS%32L{TMu`0x=BK2GEs zK4U+QOyH++s_TN8gUzr2hf(AW1=MTq^)lyV&DKSeXUlr7&@W58oy2pDH{C@A2F>0g zwXU|*^~^!9q0KuN2mOS@g2@5MB>Fnf0S~rWTI@2g3T8ko$cOtT@B+XVC2blbX%XgV ziq|;s3lw2C*aenZ5aRF=V6d~}kUBZqaHlg`lOjzKSmxyjjK&DobzBC1nbufc zf?A=qDpnttXpPO|1sX5n!{*v#g~ls1C=r~{sdId`0|{03)5vk%Ao8VtNWt)vI2~#K z+v+cO{&@0q$AA9zk1sxd{%ptJ+I{oIpEoyl{GWgO{*zxP`#W_GT~sTC0g8oP;}%aD z=w3f{9qAE;alU7Iu6GjjJ;9i>k=JvyB*w?GE^^?c90VS;<^qVuK zB?EglciOzBU1eAfB21gpZ%gJyyQhz*YKv9Ivo%axlC4tV1jO7(>jIbcwIq*so7tx& zHA?Fs?1j$g3R8}$?R4($zs}pb4#AmLR28d)Z^hbFn^r|tRM~o8;b#?9(Q0K`$RoLu zNEUXWWET(;1!-5y?v~m*GG>+YNa`da=?JOUAY@Mm*C7%WcOg#9d597T1!*Pal`o`f zr_MZ1i{bC_Er}p0hJlNYtTwV&q@)RR(a3%mhRnB)Ih6Zk82il|V$#QnkqT?S2b8_Z zQ1Pht#7_rz?|<}JZ~ylFE2jLQ)3%#fTyxx#dPgm)JZP_T!IO^5J2Xne(V2aNJGj!D zRTg~rZb#AfqcrN9AdZ}2cKS%__NCQ+i_yy$L!8vae&V=tMaX3$Ib}D1ux~caoAM!P&uR$!T6>AX`TvIE#QlT_gdgDK@6Zvxh literal 0 HcmV?d00001 diff --git a/gimp-plugins/colorize.py b/gimp-plugins/colorize.py new file mode 100755 index 0000000..e87156c --- /dev/null +++ b/gimp-plugins/colorize.py @@ -0,0 +1,111 @@ + + +from gimpfu import * +import sys + +sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'neural-colorization']) + + +import torch +from model import generator +from torch.autograd import Variable +from scipy.ndimage import zoom +from PIL import Image +from argparse import Namespace +import numpy as np +from skimage.color import rgb2yuv,yuv2rgb + +def getcolor(input_image): + p = np.repeat(input_image, 3, axis=2) + + if torch.cuda.is_available(): + g_available=1 + else: + g_available=-1 + + args=Namespace(model=baseLoc+'neural-colorization/model.pth',gpu=g_available) + + G = generator() + + if torch.cuda.is_available(): + G=G.cuda() + G.load_state_dict(torch.load(args.model)) + else: + G.load_state_dict(torch.load(args.model,map_location=torch.device('cpu'))) + + + img_yuv = rgb2yuv(p) + H,W,_ = img_yuv.shape + infimg = np.expand_dims(np.expand_dims(img_yuv[...,0], axis=0), axis=0) + img_variable = Variable(torch.Tensor(infimg-0.5)) + if args.gpu>=0: + img_variable=img_variable.cuda(args.gpu) + res = G(img_variable) + uv=res.cpu().detach().numpy() + uv[:,0,:,:] *= 0.436 + uv[:,1,:,:] *= 0.615 + (_,_,H1,W1) = uv.shape + uv = zoom(uv,(1,1,float(H)/H1,float(W)/W1)) + yuv = np.concatenate([infimg,uv],axis=1)[0] + rgb=yuv2rgb(yuv.transpose(1,2,0)) + out=(rgb.clip(min=0,max=1)*255)[:,:,[0,1,2]] + + out=out.astype(np.uint8) + + + return out + +def channelData(layer):#convert gimp image to numpy + region=layer.get_pixel_rgn(0, 0, layer.width,layer.height) + pixChars=region[:,:] # Take whole layer + bpp=region.bpp + # return 
np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp) + return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp) + +def createResultLayer(image,name,result): + rlBytes=np.uint8(result).tobytes(); + rl=gimp.Layer(image,name,image.width,image.height,image.active_layer.type,100,NORMAL_MODE) + region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) + region[:,:]=rlBytes + image.add_layer(rl,0) + gimp.displays_flush() + +def genNewImg(name,layer_np): + h,w,d=layer_np.shape + img=pdb.gimp_image_new(w, h, RGB) + display=pdb.gimp_display_new(img) + + rlBytes=np.uint8(layer_np).tobytes(); + rl=gimp.Layer(img,name,img.width,img.height,RGB,100,NORMAL_MODE) + region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) + region[:,:]=rlBytes + + pdb.gimp_image_insert_layer(img, rl, None, 0) + gimp.displays_flush() + +def colorize(img, layer) : + gimp.progress_init("Coloring " + layer.name + "...") + + imgmat = channelData(layer) + cpy=getcolor(imgmat) + + genNewImg(layer.name+'_colored',cpy) + + + +register( + "colorize", + "colorize", + "Generate monocular disparity map based on deep learning.", + "Kritik Soman", + "Your", + "2020", + "colorize...", + "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. + [ (PF_IMAGE, "image", "Input image", None), + (PF_DRAWABLE, "drawable", "Input drawable", None), + ], + [], + colorize, menu="/Layer/GIML-ML") + +main() diff --git a/gimp-plugins/deblur.py b/gimp-plugins/deblur.py new file mode 100755 index 0000000..b91b790 --- /dev/null +++ b/gimp-plugins/deblur.py @@ -0,0 +1,53 @@ + + +from gimpfu import * +import sys +sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'DeblurGANv2']) + +import cv2 +from predictorClass import Predictor +import numpy as np + +def channelData(layer):#convert gimp image to numpy + region=layer.get_pixel_rgn(0, 0, layer.width,layer.height) + pixChars=region[:,:] # Take whole layer + bpp=region.bpp + # return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp) + return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp) + +def createResultLayer(image,name,result): + rlBytes=np.uint8(result).tobytes(); + rl=gimp.Layer(image,name,image.width,image.height,image.active_layer.type,100,NORMAL_MODE) + region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) + region[:,:]=rlBytes + image.add_layer(rl,0) + gimp.displays_flush() + +def getdeblur(img): + predictor = Predictor(weights_path=baseLoc+'DeblurGANv2/'+'best_fpn.h5') + pred = predictor(img, None) + return pred + +def deblur(img, layer): + gimp.progress_init("Running for " + layer.name + "...") + imgmat = channelData(layer) + pred = getdeblur(imgmat) + createResultLayer(img,'deblur_'+layer.name,pred) + + +register( + "deblur", + "deblur", + "Running deblurring.", + "Kritik Soman", + "Your", + "2020", + "deblur...", + "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. 
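+    # The two PF_* tuples below declare the plugin's inputs; GIMP passes them as the
+    # img and layer arguments of deblur() when the menu entry under /Layer is invoked.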
+ [ (PF_IMAGE, "image", "Input image", None), + (PF_DRAWABLE, "drawable", "Input drawable", None), + ], + [], + deblur, menu="/Layer/GIML-ML") + +main() diff --git a/gimp-plugins/deeplabv3.py b/gimp-plugins/deeplabv3.py new file mode 100755 index 0000000..55a9305 --- /dev/null +++ b/gimp-plugins/deeplabv3.py @@ -0,0 +1,88 @@ + + +from gimpfu import * +import sys +sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools']) + + +from PIL import Image +import torch +from torchvision import transforms, datasets +import numpy as np + +def getSeg(input_image): + model = torch.load(baseLoc+'deeplabv3/deeplabv3+model.pt') + model.eval() + preprocess = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + + input_image = Image.fromarray(input_image) + input_tensor = preprocess(input_image) + input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model + if torch.cuda.is_available(): + input_batch = input_batch.to('cuda') + model.to('cuda') + + with torch.no_grad(): + output = model(input_batch)['out'][0] + output_predictions = output.argmax(0) + + + # create a color pallette, selecting a color for each class + palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) + colors = torch.as_tensor([i for i in range(21)])[:, None] * palette + colors = (colors % 255).numpy().astype("uint8") + + # plot the semantic segmentation predictions of 21 classes in each color + r = Image.fromarray(output_predictions.byte().cpu().numpy()).resize(input_image.size) + + tmp = np.array(r) + tmp2 = 10*np.repeat(tmp[:, :, np.newaxis], 3, axis=2) + + + return tmp2 + + +def channelData(layer):#convert gimp image to numpy + region=layer.get_pixel_rgn(0, 0, layer.width,layer.height) + pixChars=region[:,:] # Take whole layer + bpp=region.bpp + return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp) + +def createResultLayer(image,name,result): + rlBytes=np.uint8(result).tobytes(); + rl=gimp.Layer(image,name,image.width,image.height,image.active_layer.type,100,NORMAL_MODE) + region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) + region[:,:]=rlBytes + image.add_layer(rl,0) + gimp.displays_flush() + +def deeplabv3(img, layer) : + if torch.cuda.is_available(): + gimp.progress_init("(Using GPU) Generating semantic segmentation map for " + layer.name + "...") + else: + gimp.progress_init("(Using CPU) Generating semantic segmentation map for " + layer.name + "...") + + imgmat = channelData(layer) + cpy=getSeg(imgmat) + createResultLayer(img,'new_output',cpy) + + +register( + "deeplabv3", + "deeplabv3", + "Generate semantic segmentation map based on deep learning.", + "Kritik Soman", + "GIMP-ML", + "2020", + "deeplabv3...", + "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. 
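+    # Same registration pattern as the other plugins: the parameter tuples map to
+    # deeplabv3(img, layer), and the empty list that follows declares no return values.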
+ [ (PF_IMAGE, "image", "Input image", None), + (PF_DRAWABLE, "drawable", "Input drawable", None) + ], + [], + deeplabv3, menu="/Layer/GIML-ML") + +main() diff --git a/gimp-plugins/face-parsing.PyTorch/__pycache__/model.cpython-38.pyc b/gimp-plugins/face-parsing.PyTorch/__pycache__/model.cpython-38.pyc new file mode 100755 index 0000000000000000000000000000000000000000..c7216a39472602cda52c07f5fb0f6251868e502e GIT binary patch literal 9174 zcmds7TaO$^74G}=%m6+IHIZauoE%V$pWnNYu>RacKFt@Wy& zx$i`AJ4()7tdTSNv+_!O21KNJdZ>9uSo}h(>ryI3&s4JTEF}TY{z9`f&q4pCtaQ24 zzI$o?o#@T?j!blLgo)5jommx172D z$-5m<+dSLqgi-TM_Y*OLS4m+mgV6Lj-O-Ky@o(3e(RY@HlX9m2)ir~y^B-v&=VhGn z<0v3uE!7Y7T|LqF4b)85SWlrxhx&Te5`1T-U(GBLHG5(awL?QJN#@7mQCwAVnw&zt zBOrzF5maKS4SGpm(03{$Bg>oAs4@ND*udTZmjgS{u4o^;#G$2nVr-dvhH!DUw(L~j za-_<@)Ik?QZM&%M?YpvW9^lR$H-zFGTI+(}3}es(!IvHuJ-4>mXoezcXQc+D9VbD% z7KvHh6muv%i+bSAZwHMQ=E847LxQHCIZ87G(a9@9h4^6N=koB3<|h*Z5f!w20%t4% zO`pM0(M#IS%7{QlbLv6@h%=BF! z#xJE?b)@zSnxW^m8li%OB0z#hAH`yoeVk+=^Jx~} zo-$ynHNR%kj!umadc>5*@Pz=tQ;IJIv4O+TcJ*C?okF4|X8BPpiVe(6Bl(_hebjXM=;a^1M0$tgN4 zd$Tm&FgNK~P%)&^dCr@Sb`%H+MVS+HyHPt7-{Nheh*&_8S{7}M0JR`h00qJP;32e**g_w07*YZ<$)oIAT@xb#KvNzJeeYBC<1`040W67 z`!wP#Is0n839tkeXC zk+KJrqo|}nNrE3?m4dq)ez((U(z~O3DCl&GAF%i#8`uf` zsmyHknpI^h;z{zYdK4s?t%PY3<%cRNQJd@Pn>b^xm$ZtLIC;S3dh&SLJbVE99zSBj z5qbLf4;TScfhcUc=~8XwM03Vrx~^LkN^Li!2*E%Of(LZr{X_fqnCHu{2@{?b|b! zQDxE6E4hUwb=Tf?uoRBBDeStbJ;5vPmQrVeSKggTfETGVeg{}ED^X(>s4#mx1QAx;K*3NG z%XYWZ3~(Vzil4A}o5edQswdPm5~Doh+Vh@q^LfuER$cTQUd~-m*Yj(*eG#`Wdge7S z&6_N)foPfit8#Ch~8UV9{?ODchae5!fO2_6MAIRh@8ro*N>3WFrV>GnhY+U#;7Gw$ z%PBLb`tb`ALmnO$KV!k^m!qxbYY#z7Tw^hzeT0bL;>-<9`BCsOmqv&D@L|I5heIBu za8^f${6LtHqL(R{@bw0s_eX|5@I#M@{#Hgm_(70?I{s^Rm?MMh7Jv-o)^LvfsEqu5 z6=Ybtip8xbqN}|aA>J{89f!i8A^ru5NrcFAP0Tip=b8}KJwgYEcjzH9Rb5bdDBh77 zs?mEc?iHB`yT?R!3E^K^hJQ1#so8$`R|RTFa3C&7tT4Fr>BT~h&cU8Wq*|Wz-dfSNwh6Eh^et5vwQcH%g7*4M-IkBIEZ84 zhFfV4!bD{M;L_1WBUKns%55XBsCbFCU*fg`oJ6>QsRd`QkTMlH$OR*9*=R|pty&gW zF&xjV1&Q!MjW}wru_|Fs3Q}U-hvdDEqUuTq%`@w97e%e>S#|btjSYT;LU~wulU&7w_-;}l zg66g2Sgiyr1r}nXlAC(4M`l)(d&D6P2T3q&h|B4X(Xftdnh;95?ctpdVTprQ&uL+hTkTa?_=1h*u)5p<+!-pFwPi!&uRI?9BJ4?cu{nW=ke9a;ZPu<8YCC%rwW#IAboI^dWiF zty>niFt}|2rBT}$Zy>^CzQx(s4>Zuovm{1}5N6MauSiO5XP9XL+Y;P;cX74uoW^2I z4whueF(e17ms)k?T-Cmk?air8i~jzpO6)&_K}q|RrilKWBUPR$7G=-0}t3TL{ z@G3cHWQRh0pG|Gqvo|j_Oh)Fb=diH<6kl+GasD4s=;nfTWS z{^igMgUpKV;zhOy+F{#sFmi4pyn#lZt?Z2#isTDGxNNlEIYX<*o?Qxu6{*l`a;+QoWjC9Skv zQnO3TCMckQ45Wvu=N`$U|D7Ism{WnC^e+@B(%+k1QnHEyU1HzNy!qIf-+S-(hR<3p zpW(UJ9{=I~5@Y|Q#^uLF<38T}?+}tnp0Zw9^1kQ^Ue>L?5R9|F-E%69+pigo*Q-M# zdcItdwsc&2K;y?0`?$giAgVQb*S4b}GZB+R=)i!9UbLoB!@ zyOypeSujk)EY7t*7!_4rkzL;~)k5*F-)ua~W0h|ls-#E`^Fcq%Ha@E$$T#+q{_yTF z9qlDqzOfrd@!esla_nvW@vE;0oeC0wrE7E!oClu7aymW}O)i-DR<%)&NOAeGt497V zDlzME$$CP%->_Xy5WTMV027>s_3Uq$&3X<1t7-p1m?zPLbPydHWTN>pv>MgxcnP=s z&kzMGfcvSK3bJ=$O@RNzI<+P&unTAEPQ(oTK@B}(B2ESB-X(p#a1MNl?VQo5hGdXU zTI0~l<;0TKp{4$W@hyy7BE7Vr#iq}k`6@G7vnd%i@9uon<+_$+ut*e_cI{fq!9I;e z6k*nFYbPHKW2KhS(q0fGSyBXn!B9H{@us3P>c)exi1v2|s=p<*^*q}wd$*{*rR^v2 z7tge<;&h~aa_Kzm57SutPvfIeoE1r!DhmFtt?fKccWGZiv>#^LBS)cH=_Xv)gCI)7 zJjW);V-(p}twLPKo3BAIzJy=X{Fb=Om$}V7GChoM3-90K)eEbw&rcMQW6vJ~fnF0a zU!yW%Q^d=RpNN7_f#DPunwd80Hq}o^>r^#<0-GMVk{#C7Bj{Z-7uJ}Fo9x7fMoetX zu_yLZ_J;3tYw8-lqWL3r8>ONkRX0i8BJo{_uA{z(Li3l}L)<5F6sA2d-yf(V8Wq|C zOeG!*P;_l~ppHW&e}s8CtJChr>9N#*5V*4C=b-`!x0)2oPB|>YKURO~Y59Ce4$pKzAY)heaHi0l{;k zgl$YNSqF$O^BY`kLi1m&L)1V|{yxN^6)>ByS&P=c4hK{cGM zuDvH!8i#)rn1rKSlD(&i;i3z~*(UeYWf0NN(yk#$--iZ*$EpG1v>$uWvNq%_!N zR-yr2q1LIBOnUxD5};F-Ai!C7S#_vuiv+PkX;^^kx%i{E%< 
zZnyt~gMNp#09wgGe8E9rAg~Cs0XS%0a~8Fdb%>X0hL>Q%sW(A*fOn90CibrNl&K$J z>2AFQ=ZAEP3kLd#nje#(v#3u<{0gGtB1Ocaon83;J?&Dl`C(aZZ(Ruy!w;WhcuoXR z@sI!$ncsMM0Y$|?Z@KbYZ2VhHFW8h9#JO~GYsSF6d}_}a=wR(@ablgor6<;*tKK`h zJrQ7KoHYSHpoQC*Ve%~94dHxZM~mGEncOpXs_|f;J-TiMBbDkVh}K-NOv}#;3UZl5 zg?5r6?&sPa4JCBCZKhoG&jEUb&Gy=q(1MFj%?~wyaTTCXG4u)Eobnj~r}HmjPFcDQ3vPxGcfQk0RB;jE*raI?o18cqSPR(gn`Rmf;$=j$Z*dvmj(BWB#T33 zQog#2?P|9SCv)jAF{H_JD@m){-1cOzV#7{{WUZISq$ZCp$Q~ivDApj%R zsA;Yy#6%QBCL4IX+4LHoXZVS-Yd^$ACpf35C|U3yRQmv8o`kDhd`mm#{;$3AZ$Orn zP_E9K>${^YqDy<2YX73HD5mxGauhE0X})eeJs?+ij6&BJ8ugGq|7#L-OV;j}Cg+=k zX~?l8R>DJD2n|h1RAz<=)Vrj0hAPPl+~V-RphV^Z6jLKmwD(DArca6F^EC({L)2*7 UmhX1n@oSy;JacuUFreRm0DXTm+W-In literal 0 HcmV?d00001 diff --git a/gimp-plugins/face-parsing.PyTorch/evaluate.py b/gimp-plugins/face-parsing.PyTorch/evaluate.py new file mode 100755 index 0000000..cb0864d --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/evaluate.py @@ -0,0 +1,95 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + +from logger import setup_logger +from model import BiSeNet +from face_dataset import FaceMask + +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +import torch.nn.functional as F +import torch.distributed as dist + +import os +import os.path as osp +import logging +import time +import numpy as np +from tqdm import tqdm +import math +from PIL import Image +import torchvision.transforms as transforms +import cv2 + +def vis_parsing_maps(im, parsing_anno, stride, save_im=False, save_path='vis_results/parsing_map_on_im.jpg'): + # Colors for all 20 parts + part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], + [255, 0, 85], [255, 0, 170], + [0, 255, 0], [85, 255, 0], [170, 255, 0], + [0, 255, 85], [0, 255, 170], + [0, 0, 255], [85, 0, 255], [170, 0, 255], + [0, 85, 255], [0, 170, 255], + [255, 255, 0], [255, 255, 85], [255, 255, 170], + [255, 0, 255], [255, 85, 255], [255, 170, 255], + [0, 255, 255], [85, 255, 255], [170, 255, 255]] + + im = np.array(im) + vis_im = im.copy().astype(np.uint8) + vis_parsing_anno = parsing_anno.copy().astype(np.uint8) + vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST) + vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255 + + num_of_class = np.max(vis_parsing_anno) + + for pi in range(1, num_of_class + 1): + index = np.where(vis_parsing_anno == pi) + vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi] + + vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8) + # print(vis_parsing_anno_color.shape, vis_im.shape) + vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0) + + # Save result or not + if save_im: + cv2.imwrite(save_path, vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) + + # return vis_im + +def evaluate(respth='./res/test_res', dspth='./data', cp='model_final_diss.pth'): + + if not os.path.exists(respth): + os.makedirs(respth) + + n_classes = 19 + net = BiSeNet(n_classes=n_classes) + net.cuda() + save_pth = osp.join('res/cp', cp) + net.load_state_dict(torch.load(save_pth)) + net.eval() + + to_tensor = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + with torch.no_grad(): + for image_path in os.listdir(dspth): + img = Image.open(osp.join(dspth, image_path)) + image = img.resize((512, 512), Image.BILINEAR) + img = to_tensor(image) + img = torch.unsqueeze(img, 0) + img = img.cuda() + out = net(img)[0] + parsing = 
out.squeeze(0).cpu().numpy().argmax(0) + + vis_parsing_maps(image, parsing, stride=1, save_im=True, save_path=osp.join(respth, image_path)) + + + + + + + +if __name__ == "__main__": + setup_logger('./res') + evaluate() diff --git a/gimp-plugins/face-parsing.PyTorch/face_dataset.py b/gimp-plugins/face-parsing.PyTorch/face_dataset.py new file mode 100755 index 0000000..a1ece7f --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/face_dataset.py @@ -0,0 +1,106 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + +import torch +from torch.utils.data import Dataset +import torchvision.transforms as transforms + +import os.path as osp +import os +from PIL import Image +import numpy as np +import json +import cv2 + +from transform import * + + + +class FaceMask(Dataset): + def __init__(self, rootpth, cropsize=(640, 480), mode='train', *args, **kwargs): + super(FaceMask, self).__init__(*args, **kwargs) + assert mode in ('train', 'val', 'test') + self.mode = mode + self.ignore_lb = 255 + self.rootpth = rootpth + + self.imgs = os.listdir(os.path.join(self.rootpth, 'CelebA-HQ-img')) + + # pre-processing + self.to_tensor = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + self.trans_train = Compose([ + ColorJitter( + brightness=0.5, + contrast=0.5, + saturation=0.5), + HorizontalFlip(), + RandomScale((0.75, 1.0, 1.25, 1.5, 1.75, 2.0)), + RandomCrop(cropsize) + ]) + + def __getitem__(self, idx): + impth = self.imgs[idx] + img = Image.open(osp.join(self.rootpth, 'CelebA-HQ-img', impth)) + img = img.resize((512, 512), Image.BILINEAR) + label = Image.open(osp.join(self.rootpth, 'mask', impth[:-3]+'png')).convert('P') + # print(np.unique(np.array(label))) + if self.mode == 'train': + im_lb = dict(im=img, lb=label) + im_lb = self.trans_train(im_lb) + img, label = im_lb['im'], im_lb['lb'] + img = self.to_tensor(img) + label = np.array(label).astype(np.int64)[np.newaxis, :] + return img, label + + def __len__(self): + return len(self.imgs) + + +if __name__ == "__main__": + face_data = '/home/zll/data/CelebAMask-HQ/CelebA-HQ-img' + face_sep_mask = '/home/zll/data/CelebAMask-HQ/CelebAMask-HQ-mask-anno' + mask_path = '/home/zll/data/CelebAMask-HQ/mask' + counter = 0 + total = 0 + for i in range(15): + # files = os.listdir(osp.join(face_sep_mask, str(i))) + + atts = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'eye_g', 'l_ear', 'r_ear', 'ear_r', + 'nose', 'mouth', 'u_lip', 'l_lip', 'neck', 'neck_l', 'cloth', 'hair', 'hat'] + + for j in range(i*2000, (i+1)*2000): + + mask = np.zeros((512, 512)) + + for l, att in enumerate(atts, 1): + total += 1 + file_name = ''.join([str(j).rjust(5, '0'), '_', att, '.png']) + path = osp.join(face_sep_mask, str(i), file_name) + + if os.path.exists(path): + counter += 1 + sep_mask = np.array(Image.open(path).convert('P')) + # print(np.unique(sep_mask)) + + mask[sep_mask == 225] = l + cv2.imwrite('{}/{}.png'.format(mask_path, j), mask) + print(j) + + print(counter, total) + + + + + + + + + + + + + + diff --git a/gimp-plugins/face-parsing.PyTorch/logger.py b/gimp-plugins/face-parsing.PyTorch/logger.py new file mode 100755 index 0000000..d3f9ddc --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/logger.py @@ -0,0 +1,23 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + + +import os.path as osp +import time +import sys +import logging + +import torch.distributed as dist + + +def setup_logger(logpth): + logfile = 'BiSeNet-{}.log'.format(time.strftime('%Y-%m-%d-%H-%M-%S')) + logfile = osp.join(logpth, logfile) + 
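+    # one timestamped log file per run; basicConfig writes to it and the StreamHandler
+    # added below echoes messages to the console (non-zero ranks in distributed runs
+    # only log errors)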
FORMAT = '%(levelname)s %(filename)s(%(lineno)d): %(message)s' + log_level = logging.INFO + if dist.is_initialized() and not dist.get_rank()==0: + log_level = logging.ERROR + logging.basicConfig(level=log_level, format=FORMAT, filename=logfile) + logging.root.addHandler(logging.StreamHandler()) + + diff --git a/gimp-plugins/face-parsing.PyTorch/logger.pyc b/gimp-plugins/face-parsing.PyTorch/logger.pyc new file mode 100755 index 0000000000000000000000000000000000000000..1e40109c0fcf8e77e6ef5657d83e25cdeae42545 GIT binary patch literal 1020 zcmcIi&2G~`5T3R3+ola5A#p&0izWBqi3)}=nvIK-anH(uifL)gI6#&e3t{zQXCIe&~tRZ}^ZF8RJ46gso_F%*4HqB4m@ z5{;7RJc*|E8Aanlew9VVOL=0SM)6V=a_7XDQkAUqi8y(InNnKIGpthh7!3#TkGdA3 zr8XsZjtJ~K#C>#9TA`yII#o)ax6UlR6Jk``+GlgED%=mY)LEvg%m+V=&n6xSWt}5g z%Fb1#oZ^MLk;1pjq?;RFtvsRkX*x;WAy!^;s}`raTB=O1$VTYM2>!dE$DA5j=d?AyX8;g1eAFnU8SzHZCzchpT+V!xRm2O?ncQImX@1X(q zh&^B})@4svk3D8F$?{aS9n>6YlMDhWy`+@(j2|aHeQi_Pv7^ty(YOI^)~L(1!)?;q k{_#^%nLb4GSK)t<*}(xXOD)!gd_{CEsp+FI{005+4^lGeC;$Ke literal 0 HcmV?d00001 diff --git a/gimp-plugins/face-parsing.PyTorch/loss.py b/gimp-plugins/face-parsing.PyTorch/loss.py new file mode 100755 index 0000000..f8f65aa --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/loss.py @@ -0,0 +1,75 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + + +import torch +import torch.nn as nn +import torch.nn.functional as F + +import numpy as np + + +class OhemCELoss(nn.Module): + def __init__(self, thresh, n_min, ignore_lb=255, *args, **kwargs): + super(OhemCELoss, self).__init__() + self.thresh = -torch.log(torch.tensor(thresh, dtype=torch.float)).cuda() + self.n_min = n_min + self.ignore_lb = ignore_lb + self.criteria = nn.CrossEntropyLoss(ignore_index=ignore_lb, reduction='none') + + def forward(self, logits, labels): + N, C, H, W = logits.size() + loss = self.criteria(logits, labels).view(-1) + loss, _ = torch.sort(loss, descending=True) + if loss[self.n_min] > self.thresh: + loss = loss[loss>self.thresh] + else: + loss = loss[:self.n_min] + return torch.mean(loss) + + +class SoftmaxFocalLoss(nn.Module): + def __init__(self, gamma, ignore_lb=255, *args, **kwargs): + super(SoftmaxFocalLoss, self).__init__() + self.gamma = gamma + self.nll = nn.NLLLoss(ignore_index=ignore_lb) + + def forward(self, logits, labels): + scores = F.softmax(logits, dim=1) + factor = torch.pow(1.-scores, self.gamma) + log_score = F.log_softmax(logits, dim=1) + log_score = factor * log_score + loss = self.nll(log_score, labels) + return loss + + +if __name__ == '__main__': + torch.manual_seed(15) + criteria1 = OhemCELoss(thresh=0.7, n_min=16*20*20//16).cuda() + criteria2 = OhemCELoss(thresh=0.7, n_min=16*20*20//16).cuda() + net1 = nn.Sequential( + nn.Conv2d(3, 19, kernel_size=3, stride=2, padding=1), + ) + net1.cuda() + net1.train() + net2 = nn.Sequential( + nn.Conv2d(3, 19, kernel_size=3, stride=2, padding=1), + ) + net2.cuda() + net2.train() + + with torch.no_grad(): + inten = torch.randn(16, 3, 20, 20).cuda() + lbs = torch.randint(0, 19, [16, 20, 20]).cuda() + lbs[1, :, :] = 255 + + logits1 = net1(inten) + logits1 = F.interpolate(logits1, inten.size()[2:], mode='bilinear') + logits2 = net2(inten) + logits2 = F.interpolate(logits2, inten.size()[2:], mode='bilinear') + + loss1 = criteria1(logits1, lbs) + loss2 = criteria2(logits2, lbs) + loss = loss1 + loss2 + print(loss.detach().cpu()) + loss.backward() diff --git a/gimp-plugins/face-parsing.PyTorch/makeup.py b/gimp-plugins/face-parsing.PyTorch/makeup.py new file mode 100755 index 0000000..b03f141 --- /dev/null +++ 
b/gimp-plugins/face-parsing.PyTorch/makeup.py @@ -0,0 +1,130 @@ +import cv2 +import os +import numpy as np +from skimage.filters import gaussian + + +def sharpen(img): + img = img * 1.0 + gauss_out = gaussian(img, sigma=5, multichannel=True) + + alpha = 1.5 + img_out = (img - gauss_out) * alpha + img + + img_out = img_out / 255.0 + + mask_1 = img_out < 0 + mask_2 = img_out > 1 + + img_out = img_out * (1 - mask_1) + img_out = img_out * (1 - mask_2) + mask_2 + img_out = np.clip(img_out, 0, 1) + img_out = img_out * 255 + return np.array(img_out, dtype=np.uint8) + + +def hair(image, parsing, part=17, color=[230, 50, 20]): + b, g, r = color #[10, 50, 250] # [10, 250, 10] + tar_color = np.zeros_like(image) + tar_color[:, :, 0] = b + tar_color[:, :, 1] = g + tar_color[:, :, 2] = r + + image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) + tar_hsv = cv2.cvtColor(tar_color, cv2.COLOR_BGR2HSV) + + if part == 12 or part == 13: + image_hsv[:, :, 0:2] = tar_hsv[:, :, 0:2] + else: + image_hsv[:, :, 0:1] = tar_hsv[:, :, 0:1] + + changed = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2BGR) + + if part == 17: + changed = sharpen(changed) + + changed[parsing != part] = image[parsing != part] + # changed = cv2.resize(changed, (512, 512)) + return changed + +# +# def lip(image, parsing, part=17, color=[230, 50, 20]): +# b, g, r = color #[10, 50, 250] # [10, 250, 10] +# tar_color = np.zeros_like(image) +# tar_color[:, :, 0] = b +# tar_color[:, :, 1] = g +# tar_color[:, :, 2] = r +# +# image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab) +# il, ia, ib = cv2.split(image_lab) +# +# tar_lab = cv2.cvtColor(tar_color, cv2.COLOR_BGR2Lab) +# tl, ta, tb = cv2.split(tar_lab) +# +# image_lab[:, :, 0] = np.clip(il - np.mean(il) + tl, 0, 100) +# image_lab[:, :, 1] = np.clip(ia - np.mean(ia) + ta, -127, 128) +# image_lab[:, :, 2] = np.clip(ib - np.mean(ib) + tb, -127, 128) +# +# +# changed = cv2.cvtColor(image_lab, cv2.COLOR_Lab2BGR) +# +# if part == 17: +# changed = sharpen(changed) +# +# changed[parsing != part] = image[parsing != part] +# # changed = cv2.resize(changed, (512, 512)) +# return changed + + +if __name__ == '__main__': + # 1 face + # 10 nose + # 11 teeth + # 12 upper lip + # 13 lower lip + # 17 hair + num = 116 + table = { + 'hair': 17, + 'upper_lip': 12, + 'lower_lip': 13 + } + image_path = '/home/zll/data/CelebAMask-HQ/test-img/{}.jpg'.format(num) + parsing_path = 'res/test_res/{}.png'.format(num) + + image = cv2.imread(image_path) + ori = image.copy() + parsing = np.array(cv2.imread(parsing_path, 0)) + parsing = cv2.resize(parsing, image.shape[0:2], interpolation=cv2.INTER_NEAREST) + + parts = [table['hair'], table['upper_lip'], table['lower_lip']] + # colors = [[20, 20, 200], [100, 100, 230], [100, 100, 230]] + colors = [[100, 200, 100]] + for part, color in zip(parts, colors): + image = hair(image, parsing, part, color) + cv2.imwrite('res/makeup/116_ori.png', cv2.resize(ori, (512, 512))) + cv2.imwrite('res/makeup/116_2.png', cv2.resize(image, (512, 512))) + + cv2.imshow('image', cv2.resize(ori, (512, 512))) + cv2.imshow('color', cv2.resize(image, (512, 512))) + + # cv2.imshow('image', ori) + # cv2.imshow('color', image) + + cv2.waitKey(0) + cv2.destroyAllWindows() + + + + + + + + + + + + + + + diff --git a/gimp-plugins/face-parsing.PyTorch/model.py b/gimp-plugins/face-parsing.PyTorch/model.py new file mode 100755 index 0000000..040f41f --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/model.py @@ -0,0 +1,283 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + + +import torch +import torch.nn as nn +import 
torch.nn.functional as F +import torchvision + +from resnet import Resnet18 +# from modules.bn import InPlaceABNSync as BatchNorm2d + + +class ConvBNReLU(nn.Module): + def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs): + super(ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_chan, + out_chan, + kernel_size = ks, + stride = stride, + padding = padding, + bias = False) + self.bn = nn.BatchNorm2d(out_chan) + self.init_weight() + + def forward(self, x): + x = self.conv(x) + x = F.relu(self.bn(x)) + return x + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + +class BiSeNetOutput(nn.Module): + def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs): + super(BiSeNetOutput, self).__init__() + self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1) + self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False) + self.init_weight() + + def forward(self, x): + x = self.conv(x) + x = self.conv_out(x) + return x + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class AttentionRefinementModule(nn.Module): + def __init__(self, in_chan, out_chan, *args, **kwargs): + super(AttentionRefinementModule, self).__init__() + self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1) + self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False) + self.bn_atten = nn.BatchNorm2d(out_chan) + self.sigmoid_atten = nn.Sigmoid() + self.init_weight() + + def forward(self, x): + feat = self.conv(x) + atten = F.avg_pool2d(feat, feat.size()[2:]) + atten = self.conv_atten(atten) + atten = self.bn_atten(atten) + atten = self.sigmoid_atten(atten) + out = torch.mul(feat, atten) + return out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + +class ContextPath(nn.Module): + def __init__(self, *args, **kwargs): + super(ContextPath, self).__init__() + self.resnet = Resnet18() + self.arm16 = AttentionRefinementModule(256, 128) + self.arm32 = AttentionRefinementModule(512, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0) + + self.init_weight() + + def forward(self, x): + H0, W0 = x.size()[2:] + feat8, feat16, feat32 = self.resnet(x) + H8, W8 = feat8.size()[2:] + H16, W16 = feat16.size()[2:] + H32, W32 = feat32.size()[2:] + + avg = F.avg_pool2d(feat32, feat32.size()[2:]) + avg = self.conv_avg(avg) + avg_up = F.interpolate(avg, (H32, W32), mode='nearest') + + feat32_arm = self.arm32(feat32) + feat32_sum = feat32_arm + avg_up + feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest') + feat32_up = self.conv_head32(feat32_up) + + feat16_arm = self.arm16(feat16) + feat16_sum = 
feat16_arm + feat32_up + feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest') + feat16_up = self.conv_head16(feat16_up) + + return feat8, feat16_up, feat32_up # x8, x8, x16 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +### This is not used, since I replace this with the resnet feature with the same size +class SpatialPath(nn.Module): + def __init__(self, *args, **kwargs): + super(SpatialPath, self).__init__() + self.conv1 = ConvBNReLU(3, 64, ks=7, stride=2, padding=3) + self.conv2 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1) + self.conv3 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1) + self.conv_out = ConvBNReLU(64, 128, ks=1, stride=1, padding=0) + self.init_weight() + + def forward(self, x): + feat = self.conv1(x) + feat = self.conv2(feat) + feat = self.conv3(feat) + feat = self.conv_out(feat) + return feat + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class FeatureFusionModule(nn.Module): + def __init__(self, in_chan, out_chan, *args, **kwargs): + super(FeatureFusionModule, self).__init__() + self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0) + self.conv1 = nn.Conv2d(out_chan, + out_chan//4, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.conv2 = nn.Conv2d(out_chan//4, + out_chan, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.relu = nn.ReLU(inplace=True) + self.sigmoid = nn.Sigmoid() + self.init_weight() + + def forward(self, fsp, fcp): + fcat = torch.cat([fsp, fcp], dim=1) + feat = self.convblk(fcat) + atten = F.avg_pool2d(feat, feat.size()[2:]) + atten = self.conv1(atten) + atten = self.relu(atten) + atten = self.conv2(atten) + atten = self.sigmoid(atten) + feat_atten = torch.mul(feat, atten) + feat_out = feat_atten + feat + return feat_out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class BiSeNet(nn.Module): + def __init__(self, n_classes, *args, **kwargs): + super(BiSeNet, self).__init__() + self.cp = ContextPath() + ## here self.sp is deleted + 
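+        ## the stride-8 feature from ContextPath stands in for the spatial path, so only
+        ## the fusion module and the three output heads need to be constructed here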
self.ffm = FeatureFusionModule(256, 256) + self.conv_out = BiSeNetOutput(256, 256, n_classes) + self.conv_out16 = BiSeNetOutput(128, 64, n_classes) + self.conv_out32 = BiSeNetOutput(128, 64, n_classes) + self.init_weight() + + def forward(self, x): + H, W = x.size()[2:] + feat_res8, feat_cp8, feat_cp16 = self.cp(x) # here return res3b1 feature + feat_sp = feat_res8 # use res3b1 feature to replace spatial path feature + feat_fuse = self.ffm(feat_sp, feat_cp8) + + feat_out = self.conv_out(feat_fuse) + feat_out16 = self.conv_out16(feat_cp8) + feat_out32 = self.conv_out32(feat_cp16) + + feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True) + feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True) + feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True) + return feat_out, feat_out16, feat_out32 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], [] + for name, child in self.named_children(): + child_wd_params, child_nowd_params = child.get_params() + if isinstance(child, FeatureFusionModule) or isinstance(child, BiSeNetOutput): + lr_mul_wd_params += child_wd_params + lr_mul_nowd_params += child_nowd_params + else: + wd_params += child_wd_params + nowd_params += child_nowd_params + return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params + + +if __name__ == "__main__": + net = BiSeNet(19) + net.cuda() + net.eval() + in_ten = torch.randn(16, 3, 640, 480).cuda() + out, out16, out32 = net(in_ten) + print(out.shape) + + net.get_params() diff --git a/gimp-plugins/face-parsing.PyTorch/model.pyc b/gimp-plugins/face-parsing.PyTorch/model.pyc new file mode 100755 index 0000000000000000000000000000000000000000..447c7098357afeaeaae4b1454339fc5b127f32ec GIT binary patch literal 13335 zcmeHOO^+N$8LpoFoc-{A_;c;VA;Ad)iS2j|&H^EEY~xrcY@%`^i)E9cJ>5IoyE8rG z=^opOc9TdO#1(-!At8kWLb)I$kaFRU#FY~#PH^FZ_yO=dPxW-qI%^V)H?}2vw`=RW ztE=96p7(vLJn*l{v4yF3-m01G-w^(u#gnu>V*>noreVxd9{0>z$t;z6MFUK(4VtB) zUe2&t8dkhy8Y8ATYL-Tg7YvvX^~z?&Gr^$wz?dy#zPB`Hf}y^G;k;nn1S5R~qj|xE z3Cev1V|l@(3C8;hCh~$kCYbCi*pnAbnP94~U^*|DHsOp3X3ToYbQ)ebZGu^|<(Z{f z6V95oy&Ss7*gh9CW^KO-_L^Xy2@pA8wg$6WO9$8@M2=^c2h7qzvvkNT&6)6^SvzcA zzQj(BsFPR|eYtWFGavmM{}wBlev^(OuwfF1>8XWu42kot`1-j;8@}>JzG8#OK8GiH z20@6m!W64JEL~7CAK=AUt^t1D(m*!*OM}KjZzN$SIl116(r7(t zHU0SHh1QL@(ei`jWIbxOkGC7$dK4!oSNvLdyzO@qOv8!S-hRE+sjZ%Dwt}#6qW!kz z98KX#W)T=~uUGblv!4lX)~j)7d^ooAc#@|Pa3K(ST$)Xf%ZvqPCe~4zE+6sBlE%df zzsho{P}i7?`oK=u=-NpnD|~_FC{%iLcRp3qztZYpHiN@>S3*fK-jsL9t8q{q1m!|s zc@2@9YuSfx8FS&yjK! 
zS#QppGJ+QEj%qdbn;|wND^#nH*lq(!Nvu7DJatiwPO-bXHz*YxrX*$9n;kC?rIgom z(dBS4OkeG$?QU8_hW@ekIXv6Z^aEh0z>X2&9V3D~Muv!{gJ@M_K#AmH2t!v8*%?D2 zS`I*go)9G9Cdd%sR%w7hDHw$;q7D%jLWv3^2J*jEDzU;Rkdy+nXo&y^d0jaFTjo;EEsqKkz2ryNz8u_`w;12oTiBwXP-n+>?2EV z-lgy0J|KJNU^aDsftj7d;BLV(OZ2rz7;yH5T=o&h9%XQp0j13n9_$wo^v&htOyb!E zD&$r_h36JT5;<@@+@X?+j}fWsw*>avESa0X=x2%_o8^+(959>Mh>#S7J-$f3)0e~j zYC7+U5I`Rwkzw+w$5>7rA8{9xU6jejJ4O&P^jqQh_y;4fL8q zKM|TcX6OFC5mZ%{NKuMgMVjq76us9${qC%?23QZ%Y#`6!y_xDW3H~|eO+z6#04{pe zs|c-#0QQRv1a>*^SV~GUM8)nZLJ?qzBSNTQuRL5HDi5dJPA{b?bbzQ8+i(Rd)I{RD zBF7pE^)II)4m;F!_1V*9E*N;fQy&l;2o=^1Q}hJ%0@65(RRutHgKGru1( zZ9e~cz1nWI8nBu;NE&VSG^>4;!5LQL2c)!9U;`S{>^2G<5t*vtN{A(5O}A342}PsW zl6Q4Ca^Js#C!u)JXm|tCY@8V?T|-@bu4yM=GvTHg+Li}h9Pt}ZeG zLkmdc2jq^H%Oa}ttvC&Drmy+wY7Hg&mqY9`NQR#VvCz_qzJ<2W=9JhRYyW`6&RDyp z{p+;)%)FGbO&1B&e%Tb*d_+ws`J;5H@YfjyfL`bl$fWWw>!AD@>Hv(b3xUHS~ki^IEPvjp_t*2k_VH4z4-t4z52BkbVt^n^80P_-6DDm(&Namszuc-x>6>x7W*Fvw_;9+k4rkUeG(ds%VBJ zXb9nqSs4Hskz*Vgi8bi>601|S4U^QFvh3{pvaVH5&O;<&WnYA=ZHE};U2^h);k z5^5=({RMxj1M+CMElPELRsBx0Kbdr!a_wZLeRWTr*;W_H{<=tZ+Xc?vRfr{zuHs3E z?*^LoAw2jgkx!47ro1uls5h1Wtq~_VMfomy$gQ8rtxGlKJv_H?tLBq)na}5Mkf?97 zWq+pqBm}we=8tx^Wg)N75`h7Liq7@_Zo+;jHFKK@`{CU(VJ*e9uv+bxnUv1?6~=CH zB~V$m^j}+&cArjpoOvf0oMiAUOY!W2kAYU>5ApQboJZ~-o0BZO%+ZY^fC4mLJ_wtW z9DBL#r;*>-RTb#Bc+bu%kW+i8ko0RcaKYscPp7MZlMsQi3%#hPv1E0Ic%GE0Mz^b@ zGbh$5s~@O9=tsI!FxGn)_y-o!XrcCSb{5@P*jYrK#v@IJpe-v%UR37K~pfzU?mCuKv0Ki zQ#;P;RR!JXoKANj5brf$ZM6H82C(hX0C!^$;VFOzu!Sp?nR2Bv>1?&Bg22Y`J(*{f z+IDdZK|B=+hKcX)(3s2?#q958 zUhi~6dw>+dm|%vRwUVHx&`yB;xt%~4#G*bnPP{&PxZj$(v5NQyfy z#Ol?tfW7+xnQs-MFd(xPr$p{@jC`m#@!5hCV8F;L3*8yLKyZ)?3)dx~i1a6Ov)Prx z9PX8J1UeXE4lH|_>9COxZla5ko`ozEGr)xbE}-+>QsV8``3>9yH9xgJa|#PtRwq|a3P9x1$9vxQ@-z9GUBYIBu zv48IfRg_ z?k!J%q3W)qKS4?;--UA8>KNDuDn4x9|s9g%6&l zr*dF2g2Mso&OfUIGu*FXridLX!IsMeC4Vv#xD0gGfxWxa$g%W0(a$1Xk;uqYY#`}) zmZrMAhPAfkj5%;!X}XI!w3CFX@_hqD;Y;Lh>?Pqo=Pq^E@}>~8Ut;iic*OsX7iFht z@Q9aYz5V*BaW(j%T;jI3T~pdjTvy=ZHqyhfvEar#<*vk9BH-X zsOI*$q}zs9azAkuKJf*;S@c_LFBC z@5-QA9^{o-cZ7wj0XMtad6{FmF2}Mv#==ekOv3eu-Krxj02}rrakpfWI&qFkcVX&X zYRuT(&ryTMHXcQ6&|G_(SfOnBnv{*}Pfz#mKCuxz*1VoTSy+_!Hzf$V8&JKwwYt~i zJjya^j-iIzYT7utr#PkY)96%jIiXryV0W;a$UARLB2Eaq(#u$1Y>uWNFOXbFVKxk>mb`r^o+euA=_Hlo{8j+_W zE`am9zK@+9{S_Znc?5aRWMO?>AvqDpvO(o1=;fSP>Bco)q4FCJvht)bQ!a1Sy^d?v zxM=kv`^Do} zZGcbXC8Te$4NDw^W-%Q8F_x_Q?Qpm6o@uh5$4MzJT)&{VK806L08eMZ*wce^2j`B? 
Z&CQ*edtz>W?)U`Gwa4&t6g)m!`VSAJr1: + # get global batch size + if equal_batches: + batch_size *= ctx.world_size + else: + dist.all_reduce(batch_size, dist.ReduceOp.SUM) + + ctx.factor = x.shape[0]/float(batch_size.item()) + + mean_all = mean.clone() * ctx.factor + dist.all_reduce(mean_all, dist.ReduceOp.SUM) + + var_all = (var + (mean - mean_all) ** 2) * ctx.factor + dist.all_reduce(var_all, dist.ReduceOp.SUM) + + mean = mean_all + var = var_all + + # Update running stats + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean) + count = batch_size.item() * x.view(x.shape[0],x.shape[1],-1).shape[-1] + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * (float(count) / (count - 1))) + + # Mark in-place modified tensors + ctx.mark_dirty(x, running_mean, running_var) + else: + mean, var = running_mean.contiguous(), running_var.contiguous() + ctx.mark_dirty(x) + + # BN forward + activation + _backend.forward(x, mean, var, weight, bias, ctx.affine, ctx.eps) + _act_forward(ctx, x) + + # Output + ctx.var = var + ctx.save_for_backward(x, var, weight, bias) + return x + + @staticmethod + @once_differentiable + def backward(ctx, dz): + z, var, weight, bias = ctx.saved_tensors + dz = dz.contiguous() + + # Undo activation + _act_backward(ctx, z, dz) + + if ctx.training: + edz, eydz = _backend.edz_eydz(z, dz, weight, bias, ctx.affine, ctx.eps) + edz_local = edz.clone() + eydz_local = eydz.clone() + + if ctx.world_size>1: + edz *= ctx.factor + dist.all_reduce(edz, dist.ReduceOp.SUM) + + eydz *= ctx.factor + dist.all_reduce(eydz, dist.ReduceOp.SUM) + else: + edz_local = edz = dz.new_zeros(dz.size(1)) + eydz_local = eydz = dz.new_zeros(dz.size(1)) + + dx = _backend.backward(z, dz, var, weight, bias, edz, eydz, ctx.affine, ctx.eps) + dweight = eydz_local * weight.sign() if ctx.affine else None + dbias = edz_local if ctx.affine else None + + return dx, dweight, dbias, None, None, None, None, None, None, None + +inplace_abn = InPlaceABN.apply +inplace_abn_sync = InPlaceABNSync.apply + +__all__ = ["inplace_abn", "inplace_abn_sync", "ACT_RELU", "ACT_LEAKY_RELU", "ACT_ELU", "ACT_NONE"] diff --git a/gimp-plugins/face-parsing.PyTorch/modules/misc.py b/gimp-plugins/face-parsing.PyTorch/modules/misc.py new file mode 100755 index 0000000..3c50b69 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/misc.py @@ -0,0 +1,21 @@ +import torch.nn as nn +import torch +import torch.distributed as dist + +class GlobalAvgPool2d(nn.Module): + def __init__(self): + """Global average pooling over the input's spatial dimensions""" + super(GlobalAvgPool2d, self).__init__() + + def forward(self, inputs): + in_size = inputs.size() + return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2) + +class SingleGPU(nn.Module): + def __init__(self, module): + super(SingleGPU, self).__init__() + self.module=module + + def forward(self, input): + return self.module(input.cuda(non_blocking=True)) + diff --git a/gimp-plugins/face-parsing.PyTorch/modules/residual.py b/gimp-plugins/face-parsing.PyTorch/modules/residual.py new file mode 100755 index 0000000..b7d51ad --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/residual.py @@ -0,0 +1,88 @@ +from collections import OrderedDict + +import torch.nn as nn + +from .bn import ABN + + +class IdentityResidualBlock(nn.Module): + def __init__(self, + in_channels, + channels, + stride=1, + dilation=1, + groups=1, + norm_act=ABN, + dropout=None): + """Configurable identity-mapping residual block + + Parameters + ---------- + in_channels : int + Number of input 
channels. + channels : list of int + Number of channels in the internal feature maps. Can either have two or three elements: if three construct + a residual block with two `3 x 3` convolutions, otherwise construct a bottleneck block with `1 x 1`, then + `3 x 3` then `1 x 1` convolutions. + stride : int + Stride of the first `3 x 3` convolution + dilation : int + Dilation to apply to the `3 x 3` convolutions. + groups : int + Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with + bottleneck blocks. + norm_act : callable + Function to create normalization / activation Module. + dropout: callable + Function to create Dropout Module. + """ + super(IdentityResidualBlock, self).__init__() + + # Check parameters for inconsistencies + if len(channels) != 2 and len(channels) != 3: + raise ValueError("channels must contain either two or three values") + if len(channels) == 2 and groups != 1: + raise ValueError("groups > 1 are only valid if len(channels) == 3") + + is_bottleneck = len(channels) == 3 + need_proj_conv = stride != 1 or in_channels != channels[-1] + + self.bn1 = norm_act(in_channels) + if not is_bottleneck: + layers = [ + ("conv1", nn.Conv2d(in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False, + dilation=dilation)), + ("bn2", norm_act(channels[0])), + ("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, + dilation=dilation)) + ] + if dropout is not None: + layers = layers[0:2] + [("dropout", dropout())] + layers[2:] + else: + layers = [ + ("conv1", nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)), + ("bn2", norm_act(channels[0])), + ("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, + groups=groups, dilation=dilation)), + ("bn3", norm_act(channels[1])), + ("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False)) + ] + if dropout is not None: + layers = layers[0:4] + [("dropout", dropout())] + layers[4:] + self.convs = nn.Sequential(OrderedDict(layers)) + + if need_proj_conv: + self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False) + + def forward(self, x): + if hasattr(self, "proj_conv"): + bn1 = self.bn1(x) + shortcut = self.proj_conv(bn1) + else: + shortcut = x.clone() + bn1 = self.bn1(x) + + out = self.convs(bn1) + out.add_(shortcut) + + return out diff --git a/gimp-plugins/face-parsing.PyTorch/modules/src/checks.h b/gimp-plugins/face-parsing.PyTorch/modules/src/checks.h new file mode 100755 index 0000000..e761a6f --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/src/checks.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +// Define AT_CHECK for old version of ATen where the same function was called AT_ASSERT +#ifndef AT_CHECK +#define AT_CHECK AT_ASSERT +#endif + +#define CHECK_CUDA(x) AT_CHECK((x).type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CPU(x) AT_CHECK(!(x).type().is_cuda(), #x " must be a CPU tensor") +#define CHECK_CONTIGUOUS(x) AT_CHECK((x).is_contiguous(), #x " must be contiguous") + +#define CHECK_CUDA_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) +#define CHECK_CPU_INPUT(x) CHECK_CPU(x); CHECK_CONTIGUOUS(x) \ No newline at end of file diff --git a/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn.cpp b/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn.cpp new file mode 100755 index 0000000..0a6b112 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn.cpp @@ -0,0 +1,95 @@ 
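+// Dispatch layer for the in-place ABN extension: each function below routes to the CPU,
+// CUDA, or half-precision CUDA implementation declared in inplace_abn.h, and the
+// PYBIND11_MODULE block at the end exposes them to Python.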
+#include + +#include + +#include "inplace_abn.h" + +std::vector mean_var(at::Tensor x) { + if (x.is_cuda()) { + if (x.type().scalarType() == at::ScalarType::Half) { + return mean_var_cuda_h(x); + } else { + return mean_var_cuda(x); + } + } else { + return mean_var_cpu(x); + } +} + +at::Tensor forward(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + if (x.is_cuda()) { + if (x.type().scalarType() == at::ScalarType::Half) { + return forward_cuda_h(x, mean, var, weight, bias, affine, eps); + } else { + return forward_cuda(x, mean, var, weight, bias, affine, eps); + } + } else { + return forward_cpu(x, mean, var, weight, bias, affine, eps); + } +} + +std::vector edz_eydz(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + if (z.is_cuda()) { + if (z.type().scalarType() == at::ScalarType::Half) { + return edz_eydz_cuda_h(z, dz, weight, bias, affine, eps); + } else { + return edz_eydz_cuda(z, dz, weight, bias, affine, eps); + } + } else { + return edz_eydz_cpu(z, dz, weight, bias, affine, eps); + } +} + +at::Tensor backward(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + if (z.is_cuda()) { + if (z.type().scalarType() == at::ScalarType::Half) { + return backward_cuda_h(z, dz, var, weight, bias, edz, eydz, affine, eps); + } else { + return backward_cuda(z, dz, var, weight, bias, edz, eydz, affine, eps); + } + } else { + return backward_cpu(z, dz, var, weight, bias, edz, eydz, affine, eps); + } +} + +void leaky_relu_forward(at::Tensor z, float slope) { + at::leaky_relu_(z, slope); +} + +void leaky_relu_backward(at::Tensor z, at::Tensor dz, float slope) { + if (z.is_cuda()) { + if (z.type().scalarType() == at::ScalarType::Half) { + return leaky_relu_backward_cuda_h(z, dz, slope); + } else { + return leaky_relu_backward_cuda(z, dz, slope); + } + } else { + return leaky_relu_backward_cpu(z, dz, slope); + } +} + +void elu_forward(at::Tensor z) { + at::elu_(z); +} + +void elu_backward(at::Tensor z, at::Tensor dz) { + if (z.is_cuda()) { + return elu_backward_cuda(z, dz); + } else { + return elu_backward_cpu(z, dz); + } +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("mean_var", &mean_var, "Mean and variance computation"); + m.def("forward", &forward, "In-place forward computation"); + m.def("edz_eydz", &edz_eydz, "First part of backward computation"); + m.def("backward", &backward, "Second part of backward computation"); + m.def("leaky_relu_forward", &leaky_relu_forward, "Leaky relu forward computation"); + m.def("leaky_relu_backward", &leaky_relu_backward, "Leaky relu backward computation and inversion"); + m.def("elu_forward", &elu_forward, "Elu forward computation"); + m.def("elu_backward", &elu_backward, "Elu backward computation and inversion"); +} diff --git a/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn.h b/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn.h new file mode 100755 index 0000000..17afd11 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn.h @@ -0,0 +1,88 @@ +#pragma once + +#include + +#include + +std::vector mean_var_cpu(at::Tensor x); +std::vector mean_var_cuda(at::Tensor x); +std::vector mean_var_cuda_h(at::Tensor x); + +at::Tensor forward_cpu(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps); +at::Tensor forward_cuda(at::Tensor x, at::Tensor mean, at::Tensor var, 
at::Tensor weight, at::Tensor bias, + bool affine, float eps); +at::Tensor forward_cuda_h(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps); + +std::vector edz_eydz_cpu(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps); +std::vector edz_eydz_cuda(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps); +std::vector edz_eydz_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps); + +at::Tensor backward_cpu(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps); +at::Tensor backward_cuda(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps); +at::Tensor backward_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps); + +void leaky_relu_backward_cpu(at::Tensor z, at::Tensor dz, float slope); +void leaky_relu_backward_cuda(at::Tensor z, at::Tensor dz, float slope); +void leaky_relu_backward_cuda_h(at::Tensor z, at::Tensor dz, float slope); + +void elu_backward_cpu(at::Tensor z, at::Tensor dz); +void elu_backward_cuda(at::Tensor z, at::Tensor dz); + +static void get_dims(at::Tensor x, int64_t& num, int64_t& chn, int64_t& sp) { + num = x.size(0); + chn = x.size(1); + sp = 1; + for (int64_t i = 2; i < x.ndimension(); ++i) + sp *= x.size(i); +} + +/* + * Specialized CUDA reduction functions for BN + */ +#ifdef __CUDACC__ + +#include "utils/cuda.cuh" + +template +__device__ T reduce(Op op, int plane, int N, int S) { + T sum = (T)0; + for (int batch = 0; batch < N; ++batch) { + for (int x = threadIdx.x; x < S; x += blockDim.x) { + sum += op(batch, plane, x); + } + } + + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T)0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} +#endif diff --git a/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cpu.cpp b/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cpu.cpp new file mode 100755 index 0000000..ffc6d38 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cpu.cpp @@ -0,0 +1,119 @@ +#include + +#include + +#include "utils/checks.h" +#include "inplace_abn.h" + +at::Tensor reduce_sum(at::Tensor x) { + if (x.ndimension() == 2) { + return x.sum(0); + } else { + auto x_view = x.view({x.size(0), x.size(1), -1}); + return x_view.sum(-1).sum(0); + } +} + +at::Tensor broadcast_to(at::Tensor v, at::Tensor x) { + if (x.ndimension() == 2) { + return v; + } else { + std::vector broadcast_size = {1, -1}; + for (int64_t i = 2; i < x.ndimension(); ++i) + broadcast_size.push_back(1); + + return v.view(broadcast_size); + } +} + +int64_t count(at::Tensor x) { + int64_t count = x.size(0); + for (int64_t i = 2; i < x.ndimension(); ++i) + count *= x.size(i); + + 
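+  // count = batch size times all spatial dims (dim 1, the channel dim, is excluded)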
return count; +} + +at::Tensor invert_affine(at::Tensor z, at::Tensor weight, at::Tensor bias, bool affine, float eps) { + if (affine) { + return (z - broadcast_to(bias, z)) / broadcast_to(at::abs(weight) + eps, z); + } else { + return z; + } +} + +std::vector mean_var_cpu(at::Tensor x) { + auto num = count(x); + auto mean = reduce_sum(x) / num; + auto diff = x - broadcast_to(mean, x); + auto var = reduce_sum(diff.pow(2)) / num; + + return {mean, var}; +} + +at::Tensor forward_cpu(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + auto gamma = affine ? at::abs(weight) + eps : at::ones_like(var); + auto mul = at::rsqrt(var + eps) * gamma; + + x.sub_(broadcast_to(mean, x)); + x.mul_(broadcast_to(mul, x)); + if (affine) x.add_(broadcast_to(bias, x)); + + return x; +} + +std::vector edz_eydz_cpu(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + auto edz = reduce_sum(dz); + auto y = invert_affine(z, weight, bias, affine, eps); + auto eydz = reduce_sum(y * dz); + + return {edz, eydz}; +} + +at::Tensor backward_cpu(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + auto y = invert_affine(z, weight, bias, affine, eps); + auto mul = affine ? at::rsqrt(var + eps) * (at::abs(weight) + eps) : at::rsqrt(var + eps); + + auto num = count(z); + auto dx = (dz - broadcast_to(edz / num, dz) - y * broadcast_to(eydz / num, dz)) * broadcast_to(mul, dz); + return dx; +} + +void leaky_relu_backward_cpu(at::Tensor z, at::Tensor dz, float slope) { + CHECK_CPU_INPUT(z); + CHECK_CPU_INPUT(dz); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cpu", ([&] { + int64_t count = z.numel(); + auto *_z = z.data(); + auto *_dz = dz.data(); + + for (int64_t i = 0; i < count; ++i) { + if (_z[i] < 0) { + _z[i] *= 1 / slope; + _dz[i] *= slope; + } + } + })); +} + +void elu_backward_cpu(at::Tensor z, at::Tensor dz) { + CHECK_CPU_INPUT(z); + CHECK_CPU_INPUT(dz); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "elu_backward_cpu", ([&] { + int64_t count = z.numel(); + auto *_z = z.data(); + auto *_dz = dz.data(); + + for (int64_t i = 0; i < count; ++i) { + if (_z[i] < 0) { + _z[i] = log1p(_z[i]); + _dz[i] *= (_z[i] + 1.f); + } + } + })); +} diff --git a/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cuda.cu b/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cuda.cu new file mode 100755 index 0000000..b157b06 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cuda.cu @@ -0,0 +1,333 @@ +#include + +#include +#include + +#include + +#include "utils/checks.h" +#include "utils/cuda.cuh" +#include "inplace_abn.h" + +#include + +// Operations for reduce +template +struct SumOp { + __device__ SumOp(const T *t, int c, int s) + : tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ T operator()(int batch, int plane, int n) { + return tensor[(batch * chn + plane) * sp + n]; + } + const T *tensor; + const int chn; + const int sp; +}; + +template +struct VarOp { + __device__ VarOp(T m, const T *t, int c, int s) + : mean(m), tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ T operator()(int batch, int plane, int n) { + T val = tensor[(batch * chn + plane) * sp + n]; + return (val - mean) * (val - mean); + } + const T mean; + const T *tensor; + const int chn; + const int sp; +}; + +template +struct GradOp { + __device__ GradOp(T _weight, T _bias, const T *_z, const T *_dz, int c, int s) + : 
weight(_weight), bias(_bias), z(_z), dz(_dz), chn(c), sp(s) {} + __device__ __forceinline__ Pair operator()(int batch, int plane, int n) { + T _y = (z[(batch * chn + plane) * sp + n] - bias) / weight; + T _dz = dz[(batch * chn + plane) * sp + n]; + return Pair(_dz, _y * _dz); + } + const T weight; + const T bias; + const T *z; + const T *dz; + const int chn; + const int sp; +}; + +/*********** + * mean_var + ***********/ + +template +__global__ void mean_var_kernel(const T *x, T *mean, T *var, int num, int chn, int sp) { + int plane = blockIdx.x; + T norm = T(1) / T(num * sp); + + T _mean = reduce>(SumOp(x, chn, sp), plane, num, sp) * norm; + __syncthreads(); + T _var = reduce>(VarOp(_mean, x, chn, sp), plane, num, sp) * norm; + + if (threadIdx.x == 0) { + mean[plane] = _mean; + var[plane] = _var; + } +} + +std::vector mean_var_cuda(at::Tensor x) { + CHECK_CUDA_INPUT(x); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Prepare output tensors + auto mean = at::empty({chn}, x.options()); + auto var = at::empty({chn}, x.options()); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(x.type(), "mean_var_cuda", ([&] { + mean_var_kernel<<>>( + x.data(), + mean.data(), + var.data(), + num, chn, sp); + })); + + return {mean, var}; +} + +/********** + * forward + **********/ + +template +__global__ void forward_kernel(T *x, const T *mean, const T *var, const T *weight, const T *bias, + bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + T _mean = mean[plane]; + T _var = var[plane]; + T _weight = affine ? abs(weight[plane]) + eps : T(1); + T _bias = affine ? bias[plane] : T(0); + + T mul = rsqrt(_var + eps) * _weight; + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + T _x = x[(batch * chn + plane) * sp + n]; + T _y = (_x - _mean) * mul + _bias; + + x[(batch * chn + plane) * sp + n] = _y; + } + } +} + +at::Tensor forward_cuda(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(x); + CHECK_CUDA_INPUT(mean); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(x.type(), "forward_cuda", ([&] { + forward_kernel<<>>( + x.data(), + mean.data(), + var.data(), + weight.data(), + bias.data(), + affine, eps, num, chn, sp); + })); + + return x; +} + +/*********** + * edz_eydz + ***********/ + +template +__global__ void edz_eydz_kernel(const T *z, const T *dz, const T *weight, const T *bias, + T *edz, T *eydz, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + T _weight = affine ? abs(weight[plane]) + eps : 1.f; + T _bias = affine ? 
bias[plane] : 0.f; + + Pair res = reduce, GradOp>(GradOp(_weight, _bias, z, dz, chn, sp), plane, num, sp); + __syncthreads(); + + if (threadIdx.x == 0) { + edz[plane] = res.v1; + eydz[plane] = res.v2; + } +} + +std::vector edz_eydz_cuda(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto edz = at::empty({chn}, z.options()); + auto eydz = at::empty({chn}, z.options()); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(z.type(), "edz_eydz_cuda", ([&] { + edz_eydz_kernel<<>>( + z.data(), + dz.data(), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + affine, eps, num, chn, sp); + })); + + return {edz, eydz}; +} + +/*********** + * backward + ***********/ + +template +__global__ void backward_kernel(const T *z, const T *dz, const T *var, const T *weight, const T *bias, const T *edz, + const T *eydz, T *dx, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + T _weight = affine ? abs(weight[plane]) + eps : 1.f; + T _bias = affine ? bias[plane] : 0.f; + T _var = var[plane]; + T _edz = edz[plane]; + T _eydz = eydz[plane]; + + T _mul = _weight * rsqrt(_var + eps); + T count = T(num * sp); + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + T _dz = dz[(batch * chn + plane) * sp + n]; + T _y = (z[(batch * chn + plane) * sp + n] - _bias) / _weight; + + dx[(batch * chn + plane) * sp + n] = (_dz - _edz / count - _y * _eydz / count) * _mul; + } + } +} + +at::Tensor backward_cuda(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + CHECK_CUDA_INPUT(edz); + CHECK_CUDA_INPUT(eydz); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto dx = at::zeros_like(z); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(z.type(), "backward_cuda", ([&] { + backward_kernel<<>>( + z.data(), + dz.data(), + var.data(), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + dx.data(), + affine, eps, num, chn, sp); + })); + + return dx; +} + +/************** + * activations + **************/ + +template +inline void leaky_relu_backward_impl(T *z, T *dz, float slope, int64_t count) { + // Create thrust pointers + thrust::device_ptr th_z = thrust::device_pointer_cast(z); + thrust::device_ptr th_dz = thrust::device_pointer_cast(dz); + + auto stream = at::cuda::getCurrentCUDAStream(); + thrust::transform_if(thrust::cuda::par.on(stream), + th_dz, th_dz + count, th_z, th_dz, + [slope] __device__ (const T& dz) { return dz * slope; }, + [] __device__ (const T& z) { return z < 0; }); + thrust::transform_if(thrust::cuda::par.on(stream), + th_z, th_z + count, th_z, + [slope] __device__ (const T& z) { return z / slope; }, + [] __device__ (const T& z) { return z < 0; }); +} + +void leaky_relu_backward_cuda(at::Tensor z, at::Tensor dz, float slope) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + + int64_t count = z.numel(); + + AT_DISPATCH_FLOATING_TYPES(z.type(), 
"leaky_relu_backward_cuda", ([&] { + leaky_relu_backward_impl(z.data(), dz.data(), slope, count); + })); +} + +template +inline void elu_backward_impl(T *z, T *dz, int64_t count) { + // Create thrust pointers + thrust::device_ptr th_z = thrust::device_pointer_cast(z); + thrust::device_ptr th_dz = thrust::device_pointer_cast(dz); + + auto stream = at::cuda::getCurrentCUDAStream(); + thrust::transform_if(thrust::cuda::par.on(stream), + th_dz, th_dz + count, th_z, th_z, th_dz, + [] __device__ (const T& dz, const T& z) { return dz * (z + 1.); }, + [] __device__ (const T& z) { return z < 0; }); + thrust::transform_if(thrust::cuda::par.on(stream), + th_z, th_z + count, th_z, + [] __device__ (const T& z) { return log1p(z); }, + [] __device__ (const T& z) { return z < 0; }); +} + +void elu_backward_cuda(at::Tensor z, at::Tensor dz) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + + int64_t count = z.numel(); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cuda", ([&] { + elu_backward_impl(z.data(), dz.data(), count); + })); +} diff --git a/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cuda_half.cu b/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cuda_half.cu new file mode 100755 index 0000000..bb63e73 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/src/inplace_abn_cuda_half.cu @@ -0,0 +1,275 @@ +#include + +#include + +#include + +#include "utils/checks.h" +#include "utils/cuda.cuh" +#include "inplace_abn.h" + +#include + +// Operations for reduce +struct SumOpH { + __device__ SumOpH(const half *t, int c, int s) + : tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ float operator()(int batch, int plane, int n) { + return __half2float(tensor[(batch * chn + plane) * sp + n]); + } + const half *tensor; + const int chn; + const int sp; +}; + +struct VarOpH { + __device__ VarOpH(float m, const half *t, int c, int s) + : mean(m), tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ float operator()(int batch, int plane, int n) { + const auto t = __half2float(tensor[(batch * chn + plane) * sp + n]); + return (t - mean) * (t - mean); + } + const float mean; + const half *tensor; + const int chn; + const int sp; +}; + +struct GradOpH { + __device__ GradOpH(float _weight, float _bias, const half *_z, const half *_dz, int c, int s) + : weight(_weight), bias(_bias), z(_z), dz(_dz), chn(c), sp(s) {} + __device__ __forceinline__ Pair operator()(int batch, int plane, int n) { + float _y = (__half2float(z[(batch * chn + plane) * sp + n]) - bias) / weight; + float _dz = __half2float(dz[(batch * chn + plane) * sp + n]); + return Pair(_dz, _y * _dz); + } + const float weight; + const float bias; + const half *z; + const half *dz; + const int chn; + const int sp; +}; + +/*********** + * mean_var + ***********/ + +__global__ void mean_var_kernel_h(const half *x, float *mean, float *var, int num, int chn, int sp) { + int plane = blockIdx.x; + float norm = 1.f / static_cast(num * sp); + + float _mean = reduce(SumOpH(x, chn, sp), plane, num, sp) * norm; + __syncthreads(); + float _var = reduce(VarOpH(_mean, x, chn, sp), plane, num, sp) * norm; + + if (threadIdx.x == 0) { + mean[plane] = _mean; + var[plane] = _var; + } +} + +std::vector mean_var_cuda_h(at::Tensor x) { + CHECK_CUDA_INPUT(x); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Prepare output tensors + auto mean = at::empty({chn},x.options().dtype(at::kFloat)); + auto var = at::empty({chn},x.options().dtype(at::kFloat)); + + // Run kernel + dim3 blocks(chn); + 
dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + mean_var_kernel_h<<>>( + reinterpret_cast(x.data()), + mean.data(), + var.data(), + num, chn, sp); + + return {mean, var}; +} + +/********** + * forward + **********/ + +__global__ void forward_kernel_h(half *x, const float *mean, const float *var, const float *weight, const float *bias, + bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + const float _mean = mean[plane]; + const float _var = var[plane]; + const float _weight = affine ? abs(weight[plane]) + eps : 1.f; + const float _bias = affine ? bias[plane] : 0.f; + + const float mul = rsqrt(_var + eps) * _weight; + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + half *x_ptr = x + (batch * chn + plane) * sp + n; + float _x = __half2float(*x_ptr); + float _y = (_x - _mean) * mul + _bias; + + *x_ptr = __float2half(_y); + } + } +} + +at::Tensor forward_cuda_h(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(x); + CHECK_CUDA_INPUT(mean); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + forward_kernel_h<<>>( + reinterpret_cast(x.data()), + mean.data(), + var.data(), + weight.data(), + bias.data(), + affine, eps, num, chn, sp); + + return x; +} + +__global__ void edz_eydz_kernel_h(const half *z, const half *dz, const float *weight, const float *bias, + float *edz, float *eydz, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + float _weight = affine ? abs(weight[plane]) + eps : 1.f; + float _bias = affine ? bias[plane] : 0.f; + + Pair res = reduce, GradOpH>(GradOpH(_weight, _bias, z, dz, chn, sp), plane, num, sp); + __syncthreads(); + + if (threadIdx.x == 0) { + edz[plane] = res.v1; + eydz[plane] = res.v2; + } +} + +std::vector edz_eydz_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto edz = at::empty({chn},z.options().dtype(at::kFloat)); + auto eydz = at::empty({chn},z.options().dtype(at::kFloat)); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + edz_eydz_kernel_h<<>>( + reinterpret_cast(z.data()), + reinterpret_cast(dz.data()), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + affine, eps, num, chn, sp); + + return {edz, eydz}; +} + +__global__ void backward_kernel_h(const half *z, const half *dz, const float *var, const float *weight, const float *bias, const float *edz, + const float *eydz, half *dx, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + float _weight = affine ? abs(weight[plane]) + eps : 1.f; + float _bias = affine ? 
bias[plane] : 0.f; + float _var = var[plane]; + float _edz = edz[plane]; + float _eydz = eydz[plane]; + + float _mul = _weight * rsqrt(_var + eps); + float count = float(num * sp); + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + float _dz = __half2float(dz[(batch * chn + plane) * sp + n]); + float _y = (__half2float(z[(batch * chn + plane) * sp + n]) - _bias) / _weight; + + dx[(batch * chn + plane) * sp + n] = __float2half((_dz - _edz / count - _y * _eydz / count) * _mul); + } + } +} + +at::Tensor backward_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + CHECK_CUDA_INPUT(edz); + CHECK_CUDA_INPUT(eydz); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto dx = at::zeros_like(z); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + backward_kernel_h<<>>( + reinterpret_cast(z.data()), + reinterpret_cast(dz.data()), + var.data(), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + reinterpret_cast(dx.data()), + affine, eps, num, chn, sp); + + return dx; +} + +__global__ void leaky_relu_backward_impl_h(half *z, half *dz, float slope, int64_t count) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x){ + float _z = __half2float(z[i]); + if (_z < 0) { + dz[i] = __float2half(__half2float(dz[i]) * slope); + z[i] = __float2half(_z / slope); + } + } +} + +void leaky_relu_backward_cuda_h(at::Tensor z, at::Tensor dz, float slope) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + + int64_t count = z.numel(); + dim3 threads(getNumThreads(count)); + dim3 blocks = (count + threads.x - 1) / threads.x; + auto stream = at::cuda::getCurrentCUDAStream(); + leaky_relu_backward_impl_h<<>>( + reinterpret_cast(z.data()), + reinterpret_cast(dz.data()), + slope, count); +} + diff --git a/gimp-plugins/face-parsing.PyTorch/modules/src/utils/checks.h b/gimp-plugins/face-parsing.PyTorch/modules/src/utils/checks.h new file mode 100755 index 0000000..e761a6f --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/src/utils/checks.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +// Define AT_CHECK for old version of ATen where the same function was called AT_ASSERT +#ifndef AT_CHECK +#define AT_CHECK AT_ASSERT +#endif + +#define CHECK_CUDA(x) AT_CHECK((x).type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CPU(x) AT_CHECK(!(x).type().is_cuda(), #x " must be a CPU tensor") +#define CHECK_CONTIGUOUS(x) AT_CHECK((x).is_contiguous(), #x " must be contiguous") + +#define CHECK_CUDA_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) +#define CHECK_CPU_INPUT(x) CHECK_CPU(x); CHECK_CONTIGUOUS(x) \ No newline at end of file diff --git a/gimp-plugins/face-parsing.PyTorch/modules/src/utils/common.h b/gimp-plugins/face-parsing.PyTorch/modules/src/utils/common.h new file mode 100755 index 0000000..e8403ee --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/src/utils/common.h @@ -0,0 +1,49 @@ +#pragma once + +#include + +/* + * Functions to share code between CPU and GPU + */ + +#ifdef __CUDACC__ +// CUDA versions + +#define HOST_DEVICE __host__ __device__ +#define INLINE_HOST_DEVICE __host__ __device__ inline +#define FLOOR(x) floor(x) + +#if __CUDA_ARCH__ >= 600 +// Recent 
compute capabilities have block-level atomicAdd for all data types, so we use that +#define ACCUM(x,y) atomicAdd_block(&(x),(y)) +#else +// Older architectures don't have block-level atomicAdd, nor atomicAdd for doubles, so we defer to atomicAdd for float +// and use the known atomicCAS-based implementation for double +template +__device__ inline data_t atomic_add(data_t *address, data_t val) { + return atomicAdd(address, val); +} + +template<> +__device__ inline double atomic_add(double *address, double val) { + unsigned long long int* address_as_ull = (unsigned long long int*)address; + unsigned long long int old = *address_as_ull, assumed; + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); + } while (assumed != old); + return __longlong_as_double(old); +} + +#define ACCUM(x,y) atomic_add(&(x),(y)) +#endif // #if __CUDA_ARCH__ >= 600 + +#else +// CPU versions + +#define HOST_DEVICE +#define INLINE_HOST_DEVICE inline +#define FLOOR(x) std::floor(x) +#define ACCUM(x,y) (x) += (y) + +#endif // #ifdef __CUDACC__ \ No newline at end of file diff --git a/gimp-plugins/face-parsing.PyTorch/modules/src/utils/cuda.cuh b/gimp-plugins/face-parsing.PyTorch/modules/src/utils/cuda.cuh new file mode 100755 index 0000000..60c0023 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/modules/src/utils/cuda.cuh @@ -0,0 +1,71 @@ +#pragma once + +/* + * General settings and functions + */ +const int WARP_SIZE = 32; +const int MAX_BLOCK_SIZE = 1024; + +static int getNumThreads(int nElem) { + int threadSizes[6] = {32, 64, 128, 256, 512, MAX_BLOCK_SIZE}; + for (int i = 0; i < 6; ++i) { + if (nElem <= threadSizes[i]) { + return threadSizes[i]; + } + } + return MAX_BLOCK_SIZE; +} + +/* + * Reduction utilities + */ +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, + unsigned int mask = 0xffffffff) { +#if CUDART_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +__device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } + +template +struct Pair { + T v1, v2; + __device__ Pair() {} + __device__ Pair(T _v1, T _v2) : v1(_v1), v2(_v2) {} + __device__ Pair(T v) : v1(v), v2(v) {} + __device__ Pair(int v) : v1(v), v2(v) {} + __device__ Pair &operator+=(const Pair &a) { + v1 += a.v1; + v2 += a.v2; + return *this; + } +}; + +template +static __device__ __forceinline__ T warpSum(T val) { +#if __CUDA_ARCH__ >= 300 + for (int i = 0; i < getMSB(WARP_SIZE); ++i) { + val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); + } +#else + __shared__ T values[MAX_BLOCK_SIZE]; + values[threadIdx.x] = val; + __threadfence_block(); + const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; + for (int i = 1; i < WARP_SIZE; i++) { + val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; + } +#endif + return val; +} + +template +static __device__ __forceinline__ Pair warpSum(Pair value) { + value.v1 = warpSum(value.v1); + value.v2 = warpSum(value.v2); + return value; +} \ No newline at end of file diff --git a/gimp-plugins/face-parsing.PyTorch/optimizer.py b/gimp-plugins/face-parsing.PyTorch/optimizer.py new file mode 100755 index 0000000..0c99e06 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/optimizer.py @@ -0,0 +1,69 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + + +import torch +import logging + +logger = logging.getLogger() + +class Optimizer(object): + def __init__(self, + model, + lr0, + momentum, + wd, + 
warmup_steps, + warmup_start_lr, + max_iter, + power, + *args, **kwargs): + self.warmup_steps = warmup_steps + self.warmup_start_lr = warmup_start_lr + self.lr0 = lr0 + self.lr = self.lr0 + self.max_iter = float(max_iter) + self.power = power + self.it = 0 + wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = model.get_params() + param_list = [ + {'params': wd_params}, + {'params': nowd_params, 'weight_decay': 0}, + {'params': lr_mul_wd_params, 'lr_mul': True}, + {'params': lr_mul_nowd_params, 'weight_decay': 0, 'lr_mul': True}] + self.optim = torch.optim.SGD( + param_list, + lr = lr0, + momentum = momentum, + weight_decay = wd) + self.warmup_factor = (self.lr0/self.warmup_start_lr)**(1./self.warmup_steps) + + + def get_lr(self): + if self.it <= self.warmup_steps: + lr = self.warmup_start_lr*(self.warmup_factor**self.it) + else: + factor = (1-(self.it-self.warmup_steps)/(self.max_iter-self.warmup_steps))**self.power + lr = self.lr0 * factor + return lr + + + def step(self): + self.lr = self.get_lr() + for pg in self.optim.param_groups: + if pg.get('lr_mul', False): + pg['lr'] = self.lr * 10 + else: + pg['lr'] = self.lr + if self.optim.defaults.get('lr_mul', False): + self.optim.defaults['lr'] = self.lr * 10 + else: + self.optim.defaults['lr'] = self.lr + self.it += 1 + self.optim.step() + if self.it == self.warmup_steps+2: + logger.info('==> warmup done, start to implement poly lr strategy') + + def zero_grad(self): + self.optim.zero_grad() + diff --git a/gimp-plugins/face-parsing.PyTorch/prepropess_data.py b/gimp-plugins/face-parsing.PyTorch/prepropess_data.py new file mode 100755 index 0000000..ee7ed56 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/prepropess_data.py @@ -0,0 +1,38 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + +import os.path as osp +import os +import cv2 +from transform import * +from PIL import Image + +face_data = '/home/zll/data/CelebAMask-HQ/CelebA-HQ-img' +face_sep_mask = '/home/zll/data/CelebAMask-HQ/CelebAMask-HQ-mask-anno' +mask_path = '/home/zll/data/CelebAMask-HQ/mask' +counter = 0 +total = 0 +for i in range(15): + + atts = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'eye_g', 'l_ear', 'r_ear', 'ear_r', + 'nose', 'mouth', 'u_lip', 'l_lip', 'neck', 'neck_l', 'cloth', 'hair', 'hat'] + + for j in range(i * 2000, (i + 1) * 2000): + + mask = np.zeros((512, 512)) + + for l, att in enumerate(atts, 1): + total += 1 + file_name = ''.join([str(j).rjust(5, '0'), '_', att, '.png']) + path = osp.join(face_sep_mask, str(i), file_name) + + if os.path.exists(path): + counter += 1 + sep_mask = np.array(Image.open(path).convert('P')) + # print(np.unique(sep_mask)) + + mask[sep_mask == 225] = l + cv2.imwrite('{}/{}.png'.format(mask_path, j), mask) + print(j) + +print(counter, total) \ No newline at end of file diff --git a/gimp-plugins/face-parsing.PyTorch/resnet.py b/gimp-plugins/face-parsing.PyTorch/resnet.py new file mode 100755 index 0000000..aa2bf95 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/resnet.py @@ -0,0 +1,109 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as modelzoo + +# from modules.bn import InPlaceABNSync as BatchNorm2d + +resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth' + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class BasicBlock(nn.Module): + def 
__init__(self, in_chan, out_chan, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(in_chan, out_chan, stride) + self.bn1 = nn.BatchNorm2d(out_chan) + self.conv2 = conv3x3(out_chan, out_chan) + self.bn2 = nn.BatchNorm2d(out_chan) + self.relu = nn.ReLU(inplace=True) + self.downsample = None + if in_chan != out_chan or stride != 1: + self.downsample = nn.Sequential( + nn.Conv2d(in_chan, out_chan, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(out_chan), + ) + + def forward(self, x): + residual = self.conv1(x) + residual = F.relu(self.bn1(residual)) + residual = self.conv2(residual) + residual = self.bn2(residual) + + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x) + + out = shortcut + residual + out = self.relu(out) + return out + + +def create_layer_basic(in_chan, out_chan, bnum, stride=1): + layers = [BasicBlock(in_chan, out_chan, stride=stride)] + for i in range(bnum-1): + layers.append(BasicBlock(out_chan, out_chan, stride=1)) + return nn.Sequential(*layers) + + +class Resnet18(nn.Module): + def __init__(self): + super(Resnet18, self).__init__() + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1) + self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2) + self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2) + self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2) + self.init_weight() + + def forward(self, x): + x = self.conv1(x) + x = F.relu(self.bn1(x)) + x = self.maxpool(x) + + x = self.layer1(x) + feat8 = self.layer2(x) # 1/8 + feat16 = self.layer3(feat8) # 1/16 + feat32 = self.layer4(feat16) # 1/32 + return feat8, feat16, feat32 + + def init_weight(self): + state_dict = modelzoo.load_url(resnet18_url) + self_state_dict = self.state_dict() + for k, v in state_dict.items(): + if 'fc' in k: continue + self_state_dict.update({k: v}) + self.load_state_dict(self_state_dict) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +if __name__ == "__main__": + net = Resnet18() + x = torch.randn(16, 3, 224, 224) + out = net(x) + print(out[0].size()) + print(out[1].size()) + print(out[2].size()) + net.get_params() diff --git a/gimp-plugins/face-parsing.PyTorch/resnet.pyc b/gimp-plugins/face-parsing.PyTorch/resnet.pyc new file mode 100755 index 0000000000000000000000000000000000000000..fc8bb0754c178471e2a9907a6fa07fa849c54203 GIT binary patch literal 4822 zcmcIo+in}z72PwWC{d0mrFl13WN z&~pwg*|5<9GEfxgWBUR5f__XN`r3c!Q=ihc_8F22^lFKmt#jGuw)Z+~?_uM=ON;lr zFaH=z{WpjI5Ac|lCz9anNiH&&mOVN2W#CV`8mt~RWiU5sY02Td94^S9EeSLiC22^~ zlsBFXE=e*cDfBzC=Sk9%HzKDZe;6#O(!44yOR{iAsXbNdO1dJ+q8#~B`Ci)P(7|O% zMGmh>g1I}AV4@{C#dcODMc1mFV%e*bUX^57)()?6%%`8R+W29$d!~9>4zE+uEA;rg zy%pFa`yPKg=GQ15SUWNw_xnkCT;yexY>ZymvWgEj%4)wqER!@h{VFv@YB%rS{bjuQ z;k`IXH%9g#gGnWBK=8SQ=Mf&$gRnS-C$B_a!7%v3zzul?*`&>2D(lOs-`wrZbHqCk z+b7%WaalYs^Rdm!V*NO?2kWCKNwQ+!a_*zFD$+bO*~>KJdbY)(w#t&!a+X?GH)We> zc$!6~rH&#uB~VK``K`Gca)F!v%>WEqBy-fiYkL+Y<&6RD;l=8Chg3P^VB%~ 
zGKz-#7XP-Vdw3X`EPj}m@lniX{_!La@tAiZ0F?o3t6n7@VP2o`8Z;!>)q^mD8NHjJ#-2H!x_Mbg^SSS%d3W*qAU_N1*SS|u@TT?%e+txI&`46ES%G6YYVQ5 zCve@VcpL~g0}d!G*Q*>g#QdFWVb>^E|I$%il`FLlP($oARc%h}Wh~k_yQO*$%R+FA z?DmL?9kpv#z@KrdHVDuv>dAnJ=v{$O5SZ~OtpZxcE$JWEfPrZ8!~S05~l528XhOq{ZEkthw8u=+Lb;3kB4tKK#5ihiznZSSVn z^17(S#3r{*vpm9M{t5wyA1Vs~KCgX34q#)Wf(VVSoh@*(q6})R&Yh`Ztl|+!Ouz(f zsdCi;Fx?Cd`X5lj3^!h5B8~m?h&V*NQnPD;tlkdLF*GNS@d$tK+Cjev0jws}KUS-^A1P)dBt@ z$__0K$wf3w!%%S+hT!pWj;i`HxP#HNt!nA5@|s>K?okP?<#oC-5llRj@f023pa2qr zt6YpA>GC@m7b;sF7!6cXG*DYy-xH}+{U9_my&B9C5x{uLA1 z%#3f<84b44YUo~p>u-4Lo}w_QqjSs-{t??6CO$wRorH-uD8Qs% z9V6;sfr#ADmN`ZZx+&&xKGTr?KU83LoEYx%Do{LpsaXLBwh=HI9I{BpCHv!xSc0n$9rXG>dpna}=uCTClinrmbxG9u z-ev7|`PKQ}uF}T}wjo|ZAdxP1H0Q`3ps2AVBaR)o$a{ebBF=@IpR@F53K}wF-0msC zbkG+3f`X*#K&LFjyP?DsbXg)LSq7mmqLVMnGDo=Kio@t+gj(=EyE#O$u{kSl&5GNz z;zt_0G_)P3+5Ul5HVnvY7ldCu-fu8}O;xMj9j}8ZamV{n{W%{m{seWnb`~#?KAepg zO7)Bv3Pav>NB*t~dN^C1h6Wc+<|om_#gPTgag=KiG8j0Tve%eK2^!ux;~kb}L4x-w zxJ$7?(Wm$jViqX^W@efa?BVj>S0Avnd2d?W-nuBG-@)3S;xU9z9Upu|2>s}_Cf4{o z!edGZTx(?NOvk5&1rZB_^~nR86m~}S@wZ#@jn6O)dd7W)$&Ol5eIn0#8hVjL&Jp{t zBf9QkG7mjZ=*~&@=q?j4$_|zj*^4#%R|eD9jF)Ap6~2Uo<0`kjER#16Z6JFaOHNF( z*s7H?n+}bdc05YZV|jgahMD3S@9>33RhUhMGZiKCBbCoD$~&7_o8)0gDD+OpTk|^7 z0+pZl%|D^CuU{W{r#JplUk>%gUuNDQDXzN{SkZ^UPSyY4B2o`7HwR%)QE( z$4RJtMoF(IXLSZQZc3MM8PNs#T6c*2qTUI9ZxF+XF5VJ+e%o7t_pgHv-}PL!@gBN@ zpHOI6;5`l$F26@gF%zHem1|(Tal6AWH0U$RUN(mu3 zfqbQH>fUL4a1ZUleG2lmI`s=pb2a_c80kVSt*aS2Ub}QQB!5YvOdYJVkHFv$3&BO3 lTW$1P2fg3vs$9PVG~k7**=lrJYd`MHuYJ(b7b~{&^M6I*$ZP-r literal 0 HcmV?d00001 diff --git a/gimp-plugins/face-parsing.PyTorch/test.py b/gimp-plugins/face-parsing.PyTorch/test.py new file mode 100755 index 0000000..cd94a31 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/test.py @@ -0,0 +1,100 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + +from logger import setup_logger +from model import BiSeNet + +import torch + +import os +import os.path as osp +import numpy as np +from PIL import Image +import torchvision.transforms as transforms +import cv2 + +def vis_parsing_maps(im, parsing_anno, stride, save_im=False, save_path='vis_results/parsing_map_on_im.jpg'): + # Colors for all 20 parts + part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], + [255, 0, 85], [255, 0, 170], + [0, 255, 0], [85, 255, 0], [170, 255, 0], + [0, 255, 85], [0, 255, 170], + [0, 0, 255], [85, 0, 255], [170, 0, 255], + [0, 85, 255], [0, 170, 255], + [255, 255, 0], [255, 255, 85], [255, 255, 170], + [255, 0, 255], [255, 85, 255], [255, 170, 255], + [0, 255, 255], [85, 255, 255], [170, 255, 255]] + + im = np.array(im) + vis_im = im.copy().astype(np.uint8) + vis_parsing_anno = parsing_anno.copy().astype(np.uint8) + vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST) + vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255 + + num_of_class = np.max(vis_parsing_anno) + + for pi in range(1, num_of_class + 1): + index = np.where(vis_parsing_anno == pi) + vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi] + + vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8) + # print(vis_parsing_anno_color.shape, vis_im.shape) + vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0) + + # Save result or not + if save_im: + cv2.imwrite(save_path[:-4] +'.png', vis_parsing_anno) + cv2.imwrite(save_path, vis_im, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) + + # return vis_im + +def evaluate(respth='./res/test_res', dspth='./data', 
cp='model_final_diss.pth'): + + if not os.path.exists(respth): + os.makedirs(respth) + + n_classes = 19 + net = BiSeNet(n_classes=n_classes) + save_pth = osp.join('res/cp', cp) + + if torch.cuda.is_available(): + net.cuda() + net.load_state_dict(torch.load(save_pth)) + else: + net.load_state_dict(torch.load(save_pth, map_location=lambda storage, loc: storage)) + + + net.eval() + + to_tensor = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + with torch.no_grad(): + for image_path in os.listdir(dspth): + img = Image.open(osp.join(dspth, image_path)) + image = img.resize((512, 512), Image.BILINEAR) + img = to_tensor(image) + img = torch.unsqueeze(img, 0) + if torch.cuda.is_available(): + img = img.cuda() + out = net(img)[0] + if torch.cuda.is_available(): + parsing = out.squeeze(0).cpu().numpy().argmax(0) + else: + parsing = out.squeeze(0).numpy().argmax(0) + # print(parsing) + print(np.unique(parsing)) + + vis_parsing_maps(image, parsing, stride=1, save_im=True, save_path=osp.join(respth, image_path)) + + + + + + + +if __name__ == "__main__": + evaluate(dspth='makeup/116_ori.png', cp='79999_iter.pth') + + diff --git a/gimp-plugins/face-parsing.PyTorch/train.py b/gimp-plugins/face-parsing.PyTorch/train.py new file mode 100755 index 0000000..580a0ca --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/train.py @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + +from logger import setup_logger +from model import BiSeNet +from face_dataset import FaceMask +from loss import OhemCELoss +from evaluate import evaluate +from optimizer import Optimizer +import cv2 +import numpy as np + +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +import torch.nn.functional as F +import torch.distributed as dist + +import os +import os.path as osp +import logging +import time +import datetime +import argparse + + +respth = './res' +if not osp.exists(respth): + os.makedirs(respth) +logger = logging.getLogger() + + +def parse_args(): + parse = argparse.ArgumentParser() + parse.add_argument( + '--local_rank', + dest = 'local_rank', + type = int, + default = -1, + ) + return parse.parse_args() + + +def train(): + args = parse_args() + torch.cuda.set_device(args.local_rank) + dist.init_process_group( + backend = 'nccl', + init_method = 'tcp://127.0.0.1:33241', + world_size = torch.cuda.device_count(), + rank=args.local_rank + ) + setup_logger(respth) + + # dataset + n_classes = 19 + n_img_per_gpu = 16 + n_workers = 8 + cropsize = [448, 448] + data_root = '/home/zll/data/CelebAMask-HQ/' + + ds = FaceMask(data_root, cropsize=cropsize, mode='train') + sampler = torch.utils.data.distributed.DistributedSampler(ds) + dl = DataLoader(ds, + batch_size = n_img_per_gpu, + shuffle = False, + sampler = sampler, + num_workers = n_workers, + pin_memory = True, + drop_last = True) + + # model + ignore_idx = -100 + net = BiSeNet(n_classes=n_classes) + net.cuda() + net.train() + net = nn.parallel.DistributedDataParallel(net, + device_ids = [args.local_rank, ], + output_device = args.local_rank + ) + score_thres = 0.7 + n_min = n_img_per_gpu * cropsize[0] * cropsize[1]//16 + LossP = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx) + Loss2 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx) + Loss3 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx) + + ## optimizer + momentum = 0.9 + weight_decay = 5e-4 + lr_start = 1e-2 + max_iter = 80000 + power = 0.9 + warmup_steps = 
1000 + warmup_start_lr = 1e-5 + optim = Optimizer( + model = net.module, + lr0 = lr_start, + momentum = momentum, + wd = weight_decay, + warmup_steps = warmup_steps, + warmup_start_lr = warmup_start_lr, + max_iter = max_iter, + power = power) + + ## train loop + msg_iter = 50 + loss_avg = [] + st = glob_st = time.time() + diter = iter(dl) + epoch = 0 + for it in range(max_iter): + try: + im, lb = next(diter) + if not im.size()[0] == n_img_per_gpu: + raise StopIteration + except StopIteration: + epoch += 1 + sampler.set_epoch(epoch) + diter = iter(dl) + im, lb = next(diter) + im = im.cuda() + lb = lb.cuda() + H, W = im.size()[2:] + lb = torch.squeeze(lb, 1) + + optim.zero_grad() + out, out16, out32 = net(im) + lossp = LossP(out, lb) + loss2 = Loss2(out16, lb) + loss3 = Loss3(out32, lb) + loss = lossp + loss2 + loss3 + loss.backward() + optim.step() + + loss_avg.append(loss.item()) + + # print training log message + if (it+1) % msg_iter == 0: + loss_avg = sum(loss_avg) / len(loss_avg) + lr = optim.lr + ed = time.time() + t_intv, glob_t_intv = ed - st, ed - glob_st + eta = int((max_iter - it) * (glob_t_intv / it)) + eta = str(datetime.timedelta(seconds=eta)) + msg = ', '.join([ + 'it: {it}/{max_it}', + 'lr: {lr:4f}', + 'loss: {loss:.4f}', + 'eta: {eta}', + 'time: {time:.4f}', + ]).format( + it = it+1, + max_it = max_iter, + lr = lr, + loss = loss_avg, + time = t_intv, + eta = eta + ) + logger.info(msg) + loss_avg = [] + st = ed + if dist.get_rank() == 0: + if (it+1) % 5000 == 0: + state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict() + if dist.get_rank() == 0: + torch.save(state, './res/cp/{}_iter.pth'.format(it)) + evaluate(dspth='/home/zll/data/CelebAMask-HQ/test-img', cp='{}_iter.pth'.format(it)) + + # dump the final model + save_pth = osp.join(respth, 'model_final_diss.pth') + # net.cpu() + state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict() + if dist.get_rank() == 0: + torch.save(state, save_pth) + logger.info('training done, model saved to: {}'.format(save_pth)) + + +if __name__ == "__main__": + train() diff --git a/gimp-plugins/face-parsing.PyTorch/transform.py b/gimp-plugins/face-parsing.PyTorch/transform.py new file mode 100755 index 0000000..9479ae3 --- /dev/null +++ b/gimp-plugins/face-parsing.PyTorch/transform.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + + +from PIL import Image +import PIL.ImageEnhance as ImageEnhance +import random +import numpy as np + +class RandomCrop(object): + def __init__(self, size, *args, **kwargs): + self.size = size + + def __call__(self, im_lb): + im = im_lb['im'] + lb = im_lb['lb'] + assert im.size == lb.size + W, H = self.size + w, h = im.size + + if (W, H) == (w, h): return dict(im=im, lb=lb) + if w < W or h < H: + scale = float(W) / w if w < h else float(H) / h + w, h = int(scale * w + 1), int(scale * h + 1) + im = im.resize((w, h), Image.BILINEAR) + lb = lb.resize((w, h), Image.NEAREST) + sw, sh = random.random() * (w - W), random.random() * (h - H) + crop = int(sw), int(sh), int(sw) + W, int(sh) + H + return dict( + im = im.crop(crop), + lb = lb.crop(crop) + ) + + +class HorizontalFlip(object): + def __init__(self, p=0.5, *args, **kwargs): + self.p = p + + def __call__(self, im_lb): + if random.random() > self.p: + return im_lb + else: + im = im_lb['im'] + lb = im_lb['lb'] + + # atts = [1 'skin', 2 'l_brow', 3 'r_brow', 4 'l_eye', 5 'r_eye', 6 'eye_g', 7 'l_ear', 8 'r_ear', 9 'ear_r', + # 10 'nose', 11 'mouth', 12 'u_lip', 13 'l_lip', 14 'neck', 15 'neck_l', 16 
'cloth', 17 'hair', 18 'hat'] + + flip_lb = np.array(lb) + flip_lb[lb == 2] = 3 + flip_lb[lb == 3] = 2 + flip_lb[lb == 4] = 5 + flip_lb[lb == 5] = 4 + flip_lb[lb == 7] = 8 + flip_lb[lb == 8] = 7 + flip_lb = Image.fromarray(flip_lb) + return dict(im = im.transpose(Image.FLIP_LEFT_RIGHT), + lb = flip_lb.transpose(Image.FLIP_LEFT_RIGHT), + ) + + +class RandomScale(object): + def __init__(self, scales=(1, ), *args, **kwargs): + self.scales = scales + + def __call__(self, im_lb): + im = im_lb['im'] + lb = im_lb['lb'] + W, H = im.size + scale = random.choice(self.scales) + w, h = int(W * scale), int(H * scale) + return dict(im = im.resize((w, h), Image.BILINEAR), + lb = lb.resize((w, h), Image.NEAREST), + ) + + +class ColorJitter(object): + def __init__(self, brightness=None, contrast=None, saturation=None, *args, **kwargs): + if not brightness is None and brightness>0: + self.brightness = [max(1-brightness, 0), 1+brightness] + if not contrast is None and contrast>0: + self.contrast = [max(1-contrast, 0), 1+contrast] + if not saturation is None and saturation>0: + self.saturation = [max(1-saturation, 0), 1+saturation] + + def __call__(self, im_lb): + im = im_lb['im'] + lb = im_lb['lb'] + r_brightness = random.uniform(self.brightness[0], self.brightness[1]) + r_contrast = random.uniform(self.contrast[0], self.contrast[1]) + r_saturation = random.uniform(self.saturation[0], self.saturation[1]) + im = ImageEnhance.Brightness(im).enhance(r_brightness) + im = ImageEnhance.Contrast(im).enhance(r_contrast) + im = ImageEnhance.Color(im).enhance(r_saturation) + return dict(im = im, + lb = lb, + ) + + +class MultiScale(object): + def __init__(self, scales): + self.scales = scales + + def __call__(self, img): + W, H = img.size + sizes = [(int(W*ratio), int(H*ratio)) for ratio in self.scales] + imgs = [] + [imgs.append(img.resize(size, Image.BILINEAR)) for size in sizes] + return imgs + + +class Compose(object): + def __init__(self, do_list): + self.do_list = do_list + + def __call__(self, im_lb): + for comp in self.do_list: + im_lb = comp(im_lb) + return im_lb + + + + +if __name__ == '__main__': + flip = HorizontalFlip(p = 1) + crop = RandomCrop((321, 321)) + rscales = RandomScale((0.75, 1.0, 1.5, 1.75, 2.0)) + img = Image.open('data/img.jpg') + lb = Image.open('data/label.png') diff --git a/gimp-plugins/facegen.py b/gimp-plugins/facegen.py new file mode 100755 index 0000000..a09cae0 --- /dev/null +++ b/gimp-plugins/facegen.py @@ -0,0 +1,175 @@ + + +from gimpfu import * +import sys + +sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'CelebAMask-HQ/MaskGAN_demo']) + + +import torch +from argparse import Namespace +from models.models import create_model +from data.base_dataset import get_params, get_transform, normalize +import os +import numpy as np +from PIL import Image + +colors = np.array([[0, 0, 0], [204, 0, 0], [76, 153, 0], \ +[204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255], \ +[51, 255, 255], [102, 51, 0], [255, 0, 0], [102, 204, 0], \ +[255, 255, 0], [0, 0, 153], [0, 0, 204], [255, 51, 153], \ +[0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]]) +colors = colors.astype(np.uint8) + +def getlabelmat(mask,idx): + x=np.zeros((mask.shape[0],mask.shape[1],3)) + x[mask==idx,0]=colors[idx][0] + x[mask==idx,1]=colors[idx][1] + x[mask==idx,2]=colors[idx][2] + return x + +def colorMask(mask): + x=np.zeros((mask.shape[0],mask.shape[1],3)) + for idx in range(19): + 
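+        # accumulate the RGB colour assigned to each of the 19 CelebAMask-HQ parsing labels (background + 18 face parts)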
x=x+getlabelmat(mask,idx) + # mask=np.dstack((mask1,mask2,mask3)) + return np.uint8(x) + +def labelMask(mask): + x=np.zeros((mask.shape[0],mask.shape[1],3)) + for idx in range(19): + tmp=np.logical_and(mask[:,:,0]==colors[idx][0],mask[:,:,1]==colors[idx][1]) + tmp2=np.logical_and(tmp,mask[:,:,2]==colors[idx][2]) + x[tmp2]=idx + return x + +def getOptions(): + mydict={'aspect_ratio': 1.0, + 'batchSize': 1, + 'checkpoints_dir': baseLoc+'CelebAMask-HQ/MaskGAN_demo/checkpoints', + 'cluster_path': 'features_clustered_010.npy', + 'data_type': 32, + 'dataroot': '../Data_preprocessing/', + 'display_winsize': 512, + 'engine': None, + 'export_onnx': None, + 'fineSize': 512, + 'gpu_ids': [0], + 'how_many': 1000, + 'input_nc': 3, + 'isTrain': False, + 'label_nc': 19, + 'loadSize': 512, + 'max_dataset_size': 'inf', + 'model': 'pix2pixHD', + 'nThreads': 2, + 'n_blocks_global': 4, + 'n_blocks_local': 3, + 'n_downsample_global': 4, + 'n_local_enhancers': 1, + 'name': 'label2face_512p', + 'netG': 'global', + 'ngf': 64, + 'niter_fix_global': 0, + 'no_flip': False, + 'norm': 'instance', + 'ntest': 'inf', + 'onnx': None, + 'output_nc': 3, + 'phase': 'test', + 'resize_or_crop': 'scale_width', + 'results_dir': './results/', + 'serial_batches': False, + 'tf_log': False, + 'use_dropout': False, + 'use_encoded_image': False, + 'verbose': False, + 'which_epoch': 'latest'} + args = Namespace(**mydict) + return args + +def channelData(layer):#convert gimp image to numpy + region=layer.get_pixel_rgn(0, 0, layer.width,layer.height) + pixChars=region[:,:] # Take whole layer + bpp=region.bpp + # return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp) + return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp) + +def createResultLayer(image,name,result): + rlBytes=np.uint8(result).tobytes(); + rl=gimp.Layer(image,name,image.width,image.height,image.active_layer.type,100,NORMAL_MODE) + region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) + region[:,:]=rlBytes + image.add_layer(rl,0) + gimp.displays_flush() + +def getnewface(img,mask,mask_m): + h,w,d = img.shape + img = Image.fromarray(img) + lmask = labelMask(mask) + lmask_m = labelMask(mask_m) + + + os.environ["CUDA_VISIBLE_DEVICES"] = str(0) + opt = getOptions() + + model = create_model(opt) + + params = get_params(opt, (512,512)) + transform_mask = get_transform(opt, params, method=Image.NEAREST, normalize=False, normalize_mask=True) + transform_image = get_transform(opt, params) + mask = transform_mask(Image.fromarray(np.uint8(lmask))) + mask_m = transform_mask(Image.fromarray(np.uint8(lmask_m))) + img = transform_image(img) + + generated = model.inference(torch.FloatTensor([mask_m.numpy()]), torch.FloatTensor([mask.numpy()]), torch.FloatTensor([img.numpy()])) + + result = generated.permute(0, 2, 3, 1) + if torch.cuda.is_available(): + result = result.cpu().numpy() + else: + result = result.detach().numpy() + + result = (result + 1) * 127.5 + result = np.asarray(result[0,:,:,:], dtype=np.uint8) + result = Image.fromarray(result) + result = result.resize([w,h]) + + result = np.array(result) + return result + + +def facegen(imggimp, curlayer,layeri,layerm,layermm) : + if torch.cuda.is_available(): + gimp.progress_init("(Using GPU) Running face gen for " + layeri.name + "...") + else: + gimp.progress_init("(Using CPU) Running face gen for " + layeri.name + "...") + + img = channelData(layeri) + mask = channelData(layerm) + mask_m = channelData(layermm) + + cpy=getnewface(img,mask,mask_m) + 
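+    # push the generated face back into GIMP as a new layer named 'new_output'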
createResultLayer(imggimp,'new_output',cpy) + + + +register( + "facegen", + "facegen", + "Running face gen.", + "Kritik Soman", + "Your", + "2020", + "facegen...", + "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. + [ (PF_IMAGE, "image", "Input image", None), + (PF_DRAWABLE, "drawable", "Input drawable", None), + (PF_LAYER, "drawinglayer", "Original Image:", None), + (PF_LAYER, "drawinglayer", "Original Mask:", None), + (PF_LAYER, "drawinglayer", "Modified Mask:", None), + ], + [], + facegen, menu="/Layer/GIML-ML") + +main() diff --git a/gimp-plugins/faceparse.py b/gimp-plugins/faceparse.py new file mode 100755 index 0000000..f52c980 --- /dev/null +++ b/gimp-plugins/faceparse.py @@ -0,0 +1,167 @@ + + +from gimpfu import * +import sys + +sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'face-parsing.PyTorch']) + + +from model import BiSeNet +from PIL import Image +import torch +from torchvision import transforms, datasets +import numpy as np + +colors = np.array([[0,0,0], +[204,0,0], +[0,255,255], +[51,255,255], +[51,51,255], +[204,0,204], +[204,204,0], +[102,51,0], +[255,0,0], +[0,204,204], +[76,153,0], +[102,204,0], +[255,255,0], +[0,0,153], +[255,153,51], +[0,51,0], +[0,204,0], +[0,0,204], +[255,51,153]]) +colors = colors.astype(np.uint8) + +def getlabelmat(mask,idx): + x=np.zeros((mask.shape[0],mask.shape[1],3)) + x[mask==idx,0]=colors[idx][0] + x[mask==idx,1]=colors[idx][1] + x[mask==idx,2]=colors[idx][2] + return x + +def colorMask(mask): + x=np.zeros((mask.shape[0],mask.shape[1],3)) + for idx in range(19): + x=x+getlabelmat(mask,idx) + return np.uint8(x) + +def getface(input_image): + save_pth = baseLoc+'face-parsing.PyTorch/79999_iter.pth' + input_image = Image.fromarray(input_image) + + n_classes = 19 + net = BiSeNet(n_classes=n_classes) + if torch.cuda.is_available(): + net.cuda() + net.load_state_dict(torch.load(save_pth)) + else: + net.load_state_dict(torch.load(save_pth, map_location=lambda storage, loc: storage)) + + + net.eval() + + + to_tensor = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + + + with torch.no_grad(): + img = input_image.resize((512, 512), Image.BILINEAR) + img = to_tensor(img) + img = torch.unsqueeze(img, 0) + if torch.cuda.is_available(): + img = img.cuda() + out = net(img)[0] + if torch.cuda.is_available(): + parsing = out.squeeze(0).cpu().numpy().argmax(0) + else: + parsing = out.squeeze(0).numpy().argmax(0) + + parsing = Image.fromarray(np.uint8(parsing)) + parsing = parsing.resize(input_image.size) + parsing = np.array(parsing) + + return parsing + +def getSeg(input_image): + model = torch.load(baseLoc+'deeplabv3+model.pt') + model.eval() + preprocess = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + + input_image = Image.fromarray(input_image) + + input_tensor = preprocess(input_image) + input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model + + # move the input and model to GPU for speed if available + if torch.cuda.is_available(): + input_batch = input_batch.to('cuda') + model.to('cuda') + + with torch.no_grad(): + output = model(input_batch)['out'][0] + output_predictions = output.argmax(0) + + + # create a color pallette, selecting a color for each class + palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) + colors = 
torch.as_tensor([i for i in range(21)])[:, None] * palette + colors = (colors % 255).numpy().astype("uint8") + + r = Image.fromarray(output_predictions.byte().cpu().numpy()).resize(input_image.size) + + tmp = np.array(r) + tmp2 = 10*np.repeat(tmp[:, :, np.newaxis], 3, axis=2) + + return tmp2 + +def channelData(layer):#convert gimp image to numpy + region=layer.get_pixel_rgn(0, 0, layer.width,layer.height) + pixChars=region[:,:] # Take whole layer + bpp=region.bpp + return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp) + +def createResultLayer(image,name,result): + rlBytes=np.uint8(result).tobytes(); + rl=gimp.Layer(image,name,image.width,image.height,image.active_layer.type,100,NORMAL_MODE) + region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) + region[:,:]=rlBytes + image.add_layer(rl,0) + gimp.displays_flush() + +def faceparse(img, layer) : + if torch.cuda.is_available(): + gimp.progress_init("(Using GPU) Running face parse for " + layer.name + "...") + else: + gimp.progress_init("(Using CPU) Running face parse for " + layer.name + "...") + + imgmat = channelData(layer) + cpy=getface(imgmat) + cpy = colorMask(cpy) + createResultLayer(img,'new_output',cpy) + + + + +register( + "faceparse", + "faceparse", + "Running face parse.", + "Kritik Soman", + "Your", + "2020", + "faceparse...", + "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. + [ (PF_IMAGE, "image", "Input image", None), + (PF_DRAWABLE, "drawable", "Input drawable", None), + ], + [], + faceparse, menu="/Layer/GIML-ML") + +main() diff --git a/gimp-plugins/installGimpML-mac.sh b/gimp-plugins/installGimpML-mac.sh new file mode 100644 index 0000000..cd39298 --- /dev/null +++ b/gimp-plugins/installGimpML-mac.sh @@ -0,0 +1,39 @@ +if [ ! -d "gimpenv" ]; then + + echo "\n-----------Installing GIMP-ML-----------\n" + + python -m pip install --user virtualenv + python -m virtualenv gimpenv + source gimpenv/bin/activate + pip install torchvision + pip install opencv-python + pip install numpy + pip install future + pip install torch + pip install matplotlib + pip install scipy + pip install scikit-image + pip install typing + pip install albumentations + pip install enum + pip install pretrainedmodels + cwd=$(pwd) + echo -e "baseLoc='${cwd}/'\n$(cat colorize.py)" > colorize.py + echo -e "baseLoc='${cwd}/'\n$(cat deblur.py)" > deblur.py + echo -e "baseLoc='${cwd}/'\n$(cat deeplabv3.py)" > deeplabv3.py + echo -e "baseLoc='${cwd}/'\n$(cat facegen.py)" > facegen.py + echo -e "baseLoc='${cwd}/'\n$(cat faceparse.py)" > faceparse.py + echo -e "baseLoc='${cwd}/'\n$(cat monodepth.py)" > monodepth.py + echo -e "baseLoc='${cwd}/'\n$(cat super_resolution.py)" > super_resolution.py + + deactivate + + echo "\n-----------Installed GIMP-ML------------\n" + +else + + echo "Environment already setup!" + +fi + + diff --git a/gimp-plugins/installGimpML-ubuntu.sh b/gimp-plugins/installGimpML-ubuntu.sh new file mode 100644 index 0000000..e68a5d1 --- /dev/null +++ b/gimp-plugins/installGimpML-ubuntu.sh @@ -0,0 +1,41 @@ +if [ ! 
-d "gimpenv" ]; then + + echo "\n-----------Installing GIMP-ML-----------\n" + sudo apt install python-minimal + python2 -m pip install --user virtualenv + python2 -m virtualenv gimpenv + source gimpenv/bin/activate + python2 -m pip install torchvision + python2 -m pip install opencv-python + python2 -m pip install numpy + python2 -m pip install future + python2 -m pip install torch + python2 -m pip install matplotlib + python2 -m pip install scipy + python2 -m pip install scikit-image + python2 -m pip install typing + python2 -m pip install albumentations + python2 -m pip install enum + python2 -m pip install pretrainedmodels + cwd=$(pwd) + echo -e "baseLoc='${cwd}/'\n$(cat colorize.py)" > colorize.py + echo -e "baseLoc='${cwd}/'\n$(cat deblur.py)" > deblur.py + echo -e "baseLoc='${cwd}/'\n$(cat deeplabv3.py)" > deeplabv3.py + echo -e "baseLoc='${cwd}/'\n$(cat facegen.py)" > facegen.py + echo -e "baseLoc='${cwd}/'\n$(cat faceparse.py)" > faceparse.py + echo -e "baseLoc='${cwd}/'\n$(cat monodepth.py)" > monodepth.py + echo -e "baseLoc='${cwd}/'\n$(cat super_resolution.py)" > super_resolution.py + + deactivate + + echo "\n-----------Installed GIMP-ML------------\n" + +else + + echo "Environment already setup!" + +fi + + + + diff --git a/gimp-plugins/invert.py b/gimp-plugins/invert.py new file mode 100755 index 0000000..fdcaeae --- /dev/null +++ b/gimp-plugins/invert.py @@ -0,0 +1,25 @@ + +from gimpfu import * + +def invert(img, layer) : + gimp.progress_init("Inverting " + layer.name + "...") + pdb.gimp_undo_push_group_start(img) + pdb.gimp_invert(layer) + pdb.gimp_undo_push_group_end(img) + +register( + "Invert", + "Invert", + "Invert", + "Kritik Soman", + "Your Name", + "2020", + "Invert...", + "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. + [ (PF_IMAGE, "image", "Input image", None), + (PF_DRAWABLE, "drawable", "Input drawable", None), + ], + [], + invert, menu="/Filters/Enhance") + +main() diff --git a/gimp-plugins/monodepth.py b/gimp-plugins/monodepth.py new file mode 100755 index 0000000..20efebf --- /dev/null +++ b/gimp-plugins/monodepth.py @@ -0,0 +1,113 @@ + + +from gimpfu import * +import sys +sys.path.extend([baseLoc+'gimpenv/lib/python2.7',baseLoc+'gimpenv/lib/python2.7/site-packages',baseLoc+'gimpenv/lib/python2.7/site-packages/setuptools',baseLoc+'monodepth2']) + + +import PIL.Image as pil +import networks +import torch +from torchvision import transforms, datasets +import os +import numpy as np +import matplotlib as mpl +import matplotlib.cm as cm + +def getMonoDepth(input_image): + if torch.cuda.is_available(): + device = torch.device("cuda") + else: + device = torch.device("cpu") + loc=baseLoc+'monodepth2/' + + model_path = os.path.join(loc+"models", 'mono+stereo_640x192') + encoder_path = os.path.join(model_path, "encoder.pth") + depth_decoder_path = os.path.join(model_path, "depth.pth") + + # LOADING PRETRAINED MODEL + encoder = networks.ResnetEncoder(18, False) + loaded_dict_enc = torch.load(encoder_path, map_location=device) + + # extract the height and width of image that this model was trained with + feed_height = loaded_dict_enc['height'] + feed_width = loaded_dict_enc['width'] + filtered_dict_enc = {k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()} + encoder.load_state_dict(filtered_dict_enc) + encoder.to(device) + encoder.eval() + + depth_decoder = networks.DepthDecoder(num_ch_enc=encoder.num_ch_enc, scales=range(4)) + + loaded_dict = torch.load(depth_decoder_path, map_location=device) + depth_decoder.load_state_dict(loaded_dict) + + 
depth_decoder.to(device) + depth_decoder.eval() + + with torch.no_grad(): + input_image = pil.fromarray(input_image) + # input_image = pil.open(image_path).convert('RGB') + original_width, original_height = input_image.size + input_image = input_image.resize((feed_width, feed_height), pil.LANCZOS) + input_image = transforms.ToTensor()(input_image).unsqueeze(0) + + # PREDICTION + input_image = input_image.to(device) + features = encoder(input_image) + outputs = depth_decoder(features) + + disp = outputs[("disp", 0)] + disp_resized = torch.nn.functional.interpolate( + disp, (original_height, original_width), mode="bilinear", align_corners=False) + + # Saving colormapped depth image + disp_resized_np = disp_resized.squeeze().cpu().numpy() + vmax = np.percentile(disp_resized_np, 95) + normalizer = mpl.colors.Normalize(vmin=disp_resized_np.min(), vmax=vmax) + mapper = cm.ScalarMappable(norm=normalizer, cmap='magma') + colormapped_im = (mapper.to_rgba(disp_resized_np)[:, :, :3] * 255).astype(np.uint8) + return colormapped_im + +def channelData(layer):#convert gimp image to numpy + region=layer.get_pixel_rgn(0, 0, layer.width,layer.height) + pixChars=region[:,:] # Take whole layer + bpp=region.bpp + # return np.frombuffer(pixChars,dtype=np.uint8).reshape(len(pixChars)/bpp,bpp) + return np.frombuffer(pixChars,dtype=np.uint8).reshape(layer.height,layer.width,bpp) + +def createResultLayer(image,name,result): + rlBytes=np.uint8(result).tobytes(); + rl=gimp.Layer(image,name,image.width,image.height,image.active_layer.type,100,NORMAL_MODE) + region=rl.get_pixel_rgn(0, 0, rl.width,rl.height,True) + region[:,:]=rlBytes + image.add_layer(rl,0) + gimp.displays_flush() + +def MonoDepth(img, layer) : + gimp.progress_init("Generating disparity map for " + layer.name + "...") + + imgmat = channelData(layer) + cpy=getMonoDepth(imgmat) + + createResultLayer(img,'new_output',cpy) + + + + +register( + "MonoDepth", + "MonoDepth", + "Generate monocular disparity map based on deep learning.", + "Kritik Soman", + "Your", + "2020", + "MonoDepth...", + "*", # Alternately use RGB, RGB*, GRAY*, INDEXED etc. + [ (PF_IMAGE, "image", "Input image", None), + (PF_DRAWABLE, "drawable", "Input drawable", None), + ], + [], + MonoDepth, menu="/Layer/GIML-ML") + +main() diff --git a/gimp-plugins/monodepth2/evaluate_depth.py b/gimp-plugins/monodepth2/evaluate_depth.py new file mode 100755 index 0000000..7746ef9 --- /dev/null +++ b/gimp-plugins/monodepth2/evaluate_depth.py @@ -0,0 +1,230 @@ +from __future__ import absolute_import, division, print_function + +import os +import cv2 +import numpy as np + +import torch +from torch.utils.data import DataLoader + +from layers import disp_to_depth +from utils import readlines +from options import MonodepthOptions +import datasets +import networks + +cv2.setNumThreads(0) # This speeds up evaluation 5x on our unix systems (OpenCV 3.3.1) + + +splits_dir = os.path.join(os.path.dirname(__file__), "splits") + +# Models which were trained with stereo supervision were trained with a nominal +# baseline of 0.1 units. The KITTI rig has a baseline of 54cm. Therefore, +# to convert our stereo predictions to real-world scale we multiply our depths by 5.4. 
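+# (0.54 m real baseline / 0.1 unit nominal baseline = 5.4, i.e. metric depth ~= STEREO_SCALE_FACTOR * predicted depth.)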
+STEREO_SCALE_FACTOR = 5.4 + + +def compute_errors(gt, pred): + """Computation of error metrics between predicted and ground truth depths + """ + thresh = np.maximum((gt / pred), (pred / gt)) + a1 = (thresh < 1.25 ).mean() + a2 = (thresh < 1.25 ** 2).mean() + a3 = (thresh < 1.25 ** 3).mean() + + rmse = (gt - pred) ** 2 + rmse = np.sqrt(rmse.mean()) + + rmse_log = (np.log(gt) - np.log(pred)) ** 2 + rmse_log = np.sqrt(rmse_log.mean()) + + abs_rel = np.mean(np.abs(gt - pred) / gt) + + sq_rel = np.mean(((gt - pred) ** 2) / gt) + + return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 + + +def batch_post_process_disparity(l_disp, r_disp): + """Apply the disparity post-processing method as introduced in Monodepthv1 + """ + _, h, w = l_disp.shape + m_disp = 0.5 * (l_disp + r_disp) + l, _ = np.meshgrid(np.linspace(0, 1, w), np.linspace(0, 1, h)) + l_mask = (1.0 - np.clip(20 * (l - 0.05), 0, 1))[None, ...] + r_mask = l_mask[:, :, ::-1] + return r_mask * l_disp + l_mask * r_disp + (1.0 - l_mask - r_mask) * m_disp + + +def evaluate(opt): + """Evaluates a pretrained model using a specified test set + """ + MIN_DEPTH = 1e-3 + MAX_DEPTH = 80 + + assert sum((opt.eval_mono, opt.eval_stereo)) == 1, \ + "Please choose mono or stereo evaluation by setting either --eval_mono or --eval_stereo" + + if opt.ext_disp_to_eval is None: + + opt.load_weights_folder = os.path.expanduser(opt.load_weights_folder) + + assert os.path.isdir(opt.load_weights_folder), \ + "Cannot find a folder at {}".format(opt.load_weights_folder) + + print("-> Loading weights from {}".format(opt.load_weights_folder)) + + filenames = readlines(os.path.join(splits_dir, opt.eval_split, "test_files.txt")) + encoder_path = os.path.join(opt.load_weights_folder, "encoder.pth") + decoder_path = os.path.join(opt.load_weights_folder, "depth.pth") + + encoder_dict = torch.load(encoder_path) + + dataset = datasets.KITTIRAWDataset(opt.data_path, filenames, + encoder_dict['height'], encoder_dict['width'], + [0], 4, is_train=False) + dataloader = DataLoader(dataset, 16, shuffle=False, num_workers=opt.num_workers, + pin_memory=True, drop_last=False) + + encoder = networks.ResnetEncoder(opt.num_layers, False) + depth_decoder = networks.DepthDecoder(encoder.num_ch_enc) + + model_dict = encoder.state_dict() + encoder.load_state_dict({k: v for k, v in encoder_dict.items() if k in model_dict}) + depth_decoder.load_state_dict(torch.load(decoder_path)) + + encoder.cuda() + encoder.eval() + depth_decoder.cuda() + depth_decoder.eval() + + pred_disps = [] + + print("-> Computing predictions with size {}x{}".format( + encoder_dict['width'], encoder_dict['height'])) + + with torch.no_grad(): + for data in dataloader: + input_color = data[("color", 0, 0)].cuda() + + if opt.post_process: + # Post-processed results require each image to have two forward passes + input_color = torch.cat((input_color, torch.flip(input_color, [3])), 0) + + output = depth_decoder(encoder(input_color)) + + pred_disp, _ = disp_to_depth(output[("disp", 0)], opt.min_depth, opt.max_depth) + pred_disp = pred_disp.cpu()[:, 0].numpy() + + if opt.post_process: + N = pred_disp.shape[0] // 2 + pred_disp = batch_post_process_disparity(pred_disp[:N], pred_disp[N:, :, ::-1]) + + pred_disps.append(pred_disp) + + pred_disps = np.concatenate(pred_disps) + + else: + # Load predictions from file + print("-> Loading predictions from {}".format(opt.ext_disp_to_eval)) + pred_disps = np.load(opt.ext_disp_to_eval) + + if opt.eval_eigen_to_benchmark: + eigen_to_benchmark_ids = np.load( + os.path.join(splits_dir, 
"benchmark", "eigen_to_benchmark_ids.npy")) + + pred_disps = pred_disps[eigen_to_benchmark_ids] + + if opt.save_pred_disps: + output_path = os.path.join( + opt.load_weights_folder, "disps_{}_split.npy".format(opt.eval_split)) + print("-> Saving predicted disparities to ", output_path) + np.save(output_path, pred_disps) + + if opt.no_eval: + print("-> Evaluation disabled. Done.") + quit() + + elif opt.eval_split == 'benchmark': + save_dir = os.path.join(opt.load_weights_folder, "benchmark_predictions") + print("-> Saving out benchmark predictions to {}".format(save_dir)) + if not os.path.exists(save_dir): + os.makedirs(save_dir) + + for idx in range(len(pred_disps)): + disp_resized = cv2.resize(pred_disps[idx], (1216, 352)) + depth = STEREO_SCALE_FACTOR / disp_resized + depth = np.clip(depth, 0, 80) + depth = np.uint16(depth * 256) + save_path = os.path.join(save_dir, "{:010d}.png".format(idx)) + cv2.imwrite(save_path, depth) + + print("-> No ground truth is available for the KITTI benchmark, so not evaluating. Done.") + quit() + + gt_path = os.path.join(splits_dir, opt.eval_split, "gt_depths.npz") + gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1')["data"] + + print("-> Evaluating") + + if opt.eval_stereo: + print(" Stereo evaluation - " + "disabling median scaling, scaling by {}".format(STEREO_SCALE_FACTOR)) + opt.disable_median_scaling = True + opt.pred_depth_scale_factor = STEREO_SCALE_FACTOR + else: + print(" Mono evaluation - using median scaling") + + errors = [] + ratios = [] + + for i in range(pred_disps.shape[0]): + + gt_depth = gt_depths[i] + gt_height, gt_width = gt_depth.shape[:2] + + pred_disp = pred_disps[i] + pred_disp = cv2.resize(pred_disp, (gt_width, gt_height)) + pred_depth = 1 / pred_disp + + if opt.eval_split == "eigen": + mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH) + + crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height, + 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32) + crop_mask = np.zeros(mask.shape) + crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1 + mask = np.logical_and(mask, crop_mask) + + else: + mask = gt_depth > 0 + + pred_depth = pred_depth[mask] + gt_depth = gt_depth[mask] + + pred_depth *= opt.pred_depth_scale_factor + if not opt.disable_median_scaling: + ratio = np.median(gt_depth) / np.median(pred_depth) + ratios.append(ratio) + pred_depth *= ratio + + pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH + pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH + + errors.append(compute_errors(gt_depth, pred_depth)) + + if not opt.disable_median_scaling: + ratios = np.array(ratios) + med = np.median(ratios) + print(" Scaling ratios | med: {:0.3f} | std: {:0.3f}".format(med, np.std(ratios / med))) + + mean_errors = np.array(errors).mean(0) + + print("\n " + ("{:>8} | " * 7).format("abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3")) + print(("&{: 8.3f} " * 7).format(*mean_errors.tolist()) + "\\\\") + print("\n-> Done!") + + +if __name__ == "__main__": + options = MonodepthOptions() + evaluate(options.parse()) diff --git a/gimp-plugins/monodepth2/evaluate_pose.py b/gimp-plugins/monodepth2/evaluate_pose.py new file mode 100755 index 0000000..4b852a0 --- /dev/null +++ b/gimp-plugins/monodepth2/evaluate_pose.py @@ -0,0 +1,134 @@ +# Copyright Niantic 2019. Patent Pending. All rights reserved. +# +# This software is licensed under the terms of the Monodepth2 licence +# which allows for non-commercial use only, the full terms of which are made +# available in the LICENSE file. 
+ +from __future__ import absolute_import, division, print_function + +import os +import numpy as np + +import torch +from torch.utils.data import DataLoader + +from layers import transformation_from_parameters +from utils import readlines +from options import MonodepthOptions +from datasets import KITTIOdomDataset +import networks + + +# from https://github.com/tinghuiz/SfMLearner +def dump_xyz(source_to_target_transformations): + xyzs = [] + cam_to_world = np.eye(4) + xyzs.append(cam_to_world[:3, 3]) + for source_to_target_transformation in source_to_target_transformations: + cam_to_world = np.dot(cam_to_world, source_to_target_transformation) + xyzs.append(cam_to_world[:3, 3]) + return xyzs + + +# from https://github.com/tinghuiz/SfMLearner +def compute_ate(gtruth_xyz, pred_xyz_o): + + # Make sure that the first matched frames align (no need for rotational alignment as + # all the predicted/ground-truth snippets have been converted to use the same coordinate + # system with the first frame of the snippet being the origin). + offset = gtruth_xyz[0] - pred_xyz_o[0] + pred_xyz = pred_xyz_o + offset[None, :] + + # Optimize the scaling factor + scale = np.sum(gtruth_xyz * pred_xyz) / np.sum(pred_xyz ** 2) + alignment_error = pred_xyz * scale - gtruth_xyz + rmse = np.sqrt(np.sum(alignment_error ** 2)) / gtruth_xyz.shape[0] + return rmse + + +def evaluate(opt): + """Evaluate odometry on the KITTI dataset + """ + assert os.path.isdir(opt.load_weights_folder), \ + "Cannot find a folder at {}".format(opt.load_weights_folder) + + assert opt.eval_split == "odom_9" or opt.eval_split == "odom_10", \ + "eval_split should be either odom_9 or odom_10" + + sequence_id = int(opt.eval_split.split("_")[1]) + + filenames = readlines( + os.path.join(os.path.dirname(__file__), "splits", "odom", + "test_files_{:02d}.txt".format(sequence_id))) + + dataset = KITTIOdomDataset(opt.data_path, filenames, opt.height, opt.width, + [0, 1], 4, is_train=False) + dataloader = DataLoader(dataset, opt.batch_size, shuffle=False, + num_workers=opt.num_workers, pin_memory=True, drop_last=False) + + pose_encoder_path = os.path.join(opt.load_weights_folder, "pose_encoder.pth") + pose_decoder_path = os.path.join(opt.load_weights_folder, "pose.pth") + + pose_encoder = networks.ResnetEncoder(opt.num_layers, False, 2) + pose_encoder.load_state_dict(torch.load(pose_encoder_path)) + + pose_decoder = networks.PoseDecoder(pose_encoder.num_ch_enc, 1, 2) + pose_decoder.load_state_dict(torch.load(pose_decoder_path)) + + pose_encoder.cuda() + pose_encoder.eval() + pose_decoder.cuda() + pose_decoder.eval() + + pred_poses = [] + + print("-> Computing pose predictions") + + opt.frame_ids = [0, 1] # pose network only takes two frames as input + + with torch.no_grad(): + for inputs in dataloader: + for key, ipt in inputs.items(): + inputs[key] = ipt.cuda() + + all_color_aug = torch.cat([inputs[("color_aug", i, 0)] for i in opt.frame_ids], 1) + + features = [pose_encoder(all_color_aug)] + axisangle, translation = pose_decoder(features) + + pred_poses.append( + transformation_from_parameters(axisangle[:, 0], translation[:, 0]).cpu().numpy()) + + pred_poses = np.concatenate(pred_poses) + + gt_poses_path = os.path.join(opt.data_path, "poses", "{:02d}.txt".format(sequence_id)) + gt_global_poses = np.loadtxt(gt_poses_path).reshape(-1, 3, 4) + gt_global_poses = np.concatenate( + (gt_global_poses, np.zeros((gt_global_poses.shape[0], 1, 4))), 1) + gt_global_poses[:, 3, 3] = 1 + gt_xyzs = gt_global_poses[:, :3, 3] + + gt_local_poses = [] + for i in 
range(1, len(gt_global_poses)): + gt_local_poses.append( + np.linalg.inv(np.dot(np.linalg.inv(gt_global_poses[i - 1]), gt_global_poses[i]))) + + ates = [] + num_frames = gt_xyzs.shape[0] + track_length = 5 + for i in range(0, num_frames - 1): + local_xyzs = np.array(dump_xyz(pred_poses[i:i + track_length - 1])) + gt_local_xyzs = np.array(dump_xyz(gt_local_poses[i:i + track_length - 1])) + + ates.append(compute_ate(gt_local_xyzs, local_xyzs)) + + print("\n Trajectory error: {:0.3f}, std: {:0.3f}\n".format(np.mean(ates), np.std(ates))) + + save_path = os.path.join(opt.load_weights_folder, "poses.npy") + np.save(save_path, pred_poses) + print("-> Predictions saved to", save_path) + + +if __name__ == "__main__": + options = MonodepthOptions() + evaluate(options.parse()) diff --git a/gimp-plugins/monodepth2/export_gt_depth.py b/gimp-plugins/monodepth2/export_gt_depth.py new file mode 100755 index 0000000..4263b74 --- /dev/null +++ b/gimp-plugins/monodepth2/export_gt_depth.py @@ -0,0 +1,65 @@ +# Copyright Niantic 2019. Patent Pending. All rights reserved. +# +# This software is licensed under the terms of the Monodepth2 licence +# which allows for non-commercial use only, the full terms of which are made +# available in the LICENSE file. + +from __future__ import absolute_import, division, print_function + +import os + +import argparse +import numpy as np +import PIL.Image as pil + +from utils import readlines +from kitti_utils import generate_depth_map + + +def export_gt_depths_kitti(): + + parser = argparse.ArgumentParser(description='export_gt_depth') + + parser.add_argument('--data_path', + type=str, + help='path to the root of the KITTI data', + required=True) + parser.add_argument('--split', + type=str, + help='which split to export gt from', + required=True, + choices=["eigen", "eigen_benchmark"]) + opt = parser.parse_args() + + split_folder = os.path.join(os.path.dirname(__file__), "splits", opt.split) + lines = readlines(os.path.join(split_folder, "test_files.txt")) + + print("Exporting ground truth depths for {}".format(opt.split)) + + gt_depths = [] + for line in lines: + + folder, frame_id, _ = line.split() + frame_id = int(frame_id) + + if opt.split == "eigen": + calib_dir = os.path.join(opt.data_path, folder.split("/")[0]) + velo_filename = os.path.join(opt.data_path, folder, + "velodyne_points/data", "{:010d}.bin".format(frame_id)) + gt_depth = generate_depth_map(calib_dir, velo_filename, 2, True) + elif opt.split == "eigen_benchmark": + gt_depth_path = os.path.join(opt.data_path, folder, "proj_depth", + "groundtruth", "image_02", "{:010d}.png".format(frame_id)) + gt_depth = np.array(pil.open(gt_depth_path)).astype(np.float32) / 256 + + gt_depths.append(gt_depth.astype(np.float32)) + + output_path = os.path.join(split_folder, "gt_depths.npz") + + print("Saving to {}".format(opt.split)) + + np.savez_compressed(output_path, data=np.array(gt_depths)) + + +if __name__ == "__main__": + export_gt_depths_kitti() diff --git a/gimp-plugins/monodepth2/layers.py b/gimp-plugins/monodepth2/layers.py new file mode 100755 index 0000000..070cadb --- /dev/null +++ b/gimp-plugins/monodepth2/layers.py @@ -0,0 +1,269 @@ +# Copyright Niantic 2019. Patent Pending. All rights reserved. +# +# This software is licensed under the terms of the Monodepth2 licence +# which allows for non-commercial use only, the full terms of which are made +# available in the LICENSE file. 
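+# Shared building blocks for the depth and pose networks: disparity/depth conversion, axis-angle to 4x4
+# transforms, backprojection and projection layers, and the SSIM and smoothness losses.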
+ +from __future__ import absolute_import, division, print_function + +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def disp_to_depth(disp, min_depth, max_depth): + """Convert network's sigmoid output into depth prediction + The formula for this conversion is given in the 'additional considerations' + section of the paper. + """ + min_disp = 1 / max_depth + max_disp = 1 / min_depth + scaled_disp = min_disp + (max_disp - min_disp) * disp + depth = 1 / scaled_disp + return scaled_disp, depth + + +def transformation_from_parameters(axisangle, translation, invert=False): + """Convert the network's (axisangle, translation) output into a 4x4 matrix + """ + R = rot_from_axisangle(axisangle) + t = translation.clone() + + if invert: + R = R.transpose(1, 2) + t *= -1 + + T = get_translation_matrix(t) + + if invert: + M = torch.matmul(R, T) + else: + M = torch.matmul(T, R) + + return M + + +def get_translation_matrix(translation_vector): + """Convert a translation vector into a 4x4 transformation matrix + """ + T = torch.zeros(translation_vector.shape[0], 4, 4).to(device=translation_vector.device) + + t = translation_vector.contiguous().view(-1, 3, 1) + + T[:, 0, 0] = 1 + T[:, 1, 1] = 1 + T[:, 2, 2] = 1 + T[:, 3, 3] = 1 + T[:, :3, 3, None] = t + + return T + + +def rot_from_axisangle(vec): + """Convert an axisangle rotation into a 4x4 transformation matrix + (adapted from https://github.com/Wallacoloo/printipi) + Input 'vec' has to be Bx1x3 + """ + angle = torch.norm(vec, 2, 2, True) + axis = vec / (angle + 1e-7) + + ca = torch.cos(angle) + sa = torch.sin(angle) + C = 1 - ca + + x = axis[..., 0].unsqueeze(1) + y = axis[..., 1].unsqueeze(1) + z = axis[..., 2].unsqueeze(1) + + xs = x * sa + ys = y * sa + zs = z * sa + xC = x * C + yC = y * C + zC = z * C + xyC = x * yC + yzC = y * zC + zxC = z * xC + + rot = torch.zeros((vec.shape[0], 4, 4)).to(device=vec.device) + + rot[:, 0, 0] = torch.squeeze(x * xC + ca) + rot[:, 0, 1] = torch.squeeze(xyC - zs) + rot[:, 0, 2] = torch.squeeze(zxC + ys) + rot[:, 1, 0] = torch.squeeze(xyC + zs) + rot[:, 1, 1] = torch.squeeze(y * yC + ca) + rot[:, 1, 2] = torch.squeeze(yzC - xs) + rot[:, 2, 0] = torch.squeeze(zxC - ys) + rot[:, 2, 1] = torch.squeeze(yzC + xs) + rot[:, 2, 2] = torch.squeeze(z * zC + ca) + rot[:, 3, 3] = 1 + + return rot + + +class ConvBlock(nn.Module): + """Layer to perform a convolution followed by ELU + """ + def __init__(self, in_channels, out_channels): + super(ConvBlock, self).__init__() + + self.conv = Conv3x3(in_channels, out_channels) + self.nonlin = nn.ELU(inplace=True) + + def forward(self, x): + out = self.conv(x) + out = self.nonlin(out) + return out + + +class Conv3x3(nn.Module): + """Layer to pad and convolve input + """ + def __init__(self, in_channels, out_channels, use_refl=True): + super(Conv3x3, self).__init__() + + if use_refl: + self.pad = nn.ReflectionPad2d(1) + else: + self.pad = nn.ZeroPad2d(1) + self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) + + def forward(self, x): + out = self.pad(x) + out = self.conv(out) + return out + + +class BackprojectDepth(nn.Module): + """Layer to transform a depth image into a point cloud + """ + def __init__(self, batch_size, height, width): + super(BackprojectDepth, self).__init__() + + self.batch_size = batch_size + self.height = height + self.width = width + + meshgrid = np.meshgrid(range(self.width), range(self.height), indexing='xy') + self.id_coords = np.stack(meshgrid, axis=0).astype(np.float32) + self.id_coords = 
nn.Parameter(torch.from_numpy(self.id_coords), + requires_grad=False) + + self.ones = nn.Parameter(torch.ones(self.batch_size, 1, self.height * self.width), + requires_grad=False) + + self.pix_coords = torch.unsqueeze(torch.stack( + [self.id_coords[0].view(-1), self.id_coords[1].view(-1)], 0), 0) + self.pix_coords = self.pix_coords.repeat(batch_size, 1, 1) + self.pix_coords = nn.Parameter(torch.cat([self.pix_coords, self.ones], 1), + requires_grad=False) + + def forward(self, depth, inv_K): + cam_points = torch.matmul(inv_K[:, :3, :3], self.pix_coords) + cam_points = depth.view(self.batch_size, 1, -1) * cam_points + cam_points = torch.cat([cam_points, self.ones], 1) + + return cam_points + + +class Project3D(nn.Module): + """Layer which projects 3D points into a camera with intrinsics K and at position T + """ + def __init__(self, batch_size, height, width, eps=1e-7): + super(Project3D, self).__init__() + + self.batch_size = batch_size + self.height = height + self.width = width + self.eps = eps + + def forward(self, points, K, T): + P = torch.matmul(K, T)[:, :3, :] + + cam_points = torch.matmul(P, points) + + pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(1) + self.eps) + pix_coords = pix_coords.view(self.batch_size, 2, self.height, self.width) + pix_coords = pix_coords.permute(0, 2, 3, 1) + pix_coords[..., 0] /= self.width - 1 + pix_coords[..., 1] /= self.height - 1 + pix_coords = (pix_coords - 0.5) * 2 + return pix_coords + + +def upsample(x): + """Upsample input tensor by a factor of 2 + """ + return F.interpolate(x, scale_factor=2, mode="nearest") + + +def get_smooth_loss(disp, img): + """Computes the smoothness loss for a disparity image + The color image is used for edge-aware smoothness + """ + grad_disp_x = torch.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:]) + grad_disp_y = torch.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :]) + + grad_img_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True) + grad_img_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True) + + grad_disp_x *= torch.exp(-grad_img_x) + grad_disp_y *= torch.exp(-grad_img_y) + + return grad_disp_x.mean() + grad_disp_y.mean() + + +class SSIM(nn.Module): + """Layer to compute the SSIM loss between a pair of images + """ + def __init__(self): + super(SSIM, self).__init__() + self.mu_x_pool = nn.AvgPool2d(3, 1) + self.mu_y_pool = nn.AvgPool2d(3, 1) + self.sig_x_pool = nn.AvgPool2d(3, 1) + self.sig_y_pool = nn.AvgPool2d(3, 1) + self.sig_xy_pool = nn.AvgPool2d(3, 1) + + self.refl = nn.ReflectionPad2d(1) + + self.C1 = 0.01 ** 2 + self.C2 = 0.03 ** 2 + + def forward(self, x, y): + x = self.refl(x) + y = self.refl(y) + + mu_x = self.mu_x_pool(x) + mu_y = self.mu_y_pool(y) + + sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2 + sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2 + sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y + + SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2) + SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2) + + return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1) + + +def compute_depth_errors(gt, pred): + """Computation of error metrics between predicted and ground truth depths + """ + thresh = torch.max((gt / pred), (pred / gt)) + a1 = (thresh < 1.25 ).float().mean() + a2 = (thresh < 1.25 ** 2).float().mean() + a3 = (thresh < 1.25 ** 3).float().mean() + + rmse = (gt - pred) ** 2 + rmse = torch.sqrt(rmse.mean()) + + rmse_log = (torch.log(gt) - torch.log(pred)) ** 2 + rmse_log = torch.sqrt(rmse_log.mean()) + + 
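+ # the relative errors below are normalised by the ground-truth depth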
abs_rel = torch.mean(torch.abs(gt - pred) / gt) + + sq_rel = torch.mean((gt - pred) ** 2 / gt) + + return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 diff --git a/gimp-plugins/monodepth2/layers.pyc b/gimp-plugins/monodepth2/layers.pyc new file mode 100644 index 0000000000000000000000000000000000000000..651ca83ef62306fbe7644a907d5a2c1afaaa6720 GIT binary patch literal 10823 zcmcgyO>7)#TK=lrZo6&ANo;3qkHe^T8A#7A<1oo+#qMS&A zyPWQ-PJPvmZEXIM0STl%a75a@ES!**8`=XB7fx`26A}^bHDvp`@h&ScoL^VY&nP|quXmZ)?dM27><`ZMSwXx z{3z}h9lxK)QKAXW`S|;=6Z^Ya-Wzm7zWt(;SihzIxDFqwcCsHMlAQdC0WArv7ZKzWr=h)%#&T&No$(fV@ltk7;ldiYWw9V*5oMkzSHEH9waM56Tqx zRAsEF1dEuMM~Q8P-8gdTX?DjIx*?D<_Dcw^e`Mp_Uhjpi>-Vz9X*UZad%cY#d$r#k zw38G?S(>TkjqBa;5S2Ikhs6bUhjE%k=B9Fq9nK*z({tXOw}j`}13f}orH+7CeMjH^Y!TRE^QkY+Ss6R68!36QefW5Nc9nd8<>f zV`PF99f7cmao0TWiRP-Ei0lgzR-mv@_@To0&xa6R8jedY(SEr__rpBw#RbUaDr#8b zgQ+ff3zZsZ<3fe+E0v{6i<2Y6b9G|+zeEHy0=fX1I9gZ-F_0cW8-M{EH43IcHr|P= zgp)@OkQSr@EU2;O37X*8K}a+F|JV_gXLhUR)+YdtmY$+F^kNc{vd%b)_mfsE z(o49Hf{|^}V}pPY5Q;@bM{%B6#cT(#Q;=kfELi7@j0?CblJ+1QSYgk85 zom4$RV@)^hfYcjYL2xb<6;L$%G#*PxG1V_tHY)3$`+eTg%QEuGN}`u(Gf zVC$e)N3xh6^azj|%j)6@z#rlm84b`Hi&{mkW9SJYXL7=be9M{`bl$9;QL8E3cM!&cVKvg=Z)l78}v1k*byRhw&I(rbPvv_4dv z2eE(m;I)IBPH+(b+He2SpZwlG|Mk%u!i~)YSq@x;3)_MeDN?Sej7n8u^Gydd6h01U zh)XFXfso_Fd=Nuo6I!g^76?Zu9HjQqAdZjX;6=9FU~rXzz=-NkS>7P4t0}RDJTLjI z-npX!u~w*;4JFsM6dou%RCuJ~16TRby^h@LU`u6(?sc@Kp$?GbtPjid5o$4wgF7FD z<96M_BL&w?uX&4=i{3?VuCnamSE2rN>Q~vUe8Kxk{l@S0infv@cDI|g9=6b-{&+Zd zsXqIN#L(tJwFo5upCSU*sYrF7GQoWTyS<5;?@_lSkw7D*;PFH0xm4k)=i){xULGyzAeqxM=?E}sNM3IGp<`g9o1vapHhM>hv znMO`)-$pLzFzth~V+l_uw+5J7K&wEyZnK%BNzrVoGWX%;!A(&HB-y^sHBEJJm~ZfT zwxW`eW@$G`oj@bC+PJ$bZ5b+ds}rVa+_l953Sb+IGS27RzlL6W9Fp@eIO!#?MK0p@ z@vSZ7M6n%v2%TusmQfwT_CXjSN#lm6ye)PNZX+7pMW96mTTH%+Kx@Y;&Smo2WYERq zFpn%cFV{f2ZICa^6G@@;;Y6rUG@HFF8g!ZDX*Qct)@n8bZbI-Dg7LHz`xMrQNCF$? zCRKxKS+4~o#TFj@ad*WNKaR*GPs9==PhoOH18{6HJz|TOQF+W3VdRHtR5HbW4EP4V zIzAvB4Idq12N`GZ0c1+P$#a3?0otd)#l}p_WI?7FVwA~hGu&542aJdUoF>eMkU9pW zK=RoZeml74<_EVJa1Y#u-$6`Pc@Xb*oelay7~P0OBmFQEmh3=7bh*;T!9`^Sz_zGN z7L+5fd+hq_>~$8wz{brSJ&(`y(;O#B1nVp9N$`nmg>&H*T7PaS+_f>U1aBgFKC>rn ze~f36L%5&8+YH`e@N*2v=${EU@YE0SXaeMhg?d3<`fk{I*w3>)9REFf0!G~Ne~=hI zgZ@UWF)~*Cu=JuNy|7IVfj9~Meujt-yLS+c*@c)PDx;(Vx}C``jz1tghGGK@b}S(NAudbkKvNioL!WcHv|pK*N-^!A~F(yo5lL?k5Ljml`*Z`*BzZ z7NJ*4v^kXch^>B}!8aLv3&Hu|1-I|pcr2$xt)TAV2aWG#Zw>yE2A&1)tKJ6EQn!b{+V5R3xG(<^+1~7Dytisr=sz2vaLxY+~Y1FI= z^5F!WWiUY#JSZJNK&T0bE!yKi`xZ0#0KtR=p&_8-ft#xNu5&?zy{6E@p3l}2iob`) zlH!f`x_8yvsJJtI8yOQ|{8?xvWRBm#Gs5Be6-TfRgKysBCh70F>IqJfS_#w>V2isZ zkk-y4u-sXP!H+vhi>_;7yY+A0bC_#~2;72JmWTdhSdGkr-`yrH>%S|7CM;0f%4zL? zD1aUrg)MRqpDG?HHeW^LEXC&eNDbsAR6(~P^f(7`Ytq6kKa zzyZD`c))(FdJEOzEE;AbG0v`;HH@$_bg7exODrG}J3v6hY_=I1^LMwO<33=^3BV92 z^GqjDV}!O61F-#ixK5JuTaYZgc5op(|6O?^75yOK@Is;Up^>-H-5=wz^nn`hHMp(= z?{fg?Rs0s63G_(?@3X)Z^2;A`9wbWXcC#(=6rT~D%8TO2PJ+Mzyp*5HeQ}iiBIk!^7 z&1zf%^fTP0#NvLLrsr}f&hjW1@n=|&B?+1;=+>Vm{oT0K{kDV6=TmnH3Zigf0jLh0unefXz+kEz{%(^n? z$crFij)D(9B%EPWx_g@@KKmG%W#db{_Ajq||E<42dU{(}+y22zAOG+hKltwL_N!Z0 zfA|kydhvGqcmLv7-~Q8+_is0N6wV&|6~-iY?r_xvz@Oc3e~@KecygHP4Vni~v9se9ixNb9ue^O0G%^_qL#a2UUZ3g<$8TFl>acqe$5y(_~X zL~z|%M4asu9v%0;A%bn*8~IP2ri7$UfGFZbqw}shBM6zU)M-)36m~81$OKr+x)?-5 zRDc5J85KdFLTru75Q>0Vs1D`Gwc@qk+6i#;mTKz;odSK^=Cu zTGj)~K@(F-^*~Gb00>?`Oc!nHW@8Q2Hwc`V{n#|4{2Oax4Q@~HF@s-Y@aqhIgTWI9 zPZ|6sgWqB>x-M>Yp&YojJWYx1U=uM1UZA64iV#v5AIM11K`4JR-_hgrGx;DL^-XPk83al*1a)Bts6UWs znW>6MvVtfWan}eS=NjSDwNWFN>Ey7;9&^{dE9Z|+0fL(I#OPWYir@ni)nXF7AIFL? 
zA>7Z-z%Jx=S;C4F*zST9$R6i8L@qug$!V|-k-z*%3j&!Q`F5TS@G9~F%v*h_2|1g?Z~elq`W`2PN9;mC6*49dqr}=gdn9$dugfG| z^|*Tt`5rs0&HJd}gn{JWcM%)!o?@TkHbQ50%Uxx?K^E4%>Q#Kfv;d%4s@4}4R_d4P UUs%7mzO?SIuh(n!OY5KePxEY@vH$=8 literal 0 HcmV?d00001 diff --git a/gimp-plugins/monodepth2/networks/__init__.py b/gimp-plugins/monodepth2/networks/__init__.py new file mode 100755 index 0000000..2386870 --- /dev/null +++ b/gimp-plugins/monodepth2/networks/__init__.py @@ -0,0 +1,4 @@ +from .resnet_encoder import ResnetEncoder +from .depth_decoder import DepthDecoder +from .pose_decoder import PoseDecoder +from .pose_cnn import PoseCNN diff --git a/gimp-plugins/monodepth2/networks/__init__.pyc b/gimp-plugins/monodepth2/networks/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..834ff2c107aa63dab8300b814d502232ea54be0e GIT binary patch literal 397 zcmY+AT}s3-6oqfv=~UDam(dpu_$;DWUyD@cr4NRo31cxorO7zB1$XCK+<+&|AY*<` zIA7r0+~hy2)35K3*Pad|!+DSGYly^Rq=^ipjuDGKBb|}SsmqhkNiWD0)D_7uNH58h z)RpiBy;MsC4GdC62`fmeyj9`J_D-rP;d9_xjp40UNAwKz*?IL3mIwIQG);8^6D}ar zoGDZaZ9!5k8AF9ksqOrPSZhS;AB|0im^9Pg+BS!&u#dz)!1A?MljmmB^V;pLb{pyW yb}-}BsCV1J;>uZcg_<`UJ?`D~?zs?y9fAMp*<$N#xxasC47epaIZ literal 0 HcmV?d00001 diff --git a/gimp-plugins/monodepth2/networks/depth_decoder.py b/gimp-plugins/monodepth2/networks/depth_decoder.py new file mode 100755 index 0000000..498ec38 --- /dev/null +++ b/gimp-plugins/monodepth2/networks/depth_decoder.py @@ -0,0 +1,65 @@ +# Copyright Niantic 2019. Patent Pending. All rights reserved. +# +# This software is licensed under the terms of the Monodepth2 licence +# which allows for non-commercial use only, the full terms of which are made +# available in the LICENSE file. + +from __future__ import absolute_import, division, print_function + +import numpy as np +import torch +import torch.nn as nn + +from collections import OrderedDict +from layers import * + + +class DepthDecoder(nn.Module): + def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True): + super(DepthDecoder, self).__init__() + + self.num_output_channels = num_output_channels + self.use_skips = use_skips + self.upsample_mode = 'nearest' + self.scales = scales + + self.num_ch_enc = num_ch_enc + self.num_ch_dec = np.array([16, 32, 64, 128, 256]) + + # decoder + self.convs = OrderedDict() + for i in range(4, -1, -1): + # upconv_0 + num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1] + num_ch_out = self.num_ch_dec[i] + self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out) + + # upconv_1 + num_ch_in = self.num_ch_dec[i] + if self.use_skips and i > 0: + num_ch_in += self.num_ch_enc[i - 1] + num_ch_out = self.num_ch_dec[i] + self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out) + + for s in self.scales: + self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels) + + self.decoder = nn.ModuleList(list(self.convs.values())) + self.sigmoid = nn.Sigmoid() + + def forward(self, input_features): + self.outputs = {} + + # decoder + x = input_features[-1] + for i in range(4, -1, -1): + x = self.convs[("upconv", i, 0)](x) + x = [upsample(x)] + if self.use_skips and i > 0: + x += [input_features[i - 1]] + x = torch.cat(x, 1) + x = self.convs[("upconv", i, 1)](x) + if i in self.scales: + self.outputs[("disp", i)] = self.sigmoid(self.convs[("dispconv", i)](x)) + + return self.outputs diff --git a/gimp-plugins/monodepth2/networks/depth_decoder.pyc b/gimp-plugins/monodepth2/networks/depth_decoder.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..10a9d2c03733f74f053982e83a3c3c94f1548ef9 GIT binary patch literal 2358 zcmcIlO>Z1U5Url^etI3}J61cyd)Bivo9Q0M zMz$2mKJh2`6Z``19Jp}jH*g2ut62wg;}CXddaJ9etEykutM&g_>HqNS!8friK0baQ ze`~~k4oB<;#{AQK}=Th2sIMZOxZ(CyBsgYX%18ZjfciXr;WmU->cFq_TG%DFCd zNp_IgtSsCmOy*TqxNuSzv4iT_$Z+Q}hR>@ct$2RkbUNO1{{_o@tHX=2zEx?mgzwMaSKBaZyWDZdix1#hsVe< zTzN9}cw}?za~@(`QUOlYB5cXAr<%4LH=2&%)YBfrBX9ysfZvwg7Bi!hJ`W71lm~M7 zn>gUvkpvgfl~Z4~zy4WH;1}CEvhPXtBTc7jF)S|Wq_4|8xivgNV63WQc!J1C^X&w5T&P4UcQzWhnpI|jq`jU zCtcRZB6wGZl>7@R4&=Bi6Hm5Yeqmm?%QC$p?&>1{={2GS@D})87aH6+hwvsZeP^_S z8+_(il0_O-sdd?9$k|;Ek2$>NAh_P<`v1ZwV+}aoeLauM;y{aeQL|Pxqg%9KmDqZo zRskh72*a$%To}4*&=mD7ENeHfT^R30MUm##v8rmDhIT)jTih^X*l0G-({P4D)~Gg) za%9?HxQO?{6uj^_oompjs-mNd>q#01KGt=WDO;7QsMt+4@KNKk zP)DDYNu8&kX4W~@FxV+1d27psC{L4;>qFfJR$k5Dy8f%a-uB){Q|5LK?K6$ptt6|g>R}Q-C!q&_Cg69PTkXCHq&oU(op$XpfeC1BS zWi5pyv-74fXwi_t2G_ZT3{lRU6QHRPrBYeNdq4)AIC4!9y@QE%>EK=NV_{_lnKelx zSCiN*uEXFC?eMI=zn&@2oU0!8R~Ri|2-0n75A)Kz+$|xss?o+3i%Nu%76oEEEGCVHxJk@`1+q5D=kVi*gt~QdU!Dy%a{|>3SZ8ac& T%!;$@JGZ@o8JJb@x$plCxd`F! literal 0 HcmV?d00001 diff --git a/gimp-plugins/monodepth2/networks/pose_cnn.py b/gimp-plugins/monodepth2/networks/pose_cnn.py new file mode 100755 index 0000000..16baec7 --- /dev/null +++ b/gimp-plugins/monodepth2/networks/pose_cnn.py @@ -0,0 +1,50 @@ +# Copyright Niantic 2019. Patent Pending. All rights reserved. +# +# This software is licensed under the terms of the Monodepth2 licence +# which allows for non-commercial use only, the full terms of which are made +# available in the LICENSE file. + +from __future__ import absolute_import, division, print_function + +import torch +import torch.nn as nn + + +class PoseCNN(nn.Module): + def __init__(self, num_input_frames): + super(PoseCNN, self).__init__() + + self.num_input_frames = num_input_frames + + self.convs = {} + self.convs[0] = nn.Conv2d(3 * num_input_frames, 16, 7, 2, 3) + self.convs[1] = nn.Conv2d(16, 32, 5, 2, 2) + self.convs[2] = nn.Conv2d(32, 64, 3, 2, 1) + self.convs[3] = nn.Conv2d(64, 128, 3, 2, 1) + self.convs[4] = nn.Conv2d(128, 256, 3, 2, 1) + self.convs[5] = nn.Conv2d(256, 256, 3, 2, 1) + self.convs[6] = nn.Conv2d(256, 256, 3, 2, 1) + + self.pose_conv = nn.Conv2d(256, 6 * (num_input_frames - 1), 1) + + self.num_convs = len(self.convs) + + self.relu = nn.ReLU(True) + + self.net = nn.ModuleList(list(self.convs.values())) + + def forward(self, out): + + for i in range(self.num_convs): + out = self.convs[i](out) + out = self.relu(out) + + out = self.pose_conv(out) + out = out.mean(3).mean(2) + + out = 0.01 * out.view(-1, self.num_input_frames - 1, 1, 6) + + axisangle = out[..., :3] + translation = out[..., 3:] + + return axisangle, translation diff --git a/gimp-plugins/monodepth2/networks/pose_cnn.pyc b/gimp-plugins/monodepth2/networks/pose_cnn.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd6d711ee56355532c88f51128e080263b85c2c1 GIT binary patch literal 1873 zcmcIk-HzKt6h3yGzwL(Iw$W-qy{G^o-*hAO4yl4dD{)g+$lD4L35-dc?F4(AddArT zU8&NR8ykP@y;zBxlEjlU=X`VK%zWQ*=kL+ZZ-1`8o5|+W!Tv$%to%I~xv*f=h|N(mPC=t1$w?d{JmbKphvxwoy~J{lUm!0~ZYax; zMF%f*o_1I<0%;^(Pl<#Gjd36x7H7o#qD#aQc319gPzd8X`ih7yAqHCn!b7fRj=?2F zv_&Aia}^@kA`rfI6=HXbK=?{T|349<%ZL#VjWQ3%thJ*z(ihOx9{i%ctfAh~j`(Y` z@$9N+B;c||F86r5h;9J;r0OIl)yIaiF#8-xvms5Y7jv7I%7ceK{4HsH8B_d|GA&|6A? 
zXTl=4>SdaC757i_M^8MB$90pdQRfi$4m@90^QOoj8Ryks(BKa1tZ1-1bK2aiIu+pZ zVv*=^5RsUJ`wYY6iOXv@DYMz+VRdGUDx15>DH`X_s%TD)MX0jXJYV^zcPGerR@Fbc zNn5dGM=bkjv7 zV##fs1`xZ&oH{_zK&$4>{A+~ZiFi3b1+yviy!D09T?1M0-RyVz2(FN!Y9xY8v zRco56SP}~5O@(iB>(^b literal 0 HcmV?d00001 diff --git a/gimp-plugins/monodepth2/networks/pose_decoder.py b/gimp-plugins/monodepth2/networks/pose_decoder.py new file mode 100755 index 0000000..4b03b60 --- /dev/null +++ b/gimp-plugins/monodepth2/networks/pose_decoder.py @@ -0,0 +1,54 @@ +# Copyright Niantic 2019. Patent Pending. All rights reserved. +# +# This software is licensed under the terms of the Monodepth2 licence +# which allows for non-commercial use only, the full terms of which are made +# available in the LICENSE file. + +from __future__ import absolute_import, division, print_function + +import torch +import torch.nn as nn +from collections import OrderedDict + + +class PoseDecoder(nn.Module): + def __init__(self, num_ch_enc, num_input_features, num_frames_to_predict_for=None, stride=1): + super(PoseDecoder, self).__init__() + + self.num_ch_enc = num_ch_enc + self.num_input_features = num_input_features + + if num_frames_to_predict_for is None: + num_frames_to_predict_for = num_input_features - 1 + self.num_frames_to_predict_for = num_frames_to_predict_for + + self.convs = OrderedDict() + self.convs[("squeeze")] = nn.Conv2d(self.num_ch_enc[-1], 256, 1) + self.convs[("pose", 0)] = nn.Conv2d(num_input_features * 256, 256, 3, stride, 1) + self.convs[("pose", 1)] = nn.Conv2d(256, 256, 3, stride, 1) + self.convs[("pose", 2)] = nn.Conv2d(256, 6 * num_frames_to_predict_for, 1) + + self.relu = nn.ReLU() + + self.net = nn.ModuleList(list(self.convs.values())) + + def forward(self, input_features): + last_features = [f[-1] for f in input_features] + + cat_features = [self.relu(self.convs["squeeze"](f)) for f in last_features] + cat_features = torch.cat(cat_features, 1) + + out = cat_features + for i in range(3): + out = self.convs[("pose", i)](out) + if i != 2: + out = self.relu(out) + + out = out.mean(3).mean(2) + + out = 0.01 * out.view(-1, self.num_frames_to_predict_for, 1, 6) + + axisangle = out[..., :3] + translation = out[..., 3:] + + return axisangle, translation diff --git a/gimp-plugins/monodepth2/networks/pose_decoder.pyc b/gimp-plugins/monodepth2/networks/pose_decoder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfc6000f79184642284a7c3290691a47e86a65e2 GIT binary patch literal 2111 zcmcIl-EJFI5T3K^-~5n}Mp1%5ty(~PLoD^W6a+zBPy*VhLRe~-W%oE)XT9s}IZjH6 zuM%!}7~X|P;2C%T_`b1AQp6>-;*&WubLRJ(aro!@%CEnlKhI?O3Gx5aAB?yh1irbE zQe-i}W!e;#Pe!vU^I#oD zhA&rfeADE%vH3xfdA)yIJ9}WW8r;`X{ER8y!vibKEDx8tUL!@&0fR(h@ln)*QbGG! 
z8wbA(_;bGB0^8%y$R$z=WE?OtbE)?vl*@pEq$QVX{BiUncMy^zK+!Ivu(NYDbLfNLgEthBmv_Rk?v{qheQT(48tG6|JjAi>Z zKWhv5sw^X0)5z8Z9a#7nZ%}jvuZxT~S;V7g9rZcXk`;g9QwYn{1=F3)tbJ*V5VU7q zr|6EhzEBVZU`0IQ8%*3>*@IhBRel>h&z`v1)HX3~Dp-=Fs0yDXeg&LrHc7IR!~!f^ z%WF|hXTXn5{j9N0V>zrV>-U!OQIk%rOMIP7fhK?{8P!d!anoB?SLe<%f~xWg2QVJy zYA3c&o+{B;K$!l&uJc)GpA^n3za+s&=V>{Eb6SP#h=x_c*>V*3af|snUL&9S&NoGF zBT%A;`vTP9sk4n6Oww#{P+wGKo#t+EjJf$}TF#D(3Zl9K{HOlp;Q($f>gLo9SokDg z%*FonrAm z3h%w=ft+;(Ag3^d@V*KGO-1<_BNxaXlo#ls`uj3(35p8T6U!kd3LMb$w!HX?wE^^@ zfAH*MnRgU}x=$`UGNQ3Ih0w)4@5pH=&6z5}<15kGnc(j`+{f*uH4>fW%e+ffEHTUh zA>P%=>B&_~wrLoueO(kS`6W=NbA3TBqLY8Pm?j8%R5^;+#X}v1m_1%h2D`q1Ics|S z^Y(9F{_*nq6CH&8i2aBO?wG@*2={f9ohVqc)GMb+t7EH(nb@>a;=HgI3YbXov%|;S zm;b^$M``3~x=h`zIUv+v{bn#2Wj*uQaIk@;gVp?^aPV7NU9TQ7xH9DueiOLVt%1qA zNSpitoG;<|UNA6QLC4%NduGk-1iQiAU 1: + self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images) + else: + self.encoder = resnets[num_layers](pretrained) + + if num_layers > 34: + self.num_ch_enc[1:] *= 4 + + def forward(self, input_image): + self.features = [] + x = (input_image - 0.45) / 0.225 + x = self.encoder.conv1(x) + x = self.encoder.bn1(x) + self.features.append(self.encoder.relu(x)) + self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1]))) + self.features.append(self.encoder.layer2(self.features[-1])) + self.features.append(self.encoder.layer3(self.features[-1])) + self.features.append(self.encoder.layer4(self.features[-1])) + + return self.features diff --git a/gimp-plugins/monodepth2/networks/resnet_encoder.pyc b/gimp-plugins/monodepth2/networks/resnet_encoder.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd81848e2dab08a93aaa30440f46d49ed42d6125 GIT binary patch literal 4657 zcmcIo&2wBw5%1ZRR;w?|mSji5hP*gXtdm%4$*~9~mDnPIa)DLhnUF$N@OXM}b~U?i z-&@|yN>(gYrI0Uh;>e9N2X6cs;lQ2ZzzGhVsN%u_e%-UH-Pi>udp|}!{d#(Odb)qz ztLi@&=YI94!=ENJd{y!H-p>@--+@T+QIrw&6zwVMRcNn5y(;ZhN%6A^WmB{_Epni1 zv{xI^Gt{ZmUVSv3rOq7f&5fq>)LEd;BJC|wS|vS4oeR`blupr8qBEi&?=6wONGVj; z=qa|nMClCaI(3#su6{1J!d5(L%)(m3M`-W?+!BU(-xk@x>B#i@Md@4}ywn^TYl_@m zz@%TA+(oTHo;a{}-DU7XZFjYMILMrN(2LvpLEay@FF0q1o%3agYGR+j4e$xP1T%0H zwzW#(qs9~@>~ArAyU4982Z^&m9F*GT+6BEL)md<4+(B>{m&Yb=2l=44ugjol1%|f_ z4DW6?<^sI#r*YruG-#DYFF0_nZ|^pnZLA*bZzM&p**|tgnH)6z*_!)VvES^)*6Fe- z4~Bf%)i%8i8~tO^ZT<#_83jX=X$&=d=hB&0Tg0Y7W}ep1s$$f|iDwJ)Y>w8wjb^_m-xWUg#vs&K_EVAtl;W)%{`0;*UIiB>gqDrS!B(p+RQMM z?-(IzQf(?cXJ$W6Q-mRQAlf&G9cCgDvpCTb9gC;7Mv=*liy}!;*)-SIv2A%S)*^=vw^CX9 zoj9{vs07SSIpI$1l7ro%>_LW=?&q7bX{f*du@FBj2RdZC9OJ~p`0+WX zt#KBKiX&~>$Z0kVC*oBu3fZs*-;=DjI%~<1k=aRiys;IdfgDki#nxi;i&(;Q!4Lo+ zV+hh0r)*##8SHmKH9xkxv_OfZ`9X1%XGNS^KuWKFv!4yxu)f(Va@=g+9c(oLSw}_L z9YIH=^8}AdKuE|q%D8E-V<5GzW>rnSsg~7c^_se-uJgOHg86lPuPA{ek4v zF(v}!fL^~)gcM@58-3a!6m5_3YMPi;p=~HwKdrJnE(A#00Zc-2IGK={;v4~-o*!4iMC90EnWl&a!PVW*y(t8o{BRc{)KM2IjQ|8CN}0K-E>xf0v;M zzz9%Qq;i~Y8sM>rW7>c7p(jh0uZX^64a-s4ws)m?aB<>rlRU9O1NHRA-C*~;lnu+B zf4mVq99S3Z>tOSppeTbox6XC+OYO=SfRP4`eIz54JZJ@+wQr$hx?d*R8cWdst`+VDv);nrZBGw0jNEl7KE9~oV8XYyfL zqJLqXzJd$sl}h&P#MUG`0u7u@JB4$Z&UMoD2+PPK3!_hqLUNC@JsOmmtoDyEv!b`f z(BuSdq<0WNO~B!nN*2OzXsS|^xCUp5C>aNVOvm>rUJ%8CT^=5VjNEbG1G!irGOS;x z|`Z<6odYO?I_(oCV#o2R5guL5qrS-kEX?_r#obfVE7y4$0ZZ*tht4KAXgT>5>cxWxHpq%Ua^mt}l>9`(rq zm&35hLgQrLd}m^E`|XLzom*1s{ob3mHYaCywnXiZ;%uNL>BDU{;2!JdR2v3HgF_+w z8V`4Q_!bX*SP}A??aN6baRz(EUc*3YrSe*3QO(JFv9h4nCbsqS~_C}tE3>1zl_r$O4 z4XI|<1A<(iNDrLJtbZnDn6pB7;cZ?o4TJpaL27o$N`2n*lZR3y(w(eL;GU W;l}d$u2mM0%}aPMSJ&5 Loading model from ", model_path) + encoder_path = os.path.join(model_path, "encoder.pth") + depth_decoder_path = 
os.path.join(model_path, "depth.pth") + + # LOADING PRETRAINED MODEL + print(" Loading pretrained encoder") + encoder = networks.ResnetEncoder(18, False) + loaded_dict_enc = torch.load(encoder_path, map_location=device) + + # extract the height and width of image that this model was trained with + feed_height = loaded_dict_enc['height'] + feed_width = loaded_dict_enc['width'] + filtered_dict_enc = {k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()} + encoder.load_state_dict(filtered_dict_enc) + encoder.to(device) + encoder.eval() + + print(" Loading pretrained decoder") + depth_decoder = networks.DepthDecoder( + num_ch_enc=encoder.num_ch_enc, scales=range(4)) + + loaded_dict = torch.load(depth_decoder_path, map_location=device) + depth_decoder.load_state_dict(loaded_dict) + + depth_decoder.to(device) + depth_decoder.eval() + + # FINDING INPUT IMAGES + if os.path.isfile(args.image_path): + # Only testing on a single image + paths = [args.image_path] + output_directory = os.path.dirname(args.image_path) + elif os.path.isdir(args.image_path): + # Searching folder for images + paths = glob.glob(os.path.join(args.image_path, '*.{}'.format(args.ext))) + output_directory = args.image_path + else: + raise Exception("Can not find args.image_path: {}".format(args.image_path)) + + print("-> Predicting on {:d} test images".format(len(paths))) + + # PREDICTING ON EACH IMAGE IN TURN + with torch.no_grad(): + for idx, image_path in enumerate(paths): + + if image_path.endswith("_disp.jpg"): + # don't try to predict disparity for a disparity image! + continue + + # Load image and preprocess + # input_image = cv2.imread(image_path) + # input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB) + input_image = pil.open(image_path).convert('RGB') + original_width, original_height = input_image.size + # input_image = cv2.resize(input_image, (feed_width, feed_height)) + input_image = input_image.resize((feed_width, feed_height), pil.LANCZOS) + input_image = transforms.ToTensor()(input_image).unsqueeze(0) + + # PREDICTION + input_image = input_image.to(device) + features = encoder(input_image) + outputs = depth_decoder(features) + + disp = outputs[("disp", 0)] + disp_resized = torch.nn.functional.interpolate( + disp, (original_height, original_width), mode="bilinear", align_corners=False) + + # Saving numpy file + output_name = os.path.splitext(os.path.basename(image_path))[0] + name_dest_npy = os.path.join(output_directory, "{}_disp.npy".format(output_name)) + scaled_disp, _ = disp_to_depth(disp, 0.1, 100) + np.save(name_dest_npy, scaled_disp.cpu().numpy()) + + # Saving colormapped depth image + disp_resized_np = disp_resized.squeeze().cpu().numpy() + vmax = np.percentile(disp_resized_np, 95) + normalizer = mpl.colors.Normalize(vmin=disp_resized_np.min(), vmax=vmax) + mapper = cm.ScalarMappable(norm=normalizer, cmap='magma') + colormapped_im = (mapper.to_rgba(disp_resized_np)[:, :, :3] * 255).astype(np.uint8) + im = pil.fromarray(colormapped_im) + + name_dest_im = os.path.join(output_directory, "{}_disp.jpeg".format(output_name)) + im.save(name_dest_im) + # cv2.imwrite('/Users/kritiksoman/Downloads/gimp-plugins/out5.jpg',cv2.cvtColor(colormapped_im, cv2.COLOR_RGB2BGR)) + + print(" Processed {:d} of {:d} images - saved prediction to {}".format( + idx + 1, len(paths), name_dest_im)) + + print('-> Done!') + + +if __name__ == '__main__': + args = parse_args() + test_simple(args) diff --git a/gimp-plugins/monodepth2/train.py b/gimp-plugins/monodepth2/train.py new file mode 100755 index 
0000000..ee1425e --- /dev/null +++ b/gimp-plugins/monodepth2/train.py @@ -0,0 +1,18 @@ +# Copyright Niantic 2019. Patent Pending. All rights reserved. +# +# This software is licensed under the terms of the Monodepth2 licence +# which allows for non-commercial use only, the full terms of which are made +# available in the LICENSE file. + +from __future__ import absolute_import, division, print_function + +from trainer import Trainer +from options import MonodepthOptions + +options = MonodepthOptions() +opts = options.parse() + + +if __name__ == "__main__": + trainer = Trainer(opts) + trainer.train() diff --git a/gimp-plugins/monodepth2/trainer.py b/gimp-plugins/monodepth2/trainer.py new file mode 100755 index 0000000..a726dad --- /dev/null +++ b/gimp-plugins/monodepth2/trainer.py @@ -0,0 +1,630 @@ +# Copyright Niantic 2019. Patent Pending. All rights reserved. +# +# This software is licensed under the terms of the Monodepth2 licence +# which allows for non-commercial use only, the full terms of which are made +# available in the LICENSE file. + +from __future__ import absolute_import, division, print_function + +import numpy as np +import time + +import torch +import torch.nn.functional as F +import torch.optim as optim +from torch.utils.data import DataLoader +from tensorboardX import SummaryWriter + +import json + +from utils import * +from kitti_utils import * +from layers import * + +import datasets +import networks +from IPython import embed + + +class Trainer: + def __init__(self, options): + self.opt = options + self.log_path = os.path.join(self.opt.log_dir, self.opt.model_name) + + # checking height and width are multiples of 32 + assert self.opt.height % 32 == 0, "'height' must be a multiple of 32" + assert self.opt.width % 32 == 0, "'width' must be a multiple of 32" + + self.models = {} + self.parameters_to_train = [] + + self.device = torch.device("cpu" if self.opt.no_cuda else "cuda") + + self.num_scales = len(self.opt.scales) + self.num_input_frames = len(self.opt.frame_ids) + self.num_pose_frames = 2 if self.opt.pose_model_input == "pairs" else self.num_input_frames + + assert self.opt.frame_ids[0] == 0, "frame_ids must start with 0" + + self.use_pose_net = not (self.opt.use_stereo and self.opt.frame_ids == [0]) + + if self.opt.use_stereo: + self.opt.frame_ids.append("s") + + self.models["encoder"] = networks.ResnetEncoder( + self.opt.num_layers, self.opt.weights_init == "pretrained") + self.models["encoder"].to(self.device) + self.parameters_to_train += list(self.models["encoder"].parameters()) + + self.models["depth"] = networks.DepthDecoder( + self.models["encoder"].num_ch_enc, self.opt.scales) + self.models["depth"].to(self.device) + self.parameters_to_train += list(self.models["depth"].parameters()) + + if self.use_pose_net: + if self.opt.pose_model_type == "separate_resnet": + self.models["pose_encoder"] = networks.ResnetEncoder( + self.opt.num_layers, + self.opt.weights_init == "pretrained", + num_input_images=self.num_pose_frames) + + self.models["pose_encoder"].to(self.device) + self.parameters_to_train += list(self.models["pose_encoder"].parameters()) + + self.models["pose"] = networks.PoseDecoder( + self.models["pose_encoder"].num_ch_enc, + num_input_features=1, + num_frames_to_predict_for=2) + + elif self.opt.pose_model_type == "shared": + self.models["pose"] = networks.PoseDecoder( + self.models["encoder"].num_ch_enc, self.num_pose_frames) + + elif self.opt.pose_model_type == "posecnn": + self.models["pose"] = networks.PoseCNN( + self.num_input_frames if 
self.opt.pose_model_input == "all" else 2) + + self.models["pose"].to(self.device) + self.parameters_to_train += list(self.models["pose"].parameters()) + + if self.opt.predictive_mask: + assert self.opt.disable_automasking, \ + "When using predictive_mask, please disable automasking with --disable_automasking" + + # Our implementation of the predictive masking baseline has the the same architecture + # as our depth decoder. We predict a separate mask for each source frame. + self.models["predictive_mask"] = networks.DepthDecoder( + self.models["encoder"].num_ch_enc, self.opt.scales, + num_output_channels=(len(self.opt.frame_ids) - 1)) + self.models["predictive_mask"].to(self.device) + self.parameters_to_train += list(self.models["predictive_mask"].parameters()) + + self.model_optimizer = optim.Adam(self.parameters_to_train, self.opt.learning_rate) + self.model_lr_scheduler = optim.lr_scheduler.StepLR( + self.model_optimizer, self.opt.scheduler_step_size, 0.1) + + if self.opt.load_weights_folder is not None: + self.load_model() + + print("Training model named:\n ", self.opt.model_name) + print("Models and tensorboard events files are saved to:\n ", self.opt.log_dir) + print("Training is using:\n ", self.device) + + # data + datasets_dict = {"kitti": datasets.KITTIRAWDataset, + "kitti_odom": datasets.KITTIOdomDataset} + self.dataset = datasets_dict[self.opt.dataset] + + fpath = os.path.join(os.path.dirname(__file__), "splits", self.opt.split, "{}_files.txt") + + train_filenames = readlines(fpath.format("train")) + val_filenames = readlines(fpath.format("val")) + img_ext = '.png' if self.opt.png else '.jpg' + + num_train_samples = len(train_filenames) + self.num_total_steps = num_train_samples // self.opt.batch_size * self.opt.num_epochs + + train_dataset = self.dataset( + self.opt.data_path, train_filenames, self.opt.height, self.opt.width, + self.opt.frame_ids, 4, is_train=True, img_ext=img_ext) + self.train_loader = DataLoader( + train_dataset, self.opt.batch_size, True, + num_workers=self.opt.num_workers, pin_memory=True, drop_last=True) + val_dataset = self.dataset( + self.opt.data_path, val_filenames, self.opt.height, self.opt.width, + self.opt.frame_ids, 4, is_train=False, img_ext=img_ext) + self.val_loader = DataLoader( + val_dataset, self.opt.batch_size, True, + num_workers=self.opt.num_workers, pin_memory=True, drop_last=True) + self.val_iter = iter(self.val_loader) + + self.writers = {} + for mode in ["train", "val"]: + self.writers[mode] = SummaryWriter(os.path.join(self.log_path, mode)) + + if not self.opt.no_ssim: + self.ssim = SSIM() + self.ssim.to(self.device) + + self.backproject_depth = {} + self.project_3d = {} + for scale in self.opt.scales: + h = self.opt.height // (2 ** scale) + w = self.opt.width // (2 ** scale) + + self.backproject_depth[scale] = BackprojectDepth(self.opt.batch_size, h, w) + self.backproject_depth[scale].to(self.device) + + self.project_3d[scale] = Project3D(self.opt.batch_size, h, w) + self.project_3d[scale].to(self.device) + + self.depth_metric_names = [ + "de/abs_rel", "de/sq_rel", "de/rms", "de/log_rms", "da/a1", "da/a2", "da/a3"] + + print("Using split:\n ", self.opt.split) + print("There are {:d} training items and {:d} validation items\n".format( + len(train_dataset), len(val_dataset))) + + self.save_opts() + + def set_train(self): + """Convert all models to training mode + """ + for m in self.models.values(): + m.train() + + def set_eval(self): + """Convert all models to testing/evaluation mode + """ + for m in self.models.values(): + m.eval() 
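+ # The training loop is split across train() (epochs), run_epoch() (batches) and process_batch() (forward pass and losses).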
+ + def train(self): + """Run the entire training pipeline + """ + self.epoch = 0 + self.step = 0 + self.start_time = time.time() + for self.epoch in range(self.opt.num_epochs): + self.run_epoch() + if (self.epoch + 1) % self.opt.save_frequency == 0: + self.save_model() + + def run_epoch(self): + """Run a single epoch of training and validation + """ + self.model_lr_scheduler.step() + + print("Training") + self.set_train() + + for batch_idx, inputs in enumerate(self.train_loader): + + before_op_time = time.time() + + outputs, losses = self.process_batch(inputs) + + self.model_optimizer.zero_grad() + losses["loss"].backward() + self.model_optimizer.step() + + duration = time.time() - before_op_time + + # log less frequently after the first 2000 steps to save time & disk space + early_phase = batch_idx % self.opt.log_frequency == 0 and self.step < 2000 + late_phase = self.step % 2000 == 0 + + if early_phase or late_phase: + self.log_time(batch_idx, duration, losses["loss"].cpu().data) + + if "depth_gt" in inputs: + self.compute_depth_losses(inputs, outputs, losses) + + self.log("train", inputs, outputs, losses) + self.val() + + self.step += 1 + + def process_batch(self, inputs): + """Pass a minibatch through the network and generate images and losses + """ + for key, ipt in inputs.items(): + inputs[key] = ipt.to(self.device) + + if self.opt.pose_model_type == "shared": + # If we are using a shared encoder for both depth and pose (as advocated + # in monodepthv1), then all images are fed separately through the depth encoder. + all_color_aug = torch.cat([inputs[("color_aug", i, 0)] for i in self.opt.frame_ids]) + all_features = self.models["encoder"](all_color_aug) + all_features = [torch.split(f, self.opt.batch_size) for f in all_features] + + features = {} + for i, k in enumerate(self.opt.frame_ids): + features[k] = [f[i] for f in all_features] + + outputs = self.models["depth"](features[0]) + else: + # Otherwise, we only feed the image with frame_id 0 through the depth encoder + features = self.models["encoder"](inputs["color_aug", 0, 0]) + outputs = self.models["depth"](features) + + if self.opt.predictive_mask: + outputs["predictive_mask"] = self.models["predictive_mask"](features) + + if self.use_pose_net: + outputs.update(self.predict_poses(inputs, features)) + + self.generate_images_pred(inputs, outputs) + losses = self.compute_losses(inputs, outputs) + + return outputs, losses + + def predict_poses(self, inputs, features): + """Predict poses between input frames for monocular sequences. + """ + outputs = {} + if self.num_pose_frames == 2: + # In this setting, we compute the pose to each source frame via a + # separate forward pass through the pose network. 
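+ # e.g. with frame_ids [0, -1, 1], the pairs (-1, 0) and (0, 1) are each passed through the pose network once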
+ + # select what features the pose network takes as input + if self.opt.pose_model_type == "shared": + pose_feats = {f_i: features[f_i] for f_i in self.opt.frame_ids} + else: + pose_feats = {f_i: inputs["color_aug", f_i, 0] for f_i in self.opt.frame_ids} + + for f_i in self.opt.frame_ids[1:]: + if f_i != "s": + # To maintain ordering we always pass frames in temporal order + if f_i < 0: + pose_inputs = [pose_feats[f_i], pose_feats[0]] + else: + pose_inputs = [pose_feats[0], pose_feats[f_i]] + + if self.opt.pose_model_type == "separate_resnet": + pose_inputs = [self.models["pose_encoder"](torch.cat(pose_inputs, 1))] + elif self.opt.pose_model_type == "posecnn": + pose_inputs = torch.cat(pose_inputs, 1) + + axisangle, translation = self.models["pose"](pose_inputs) + outputs[("axisangle", 0, f_i)] = axisangle + outputs[("translation", 0, f_i)] = translation + + # Invert the matrix if the frame id is negative + outputs[("cam_T_cam", 0, f_i)] = transformation_from_parameters( + axisangle[:, 0], translation[:, 0], invert=(f_i < 0)) + + else: + # Here we input all frames to the pose net (and predict all poses) together + if self.opt.pose_model_type in ["separate_resnet", "posecnn"]: + pose_inputs = torch.cat( + [inputs[("color_aug", i, 0)] for i in self.opt.frame_ids if i != "s"], 1) + + if self.opt.pose_model_type == "separate_resnet": + pose_inputs = [self.models["pose_encoder"](pose_inputs)] + + elif self.opt.pose_model_type == "shared": + pose_inputs = [features[i] for i in self.opt.frame_ids if i != "s"] + + axisangle, translation = self.models["pose"](pose_inputs) + + for i, f_i in enumerate(self.opt.frame_ids[1:]): + if f_i != "s": + outputs[("axisangle", 0, f_i)] = axisangle + outputs[("translation", 0, f_i)] = translation + outputs[("cam_T_cam", 0, f_i)] = transformation_from_parameters( + axisangle[:, i], translation[:, i]) + + return outputs + + def val(self): + """Validate the model on a single minibatch + """ + self.set_eval() + try: + inputs = self.val_iter.next() + except StopIteration: + self.val_iter = iter(self.val_loader) + inputs = self.val_iter.next() + + with torch.no_grad(): + outputs, losses = self.process_batch(inputs) + + if "depth_gt" in inputs: + self.compute_depth_losses(inputs, outputs, losses) + + self.log("val", inputs, outputs, losses) + del inputs, outputs, losses + + self.set_train() + + def generate_images_pred(self, inputs, outputs): + """Generate the warped (reprojected) color images for a minibatch. + Generated images are saved into the `outputs` dictionary. 
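+ Each source frame is warped into the frame_id 0 view using the predicted depth and relative camera pose.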
+ """ + for scale in self.opt.scales: + disp = outputs[("disp", scale)] + if self.opt.v1_multiscale: + source_scale = scale + else: + disp = F.interpolate( + disp, [self.opt.height, self.opt.width], mode="bilinear", align_corners=False) + source_scale = 0 + + _, depth = disp_to_depth(disp, self.opt.min_depth, self.opt.max_depth) + + outputs[("depth", 0, scale)] = depth + + for i, frame_id in enumerate(self.opt.frame_ids[1:]): + + if frame_id == "s": + T = inputs["stereo_T"] + else: + T = outputs[("cam_T_cam", 0, frame_id)] + + # from the authors of https://arxiv.org/abs/1712.00175 + if self.opt.pose_model_type == "posecnn": + + axisangle = outputs[("axisangle", 0, frame_id)] + translation = outputs[("translation", 0, frame_id)] + + inv_depth = 1 / depth + mean_inv_depth = inv_depth.mean(3, True).mean(2, True) + + T = transformation_from_parameters( + axisangle[:, 0], translation[:, 0] * mean_inv_depth[:, 0], frame_id < 0) + + cam_points = self.backproject_depth[source_scale]( + depth, inputs[("inv_K", source_scale)]) + pix_coords = self.project_3d[source_scale]( + cam_points, inputs[("K", source_scale)], T) + + outputs[("sample", frame_id, scale)] = pix_coords + + outputs[("color", frame_id, scale)] = F.grid_sample( + inputs[("color", frame_id, source_scale)], + outputs[("sample", frame_id, scale)], + padding_mode="border") + + if not self.opt.disable_automasking: + outputs[("color_identity", frame_id, scale)] = \ + inputs[("color", frame_id, source_scale)] + + def compute_reprojection_loss(self, pred, target): + """Computes reprojection loss between a batch of predicted and target images + """ + abs_diff = torch.abs(target - pred) + l1_loss = abs_diff.mean(1, True) + + if self.opt.no_ssim: + reprojection_loss = l1_loss + else: + ssim_loss = self.ssim(pred, target).mean(1, True) + reprojection_loss = 0.85 * ssim_loss + 0.15 * l1_loss + + return reprojection_loss + + def compute_losses(self, inputs, outputs): + """Compute the reprojection and smoothness losses for a minibatch + """ + losses = {} + total_loss = 0 + + for scale in self.opt.scales: + loss = 0 + reprojection_losses = [] + + if self.opt.v1_multiscale: + source_scale = scale + else: + source_scale = 0 + + disp = outputs[("disp", scale)] + color = inputs[("color", 0, scale)] + target = inputs[("color", 0, source_scale)] + + for frame_id in self.opt.frame_ids[1:]: + pred = outputs[("color", frame_id, scale)] + reprojection_losses.append(self.compute_reprojection_loss(pred, target)) + + reprojection_losses = torch.cat(reprojection_losses, 1) + + if not self.opt.disable_automasking: + identity_reprojection_losses = [] + for frame_id in self.opt.frame_ids[1:]: + pred = inputs[("color", frame_id, source_scale)] + identity_reprojection_losses.append( + self.compute_reprojection_loss(pred, target)) + + identity_reprojection_losses = torch.cat(identity_reprojection_losses, 1) + + if self.opt.avg_reprojection: + identity_reprojection_loss = identity_reprojection_losses.mean(1, keepdim=True) + else: + # save both images, and do min all at once below + identity_reprojection_loss = identity_reprojection_losses + + elif self.opt.predictive_mask: + # use the predicted mask + mask = outputs["predictive_mask"]["disp", scale] + if not self.opt.v1_multiscale: + mask = F.interpolate( + mask, [self.opt.height, self.opt.width], + mode="bilinear", align_corners=False) + + reprojection_losses *= mask + + # add a loss pushing mask to 1 (using nn.BCELoss for stability) + weighting_loss = 0.2 * nn.BCELoss()(mask, torch.ones(mask.shape).cuda()) + loss += 
weighting_loss.mean() + + if self.opt.avg_reprojection: + reprojection_loss = reprojection_losses.mean(1, keepdim=True) + else: + reprojection_loss = reprojection_losses + + if not self.opt.disable_automasking: + # add random numbers to break ties + identity_reprojection_loss += torch.randn( + identity_reprojection_loss.shape).cuda() * 0.00001 + + combined = torch.cat((identity_reprojection_loss, reprojection_loss), dim=1) + else: + combined = reprojection_loss + + if combined.shape[1] == 1: + to_optimise = combined + else: + to_optimise, idxs = torch.min(combined, dim=1) + + if not self.opt.disable_automasking: + outputs["identity_selection/{}".format(scale)] = ( + idxs > identity_reprojection_loss.shape[1] - 1).float() + + loss += to_optimise.mean() + + mean_disp = disp.mean(2, True).mean(3, True) + norm_disp = disp / (mean_disp + 1e-7) + smooth_loss = get_smooth_loss(norm_disp, color) + + loss += self.opt.disparity_smoothness * smooth_loss / (2 ** scale) + total_loss += loss + losses["loss/{}".format(scale)] = loss + + total_loss /= self.num_scales + losses["loss"] = total_loss + return losses + + def compute_depth_losses(self, inputs, outputs, losses): + """Compute depth metrics, to allow monitoring during training + + This isn't particularly accurate as it averages over the entire batch, + so is only used to give an indication of validation performance + """ + depth_pred = outputs[("depth", 0, 0)] + depth_pred = torch.clamp(F.interpolate( + depth_pred, [375, 1242], mode="bilinear", align_corners=False), 1e-3, 80) + depth_pred = depth_pred.detach() + + depth_gt = inputs["depth_gt"] + mask = depth_gt > 0 + + # garg/eigen crop + crop_mask = torch.zeros_like(mask) + crop_mask[:, :, 153:371, 44:1197] = 1 + mask = mask * crop_mask + + depth_gt = depth_gt[mask] + depth_pred = depth_pred[mask] + depth_pred *= torch.median(depth_gt) / torch.median(depth_pred) + + depth_pred = torch.clamp(depth_pred, min=1e-3, max=80) + + depth_errors = compute_depth_errors(depth_gt, depth_pred) + + for i, metric in enumerate(self.depth_metric_names): + losses[metric] = np.array(depth_errors[i].cpu()) + + def log_time(self, batch_idx, duration, loss): + """Print a logging statement to the terminal + """ + samples_per_sec = self.opt.batch_size / duration + time_sofar = time.time() - self.start_time + training_time_left = ( + self.num_total_steps / self.step - 1.0) * time_sofar if self.step > 0 else 0 + print_string = "epoch {:>3} | batch {:>6} | examples/s: {:5.1f}" + \ + " | loss: {:.5f} | time elapsed: {} | time left: {}" + print(print_string.format(self.epoch, batch_idx, samples_per_sec, loss, + sec_to_hm_str(time_sofar), sec_to_hm_str(training_time_left))) + + def log(self, mode, inputs, outputs, losses): + """Write an event to the tensorboard events file + """ + writer = self.writers[mode] + for l, v in losses.items(): + writer.add_scalar("{}".format(l), v, self.step) + + for j in range(min(4, self.opt.batch_size)): # write a maxmimum of four images + for s in self.opt.scales: + for frame_id in self.opt.frame_ids: + writer.add_image( + "color_{}_{}/{}".format(frame_id, s, j), + inputs[("color", frame_id, s)][j].data, self.step) + if s == 0 and frame_id != 0: + writer.add_image( + "color_pred_{}_{}/{}".format(frame_id, s, j), + outputs[("color", frame_id, s)][j].data, self.step) + + writer.add_image( + "disp_{}/{}".format(s, j), + normalize_image(outputs[("disp", s)][j]), self.step) + + if self.opt.predictive_mask: + for f_idx, frame_id in enumerate(self.opt.frame_ids[1:]): + writer.add_image( + 
"predictive_mask_{}_{}/{}".format(frame_id, s, j), + outputs["predictive_mask"][("disp", s)][j, f_idx][None, ...], + self.step) + + elif not self.opt.disable_automasking: + writer.add_image( + "automask_{}/{}".format(s, j), + outputs["identity_selection/{}".format(s)][j][None, ...], self.step) + + def save_opts(self): + """Save options to disk so we know what we ran this experiment with + """ + models_dir = os.path.join(self.log_path, "models") + if not os.path.exists(models_dir): + os.makedirs(models_dir) + to_save = self.opt.__dict__.copy() + + with open(os.path.join(models_dir, 'opt.json'), 'w') as f: + json.dump(to_save, f, indent=2) + + def save_model(self): + """Save model weights to disk + """ + save_folder = os.path.join(self.log_path, "models", "weights_{}".format(self.epoch)) + if not os.path.exists(save_folder): + os.makedirs(save_folder) + + for model_name, model in self.models.items(): + save_path = os.path.join(save_folder, "{}.pth".format(model_name)) + to_save = model.state_dict() + if model_name == 'encoder': + # save the sizes - these are needed at prediction time + to_save['height'] = self.opt.height + to_save['width'] = self.opt.width + to_save['use_stereo'] = self.opt.use_stereo + torch.save(to_save, save_path) + + save_path = os.path.join(save_folder, "{}.pth".format("adam")) + torch.save(self.model_optimizer.state_dict(), save_path) + + def load_model(self): + """Load model(s) from disk + """ + self.opt.load_weights_folder = os.path.expanduser(self.opt.load_weights_folder) + + assert os.path.isdir(self.opt.load_weights_folder), \ + "Cannot find folder {}".format(self.opt.load_weights_folder) + print("loading model from folder {}".format(self.opt.load_weights_folder)) + + for n in self.opt.models_to_load: + print("Loading {} weights...".format(n)) + path = os.path.join(self.opt.load_weights_folder, "{}.pth".format(n)) + model_dict = self.models[n].state_dict() + pretrained_dict = torch.load(path) + pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} + model_dict.update(pretrained_dict) + self.models[n].load_state_dict(model_dict) + + # loading adam state + optimizer_load_path = os.path.join(self.opt.load_weights_folder, "adam.pth") + if os.path.isfile(optimizer_load_path): + print("Loading Adam weights") + optimizer_dict = torch.load(optimizer_load_path) + self.model_optimizer.load_state_dict(optimizer_dict) + else: + print("Cannot find Adam weights so Adam is randomly initialized") diff --git a/gimp-plugins/monodepth2/utils.py b/gimp-plugins/monodepth2/utils.py new file mode 100755 index 0000000..e309cf7 --- /dev/null +++ b/gimp-plugins/monodepth2/utils.py @@ -0,0 +1,114 @@ +# Copyright Niantic 2019. Patent Pending. All rights reserved. +# +# This software is licensed under the terms of the Monodepth2 licence +# which allows for non-commercial use only, the full terms of which are made +# available in the LICENSE file. + +from __future__ import absolute_import, division, print_function +import os +import hashlib +import zipfile +from six.moves import urllib + + +def readlines(filename): + """Read all the lines in a text file and return as a list + """ + with open(filename, 'r') as f: + lines = f.read().splitlines() + return lines + + +def normalize_image(x): + """Rescale image pixels to span range [0, 1] + """ + ma = float(x.max().cpu().data) + mi = float(x.min().cpu().data) + d = ma - mi if ma != mi else 1e5 + return (x - mi) / d + + +def sec_to_hm(t): + """Convert time in seconds to time in hours, minutes and seconds + e.g. 
10239 -> (2, 50, 39) + """ + t = int(t) + s = t % 60 + t //= 60 + m = t % 60 + t //= 60 + return t, m, s + + +def sec_to_hm_str(t): + """Convert time in seconds to a nice string + e.g. 10239 -> '02h50m39s' + """ + h, m, s = sec_to_hm(t) + return "{:02d}h{:02d}m{:02d}s".format(h, m, s) + + +def download_model_if_doesnt_exist(model_name): + """If pretrained kitti model doesn't exist, download and unzip it + """ + # values are tuples of (, ) + download_paths = { + "mono_640x192": + ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono_640x192.zip", + "a964b8356e08a02d009609d9e3928f7c"), + "stereo_640x192": + ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/stereo_640x192.zip", + "3dfb76bcff0786e4ec07ac00f658dd07"), + "mono+stereo_640x192": + ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono%2Bstereo_640x192.zip", + "c024d69012485ed05d7eaa9617a96b81"), + "mono_no_pt_640x192": + ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono_no_pt_640x192.zip", + "9c2f071e35027c895a4728358ffc913a"), + "stereo_no_pt_640x192": + ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/stereo_no_pt_640x192.zip", + "41ec2de112905f85541ac33a854742d1"), + "mono+stereo_no_pt_640x192": + ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono%2Bstereo_no_pt_640x192.zip", + "46c3b824f541d143a45c37df65fbab0a"), + "mono_1024x320": + ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono_1024x320.zip", + "0ab0766efdfeea89a0d9ea8ba90e1e63"), + "stereo_1024x320": + ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/stereo_1024x320.zip", + "afc2f2126d70cf3fdf26b550898b501a"), + "mono+stereo_1024x320": + ("https://storage.googleapis.com/niantic-lon-static/research/monodepth2/mono%2Bstereo_1024x320.zip", + "cdc5fc9b23513c07d5b19235d9ef08f7"), + } + + if not os.path.exists("models"): + os.makedirs("models") + + model_path = os.path.join("models", model_name) + + def check_file_matches_md5(checksum, fpath): + if not os.path.exists(fpath): + return False + with open(fpath, 'rb') as f: + current_md5checksum = hashlib.md5(f.read()).hexdigest() + return current_md5checksum == checksum + + # see if we have the model already downloaded... 
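As an aside, the checksum helper above is easy to exercise in isolation. The following is a hypothetical sketch (archive path and model name are invented for illustration, using the md5 listed for mono_640x192 above); it is not part of the plugin code:

import hashlib
import os

def md5_of(fpath):
    # Mirrors check_file_matches_md5 above: MD5 digest of a file on disk.
    with open(fpath, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()

expected = "a964b8356e08a02d009609d9e3928f7c"          # published md5 for mono_640x192
archive = os.path.join("models", "mono_640x192.zip")   # hypothetical download location
if os.path.exists(archive) and md5_of(archive) == expected:
    print("archive verified, safe to unzip")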
+    if not os.path.exists(os.path.join(model_path, "encoder.pth")):
+
+        model_url, required_md5checksum = download_paths[model_name]
+
+        if not check_file_matches_md5(required_md5checksum, model_path + ".zip"):
+            print("-> Downloading pretrained model to {}".format(model_path + ".zip"))
+            urllib.request.urlretrieve(model_url, model_path + ".zip")
+
+        if not check_file_matches_md5(required_md5checksum, model_path + ".zip"):
+            print("   Failed to download a file which matches the checksum - quitting")
+            quit()
+
+        print("   Unzipping model...")
+        with zipfile.ZipFile(model_path + ".zip", 'r') as f:
+            f.extractall(model_path)
+
+        print("   Model unzipped to {}".format(model_path))
diff --git a/gimp-plugins/monodepth2/utils.pyc b/gimp-plugins/monodepth2/utils.pyc
new file mode 100755
index 0000000000000000000000000000000000000000..d416cdda14ad9ee7d52be972bb9ab814ee9fedbb
GIT binary patch
literal 5685 (base85 payload omitted)
diff --git a/gimp-plugins/moveWeights.sh b/gimp-plugins/moveWeights.sh
new file mode 100644
index 0000000..be3d3f4
--- /dev/null
+++ b/gimp-plugins/moveWeights.sh
@@ -0,0 +1,9 @@
+unzip weights.zip
+mv weights/colorize/* neural-colorization/
+mv weights/deblur/* DeblurGANv2/
+mv weights/deeplabv3/* deeplabv3
+mv weights/facegen/* CelebAMask-HQ/MaskGAN_demo/checkpoints/label2face_512p/
+mv weights/faceparse/* face-parsing.PyTorch/
+mv weights/monodepth/* monodepth2/models/mono+stereo_640x192/
+mv weights/super_resolution/* pytorch-SRResNet/model/
+rm -rf weights/
diff --git a/gimp-plugins/neural-colorization/__pycache__/model.cpython-38.pyc b/gimp-plugins/neural-colorization/__pycache__/model.cpython-38.pyc
new file mode 100755
index 0000000000000000000000000000000000000000..d4f9ebd5e36d6c30dea708935df53d6b1debb37e
GIT binary patch
literal 4119 (base85 payload omitted)
diff --git a/gimp-plugins/neural-colorization/build_dataset_directory.py b/gimp-plugins/neural-colorization/build_dataset_directory.py
new file mode 100755
index 0000000..06d2bc4
--- /dev/null
+++ b/gimp-plugins/neural-colorization/build_dataset_directory.py
@@ -0,0 +1,42 @@
+import os
+import shutil
+import argparse
+image_extensions = {'.jpg', '.jpeg', '.JPG', '.JPEG'}
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Put all places 365 images in single folder.")
+    parser.add_argument("-i",
+                        "--input_dir",
+                        required=True,
+                        type=str,
+                        help="input folder: the folder containing unzipped places 365 files")
+    parser.add_argument("-o",
+                        "--output_dir",
+                        required=True,
+                        type=str,
+                        help="output folder: the folder to put all images")
+    args = parser.parse_args()
+    return args
+
+def genlist(image_dir):
+    image_list = []
+    for filename in os.listdir(image_dir):
+        path = os.path.join(image_dir,filename)
+        if os.path.isdir(path):
+            image_list = image_list + genlist(path)
+        else:
+            ext = os.path.splitext(filename)[1]
+            if ext in image_extensions:
+                image_list.append(os.path.join(image_dir, filename))
+    return image_list
+
+
+args = parse_args()
+if not os.path.exists(args.output_dir):
+    os.makedirs(args.output_dir)
+flist = genlist(args.input_dir)
+for i,p in enumerate(flist):
+    if
os.path.getsize(p) != 0: + os.rename(p,os.path.join(args.output_dir,str(i)+'.jpg')) +shutil.rmtree(args.input_dir) +print('done') \ No newline at end of file diff --git a/gimp-plugins/neural-colorization/colorize.py b/gimp-plugins/neural-colorization/colorize.py new file mode 100755 index 0000000..86320c5 --- /dev/null +++ b/gimp-plugins/neural-colorization/colorize.py @@ -0,0 +1,73 @@ +import torch +from model import generator +from torch.autograd import Variable +from scipy.ndimage import zoom +import cv2 +import os +from PIL import Image +import argparse +import numpy as np +from skimage.color import rgb2yuv,yuv2rgb + +def parse_args(): + parser = argparse.ArgumentParser(description="Colorize images") + parser.add_argument("-i", + "--input", + type=str, + required=True, + help="input image/input dir") + parser.add_argument("-o", + "--output", + type=str, + required=True, + help="output image/output dir") + parser.add_argument("-m", + "--model", + type=str, + required=True, + help="location for model (Generator)") + parser.add_argument("--gpu", + type=int, + default=-1, + help="which GPU to use? [-1 for cpu]") + args = parser.parse_args() + return args + +args = parse_args() + +G = generator() + +if torch.cuda.is_available(): +# args.gpu>=0: + G=G.cuda(args.gpu) + G.load_state_dict(torch.load(args.model)) +else: + G.load_state_dict(torch.load(args.model,map_location=torch.device('cpu'))) + +def inference(G,in_path,out_path): + p=Image.open(in_path).convert('RGB') + img_yuv = rgb2yuv(p) + H,W,_ = img_yuv.shape + infimg = np.expand_dims(np.expand_dims(img_yuv[...,0], axis=0), axis=0) + img_variable = Variable(torch.Tensor(infimg-0.5)) + if args.gpu>=0: + img_variable=img_variable.cuda(args.gpu) + res = G(img_variable) + uv=res.cpu().detach().numpy() + uv[:,0,:,:] *= 0.436 + uv[:,1,:,:] *= 0.615 + (_,_,H1,W1) = uv.shape + uv = zoom(uv,(1,1,H/H1,W/W1)) + yuv = np.concatenate([infimg,uv],axis=1)[0] + rgb=yuv2rgb(yuv.transpose(1,2,0)) + cv2.imwrite(out_path,(rgb.clip(min=0,max=1)*256)[:,:,[2,1,0]]) + + +if not os.path.isdir(args.input): + inference(G,args.input,args.output) +else: + if not os.path.exists(args.output): + os.makedirs(args.output) + for f in os.listdir(args.input): + inference(G,os.path.join(args.input,f),os.path.join(args.output,f)) + diff --git a/gimp-plugins/neural-colorization/model.py b/gimp-plugins/neural-colorization/model.py new file mode 100755 index 0000000..afee57b --- /dev/null +++ b/gimp-plugins/neural-colorization/model.py @@ -0,0 +1,123 @@ +import torch +import torch.nn as nn +from functools import reduce +from torch.autograd import Variable + + +class shave_block(nn.Module): + def __init__(self, s): + super(shave_block, self).__init__() + self.s=s + def forward(self,x): + return x[:,:,self.s:-self.s,self.s:-self.s] + +class LambdaBase(nn.Sequential): + def __init__(self, fn, *args): + super(LambdaBase, self).__init__(*args) + self.lambda_func = fn + + def forward_prepare(self, input): + output = [] + for module in self._modules.values(): + output.append(module(input)) + return output if output else input + +class Lambda(LambdaBase): + def forward(self, input): + return self.lambda_func(self.forward_prepare(input)) + +class LambdaMap(LambdaBase): + def forward(self, input): + return list(map(self.lambda_func,self.forward_prepare(input))) + +class LambdaReduce(LambdaBase): + def forward(self, input): + return reduce(self.lambda_func,self.forward_prepare(input)) + +def generator(): + G = nn.Sequential( # Sequential, + nn.ReflectionPad2d((40, 40, 40, 40)), + 
nn.Conv2d(1,32,(9, 9),(1, 1),(4, 4)), + nn.BatchNorm2d(32), + nn.ReLU(), + nn.Conv2d(32,64,(3, 3),(2, 2),(1, 1)), + nn.BatchNorm2d(64), + nn.ReLU(), + nn.Conv2d(64,128,(3, 3),(2, 2),(1, 1)), + nn.BatchNorm2d(128), + nn.ReLU(), + nn.Sequential( # Sequential, + LambdaMap(lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128,128,(3, 3)), + nn.BatchNorm2d(128), + nn.ReLU(), + nn.Conv2d(128,128,(3, 3)), + nn.BatchNorm2d(128), + ), + shave_block(2), + ), + LambdaReduce(lambda x,y: x+y), # CAddTable, + ), + nn.Sequential( # Sequential, + LambdaMap(lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128,128,(3, 3)), + nn.BatchNorm2d(128), + nn.ReLU(), + nn.Conv2d(128,128,(3, 3)), + nn.BatchNorm2d(128), + ), + shave_block(2), + ), + LambdaReduce(lambda x,y: x+y), # CAddTable, + ), + nn.Sequential( # Sequential, + LambdaMap(lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128,128,(3, 3)), + nn.BatchNorm2d(128), + nn.ReLU(), + nn.Conv2d(128,128,(3, 3)), + nn.BatchNorm2d(128), + ), + shave_block(2), + ), + LambdaReduce(lambda x,y: x+y), # CAddTable, + ), + nn.Sequential( # Sequential, + LambdaMap(lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128,128,(3, 3)), + nn.BatchNorm2d(128), + nn.ReLU(), + nn.Conv2d(128,128,(3, 3)), + nn.BatchNorm2d(128), + ), + shave_block(2), + ), + LambdaReduce(lambda x,y: x+y), # CAddTable, + ), + nn.Sequential( # Sequential, + LambdaMap(lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128,128,(3, 3)), + nn.BatchNorm2d(128), + nn.ReLU(), + nn.Conv2d(128,128,(3, 3)), + nn.BatchNorm2d(128), + ), + shave_block(2), + ), + LambdaReduce(lambda x,y: x+y), # CAddTable, + ), + nn.ConvTranspose2d(128,64,(3, 3),(2, 2),(1, 1),(1, 1)), + nn.BatchNorm2d(64), + nn.ReLU(), + nn.ConvTranspose2d(64,32,(3, 3),(2, 2),(1, 1),(1, 1)), + nn.BatchNorm2d(32), + nn.ReLU(), + nn.Conv2d(32,2,(9, 9),(1, 1),(4, 4)), + nn.Tanh(), + ) + return G \ No newline at end of file diff --git a/gimp-plugins/neural-colorization/model.pyc b/gimp-plugins/neural-colorization/model.pyc new file mode 100755 index 0000000000000000000000000000000000000000..33c2db96b3ad7875a248f95cbbc3fd13b4adf9fd GIT binary patch literal 6371 zcmd5=ZEq7t5T3Ig=M@5>B!u^uQe0X{OTSf=Qi6&qK~!CUib$<>c+bg&^PRan^HK$= zQrb`DC-qpKU>K|q}*j(4&zvpX}-%+9X2{P%$gQ~vXphIBt={J)8k{sm(2 zbtDv7De8`_mt>`+LnR5z(yGWxMI5r0B^;4fRaUBKSHv&L`lvJ=F(dN3$fn4nl`%0@ z9T?Yv(PCgi%$WES+B;tKPKuexdnb$DDKS%d?}4KCfS7}9tQ@4$%e8435d4GBQq4g% ztAZrH=`?)X9z*+~mjvG9(9b5(NY}h)e*JM6H=Y!5aON$J9S8BTIIIzs0tg(fl*L`H zl`z~OVIhW#DCt>{3|~h!U_am}5d~*M+x0S+T2^J2Y8A4Ma8jr3CoY%CsA0VxL_t=s zV-1X{oQ%AwA2wa`*HA1;zejE1LFy;z!jmM(f+uO*@}h;?@$)E*J(Di12Cep$cGy`B zqI4nhJBb%wX~bci1iyM&5JwBG*!bbq_Dh!{C_ZIHoFh(Q9AQ-BzD5O>7!8d-Y($8l z0$bNHq;zsXyIGO>+-zE9&7j3D7gtMP?21|0DAsSr$#X9;>TwDc$AWLsj=Yu+v?tJC zuLF)wh^E5flA))T)`kf=b)kL=dDR&&s}gs;)??${@>0KnG5x%EOqMZ?@5;=04Fcl+mgc~uq|jIFqV3VQYHJe`%F(4Bn4!DaO45lZXqPPEaG zY9e{&g&p5ky>{D=jK+|SU+&5vYIic_j5}FgwF~SDDj5;!Ms~fO_-!xokDx0Z0}-d{ z%sO*6&fRfzxQuTb)kjh5sid`GdW&Im5G9Z5ih^RJx`BTE6cK$G&Au#)$>?c|^gXZrL0~hK73?#p6j_6N2id*YJ~-8S`-PiOGe8)mSw;X_Ufccu-EbO` z_r?u#kbEka4mZ$t>jqb^A0On2BDc}O@B|&8*9bbpMn3XZ_cB4$T@={^+|NVo-r&xS zcP^J+brfc%4*Ft|{YdzP4!Dux#_|yZmvvml&;&gyW>(CcY>sGug`HiV5gs62W^h); z91}Ay=J*!o>L6xtj*B@V=A@WYTbReUF+&+JPw-mV(-X2V9pBTkS`(2g|+C z&o)5rG)<%VTkSoxgS{X3FF084#eTK{at{xZJF|n_Px==eEca4B+W@&o-jr({1Q*xD zd@AO$n9pRhn#a=ipanme5mBFuxgzGOn1wC;JM9R>)fZyE6mu;P@(FI4%{h6c`D(fL z6?WlLz;iF)#thDq@RSOe-h!JX*rO8g_zZpnX^<~3%c$!;0?98=b?6e!dM@wIy~A~g 
z=c?ZRuG<^$jcbqszihdXP*>G#3>9dGY&`ddc4ScP^q0-cUD44!0U_5Z=p@K~KutCJ zw$O+md1HmpvtTRM;kw)icC4_4pvMO+7Lx3>?vKAqodt{&a*;1oUrWL!Cl^igZsf7I6MW~a$2H~sAlyeMa{0)kyJ&ep~|BN$Tnkr3~hTV5USGV*SWawkK zJ}*PV#+u$}w01R$T>es48~B=+#c`PGZ^w+!HkQfjWbta^8JDr6?zyKgqt*d@8?euV se1Nc?p;O#TAU(vjHuP(meN($Z#ZtcOOgYoeA?Hweesq3vw!*)E0Xk&7mjD0& literal 0 HcmV?d00001 diff --git a/gimp-plugins/neural-colorization/resize_all_imgs.py b/gimp-plugins/neural-colorization/resize_all_imgs.py new file mode 100755 index 0000000..fc944d8 --- /dev/null +++ b/gimp-plugins/neural-colorization/resize_all_imgs.py @@ -0,0 +1,35 @@ +from multiprocessing import Pool +from PIL import Image +import os +import argparse + +def parse_args(): + parser = argparse.ArgumentParser(description="Resize all colorful imgs to 256*256 for training") + parser.add_argument("-d", + "--dir", + required=True, + type=str, + help="The directory includes all jpg images") + parser.add_argument("-n", + "--nprocesses", + default=10, + type=int, + help="Using how many processes") + args = parser.parse_args() + return args + +def doit(x): + a=Image.open(x) + if a.getbands()!=('R','G','B'): + os.remove(x) + return + a.resize((256,256),Image.BICUBIC).save(x) + return + +args=parse_args() +pool = Pool(processes=args.nprocesses) +jpgs = [] +flist = os.listdir(args.dir) +full_flist = [os.path.join(args.dir,x) for x in flist] +pool.map(doit, full_flist) +print('done') \ No newline at end of file diff --git a/gimp-plugins/neural-colorization/train.py b/gimp-plugins/neural-colorization/train.py new file mode 100755 index 0000000..d5f5ae2 --- /dev/null +++ b/gimp-plugins/neural-colorization/train.py @@ -0,0 +1,186 @@ +import torch +import torch.nn as nn +import argparse +from torch.autograd import Variable +import torchvision.models as models +import os +from torch.utils import data +from model import generator +import numpy as np +from PIL import Image +from skimage.color import rgb2yuv,yuv2rgb +import cv2 + +def parse_args(): + parser = argparse.ArgumentParser(description="Train a GAN based model") + parser.add_argument("-d", + "--training_dir", + type=str, + required=True, + help="Training directory (folder contains all 256*256 images)") + parser.add_argument("-t", + "--test_image", + type=str, + default=None, + help="Test image location") + parser.add_argument("-c", + "--checkpoint_location", + type=str, + required=True, + help="Place to save checkpoints") + parser.add_argument("-e", + "--epoch", + type=int, + default=120, + help="Epoches to run training") + parser.add_argument("--gpu", + type=int, + default=0, + help="which GPU to use?") + parser.add_argument("-b", + "--batch_size", + type=int, + default=20, + help="batch size") + parser.add_argument("-w", + "--num_workers", + type=int, + default=6, + help="Number of workers to fetch data") + parser.add_argument("-p", + "--pixel_loss_weights", + type=float, + default=1000.0, + help="Pixel-wise loss weights") + parser.add_argument("--g_every", + type=int, + default=1, + help="Training generator every k iteration") + parser.add_argument("--g_lr", + type=float, + default=1e-4, + help="learning rate for generator") + parser.add_argument("--d_lr", + type=float, + default=1e-4, + help="learning rate for discriminator") + parser.add_argument("-i", + "--checkpoint_every", + type=int, + default=100, + help="Save checkpoint every k iteration (checkpoints for same epoch will overwrite)") + parser.add_argument("--d_init", + type=str, + default=None, + help="Init weights for discriminator") + parser.add_argument("--g_init", + type=str, 
+ default=None, + help="Init weights for generator") + args = parser.parse_args() + return args + +# define data generator +class img_data(data.Dataset): + def __init__(self, path): + files = os.listdir(path) + self.files = [os.path.join(path,x) for x in files] + def __len__(self): + return len(self.files) + + def __getitem__(self, index): + img = Image.open(self.files[index]) + yuv = rgb2yuv(img) + y = yuv[...,0]-0.5 + u_t = yuv[...,1] / 0.43601035 + v_t = yuv[...,2] / 0.61497538 + return torch.Tensor(np.expand_dims(y,axis=0)),torch.Tensor(np.stack([u_t,v_t],axis=0)) + + +args = parse_args() +if not os.path.exists(os.path.join(args.checkpoint_location,'weights')): + os.makedirs(os.path.join(args.checkpoint_location,'weights')) + +# Define G, same as torch version +G = generator().cuda(args.gpu) + +# define D +D = models.resnet18(pretrained=False,num_classes=2) +D.fc = nn.Sequential(nn.Linear(512, 1), nn.Sigmoid()) +D = D.cuda(args.gpu) + +trainset = img_data(args.training_dir) +params = {'batch_size': args.batch_size, + 'shuffle': True, + 'num_workers': args.num_workers} +training_generator = data.DataLoader(trainset, **params) +if args.test_image is not None: + test_img = Image.open(args.test_image).convert('RGB').resize((256,256)) + test_yuv = rgb2yuv(test_img) + test_inf = test_yuv[...,0].reshape(1,1,256,256) + test_var = Variable(torch.Tensor(test_inf-0.5)).cuda(args.gpu) +if args.d_init is not None: + D.load_state_dict(torch.load(args.d_init)) +if args.g_init is not None: + G.load_state_dict(torch.load(args.g_init)) + +# save test image for beginning +if args.test_image is not None: + test_res = G(test_var) + uv=test_res.cpu().detach().numpy() + uv[:,0,:,:] *= 0.436 + uv[:,1,:,:] *= 0.615 + test_yuv = np.concatenate([test_inf,uv],axis=1).reshape(3,256,256) + test_rgb = yuv2rgb(test_yuv.transpose(1,2,0)) + cv2.imwrite(os.path.join(args.checkpoint_location,'test_init.jpg'),(test_rgb.clip(min=0,max=1)*256)[:,:,[2,1,0]]) + +i=0 +adversarial_loss = torch.nn.BCELoss() +optimizer_G = torch.optim.Adam(G.parameters(), lr=args.g_lr, betas=(0.5, 0.999)) +optimizer_D = torch.optim.Adam(D.parameters(), lr=args.d_lr, betas=(0.5, 0.999)) +for epoch in range(args.epoch): + for y, uv in training_generator: + # Adversarial ground truths + valid = Variable(torch.Tensor(y.size(0), 1).fill_(1.0), requires_grad=False).cuda(args.gpu) + fake = Variable(torch.Tensor(y.size(0), 1).fill_(0.0), requires_grad=False).cuda(args.gpu) + + yvar = Variable(y).cuda(args.gpu) + uvvar = Variable(uv).cuda(args.gpu) + real_imgs = torch.cat([yvar,uvvar],dim=1) + + optimizer_G.zero_grad() + uvgen = G(yvar) + # Generate a batch of images + gen_imgs = torch.cat([yvar.detach(),uvgen],dim=1) + + # Loss measures generator's ability to fool the discriminator + g_loss_gan = adversarial_loss(D(gen_imgs), valid) + g_loss = g_loss_gan + args.pixel_loss_weights * torch.mean((uvvar-uvgen)**2) + if i%args.g_every==0: + g_loss.backward() + optimizer_G.step() + + optimizer_D.zero_grad() + + # Measure discriminator's ability to classify real from generated samples + real_loss = adversarial_loss(D(real_imgs), valid) + fake_loss = adversarial_loss(D(gen_imgs.detach()), fake) + d_loss = (real_loss + fake_loss) / 2 + d_loss.backward() + optimizer_D.step() + i+=1 + if i%args.checkpoint_every==0: + print ("Epoch: %d: [D loss: %f] [G total loss: %f] [G GAN Loss: %f]" % (epoch, d_loss.item(), g_loss.item(), g_loss_gan.item())) + + torch.save(D.state_dict(), os.path.join(args.checkpoint_location,'weights','D'+str(epoch)+'.pth')) + 
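For readers unfamiliar with the colour decomposition used by img_data above, here is a small standalone sketch of the Y / scaled-UV split the generator is trained on. It uses a random array as a stand-in image and the 0.436 / 0.615 scaling constants from colorize.py; it is illustrative only, not part of the training script:

import numpy as np
from skimage.color import rgb2yuv, yuv2rgb

rgb = np.random.rand(256, 256, 3)                  # stand-in for a 256x256 training image
yuv = rgb2yuv(rgb)
y = yuv[..., 0] - 0.5                              # generator input, roughly zero-centred
uv = np.stack([yuv[..., 1] / 0.436,
               yuv[..., 2] / 0.615], axis=0)       # regression target, roughly in [-1, 1]

# Undo the scaling to recover a displayable image from (y, uv).
recon = yuv2rgb(np.dstack([y + 0.5, uv[0] * 0.436, uv[1] * 0.615]))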
torch.save(G.state_dict(), os.path.join(args.checkpoint_location,'weights','G'+str(epoch)+'.pth')) + if args.test_image is not None: + test_res = G(test_var) + uv=test_res.cpu().detach().numpy() + uv[:,0,:,:] *= 0.436 + uv[:,1,:,:] *= 0.615 + test_yuv = np.concatenate([test_inf,uv],axis=1).reshape(3,256,256) + test_rgb = yuv2rgb(test_yuv.transpose(1,2,0)) + cv2.imwrite(os.path.join(args.checkpoint_location,'test_epoch_'+str(epoch)+'.jpg'),(test_rgb.clip(min=0,max=1)*256)[:,:,[2,1,0]]) +torch.save(D.state_dict(), os.path.join(args.checkpoint_location,'D_final.pth')) +torch.save(G.state_dict(), os.path.join(args.checkpoint_location,'G_final.pth')) diff --git a/gimp-plugins/pytorch-SRResNet/__pycache__/srresnet.cpython-38.pyc b/gimp-plugins/pytorch-SRResNet/__pycache__/srresnet.cpython-38.pyc new file mode 100755 index 0000000000000000000000000000000000000000..a558116dc11bde5c0f90883b469b6e4b7ca88dc1 GIT binary patch literal 3791 zcmb7H&2t+`74M!eeao_=Bu=uS47(I)+1M4cd@q}@@g`6N19rkeRWLPCkL zVk1*jIaPZ~Y7ZQMlXT351OEko!uE}#DwJ-V_X5AyGqU9msHIi?rr+!CH{J8z@7Jxz zl}eGJ{IPcQo2Qo<`v)nr#es4cRs9M=Fu^;lnLc^fYFa#P+fDlc6PB=FFky$*j&o!; zU0599!s2EY4;D}4V98~coZy?Rk)OafTW^t^Eq11;chCq~lMB|gK4EPxxUgR2o3^lp z^MW;)>5ye~Dc@hQc(QB@~pxut*@SlJF z6P3T+fhiZo{Z7yd8)faPK|hqz!s~S2_oFyUd|z8}ti5}^`01+9ZVS)by84q?B|+Q@ z*L$)HvmM2^b>U$c?Cx)b4FE9&-d^*e*DC{`=|{iG*b+qX715W4j+Sy57kDh`w7 z{=U3~C3~o<48eHSTI4nS7Wu*AnMf~B_tm0pP7CF44)q?Y`aZ;vA0mh&hKkUStRWwp z-=PhSsx!37a%d;+$Q#;2Yiyy<-ej-&dLt*V!-rfXafQTNByK=x*X%;uh)lzgH_61G zH!BENoJ7gKFM=e{9%9=cByx$S8S!Nrm}~dsvp|YDD9QkmJNydvS>^{<&+aoDi?l%~ zOF}61x(=z4*+!yKs{uCPP;xr{NNNU$z27Np6Wg@aUm=Tlz^mx;R!7B zq9BSXED^hWQV^vvft0}K0LT`A>)=P=rCBA|%di$i+03dW_NbbTt;~*veNI%z+<-WH z%NHFtrZ?5H(frK1=dsR(Fn3 zO9GXoe0wBoWF;Xf3KKYB6T*;0gKeQwx!brY7x0v~NDwS#odgaIri;WLpcP8(Yh)#q z=wdh64gF5Ae@s2Vwc%!+Hxs&%kXLD>_ayeA=;CI$Hz2$O9bNuK^gQfrZV%e+PN)lm zzG?-X@crkyWag$o(z!J4l`cjKWMa5Wn!OUyYwuYYZEYvoG3d+1U^Sc!+UW+#Hi&Ca zCX$Fny9c4{`3*b6ZCCSH8dxl$H^l~RnHH-ZJ< zLDeMu2|G=}LtZEz6pnkS>gy0&47|gJ))7a3)DFQ#U@3ltXd|lS#M%K9y}~gfzkb>Wi4>7W%_c*qw-nH zn(nrca_=|s3i&M(-y!jB5^s}uhXhf)`~gJ9!SbtUYa7{;&RL>4^k*Pk z2O%TcEmG(z>Bcj{1^G8HNK*XeH%J&1(DyO~D~SzL-%Hdwp;dwmofe8fheWxV_Y90$ zP)`**)Bc|uk0I2xQ-n(3rMHti^#&+DOQ<6Uq~n6_EY#z>4s0F~rO3auOh3^l=yiG* z0wv}I!gj&9(rG^!SGr(aNtDj{yLeproWJq~{>q|!&fmr3h}viNiS<=3@K+I)bN6G-Xwz!{at0EAyb>Q@c%Z-Dsc4Dq{1)~8(l6*OMk znIA2PnwTeAkKfDsbEf~*asNWv5B(Cx*Tp3whEMt4D?|A88NxSQe~#wm3Q|I@LQH;7 zt=kY0L%u}p$0R^TQ^IFkzpek}!QT)5a3{0;>y9}P8kf^^;33U72&hwwSjQX&<^a&8 zkAkGNo!uhy?J!6NAb;&W#3eP5+HSXQlUUWc&1kFJ1NY}t+K?ZRryr8|ki;DlcS)p7 z-Hvd5X;_BeP--j^p&W t`VrZ@pPAdMxo)O643l0<=$m;1IoJ6Q(j)&v4$WMa>FCW{3-b%h{{gx=KQ#aV literal 0 HcmV?d00001 diff --git a/gimp-plugins/pytorch-SRResNet/data/generate_train_srresnet.m b/gimp-plugins/pytorch-SRResNet/data/generate_train_srresnet.m new file mode 100755 index 0000000..31ab342 --- /dev/null +++ b/gimp-plugins/pytorch-SRResNet/data/generate_train_srresnet.m @@ -0,0 +1,92 @@ +clear; +close all; +folder = 'path/to/train/folder'; + +savepath = 'srresnet_x4.h5'; + +%% scale factors +scale = 4; + +size_label = 96; +size_input = size_label/scale; +stride = 48; + +%% downsizing +downsizes = [1,0.7,0.5]; + +data = zeros(size_input, size_input, 3, 1); +label = zeros(size_label, size_label, 3, 1); + +count = 0; +margain = 0; + +%% generate data +filepaths = []; +filepaths = [filepaths; dir(fullfile(folder, '*.jpg'))]; +filepaths = [filepaths; dir(fullfile(folder, '*.bmp'))]; +filepaths = [filepaths; dir(fullfile(folder, '*.png'))]; + +length(filepaths) + +for i = 1 : length(filepaths) + for flip = 1: 3 + for 
degree = 1 : 4 + for downsize = 1 : length(downsizes) + image = imread(fullfile(folder,filepaths(i).name)); + if flip == 1 + image = flipdim(image ,1); + end + if flip == 2 + image = flipdim(image ,2); + end + + image = imrotate(image, 90 * (degree - 1)); + image = imresize(image,downsizes(downsize),'bicubic'); + + if size(image,3)==3 + %image = rgb2ycbcr(image); + image = im2double(image); + im_label = modcrop(image, scale); + [hei,wid, c] = size(im_label); + + filepaths(i).name + for x = 1 + margain : stride : hei-size_label+1 - margain + for y = 1 + margain :stride : wid-size_label+1 - margain + subim_label = im_label(x : x+size_label-1, y : y+size_label-1, :); + subim_input = imresize(subim_label,1/scale,'bicubic'); + % figure; + % imshow(subim_input); + % figure; + % imshow(subim_label); + count=count+1; + data(:, :, :, count) = subim_input; + label(:, :, :, count) = subim_label; + end + end + end + end + end + end +end + +order = randperm(count); +data = data(:, :, :, order); +label = label(:, :, :, order); + +%% writing to HDF5 +chunksz = 64; +created_flag = false; +totalct = 0; + +for batchno = 1:floor(count/chunksz) + batchno + last_read=(batchno-1)*chunksz; + batchdata = data(:,:,:,last_read+1:last_read+chunksz); + batchlabs = label(:,:,:,last_read+1:last_read+chunksz); + startloc = struct('dat',[1,1,1,totalct+1], 'lab', [1,1,1,totalct+1]); + curr_dat_sz = store2hdf5(savepath, batchdata, batchlabs, ~created_flag, startloc, chunksz); + created_flag = true; + totalct = curr_dat_sz(end); +end + +h5disp(savepath); \ No newline at end of file diff --git a/gimp-plugins/pytorch-SRResNet/data/modcrop.m b/gimp-plugins/pytorch-SRResNet/data/modcrop.m new file mode 100755 index 0000000..728c688 --- /dev/null +++ b/gimp-plugins/pytorch-SRResNet/data/modcrop.m @@ -0,0 +1,12 @@ +function imgs = modcrop(imgs, modulo) +if size(imgs,3)==1 + sz = size(imgs); + sz = sz - mod(sz, modulo); + imgs = imgs(1:sz(1), 1:sz(2)); +else + tmpsz = size(imgs); + sz = tmpsz(1:2); + sz = sz - mod(sz, modulo); + imgs = imgs(1:sz(1), 1:sz(2),:); +end + diff --git a/gimp-plugins/pytorch-SRResNet/data/store2hdf5.m b/gimp-plugins/pytorch-SRResNet/data/store2hdf5.m new file mode 100755 index 0000000..0a0016d --- /dev/null +++ b/gimp-plugins/pytorch-SRResNet/data/store2hdf5.m @@ -0,0 +1,59 @@ +function [curr_dat_sz, curr_lab_sz] = store2hdf5(filename, data, labels, create, startloc, chunksz) + % *data* is W*H*C*N matrix of images should be normalized (e.g. to lie between 0 and 1) beforehand + % *label* is D*N matrix of labels (D labels per sample) + % *create* [0/1] specifies whether to create file newly or to append to previously created file, useful to store information in batches when a dataset is too big to be held in memory (default: 1) + % *startloc* (point at which to start writing data). 
By default, + % if create=1 (create mode), startloc.data=[1 1 1 1], and startloc.lab=[1 1]; + % if create=0 (append mode), startloc.data=[1 1 1 K+1], and startloc.lab = [1 K+1]; where K is the current number of samples stored in the HDF + % chunksz (used only in create mode), specifies number of samples to be stored per chunk (see HDF5 documentation on chunking) for creating HDF5 files with unbounded maximum size - TLDR; higher chunk sizes allow faster read-write operations + + % verify that format is right + dat_dims=size(data); + lab_dims=size(labels); + num_samples=dat_dims(end); + + assert(lab_dims(end)==num_samples, 'Number of samples should be matched between data and labels'); + + if ~exist('create','var') + create=true; + end + + + if create + %fprintf('Creating dataset with %d samples\n', num_samples); + if ~exist('chunksz', 'var') + chunksz=1000; + end + if exist(filename, 'file') + fprintf('Warning: replacing existing file %s \n', filename); + delete(filename); + end + h5create(filename, '/data', [dat_dims(1:end-1) Inf], 'Datatype', 'single', 'ChunkSize', [dat_dims(1:end-1) chunksz]); % width, height, channels, number + h5create(filename, '/label', [lab_dims(1:end-1) Inf], 'Datatype', 'single', 'ChunkSize', [lab_dims(1:end-1) chunksz]); % width, height, channels, number + if ~exist('startloc','var') + startloc.dat=[ones(1,length(dat_dims)-1), 1]; + startloc.lab=[ones(1,length(lab_dims)-1), 1]; + end + else % append mode + if ~exist('startloc','var') + info=h5info(filename); + prev_dat_sz=info.Datasets(1).Dataspace.Size; + prev_lab_sz=info.Datasets(2).Dataspace.Size; + assert(prev_dat_sz(1:end-1)==dat_dims(1:end-1), 'Data dimensions must match existing dimensions in dataset'); + assert(prev_lab_sz(1:end-1)==lab_dims(1:end-1), 'Label dimensions must match existing dimensions in dataset'); + startloc.dat=[ones(1,length(dat_dims)-1), prev_dat_sz(end)+1]; + startloc.lab=[ones(1,length(lab_dims)-1), prev_lab_sz(end)+1]; + end + end + + if ~isempty(data) + h5write(filename, '/data', single(data), startloc.dat, size(data)); + h5write(filename, '/label', single(labels), startloc.lab, size(labels)); + end + + if nargout + info=h5info(filename); + curr_dat_sz=info.Datasets(1).Dataspace.Size; + curr_lab_sz=info.Datasets(2).Dataspace.Size; + end +end diff --git a/gimp-plugins/pytorch-SRResNet/dataset.py b/gimp-plugins/pytorch-SRResNet/dataset.py new file mode 100755 index 0000000..efdd959 --- /dev/null +++ b/gimp-plugins/pytorch-SRResNet/dataset.py @@ -0,0 +1,16 @@ +import torch.utils.data as data +import torch +import h5py + +class DatasetFromHdf5(data.Dataset): + def __init__(self, file_path): + super(DatasetFromHdf5, self).__init__() + hf = h5py.File(file_path) + self.data = hf.get("data") + self.target = hf.get("label") + + def __getitem__(self, index): + return torch.from_numpy(self.data[index,:,:,:]).float(), torch.from_numpy(self.target[index,:,:,:]).float() + + def __len__(self): + return self.data.shape[0] \ No newline at end of file diff --git a/gimp-plugins/pytorch-SRResNet/demo.py b/gimp-plugins/pytorch-SRResNet/demo.py new file mode 100755 index 0000000..ae24b90 --- /dev/null +++ b/gimp-plugins/pytorch-SRResNet/demo.py @@ -0,0 +1,85 @@ +import argparse, os +import torch +from torch.autograd import Variable +import numpy as np +import time, math +import scipy.io as sio +import matplotlib.pyplot as plt + +parser = argparse.ArgumentParser(description="PyTorch SRResNet Demo") +parser.add_argument("--cuda", action="store_true", help="use cuda?") +parser.add_argument("--model", 
default="model/model_srresnet.pth", type=str, help="model path") +parser.add_argument("--image", default="butterfly_GT", type=str, help="image name") +parser.add_argument("--dataset", default="Set5", type=str, help="dataset name") +parser.add_argument("--scale", default=4, type=int, help="scale factor, Default: 4") +parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)") + +def PSNR(pred, gt, shave_border=0): + height, width = pred.shape[:2] + pred = pred[shave_border:height - shave_border, shave_border:width - shave_border] + gt = gt[shave_border:height - shave_border, shave_border:width - shave_border] + imdff = pred - gt + rmse = math.sqrt(np.mean(imdff ** 2)) + if rmse == 0: + return 100 + return 20 * math.log10(255.0 / rmse) + +opt = parser.parse_args() +cuda = opt.cuda + +if cuda: + print("=> use gpu id: '{}'".format(opt.gpus)) + os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus + if not torch.cuda.is_available(): + raise Exception("No GPU found or Wrong gpu id, please run without --cuda") + +model = torch.load(opt.model)["model"] + +im_gt = sio.loadmat("testsets/" + opt.dataset + "/" + opt.image + ".mat")['im_gt'] +im_b = sio.loadmat("testsets/" + opt.dataset + "/" + opt.image + ".mat")['im_b'] +im_l = sio.loadmat("testsets/" + opt.dataset + "/" + opt.image + ".mat")['im_l'] + +im_gt = im_gt.astype(float).astype(np.uint8) +im_b = im_b.astype(float).astype(np.uint8) +im_l = im_l.astype(float).astype(np.uint8) + +im_input = im_l.astype(np.float32).transpose(2,0,1) +im_input = im_input.reshape(1,im_input.shape[0],im_input.shape[1],im_input.shape[2]) +im_input = Variable(torch.from_numpy(im_input/255.).float()) + +if cuda: + model = model.cuda() + im_input = im_input.cuda() +else: + model = model.cpu() + +start_time = time.time() +out = model(im_input) +elapsed_time = time.time() - start_time + +out = out.cpu() + +im_h = out.data[0].numpy().astype(np.float32) + +im_h = im_h*255. +im_h[im_h<0] = 0 +im_h[im_h>255.] = 255. 
+im_h = im_h.transpose(1,2,0) + +print("Dataset=",opt.dataset) +print("Scale=",opt.scale) +print("It takes {}s for processing".format(elapsed_time)) + +fig = plt.figure() +ax = plt.subplot("131") +ax.imshow(im_gt) +ax.set_title("GT") + +ax = plt.subplot("132") +ax.imshow(im_b) +ax.set_title("Input(Bicubic)") + +ax = plt.subplot("133") +ax.imshow(im_h.astype(np.uint8)) +ax.set_title("Output(SRResNet)") +plt.show() diff --git a/gimp-plugins/pytorch-SRResNet/eval.py b/gimp-plugins/pytorch-SRResNet/eval.py new file mode 100755 index 0000000..58f293f --- /dev/null +++ b/gimp-plugins/pytorch-SRResNet/eval.py @@ -0,0 +1,93 @@ +import matlab.engine +import argparse, os +import torch +from torch.autograd import Variable +import numpy as np +import time, math, glob +import scipy.io as sio +import cv2 + +parser = argparse.ArgumentParser(description="PyTorch SRResNet Eval") +parser.add_argument("--cuda", action="store_true", help="use cuda?") +parser.add_argument("--model", default="model/model_srresnet.pth", type=str, help="model path") +parser.add_argument("--dataset", default="Set5", type=str, help="dataset name, Default: Set5") +parser.add_argument("--scale", default=4, type=int, help="scale factor, Default: 4") +parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)") + +def PSNR(pred, gt, shave_border=0): + height, width = pred.shape[:2] + pred = pred[shave_border:height - shave_border, shave_border:width - shave_border] + gt = gt[shave_border:height - shave_border, shave_border:width - shave_border] + imdff = pred - gt + rmse = math.sqrt(np.mean(imdff ** 2)) + if rmse == 0: + return 100 + return 20 * math.log10(255.0 / rmse) + +opt = parser.parse_args() +cuda = opt.cuda +eng = matlab.engine.start_matlab() + +if cuda: + print("=> use gpu id: '{}'".format(opt.gpus)) + os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus + if not torch.cuda.is_available(): + raise Exception("No GPU found or Wrong gpu id, please run without --cuda") + +model = torch.load(opt.model)["model"] + +image_list = glob.glob("./testsets/" + opt.dataset + "/*.*") + +avg_psnr_predicted = 0.0 +avg_psnr_bicubic = 0.0 +avg_elapsed_time = 0.0 + +for image_name in image_list: + print("Processing ", image_name) + im_gt_y = sio.loadmat(image_name)['im_gt_y'] + im_b_y = sio.loadmat(image_name)['im_b_y'] + im_l = sio.loadmat(image_name)['im_l'] + + im_gt_y = im_gt_y.astype(float) + im_b_y = im_b_y.astype(float) + im_l = im_l.astype(float) + + psnr_bicubic = PSNR(im_gt_y, im_b_y,shave_border=opt.scale) + avg_psnr_bicubic += psnr_bicubic + + im_input = im_l.astype(np.float32).transpose(2,0,1) + im_input = im_input.reshape(1,im_input.shape[0],im_input.shape[1],im_input.shape[2]) + im_input = Variable(torch.from_numpy(im_input/255.).float()) + + if cuda: + model = model.cuda() + im_input = im_input.cuda() + else: + model = model.cpu() + + start_time = time.time() + HR_4x = model(im_input) + elapsed_time = time.time() - start_time + avg_elapsed_time += elapsed_time + + HR_4x = HR_4x.cpu() + + im_h = HR_4x.data[0].numpy().astype(np.float32) + + im_h = im_h*255. + im_h = np.clip(im_h, 0., 255.) + im_h = im_h.transpose(1,2,0).astype(np.float32) + + im_h_matlab = matlab.double((im_h / 255.).tolist()) + im_h_ycbcr = eng.rgb2ycbcr(im_h_matlab) + im_h_ycbcr = np.array(im_h_ycbcr._data).reshape(im_h_ycbcr.size, order='F').astype(np.float32) * 255. 
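eval.py calls into the MATLAB engine purely for rgb2ycbcr. For orientation, MATLAB's BT.601 luma for double input in [0, 1] can be sketched directly in NumPy; this is an approximation for reference, not what the script itself uses:

import numpy as np

def matlab_style_luma(rgb01):
    # MATLAB rgb2ycbcr on double input in [0, 1]:
    # Y = 16 + 65.481*R + 128.553*G + 24.966*B, giving luma in [16, 235].
    r, g, b = rgb01[..., 0], rgb01[..., 1], rgb01[..., 2]
    return 16.0 + 65.481 * r + 128.553 * g + 24.966 * b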
+ im_h_y = im_h_ycbcr[:,:,0] + + psnr_predicted = PSNR(im_gt_y, im_h_y,shave_border=opt.scale) + avg_psnr_predicted += psnr_predicted + +print("Scale=", opt.scale) +print("Dataset=", opt.dataset) +print("PSNR_predicted=", avg_psnr_predicted/len(image_list)) +print("PSNR_bicubic=", avg_psnr_bicubic/len(image_list)) +print("It takes average {}s for processing".format(avg_elapsed_time/len(image_list))) diff --git a/gimp-plugins/pytorch-SRResNet/main_srresnet.py b/gimp-plugins/pytorch-SRResNet/main_srresnet.py new file mode 100755 index 0000000..f074e68 --- /dev/null +++ b/gimp-plugins/pytorch-SRResNet/main_srresnet.py @@ -0,0 +1,166 @@ +import argparse, os +import torch +import math, random +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.optim as optim +from torch.autograd import Variable +from torch.utils.data import DataLoader +from srresnet import _NetG +from dataset import DatasetFromHdf5 +from torchvision import models +import torch.utils.model_zoo as model_zoo + +# Training settings +parser = argparse.ArgumentParser(description="PyTorch SRResNet") +parser.add_argument("--batchSize", type=int, default=16, help="training batch size") +parser.add_argument("--nEpochs", type=int, default=500, help="number of epochs to train for") +parser.add_argument("--lr", type=float, default=1e-4, help="Learning Rate. Default=1e-4") +parser.add_argument("--step", type=int, default=200, help="Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=500") +parser.add_argument("--cuda", action="store_true", help="Use cuda?") +parser.add_argument("--resume", default="", type=str, help="Path to checkpoint (default: none)") +parser.add_argument("--start-epoch", default=1, type=int, help="Manual epoch number (useful on restarts)") +parser.add_argument("--threads", type=int, default=0, help="Number of threads for data loader to use, Default: 1") +parser.add_argument("--pretrained", default="", type=str, help="path to pretrained model (default: none)") +parser.add_argument("--vgg_loss", action="store_true", help="Use content loss?") +parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)") + +def main(): + + global opt, model, netContent + opt = parser.parse_args() + print(opt) + + cuda = opt.cuda + if cuda: + print("=> use gpu id: '{}'".format(opt.gpus)) + os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus + if not torch.cuda.is_available(): + raise Exception("No GPU found or Wrong gpu id, please run without --cuda") + + opt.seed = random.randint(1, 10000) + print("Random Seed: ", opt.seed) + torch.manual_seed(opt.seed) + if cuda: + torch.cuda.manual_seed(opt.seed) + + cudnn.benchmark = True + + print("===> Loading datasets") + train_set = DatasetFromHdf5("/path/to/your/hdf5/data/like/rgb_srresnet_x4.h5") + training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, \ + batch_size=opt.batchSize, shuffle=True) + + if opt.vgg_loss: + print('===> Loading VGG model') + netVGG = models.vgg19() + netVGG.load_state_dict(model_zoo.load_url('https://download.pytorch.org/models/vgg19-dcbb9e9d.pth')) + class _content_model(nn.Module): + def __init__(self): + super(_content_model, self).__init__() + self.feature = nn.Sequential(*list(netVGG.features.children())[:-1]) + + def forward(self, x): + out = self.feature(x) + return out + + netContent = _content_model() + + print("===> Building model") + model = _NetG() + criterion = nn.MSELoss(size_average=False) + + print("===> Setting GPU") + if cuda: + model = model.cuda() + criterion 
= criterion.cuda() + if opt.vgg_loss: + netContent = netContent.cuda() + + # optionally resume from a checkpoint + if opt.resume: + if os.path.isfile(opt.resume): + print("=> loading checkpoint '{}'".format(opt.resume)) + checkpoint = torch.load(opt.resume) + opt.start_epoch = checkpoint["epoch"] + 1 + model.load_state_dict(checkpoint["model"].state_dict()) + else: + print("=> no checkpoint found at '{}'".format(opt.resume)) + + # optionally copy weights from a checkpoint + if opt.pretrained: + if os.path.isfile(opt.pretrained): + print("=> loading model '{}'".format(opt.pretrained)) + weights = torch.load(opt.pretrained) + model.load_state_dict(weights['model'].state_dict()) + else: + print("=> no model found at '{}'".format(opt.pretrained)) + + print("===> Setting Optimizer") + optimizer = optim.Adam(model.parameters(), lr=opt.lr) + + print("===> Training") + for epoch in range(opt.start_epoch, opt.nEpochs + 1): + train(training_data_loader, optimizer, model, criterion, epoch) + save_checkpoint(model, epoch) + +def adjust_learning_rate(optimizer, epoch): + """Sets the learning rate to the initial LR decayed by 10""" + lr = opt.lr * (0.1 ** (epoch // opt.step)) + return lr + +def train(training_data_loader, optimizer, model, criterion, epoch): + + lr = adjust_learning_rate(optimizer, epoch-1) + + for param_group in optimizer.param_groups: + param_group["lr"] = lr + + print("Epoch={}, lr={}".format(epoch, optimizer.param_groups[0]["lr"])) + model.train() + + for iteration, batch in enumerate(training_data_loader, 1): + + input, target = Variable(batch[0]), Variable(batch[1], requires_grad=False) + + if opt.cuda: + input = input.cuda() + target = target.cuda() + + output = model(input) + loss = criterion(output, target) + + if opt.vgg_loss: + content_input = netContent(output) + content_target = netContent(target) + content_target = content_target.detach() + content_loss = criterion(content_input, content_target) + + optimizer.zero_grad() + + if opt.vgg_loss: + netContent.zero_grad() + content_loss.backward(retain_graph=True) + + loss.backward() + + optimizer.step() + + if iteration%100 == 0: + if opt.vgg_loss: + print("===> Epoch[{}]({}/{}): Loss: {:.5} Content_loss {:.5}".format(epoch, iteration, len(training_data_loader), loss.data[0], content_loss.data[0])) + else: + print("===> Epoch[{}]({}/{}): Loss: {:.5}".format(epoch, iteration, len(training_data_loader), loss.data[0])) + +def save_checkpoint(model, epoch): + model_out_path = "checkpoint/" + "model_epoch_{}.pth".format(epoch) + state = {"epoch": epoch ,"model": model} + if not os.path.exists("checkpoint/"): + os.makedirs("checkpoint/") + + torch.save(state, model_out_path) + + print("Checkpoint saved to {}".format(model_out_path)) + +if __name__ == "__main__": + main() diff --git a/gimp-plugins/pytorch-SRResNet/srresnet.py b/gimp-plugins/pytorch-SRResNet/srresnet.py new file mode 100755 index 0000000..7d8e6fc --- /dev/null +++ b/gimp-plugins/pytorch-SRResNet/srresnet.py @@ -0,0 +1,141 @@ +import torch +import torch.nn as nn +import math + +class _Residual_Block(nn.Module): + def __init__(self): + super(_Residual_Block, self).__init__() + + self.conv1 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False) + self.in1 = nn.InstanceNorm2d(64, affine=True) + self.relu = nn.LeakyReLU(0.2, inplace=True) + self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False) + self.in2 = nn.InstanceNorm2d(64, affine=True) + + def forward(self, x): + identity_data = x + 
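adjust_learning_rate above implements a plain step decay, dividing the learning rate by 10 every opt.step epochs (train() passes epoch-1). A quick sketch with the script's defaults (lr=1e-4, step=200) shows the schedule; the epoch values are arbitrary examples:

base_lr, step = 1e-4, 200
for epoch in [1, 200, 201, 400, 401]:
    lr = base_lr * (0.1 ** ((epoch - 1) // step))
    print(epoch, lr)  # -> 1e-04, 1e-04, 1e-05, 1e-05, 1e-06 (up to float rounding)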
output = self.relu(self.in1(self.conv1(x))) + output = self.in2(self.conv2(output)) + output = torch.add(output,identity_data) + return output + +class _NetG(nn.Module): + def __init__(self): + super(_NetG, self).__init__() + + self.conv_input = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=9, stride=1, padding=4, bias=False) + self.relu = nn.LeakyReLU(0.2, inplace=True) + + self.residual = self.make_layer(_Residual_Block, 16) + + self.conv_mid = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False) + self.bn_mid = nn.InstanceNorm2d(64, affine=True) + + self.upscale4x = nn.Sequential( + nn.Conv2d(in_channels=64, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False), + nn.PixelShuffle(2), + nn.LeakyReLU(0.2, inplace=True), + nn.Conv2d(in_channels=64, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False), + nn.PixelShuffle(2), + nn.LeakyReLU(0.2, inplace=True), + ) + + self.conv_output = nn.Conv2d(in_channels=64, out_channels=3, kernel_size=9, stride=1, padding=4, bias=False) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + if m.bias is not None: + m.bias.data.zero_() + + def make_layer(self, block, num_of_layer): + layers = [] + for _ in range(num_of_layer): + layers.append(block()) + return nn.Sequential(*layers) + + def forward(self, x): + out = self.relu(self.conv_input(x)) + residual = out + out = self.residual(out) + out = self.bn_mid(self.conv_mid(out)) + out = torch.add(out,residual) + out = self.upscale4x(out) + out = self.conv_output(out) + return out + +class _NetD(nn.Module): + def __init__(self): + super(_NetD, self).__init__() + + self.features = nn.Sequential( + + # input is (3) x 96 x 96 + nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False), + nn.LeakyReLU(0.2, inplace=True), + + # state size. (64) x 96 x 96 + nn.Conv2d(in_channels=64, out_channels=64, kernel_size=4, stride=2, padding=1, bias=False), + nn.BatchNorm2d(64), + nn.LeakyReLU(0.2, inplace=True), + + # state size. (64) x 96 x 96 + nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, bias=False), + nn.BatchNorm2d(128), + nn.LeakyReLU(0.2, inplace=True), + + # state size. (64) x 48 x 48 + nn.Conv2d(in_channels=128, out_channels=128, kernel_size=4, stride=2, padding=1, bias=False), + nn.BatchNorm2d(128), + nn.LeakyReLU(0.2, inplace=True), + + # state size. (128) x 48 x 48 + nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1, bias=False), + nn.BatchNorm2d(256), + nn.LeakyReLU(0.2, inplace=True), + + # state size. (256) x 24 x 24 + nn.Conv2d(in_channels=256, out_channels=256, kernel_size=4, stride=2, padding=1, bias=False), + nn.BatchNorm2d(256), + nn.LeakyReLU(0.2, inplace=True), + + # state size. (256) x 12 x 12 + nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1, bias=False), + nn.BatchNorm2d(512), + nn.LeakyReLU(0.2, inplace=True), + + # state size. 
(512) x 12 x 12
+            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=4, stride=2, padding=1, bias=False),
+            nn.BatchNorm2d(512),
+            nn.LeakyReLU(0.2, inplace=True),
+        )
+
+        self.LeakyReLU = nn.LeakyReLU(0.2, inplace=True)
+        self.fc1 = nn.Linear(512 * 6 * 6, 1024)
+        self.fc2 = nn.Linear(1024, 1)
+        self.sigmoid = nn.Sigmoid()
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                m.weight.data.normal_(0.0, 0.02)
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.normal_(1.0, 0.02)
+                m.bias.data.fill_(0)
+
+    def forward(self, input):
+
+        out = self.features(input)
+
+        # state size. (512) x 6 x 6
+        out = out.view(out.size(0), -1)
+
+        # state size. (512 x 6 x 6)
+        out = self.fc1(out)
+
+        # state size. (1024)
+        out = self.LeakyReLU(out)
+
+        out = self.fc2(out)
+        out = self.sigmoid(out)
+        return out.view(-1, 1).squeeze(1)
\ No newline at end of file
diff --git a/gimp-plugins/pytorch-SRResNet/srresnet.pyc b/gimp-plugins/pytorch-SRResNet/srresnet.pyc
new file mode 100755
index 0000000000000000000000000000000000000000..740404a40280e28694769bd307a745228c6bcaa7
GIT binary patch
literal 5412 (base85 payload omitted)
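The discriminator's first linear layer expects 512x6x6 features, which corresponds to 96x96 inputs (96 halved by four stride-2 convolutions is 6), matching the 96-pixel training labels. A shape sanity check, hypothetical and run outside the patch, confirms that the generator and discriminator line up; it assumes srresnet.py is importable from the working directory:

import torch
from srresnet import _NetG, _NetD

g, d = _NetG(), _NetD()
lr_patch = torch.randn(1, 3, 24, 24)   # 24 * 4 = 96, the label size used for training
with torch.no_grad():
    sr = g(lr_patch)                   # -> (1, 3, 96, 96) after the two PixelShuffle(2) stages
    score = d(sr)                      # -> shape (1,), a real/fake probability
print(sr.shape, score.shape)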