import pytest
from PIL import Image
from pytorch_lightning import seed_everything

from imaginairy import ImaginePrompt, imagine
from imaginairy.enhancers.bool_masker import MASK_PROMPT
from imaginairy.enhancers.clip_masking import get_img_mask
from imaginairy.enhancers.describe_image_blip import generate_caption
from imaginairy.enhancers.describe_image_clip import find_img_text_similarity
from imaginairy.enhancers.face_restoration_codeformer import enhance_faces
from imaginairy.utils import get_device
from tests import TESTS_FOLDER
from tests.utils import assert_image_similar_to_expectation


@pytest.mark.skipif(
    get_device() == "cpu", reason="TypeError: Got unsupported ScalarType BFloat16"
)
def test_fix_faces(filename_base_for_orig_outputs, filename_base_for_outputs):
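    """Check that enhance_faces cleans up a distorted face and matches the stored reference image."""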
    distorted_img = Image.open(f"{TESTS_FOLDER}/data/distorted_face.png")
    seed_everything(1)
    img = enhance_faces(distorted_img)

    distorted_img.save(f"{filename_base_for_orig_outputs}__orig.jpg")
    img_path = f"{filename_base_for_outputs}.png"
    assert_image_similar_to_expectation(img, img_path=img_path, threshold=2800)


@pytest.mark.skipif(get_device() == "cpu", reason="Too slow to run on CPU")
def test_clip_masking(filename_base_for_outputs):
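    """Build CLIP text masks at several strength modifiers, then run a masked generation and compare outputs to reference images."""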
    img = Image.open(f"{TESTS_FOLDER}/data/girl_with_a_pearl_earring_large.jpg")

    for mask_modifier in ["*0.5", "*6", "+1", "+11", "+101", "-25"]:
        pred_bin, pred_grayscale = get_img_mask(
            img,
            f"face AND NOT (bandana OR hair OR blue fabric){{{mask_modifier}}}",
            threshold=0.5,
        )
        img_path = f"{filename_base_for_outputs}_mask{mask_modifier}_g.png"
        assert_image_similar_to_expectation(
            pred_grayscale, img_path=img_path, threshold=300
        )

        img_path = f"{filename_base_for_outputs}_mask{mask_modifier}_bin.png"
        assert_image_similar_to_expectation(pred_bin, img_path=img_path, threshold=10)

    prompt = ImaginePrompt(
        "",
        init_image=img,
        init_image_strength=0.5,
        # lower steps for faster tests
        steps=40,
        mask_prompt="(head OR face){*5}",
        mask_mode="keep",
        upscale=False,
        fix_faces=True,
        seed=42,
        sampler_type="plms",
    )

    result = next(imagine(prompt))
    img_path = f"{filename_base_for_outputs}.png"
    assert_image_similar_to_expectation(result.img, img_path=img_path, threshold=600)


# Each case pairs a mask expression with its expected parsed form.
boolean_mask_test_cases = [
    (
        "fruit bowl",
        "'fruit bowl'",
    ),
    (
        "((((fruit bowl))))",
        "'fruit bowl'",
    ),
    (
        "fruit OR bowl",
        "('fruit' OR 'bowl')",
    ),
    (
        "fruit|bowl",
        "('fruit' OR 'bowl')",
    ),
    (
        "fruit | bowl",
        "('fruit' OR 'bowl')",
    ),
    (
        "fruit OR bowl OR pear",
        "('fruit' OR 'bowl' OR 'pear')",
    ),
    (
        "fruit AND bowl",
        "('fruit' AND 'bowl')",
    ),
    (
        "fruit & bowl",
        "('fruit' AND 'bowl')",
    ),
    (
        "fruit AND NOT green",
        "('fruit' AND NOT 'green')",
    ),
    (
        "fruit bowl{+0.5}",
        "'fruit bowl'+0.5",
    ),
    (
        "fruit bowl{+0.5} OR fruit",
        "('fruit bowl'+0.5 OR 'fruit')",
    ),
    (
        "NOT pizza",
        "NOT 'pizza'",
    ),
    (
        "car AND (wheels OR trunk OR engine OR windows) AND NOT (truck OR headlights{*10})",
        "('car' AND ('wheels' OR 'trunk' OR 'engine' OR 'windows') AND NOT ('truck' OR 'headlights'*10))",
    ),
    (
        "car AND (wheels OR trunk OR engine OR windows OR headlights) AND NOT (truck OR headlights){*10}",
        "('car' AND ('wheels' OR 'trunk' OR 'engine' OR 'windows' OR 'headlights') AND NOT ('truck' OR 'headlights')*10)",
    ),
]


@pytest.mark.parametrize("mask_text,expected", boolean_mask_test_cases)
def test_clip_mask_parser(mask_text, expected):
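    """The boolean mask grammar should parse each expression into its expected canonical form."""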
    parsed = MASK_PROMPT.parseString(mask_text)[0][0]
    assert str(parsed) == expected


@pytest.mark.skipif(get_device() == "cpu", reason="Too slow to run on CPU")
def test_describe_picture():
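    """BLIP captioning should return one of the known captions for the reference painting."""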
    img = Image.open(f"{TESTS_FOLDER}/data/girl_with_a_pearl_earring.jpg")
    caption = generate_caption(img)
    assert caption in {
        "a painting of a girl with a pearl earring wearing a yellow dress and a pearl earring in her ear and a black background",
        "a painting of a girl with a pearl ear wearing a yellow dress and a pearl earring on her left ear and a black background",
    }


@pytest.mark.skipif(get_device() == "cpu", reason="Too slow to run on CPU")
def test_clip_text_comparison():
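    """CLIP similarity should score the accurate descriptions of the painting above the unrelated phrases."""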
    img = Image.open(f"{TESTS_FOLDER}/data/girl_with_a_pearl_earring.jpg")
    phrases = [
        "Johannes Vermeer painting",
        "a painting of a girl with a pearl earring",
        "a bulldozer",
        "photo",
    ]
    probs = find_img_text_similarity(img, phrases)
    assert probs[:2] == [
        (
            "a painting of a girl with a pearl earring",
            pytest.approx(0.2857227921485901, abs=0.01),
        ),
        ("Johannes Vermeer painting", pytest.approx(0.25186583399772644, abs=0.01)),
    ]