feature: large images now stay well-composed thanks to tile controlnet

pull/422/head
Authored by Bryce, committed by Bryce Drennan
parent 2372a71e6c
commit 0c03612d44

@@ -228,6 +228,8 @@ def _generate_single_image(
             result_images["composition"] = comp_img_orig
             result_images["composition-upscaled"] = comp_image
             composition_strength = prompt.composition_strength
+            # workaround: hard-code until the composition_strength bug is fixed in the CLI
+            composition_strength = 0.5
             first_step = int((prompt.steps) * composition_strength)
             noise_step = int((prompt.steps - 1) * composition_strength)
             log_img(comp_img_orig, "comp_image")
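With the hard-coded strength of 0.5, the composition pass hands off to the detail pass at the midpoint of the sampling schedule. A worked sketch of the arithmetic (the step count is an assumed value, not from the commit):

    steps = 30
    composition_strength = 0.5
    first_step = int(steps * composition_strength)        # 15: detail pass starts here
    noise_step = int((steps - 1) * composition_strength)  # 14: noise level used to re-encode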
@@ -235,9 +237,9 @@ def _generate_single_image(
             comp_image_t = pillow_img_to_torch_image(comp_image)
             comp_image_t = comp_image_t.to(sd.device, dtype=sd.dtype)
             init_latent = sd.lda.encode(comp_image_t)
             compose_control_inputs: list[ControlInput] = [
-                ControlInput(mode="depth", image=comp_image, strength=1),
-                ControlInput(mode="hed", image=comp_image, strength=1),
+                # ControlInput(mode="depth", image=comp_image, strength=1),
+                # ControlInput(mode="hed", image=comp_image, strength=1),
+                ControlInput(mode="details", image=comp_image, strength=1),
             ]
             for control_input in compose_control_inputs:
                 (
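The "details" mode is the tile controlnet from the commit title: it conditions the second pass on the upscaled composition image, so fine detail is regenerated without drifting from the overall layout. A usage sketch of the same control mode through imaginairy's Python API; ControlInput's mode/image/strength fields come from the diff above, while the module paths and the control_inputs parameter are assumptions about the public API:

    # sketch, not from the commit; module paths and parameter names assumed
    from imaginairy.api import imagine
    from imaginairy.schema import ControlInput, ImaginePrompt, LazyLoadingImage

    source = LazyLoadingImage(filepath="low_res_render.jpg")  # hypothetical input file
    prompt = ImaginePrompt(
        "a detailed photo of a mountain village",
        control_inputs=[ControlInput(mode="details", image=source, strength=1)],
    )
    result = next(imagine([prompt]))
    result.img.save("rerendered.jpg")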

@@ -114,7 +114,6 @@ def load_model_from_config_old(
     config, weights_location, control_weights_locations=None, half_mode=False
 ):
     model = instantiate_from_config(config.model)
-    print("instantiated")
     base_model_dict = load_state_dict(weights_location, half_mode=half_mode)
     model.init_from_state_dict(base_model_dict)
@@ -233,8 +232,6 @@ def get_diffusion_model_refiners(
     dtype=None,
 ) -> LatentDiffusionModel:
     """Load a diffusion model."""
-    print(weights_config)
-    print(f"for inpainting: {for_inpainting}")
     return _get_diffusion_model_refiners(
         weights_location=weights_config.weights_location,
         for_inpainting=for_inpainting,
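Both hunks above strip leftover debug print() calls from the model-loading path. If that information is still useful during development, a module-level logger keeps it available at debug verbosity without polluting normal output; a minimal sketch using the standard library (assumes the module does not already define a logger):

    import logging

    logger = logging.getLogger(__name__)

    # inside get_diffusion_model_refiners, in place of the removed print() calls:
    logger.debug("weights_config=%s, for_inpainting=%s", weights_config, for_inpainting)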

[Binary files not shown: "Before" and "After" example images updated (2.5 MiB each).]
