fix: use float32 at image render time for wider cpu compatibility

If `x_sample` was a bfloat16 tensor on the GPU but the CPU doesn't support bfloat16, converting it to a numpy array can cause a TypeError:
```
  File "/home/stdiff/.local/lib/python3.10/site-packages/imaginairy/api.py", line 292, in imagine
    x_sample.cpu().numpy(), "c h w -> h w c"

TypeError: Got unsupported ScalarType BFloat16
```
pull/21/head
Bryce 2 years ago
parent 08fca72033
commit 9614a82f12

@ -238,6 +238,7 @@ docker run -it --gpus all -v $HOME/.cache/huggingface:/root/.cache/huggingface -
- stable super-res?
- todo: try with 1-0-0-0 mask at full image resolution (rencoding entire image+predicted image at every step)
- todo: use a gaussian pyramid and only include the "high-detail" level of the pyramid into the next step
- https://www.reddit.com/r/StableDiffusion/comments/xkjjf9/upscale_to_huge_sizes_and_add_detail_with_sd/
- ✅ face enhancers
- ✅ gfpgan - https://github.com/TencentARC/GFPGAN
- ✅ codeformer - https://github.com/sczhou/CodeFormer
@ -283,6 +284,7 @@ docker run -it --gpus all -v $HOME/.cache/huggingface:/root/.cache/huggingface -
- https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion
- https://github.com/lstein/stable-diffusion
- https://github.com/AUTOMATIC1111/stable-diffusion-webui
- https://github.com/blueturtleai/gimp-stable-diffusion
## Further Reading
- Differences between samplers

@ -289,7 +289,7 @@ def imagine(
for x_sample in x_samples:
x_sample = 255.0 * rearrange(
x_sample.cpu().numpy(), "c h w -> h w c"
x_sample.to(torch.float32).cpu().numpy(), "c h w -> h w c"
)
x_sample_8_orig = x_sample.astype(np.uint8)
img = Image.fromarray(x_sample_8_orig)

@ -19,14 +19,14 @@ device_sampler_type_test_cases = {
("k_heun", "0382ef71d9967fefd15676410289ebab"),
],
"cuda": [
("plms", "62e78287e7848e48d45a1b207fb84102"),
("ddim", "164c2a008b100e5fa07d3db2018605bd"),
("k_lms", "450fea507ccfb44b677d30fae9f40a52"),
("k_dpm_2", "901daad7a9e359404d8e3d3f4236c4ce"),
("k_dpm_2_a", "855e80286dfdc89752f6bdd3fdeb1a62"),
("k_euler", "06df9c19d472bfa6530db98be4ea10e8"),
("k_euler_a", "79552628ff77914c8b6870703fe116b5"),
("k_heun", "8ced3578ae25d34da9f4e4b1a20bf416"),
("plms", "0c44d2c8222f519a6700ebae54450435"),
("ddim", "4493ca85c2b24879525eac2b73e5a538"),
("k_lms", "82b38a5638a572d5968422b02e625f66"),
("k_dpm_2", "9df2fcd6256ff68c6cc4a6c603ae8f2e"),
("k_dpm_2_a", "0c5491c1a73094540ed15785f4106bca"),
("k_euler", "c82f628217fab06d8b5d5227827c1d92"),
("k_euler_a", "74f748a8371c2fcec54ecc5dcf1dbb64"),
("k_heun", "9ae586a7a8b10a0a0bf120405e4937e9"),
],
"cpu": [],
}
@ -52,8 +52,8 @@ device_sampler_type_test_cases_img_2_img = {
("ddim", "87d04423f6d03ddfc065cabc62e3909c"),
},
"cuda": {
("plms", "efba8b836b51d262dbf72284844869f8"),
("ddim", "a62878000ad3b581a11dd3fb329dc7d2"),
("plms", "c95f23a7039cf702e2e448f454e27c46"),
("ddim", "ba054c630d9999ccbcc60c9fb687682d"),
},
"cpu": [],
}

Loading…
Cancel
Save