fix: leave sample in float32 for all final processing

Bryce 2022-09-22 10:33:35 -07:00
parent 09f00f9652
commit cc014685bd
2 changed files with 14 additions and 3 deletions


@@ -245,11 +245,12 @@ docker run -it --gpus all -v $HOME/.cache/huggingface:/root/.cache/huggingface -
 - ✅ gfpgan - https://github.com/TencentARC/GFPGAN
 - ✅ codeformer - https://github.com/sczhou/CodeFormer
 - ✅ image describe feature -
 - https://github.com/salesforce/BLIP
 - https://github.com/rmokady/CLIP_prefix_caption
 - https://github.com/pharmapsychotic/clip-interrogator (blip + clip)
 - https://github.com/KaiyangZhou/CoOp
 - outpainting
+- https://github.com/parlance-zz/g-diffuser-bot/search?q=noise&type=issues
 - ✅ inpainting
 - https://github.com/andreas128/RePaint
 - img2img but keeps img stable
@@ -259,6 +260,9 @@ docker run -it --gpus all -v $HOME/.cache/huggingface:/root/.cache/huggingface -
 - CPU support
 - ✅ img2img for plms
 - img2img for kdiff functions
+- image masking
+- https://boolean-parser.readthedocs.io/en/latest/index.html
+- https://github.com/facebookresearch/detectron2
 - images as actual prompts instead of just init images
 - requires model fine-tuning since SD1.4 expects 77x768 text encoding input
 - https://twitter.com/Buntworthy/status/1566744186153484288
@@ -266,12 +270,16 @@ docker run -it --gpus all -v $HOME/.cache/huggingface:/root/.cache/huggingface -
 - https://github.com/LambdaLabsML/lambda-diffusers
 - https://www.reddit.com/r/MachineLearning/comments/x6k5bm/n_stable_diffusion_image_variations_released/
 -
+- animations
+- https://github.com/francislabountyjr/stable-diffusion/blob/main/inferencing_notebook.ipynb
+- https://www.youtube.com/watch?v=E7aAFEhdngI
 - cross-attention control:
 - https://github.com/bloc97/CrossAttentionControl/blob/main/CrossAttention_Release_NoImages.ipynb
 - guided generation
 - https://colab.research.google.com/drive/1dlgggNa5Mz8sEAGU0wFCHhGLFooW_pf1#scrollTo=UDeXQKbPTdZI
 - https://colab.research.google.com/github/aicrumb/doohickey/blob/main/Doohickey_Diffusion.ipynb#scrollTo=PytCwKXCmPid
 - https://github.com/mlfoundations/open_clip
+- https://github.com/openai/guided-diffusion
 - ✅ tiling
 - output show-work videos
 - image variations https://github.com/lstein/stable-diffusion/blob/main/VARIATIONS.md
@@ -279,6 +287,7 @@ docker run -it --gpus all -v $HOME/.cache/huggingface:/root/.cache/huggingface -
 - https://www.reddit.com/r/StableDiffusion/comments/xbwb5y/how_to_run_textual_inversion_locally_train_your/
 - https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb#scrollTo=50JuJUM8EG1h
 - https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion_textual_inversion_library_navigator.ipynb
+- https://github.com/Jack000/glid-3-xl-stable
 - fix saturation at high CFG https://www.reddit.com/r/StableDiffusion/comments/xalo78/fixing_excessive_contrastsaturation_resulting/
 - https://www.reddit.com/r/StableDiffusion/comments/xbrrgt/a_rundown_of_twenty_new_methodsoptions_added_to/
@@ -293,3 +302,4 @@ docker run -it --gpus all -v $HOME/.cache/huggingface:/root/.cache/huggingface -
 - https://www.reddit.com/r/StableDiffusion/comments/xbeyw3/can_anyone_offer_a_little_guidance_on_the/
 - https://www.reddit.com/r/bigsleep/comments/xb5cat/wiskkeys_lists_of_texttoimage_systems_and_related/
 - https://huggingface.co/blog/annotated-diffusion
+- https://huggingface.co/blog/assets/78_annotated-diffusion/unet_architecture.jpg


@@ -288,8 +288,9 @@ def imagine(
     x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
     for x_sample in x_samples:
+        x_sample = x_sample.to(torch.float32)
         x_sample = 255.0 * rearrange(
-            x_sample.to(torch.float32).cpu().numpy(), "c h w -> h w c"
+            x_sample.cpu().numpy(), "c h w -> h w c"
         )
         x_sample_8_orig = x_sample.astype(np.uint8)
         img = Image.fromarray(x_sample_8_orig)
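
For context, here is a minimal, self-contained sketch of the post-processing path this change adjusts: the sample is cast to float32 once, before any numpy/PIL work, rather than inside the rearrange call. The helper name samples_to_images and the assumption that samples arrive as an (N, C, H, W) tensor scaled to [-1, 1] (possibly float16 on the GPU) are illustrative, not taken from the repository.

# Illustrative sketch -- the function name and calling convention are assumptions.
import numpy as np
import torch
from einops import rearrange
from PIL import Image


def samples_to_images(x_samples):
    """Convert decoded samples in [-1, 1] (possibly float16, on GPU) to PIL images."""
    # Rescale from [-1, 1] to [0, 1] and clamp, mirroring the line shown in the diff.
    x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
    images = []
    for x_sample in x_samples:
        # Cast once, up front, so every later step (rearrange, scaling,
        # uint8 conversion) operates on a float32 array.
        x_sample = x_sample.to(torch.float32)
        x_sample = 255.0 * rearrange(x_sample.cpu().numpy(), "c h w -> h w c")
        images.append(Image.fromarray(x_sample.astype(np.uint8)))
    return images

Doing the cast up front means none of the final processing steps ever see a half-precision array, which matches the intent stated in the commit title.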