mirror of https://github.com/brycedrennan/imaginAIry (synced 2024-11-05 12:00:15 +00:00)
fix: more thorough cleaning of memory when switching models
also cleans up some test failures
This commit is contained in:
parent ccf9749df5
commit 1f7403155e
@@ -212,7 +212,7 @@ imagine_image_files(prompts, outdir="./my-art")
 
 ## Requirements
 - ~10 gb space for models to download
-- A decent computer with either a CUDA supported graphics card or M1 processor.
+- A decent computer with either a CUDA supported graphics card (and CUDA installed) or an M1 processor.
 - Python installed. Preferably Python 3.10. (not conda)
 - For macOS [rust](https://www.rust-lang.org/tools/install) and setuptools-rust must be installed to compile the `tokenizer` library.
   They can be installed via: `curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh` and `pip install setuptools-rust`
@@ -230,6 +230,10 @@ docker run -it --gpus all -v $HOME/.cache/huggingface:/root/.cache/huggingface -
 [Example Colab](https://colab.research.google.com/drive/1rOvQNs0Cmn_yU1bKWjCOHzGVDgZkaTtO?usp=sharing)
 
 ## ChangeLog
+**7.1.1**
+- fix: memory/speed regression introduced in 6.1.0
+- fix: model switching now clears memory better, thus avoiding out of memory errors
+
 **7.1.0**
 - feature: 🎉 Stable Diffusion 2.1. Generated people are no longer (completely) distorted.
   Use with `--model SD-2.1` or `--model SD-2.0-v`
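To illustrate the model switching that the 7.1.1 fixes target, here is a hedged sketch built from the `imagine_image_files(prompts, outdir="./my-art")` call in the hunk header and the `--model` names above. The `ImaginePrompt(model=...)` keyword and the "SD-1.5" model name are assumptions and may differ between library versions.

```python
from imaginairy import ImaginePrompt, imagine_image_files

# Two prompts that request different model versions: generating the second forces the
# first model to be unloaded, which is the code path the memory-cleanup fix improves.
# The "SD-1.5" name and the `model` keyword are assumed here, not taken from this diff.
prompts = [
    ImaginePrompt("a cozy mountain cabin", model="SD-1.5"),
    ImaginePrompt("a cozy mountain cabin", model="SD-2.1"),
]
imagine_image_files(prompts, outdir="./my-art")
```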
@@ -63,8 +63,14 @@ class MemoryAwareModel:
         return getattr(self._model, key)
 
     def unload_model(self):
-        del self._model
-        self._model = None
+        if self._model is not None:
+            del self._model.cond_stage_model
+            del self._model.first_stage_model
+            del self._model.model
+            del self._model
+            self._model = None
+        if get_device() == "cuda":
+            torch.cuda.empty_cache()
         gc.collect()
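For context, a minimal self-contained sketch of the cleanup pattern this hunk introduces. The `ModelSlot` class and its method names are hypothetical stand-ins, not the project's actual `MemoryAwareModel`; the point is the ordering the diff establishes: drop the sub-module references, drop the model itself, empty the CUDA cache, then run the garbage collector, so the next model can be loaded without running out of GPU memory.

```python
import gc

import torch


class ModelSlot:
    """Hypothetical holder that keeps at most one large model resident."""

    def __init__(self):
        self._model = None

    def set_model(self, model):
        self.unload_model()  # free the previous model before swapping in the new one
        self._model = model

    def unload_model(self):
        if self._model is not None:
            # Delete each child module explicitly so no lingering attribute keeps its
            # weights alive, then drop the top-level object itself.
            for name, _child in list(self._model.named_children()):
                delattr(self._model, name)
            del self._model
            self._model = None
        if torch.cuda.is_available():
            torch.cuda.empty_cache()  # ask PyTorch to release its cached CUDA blocks
        gc.collect()  # collect anything still held only through reference cycles
```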
Binary file not shown (image updated; size before and after: 1.0 MiB).
@@ -54,7 +54,7 @@ def test_model_versions(filename_base_for_orig_outputs, model_version):
         )
     )
 
-    threshold = 10000
+    threshold = 14000
 
     for i, result in enumerate(imagine(prompts)):
         img_path = f"{filename_base_for_orig_outputs}_{result.prompt.prompt_text}_{result.prompt.model}.png"
@@ -157,7 +157,7 @@ def test_img_to_img_fruit_2_gold(
         "k_dpm_2_a": 26000,
         "k_dpm_adaptive": 13000,
     }
-    threshold = threshold_lookup.get(sampler_type, 10000)
+    threshold = threshold_lookup.get(sampler_type, 11000)
 
     pillow_fit_image_within(img).save(f"{filename_base_for_orig_outputs}__orig.jpg")
     img_path = f"{filename_base_for_outputs}.png"
@@ -21,4 +21,5 @@ def test_text_conditioning():
         "263e5ee7d2be087d816e094b80ffc546", # mps
         "41818051d7c469fc57d0a940c9d24d82",
         "b5f29fb26bceb60dcde19ec7ec5a0711",
+        "88245bdb2a83b49092407fc5b4c473ab", # ubuntu, torch 1.12.1 cu116
     }
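The added hash reflects that the same text conditioning can serialize to slightly different bytes on different backends and torch builds. A hypothetical sketch of this allowlist pattern follows; the `embedding_hash` helper and the use of MD5 are assumptions, not the project's actual implementation.

```python
import hashlib

import torch


def embedding_hash(embedding: torch.Tensor) -> str:
    # Hash the raw tensor bytes. Tiny numerical differences between CUDA, MPS, and CPU
    # (or between torch versions) change the digest, hence the per-platform allowlist.
    return hashlib.md5(embedding.detach().cpu().numpy().tobytes()).hexdigest()


KNOWN_GOOD_HASHES = {
    "263e5ee7d2be087d816e094b80ffc546",  # mps
    "88245bdb2a83b49092407fc5b4c473ab",  # ubuntu, torch 1.12.1 cu116
}

# In a test: assert embedding_hash(conditioning) in KNOWN_GOOD_HASHES
```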
@@ -60,7 +60,7 @@ def test_clip_masking(filename_base_for_outputs):
 
     result = next(imagine(prompt))
     img_path = f"{filename_base_for_outputs}.png"
-    assert_image_similar_to_expectation(result.img, img_path=img_path, threshold=600)
+    assert_image_similar_to_expectation(result.img, img_path=img_path, threshold=1000)
 
 
 boolean_mask_test_cases = [
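The raised thresholds in these test hunks (10000 → 14000, 10000 → 11000, 600 → 1000) all loosen an image-similarity check against stored reference images. Below is a hypothetical sketch of how such a check could work; the real `assert_image_similar_to_expectation` may use a different metric and threshold scale.

```python
import numpy as np
from PIL import Image


def assert_image_similar_to_reference(img, reference_path, threshold):
    # Compare a generated PIL image against a stored reference and fail if the summed
    # per-channel difference exceeds `threshold`. A larger threshold tolerates more
    # drift, e.g. from different GPUs, drivers, or torch versions.
    expected = np.asarray(Image.open(reference_path).convert("RGB"), dtype=np.float32) / 255.0
    actual = np.asarray(img.convert("RGB"), dtype=np.float32) / 255.0
    assert expected.shape == actual.shape, "reference and generated image sizes differ"
    diff = np.abs(expected - actual).sum()  # ~786k terms for a 512x512 RGB image
    assert diff < threshold, f"images differ by {diff:.1f} (threshold {threshold})"
```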