performance: memory management improvements (#210)
- tile mode made more efficient, especially when not being used
- add script to iteratively generate bigger images
parent
a338902ab5
commit
7bdde559cc
@ -0,0 +1,40 @@
|
||||
import torch
|
||||
from torch.cuda import OutOfMemoryError
|
||||
|
||||
from imaginairy import ImaginePrompt, imagine_image_files
|
||||
from imaginairy.utils import get_device
|
||||
|
||||
|
||||
def assess_memory_usage():
    """Probe peak CUDA memory use while generating progressively larger images.

    Starting at 1664x1664, generates one image per size, records the peak
    GPU memory allocated (in GiB) for that size, then grows the size by
    128 px and repeats until the GPU runs out of memory.  Collected
    ``(img_size, max_used)`` pairs are written to
    ``img_size_memory_usage.csv``.

    Raises:
        RuntimeError: if the active device is not CUDA.
    """
    # `assert` is stripped under `python -O`; raise explicitly so the
    # environment check always runs.
    if get_device() != "cuda":
        raise RuntimeError("assess_memory_usage requires a CUDA device")

    img_size = 1664

    # Warm-up generation: one tiny image so one-time allocations
    # (model load, lazy CUDA init) don't contaminate the per-size
    # peak-memory measurements below.
    prompt = ImaginePrompt("strawberries", width=64, height=64, seed=1)
    imagine_image_files([prompt], outdir="outputs")

    datalog = []
    while True:
        # Reset the peak counter so max_memory_allocated() reflects only
        # the generation at the current size.
        torch.cuda.reset_peak_memory_stats()
        prompt = ImaginePrompt(
            "beautiful landscape, Unreal Engine 5, RTX, AAA Game, Detailed 3D Render, Cinema4D",
            width=img_size,
            height=img_size,
            seed=1,
        )
        try:
            imagine_image_files([prompt], outdir="outputs")
        except OutOfMemoryError as e:
            print(f"Out of memory at {img_size}x{img_size} size image.")
            print(e)
            break
        max_used = torch.cuda.max_memory_allocated() / 1024**3  # bytes -> GiB
        datalog.append((img_size, max_used))
        # print() already appends a newline; the original's explicit "\n"
        # (copied from the file-write f-string below) printed a spurious
        # blank line after every row.
        print(f"{img_size},{max_used:.2f}")
        img_size += 128

    with open("img_size_memory_usage.csv", "w", encoding="utf-8") as f:
        f.write("img_size,max_used\n")
        for img_size, max_used in datalog:
            f.write(f"{img_size},{max_used:.2f}\n")
|
||||
|
||||
|
||||
# Entry point: run the memory probe when executed directly as a script.
if __name__ == "__main__":
    assess_memory_usage()
|
Loading…
Reference in New Issue