fix: videogen. track gpu tests

pull/411/head^2
Bryce authored 6 months ago · committed by Bryce Drennan
parent eae4f20ae2
commit 37ecd1e5e0

@@ -274,9 +274,7 @@ def _generate_single_image(
                     "target_width": init_image.width,
                 }
             )
-            comp_image, comp_img_orig = _generate_composition_image(
-                **compose_kwargs
-            )
+            comp_image, comp_img_orig = _generate_composition_image(**compose_kwargs)
             if comp_image is not None:
                 result_images["composition"] = comp_img_orig

@@ -541,7 +541,10 @@ class ImaginePrompt(BaseModel, protected_namespaces=()):
         if v is None:
             v = steps_lookup[info.data["solver_type"]]
-        return int(v)
+        try:
+            return int(v)
+        except (OverflowError, TypeError) as e:
+            raise ValueError("Steps must be an integer") from e

     @model_validator(mode="after")
     def validate_init_image_strength(self):
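Illustrative aside, not part of the diff above: a minimal standalone sketch of why the try/except matters. Inputs that int() rejects with TypeError or OverflowError now surface as a plain ValueError, which pydantic reports as a normal validation error; the helper name below is hypothetical.

    # hypothetical standalone version of the coercion added above
    def _coerce_steps(v):
        try:
            return int(v)
        except (OverflowError, TypeError) as e:
            raise ValueError("Steps must be an integer") from e

    print(_coerce_steps(30.0))       # 30
    # _coerce_steps(float("inf"))    # OverflowError -> ValueError("Steps must be an integer")
    # _coerce_steps([30])            # TypeError -> ValueError("Steps must be an integer")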

@@ -70,7 +70,6 @@ def instantiate_from_config(config: dict) -> Any:
         if config == "__is_unconditional__":
             return None
         raise KeyError("Expected key `target` to instantiate.")
-    assert isinstance(config, dict)
     params = config.get("params", {})
     _cls = get_obj_from_str(config["target"])
     start = time.perf_counter()

@@ -47,6 +47,14 @@ _NAMED_RESOLUTIONS = {k.upper(): v for k, v in _NAMED_RESOLUTIONS.items()}


+def normalize_image_size(resolution: str | int | tuple[int, int]) -> tuple[int, int]:
+    size = _normalize_image_size(resolution)
+    if any(s <= 0 for s in size):
+        msg = f"Invalid resolution: {resolution!r}"
+        raise ValueError(msg)
+    return size
+
+
 def _normalize_image_size(resolution: str | int | tuple[int, int]) -> tuple[int, int]:
     match resolution:
         case (int(), int()):
             return resolution  # type: ignore
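Illustrative aside, not part of the diff above: hedged usage of the new wrapper. The module path is an assumption for illustration; the behavior for string or int inputs depends on the rest of _normalize_image_size, which is not shown here. A well-formed (width, height) tuple passes through, while a non-positive dimension now raises ValueError early instead of being passed downstream.

    # module path assumed for illustration
    from imaginairy.utils.named_resolutions import normalize_image_size

    print(normalize_image_size((512, 768)))   # (512, 768)
    try:
        normalize_image_size((0, 512))
    except ValueError as err:
        print(err)                            # Invalid resolution: (0, 512)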

@@ -70,15 +70,15 @@ def generate_video(
     seed = default(seed, random.randint(0, 1000000))
     output_fps = default(output_fps, fps_id)

-    video_model_config = config.video_models.get(model_name, None)
+    video_model_config = config.MODEL_WEIGHT_CONFIG_LOOKUP.get(model_name, None)
     if video_model_config is None:
         msg = f"Version {model_name} does not exist."
         raise ValueError(msg)

-    num_frames = default(num_frames, video_model_config["default_frames"])
-    num_steps = default(num_steps, video_model_config["default_steps"])
+    num_frames = default(num_frames, video_model_config.defaults.get("frames", 12))
+    num_steps = default(num_steps, video_model_config.defaults.get("steps", 30))
     output_folder = default(output_folder, "outputs/video/")
-    video_config_path = f"{PKG_ROOT}/{video_model_config['config_path']}"
+    video_config_path = f"{PKG_ROOT}/{video_model_config.architecture.config_path}"

     logger.info(
         f"Generating a {num_frames} frame video from {input_path}. Device:{device} seed:{seed}"
@@ -88,7 +88,7 @@ def generate_video(
         device="cpu",
         num_frames=num_frames,
         num_steps=num_steps,
-        weights_url=video_model_config["weights_location"],
+        weights_url=video_model_config.weights_location,
     )

     torch.manual_seed(seed)
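Illustrative aside, not part of the diff above: a hedged sketch of the attribute-style model-weight config the rewritten lookups assume. The class and field names below are inferred only from the accesses in the diff (weights_location, architecture.config_path, defaults), not taken from imaginairy.config, and all values are placeholders.

    from dataclasses import dataclass, field

    @dataclass
    class VideoArchitecture:              # hypothetical stand-in
        config_path: str

    @dataclass
    class VideoWeightsConfig:             # hypothetical stand-in
        weights_location: str
        architecture: VideoArchitecture
        defaults: dict = field(default_factory=dict)

    cfg = VideoWeightsConfig(
        weights_location="https://example.com/svd.safetensors",         # placeholder
        architecture=VideoArchitecture(config_path="configs/svd.yaml"),  # placeholder
        defaults={"frames": 14, "steps": 25},                             # placeholder values
    )
    num_frames = cfg.defaults.get("frames", 12)   # mirrors the lookup in generate_video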

@@ -1,4 +1,6 @@
 import contextlib
+import csv
+import gc
 import logging
 import os
 import sys
@@ -7,6 +9,7 @@ from shutil import rmtree

 import pytest
 import responses
+import torch.cuda
 from tqdm import tqdm
 from urllib3 import HTTPConnectionPool
@@ -128,13 +131,42 @@ def default_model_loaded():
     next(imagine(prompt))


+cuda_tests_node_ids = []
+cuda_test_tracker_filepath = f"{TESTS_FOLDER}/data/cuda-tests.csv"
+
+
+@pytest.fixture(autouse=True)
+def detect_cuda_tests(request):
+    if torch.cuda.is_available():
+        torch.cuda.reset_peak_memory_stats()
+        start_memory = torch.cuda.max_memory_allocated()
+
+    yield
+
+    if torch.cuda.is_available():
+        end_memory = torch.cuda.max_memory_allocated()
+        memory_diff = end_memory - start_memory
+        if memory_diff > 0:
+            test_name = request.node.name
+            print(f"Test {test_name} used {memory_diff} bytes of GPU memory")
+            cuda_tests_node_ids.append(test_name)
+
+    torch.cuda.empty_cache()
+    gc.collect()
+
+
 @pytest.hookimpl()
 def pytest_collection_modifyitems(config, items):
     """Only select a subset of tests to run, based on the --subset option."""
+    node_ids_to_mark = read_stored_cuda_test_nodes()
+    for item in items:
+        if item.nodeid in node_ids_to_mark:
+            item.add_marker(pytest.mark.gputest)
+
     filtered_node_ids = set()
     node_ids = [f.nodeid for f in items]
     node_ids.sort()
     subset = config.getoption("--subset")
     if subset:
         partition_no, total_partitions = subset.split("/")
         partition_no, total_partitions = int(partition_no), int(total_partitions)
@@ -168,3 +200,26 @@ def pytest_sessionstart(session):
     if "nvidia_smi" in debug_info:
         print(debug_info["nvidia_smi"])
+
+
+def pytest_sessionfinish(session, exitstatus):
+    existing_node_ids = read_stored_cuda_test_nodes()
+    updated_node_ids = existing_node_ids.union(set(cuda_tests_node_ids))
+
+    # Write updated, sorted list of node IDs to file
+    with open(cuda_test_tracker_filepath, "w", newline="") as file:
+        writer = csv.writer(file)
+        for node_id in sorted(updated_node_ids):
+            writer.writerow([node_id])
+
+
+def read_stored_cuda_test_nodes():
+    node_ids = set()
+    try:
+        with open(cuda_test_tracker_filepath, newline="") as file:
+            reader = csv.reader(file)
+            for row in reader:
+                node_ids.add(row[0])
+    except FileNotFoundError:
+        pass  # File does not exist yet
+    return node_ids
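Illustrative aside, not part of the diff above: a minimal standalone sketch of the peak-memory check that the detect_cuda_tests fixture relies on, assuming a CUDA device is available. Any positive difference in peak allocated memory across the block marks it as GPU work.

    import torch

    torch.cuda.reset_peak_memory_stats()
    baseline = torch.cuda.max_memory_allocated()

    x = torch.randn(1024, 1024, device="cuda")  # stand-in for the work a test does
    y = x @ x

    used = torch.cuda.max_memory_allocated() - baseline
    print(f"peak GPU memory used during the block: {used} bytes")  # > 0 means "GPU test"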

@@ -0,0 +1,51 @@
+test_cache_ordering
+test_clip_masking
+test_clip_text_comparison
+test_cliptext_inpainting_pearl_doctor
+test_colorize_cmd
+test_control_images[depth-create_depth_map]
+test_control_images[hed-create_hed_edges]
+test_control_images[normal-create_normal_map]
+test_control_images[openpose-create_pose_map]
+test_controlnet[canny]
+test_controlnet[colorize]
+test_controlnet[depth]
+test_controlnet[edit]
+test_controlnet[hed]
+test_controlnet[inpaint]
+test_controlnet[normal]
+test_controlnet[openpose]
+test_controlnet[shuffle]
+test_describe_cmd
+test_describe_picture
+test_edit_cmd
+test_edit_demo
+test_fix_faces
+test_get_existing_move_to_gpu
+test_imagine[ddim]
+test_imagine[dpmpp]
+test_imagine_cmd
+test_img2img_beach_to_sunset[ddim]
+test_img2img_beach_to_sunset[dpmpp]
+test_img2img_low_noise[ddim]
+test_img2img_low_noise[dpmpp]
+test_img_to_file
+test_img_to_img_from_url_cats[ddim]
+test_img_to_img_from_url_cats[dpmpp]
+test_img_to_img_fruit_2_gold[ddim-0.05]
+test_img_to_img_fruit_2_gold[ddim-0.2]
+test_img_to_img_fruit_2_gold[ddim-0]
+test_img_to_img_fruit_2_gold[ddim-1]
+test_img_to_img_fruit_2_gold[dpmpp-0.05]
+test_img_to_img_fruit_2_gold[dpmpp-0.2]
+test_img_to_img_fruit_2_gold[dpmpp-0]
+test_img_to_img_fruit_2_gold[dpmpp-1]
+test_img_to_img_fruit_2_gold_repeat
+test_inpainting_bench
+test_large_image
+test_model_versions[SD-1.5]
+test_nonlinearity
+test_outpainting_outpaint
+test_set_cpu_full
+test_text_conditioning
+test_tile_mode

@@ -9,3 +9,5 @@ filterwarnings =
 plugins = pydantic.mypy
 exclude = ^(downloads|dist|other|testing_support|imaginairy/vendored|imaginairy/modules/sgm)/.*
 ignore_missing_imports = True
+markers =
+    gputest: uses the gpu
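Illustrative aside, not part of the diff above: once the gputest marker is registered, the tracked GPU tests can be selected or skipped with pytest's standard -m marker expression. A hedged example using pytest.main:

    import pytest

    # run only the tests recorded as GPU users
    pytest.main(["-m", "gputest"])

    # or skip them, e.g. on a CPU-only CI runner
    pytest.main(["-m", "not gputest"])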