style: use latest ruff

pull/478/head
Authored by Bryce 2 months ago; committed by Bryce Drennan
parent a8acb451c5
commit 9cdacd454f

@@ -1,4 +1,5 @@
 """Pythonic AI generation of images and videos"""
+
 import os

 # tells pytorch to allow MPS usage (for Mac M1 compatibility)
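Note: several of the one-line hunks in this commit, like the one above, only insert a blank line between the module docstring and the first import. That appears to match the blank-line-after-docstring style the ruff formatter enforces as of its 0.3 series (the pin below moves ruff to 0.3.3). A minimal sketch of the resulting layout:

"""Example module docstring."""

import os  # the formatter now wants exactly one blank line above the first import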

@@ -271,9 +271,9 @@ def generate_single_image(
                     fit_width=prompt.width,
                     fit_height=prompt.height,
                 )
-                result_images[
-                    f"control-{control_input.mode}"
-                ] = control_image_disp
+                result_images[f"control-{control_input.mode}"] = (
+                    control_image_disp
+                )
                 controlnets.append((controlnet, control_image_t))

     for controlnet, control_image_t in controlnets:
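This hunk shows the other recurring change in the commit: for an over-long subscripted assignment, the newer formatter keeps the subscript on one line and parenthesizes the right-hand side instead of splitting the subscript. A runnable sketch with stand-in names (not the real objects from this file):

result_images = {}
control_image_disp = "placeholder image"

# old style: the subscript itself was split across lines
result_images[
    "control-canny"
] = control_image_disp

# new style: intact subscript, value wrapped in parentheses
result_images["control-canny"] = (
    control_image_disp
)

assert result_images["control-canny"] == control_image_disp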

@@ -5,6 +5,7 @@ If we don't do this then the scripts will be slow to start up because of
 pkg_resources.require() which is called by setuptools to ensure the
 "correct" version of the package is installed.
 """
+
 import os

@@ -16,6 +16,7 @@ Examples:
     fruit{-0.1} OR bowl
 """
+
 import operator
 from abc import ABC
 from typing import ClassVar

@@ -54,9 +54,7 @@ def transfer_audio(sourceVideo, targetVideo):
         f'ffmpeg -y -i "{sourceVideo}" -c:a aac -b:a 160k -vn {tempAudioFileName}'
     )
     os.system(
-        'ffmpeg -y -i "{}" -i {} -c copy "{}"'.format(
-            targetNoAudio, tempAudioFileName, targetVideo
-        )
+        f'ffmpeg -y -i "{targetNoAudio}" -i {tempAudioFileName} -c copy "{targetVideo}"'
     )
     if (
         os.path.getsize(targetVideo) == 0
@@ -159,9 +157,7 @@ def interpolate_video_file(
     fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v")  # type: ignore
     video_path_wo_ext, ext = os.path.splitext(video_path)
     print(
-        "{}.{}, {} frames in total, {}FPS to {}FPS".format(
-            video_path_wo_ext, output_extension, tot_frame, fps, target_fps
-        )
+        f"{video_path_wo_ext}.{output_extension}, {tot_frame} frames in total, {fps}FPS to {target_fps}FPS"
    )
     if png_out is False and fpsNotAssigned is True:
         print("The audio will be merged after interpolation process")

@@ -1,4 +1,5 @@
 """Functions to create hint images for controlnet."""
+
 from typing import TYPE_CHECKING, Callable, Dict, Union

 if TYPE_CHECKING:

@@ -7,6 +7,7 @@ https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bb
 https://github.com/CompVis/taming-transformers
 -- merci.
 """
+
 import itertools
 import logging
 from contextlib import contextmanager, nullcontext
@@ -1602,9 +1603,9 @@ class LatentDiffusion(DDPM):
                     unconditional_conditioning=uc,
                 )
                 x_samples_cfg = self.decode_first_stage(samples_cfg)
-                log[
-                    f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"
-                ] = x_samples_cfg
+                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = (
+                    x_samples_cfg
+                )

         if inpaint:
             # make a simple center square
@@ -1921,9 +1922,9 @@ class LatentFinetuneDiffusion(LatentDiffusion):
                     unconditional_conditioning=uc_full,
                 )
                 x_samples_cfg = self.decode_first_stage(samples_cfg)
-                log[
-                    f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"
-                ] = x_samples_cfg
+                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = (
+                    x_samples_cfg
+                )

         return log

@@ -9,7 +9,6 @@
 #
 # thanks!
-
 import logging
 import math

@@ -271,9 +271,7 @@ def load_model(
     if "openvino" not in model_type:
         print(
-            "Model loaded, number of parameters = {:.0f}M".format(
-                sum(p.numel() for p in model.parameters()) / 1e6
-            )
+            f"Model loaded, number of parameters = {sum(p.numel() for p in model.parameters()) / 1e6:.0f}M"
         )
     else:
         print("Model loaded, optimized with OpenVINO")

@@ -145,15 +145,9 @@ class DPT(BaseModel):
 class DPTDepthModel(DPT):
     def __init__(self, path=None, non_negative=True, **kwargs):
-        features = kwargs["features"] if "features" in kwargs else 256
-        head_features_1 = (
-            kwargs["head_features_1"] if "head_features_1" in kwargs else features
-        )
-        head_features_2 = (
-            kwargs["head_features_2"] if "head_features_2" in kwargs else 32
-        )
-        kwargs.pop("head_features_1", None)
-        kwargs.pop("head_features_2", None)
+        features = kwargs.pop("features", 256)
+        head_features_1 = kwargs.pop("head_features_1", features)
+        head_features_2 = kwargs.pop("head_features_2", 32)

         head = nn.Sequential(
             nn.Conv2d(
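This hunk replaces the lookup-then-pop dance with dict.pop(key, default), which reads and removes the key in one step. One subtle difference worth flagging: the old code never popped "features", so that key used to stay in kwargs; after this change it is consumed too. A small sketch:

kwargs = {"features": 128, "head_features_1": 64}

features = kwargs.pop("features", 256)                     # 128, key removed
head_features_1 = kwargs.pop("head_features_1", features)  # 64, key removed
head_features_2 = kwargs.pop("head_features_2", 32)        # absent, default 32

assert (features, head_features_1, head_features_2) == (128, 64, 32)
assert kwargs == {}  # everything consumed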

@@ -3,6 +3,7 @@ MidashNet: Network for monocular depth estimation trained by mixing several data
 This file contains code that is adapted from
 https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py.
 """
+
 import torch
 from torch import nn

@@ -3,6 +3,7 @@ MidashNet: Network for monocular depth estimation trained by mixing several data
 This file contains code that is adapted from
 https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py.
 """
+
 import torch
 from torch import nn

@@ -1,4 +1,5 @@
 """Utils for monoDepth."""
+
 import re
 import sys

@@ -1,8 +1,7 @@
 """
 Partially ported from https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py
-Partially ported from https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/sampling.py
 """

 from typing import Dict, Union

 import torch

@@ -30,7 +30,7 @@ class OpenAIWrapper(IdentityWrapper):
         return self.diffusion_model(
             x,
             timesteps=t,
-            context=c.get("crossattn", None),
-            y=c.get("vector", None),
+            context=c.get("crossattn"),
+            y=c.get("vector"),
             **kwargs,
         )
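dict.get(key) already returns None when the key is missing, so the explicit None default was redundant (ruff flags this via the flake8-simplify SIM rules it has enabled). A one-line check:

c = {"crossattn": "conditioning"}
assert c.get("vector") is None and c.get("vector") == c.get("vector", None)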

@@ -3,6 +3,7 @@ Wrapper for instruct pix2pix model.
 modified from https://github.com/timothybrooks/instruct-pix2pix/blob/main/edit_cli.py
 """
+
 import torch
 from einops import einops
 from torch import nn

@@ -1,4 +1,5 @@
 """Functions for creating animations from images."""
+
 import logging
 import os.path
 from typing import TYPE_CHECKING, List, Sequence

@@ -159,7 +159,7 @@ def add_tiles(tiles, base_img, tile_coords, tile_size, overlap):
                 t += 1
             column += 1
-        row += 1
+        row += 1  # noqa
         # if row >= 2:
         #     exit()
         column = 0
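For readers unfamiliar with the marker: a `# noqa` comment tells ruff (like flake8 before it) to skip lint findings on that single line; presumably the newer ruff started flagging something on this line. The bare form suppresses every finding on the line it ends, for example:

# E401 (multiple imports on one line) would normally fire here; noqa silences it
import os, sys  # noqa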

@@ -9,6 +9,7 @@ OpenCV B, G, R, A 0-255 np.ndarray
 Torch (B), C, H, W R, G, B -1.0-1.0 torch.Tensor
 """
+
 from typing import Sequence

 import numpy as np

@@ -1,4 +1,5 @@
 """Code for defining package root path"""
+
 import os

 PKG_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

@@ -118,9 +118,9 @@ class DoubleTextEncoderTranslator:
             new_key = f"Parallel.CLIPTextEncoderL.{k}"
             new_sd[new_key] = text_encoder_l_weights.pop(k)

-        new_sd[
-            "Parallel.TextEncoderWithPooling.Parallel.Chain.Linear.weight"
-        ] = text_encoder_g_weights.pop("Linear.weight")
+        new_sd["Parallel.TextEncoderWithPooling.Parallel.Chain.Linear.weight"] = (
+            text_encoder_g_weights.pop("Linear.weight")
+        )
         for k in list(text_encoder_g_weights.keys()):
             if k.startswith("TransformerLayer_32"):
                 new_key = f"Parallel.TextEncoderWithPooling.Parallel.Chain.CLIPTextEncoderG.TransformerLayer{k[19:]}"

@@ -5,6 +5,7 @@ mkdocs-click
 mkdocstrings[python]
 mypy
 ruff
+pip-tools
 pytest
 pytest-asyncio
 pytest-randomly

@@ -8,13 +8,15 @@ annotated-types==0.6.0
     # via pydantic
 antlr4-python3-runtime==4.9.3
     # via omegaconf
-anyio==4.2.0
+anyio==4.3.0
     # via
     #   httpx
     #   starlette
 babel==2.14.0
     # via mkdocs-material
-certifi==2023.11.17
+build==1.1.1
+    # via pip-tools
+certifi==2024.2.2
     # via
     #   httpcore
     #   httpx
@@ -29,6 +31,7 @@ click==8.1.7
     #   mkdocs
     #   mkdocs-click
     #   mkdocstrings
+    #   pip-tools
     #   uvicorn
 click-help-colors==0.9.4
     # via imaginAIry (setup.py)
@@ -38,9 +41,9 @@ colorama==0.4.6
     # via
     #   griffe
     #   mkdocs-material
-coverage==7.4.0
+coverage==7.4.4
     # via -r requirements-dev.in
-diffusers==0.25.0
+diffusers==0.27.0
     # via imaginAIry (setup.py)
 einops==0.7.0
     # via imaginAIry (setup.py)
@@ -48,7 +51,7 @@ exceptiongroup==1.2.0
     # via
     #   anyio
     #   pytest
-fastapi==0.108.0
+fastapi==0.110.0
     # via imaginAIry (setup.py)
 filelock==3.13.1
     # via
@@ -56,7 +59,7 @@ filelock==3.13.1
     #   huggingface-hub
     #   torch
     #   transformers
-fsspec==2023.12.2
+fsspec==2024.2.0
     # via
     #   huggingface-hub
     #   torch
@@ -66,17 +69,17 @@ ftfy==6.1.3
     #   open-clip-torch
 ghp-import==2.1.0
     # via mkdocs
-griffe==0.38.1
+griffe==0.42.0
     # via mkdocstrings-python
 h11==0.14.0
     # via
     #   httpcore
     #   uvicorn
-httpcore==1.0.2
+httpcore==1.0.4
     # via httpx
-httpx==0.26.0
+httpx==0.27.0
     # via -r requirements-dev.in
-huggingface-hub==0.20.2
+huggingface-hub==0.21.4
     # via
     #   diffusers
     #   open-clip-torch
@@ -88,32 +91,36 @@ idna==3.6
     #   anyio
     #   httpx
     #   requests
-importlib-metadata==7.0.1
+importlib-metadata==7.0.2
     # via diffusers
 iniconfig==2.0.0
     # via pytest
-jaxtyping==0.2.25
+jaxtyping==0.2.28
     # via imaginAIry (setup.py)
-jinja2==3.1.2
+jinja2==3.1.3
     # via
     #   mkdocs
     #   mkdocs-material
     #   mkdocstrings
     #   torch
-kornia==0.7.1
+kornia==0.7.2
     # via imaginAIry (setup.py)
-markdown==3.5.1
+kornia-rs==0.1.1
+    # via kornia
+markdown==3.5.2
     # via
     #   mkdocs
+    #   mkdocs-autorefs
     #   mkdocs-click
     #   mkdocs-material
     #   mkdocstrings
     #   mkdocstrings-python
     #   pymdown-extensions
-markupsafe==2.1.3
+markupsafe==2.1.5
     # via
     #   jinja2
     #   mkdocs
+    #   mkdocs-autorefs
     #   mkdocstrings
 mergedeep==1.3.4
     # via mkdocs
@@ -122,23 +129,23 @@ mkdocs==1.5.3
     #   mkdocs-autorefs
     #   mkdocs-material
     #   mkdocstrings
-mkdocs-autorefs==0.5.0
+mkdocs-autorefs==1.0.1
     # via mkdocstrings
 mkdocs-click==0.8.1
     # via -r requirements-dev.in
-mkdocs-material==9.5.3
+mkdocs-material==9.5.13
     # via -r requirements-dev.in
 mkdocs-material-extensions==1.3.1
     # via mkdocs-material
-mkdocstrings[python]==0.24.0
+mkdocstrings[python]==0.24.1
     # via
     #   -r requirements-dev.in
     #   mkdocstrings-python
-mkdocstrings-python==1.7.5
+mkdocstrings-python==1.9.0
     # via mkdocstrings
 mpmath==1.3.0
     # via sympy
-mypy==1.8.0
+mypy==1.9.0
     # via -r requirements-dev.in
 mypy-extensions==1.0.0
     # via mypy
@@ -156,12 +163,13 @@ numpy==1.24.4
     #   transformers
 omegaconf==2.3.0
     # via imaginAIry (setup.py)
-open-clip-torch==2.23.0
+open-clip-torch==2.24.0
     # via imaginAIry (setup.py)
 opencv-python==4.9.0.80
     # via imaginAIry (setup.py)
-packaging==23.2
+packaging==24.0
     # via
+    #   build
     #   huggingface-hub
     #   kornia
     #   mkdocs
@@ -177,45 +185,51 @@ pillow==10.2.0
     #   diffusers
     #   imaginAIry (setup.py)
     #   torchvision
-platformdirs==4.1.0
+pip-tools==7.4.1
+    # via -r requirements-dev.in
+platformdirs==4.2.0
     # via
     #   mkdocs
     #   mkdocstrings
-pluggy==1.3.0
+pluggy==1.4.0
     # via pytest
-protobuf==4.25.1
+protobuf==5.26.0
     # via
     #   imaginAIry (setup.py)
     #   open-clip-torch
-psutil==5.9.7
+psutil==5.9.8
     # via imaginAIry (setup.py)
-pydantic==2.5.3
+pydantic==2.6.4
     # via
     #   fastapi
     #   imaginAIry (setup.py)
-pydantic-core==2.14.6
+pydantic-core==2.16.3
     # via pydantic
 pygments==2.17.2
     # via mkdocs-material
-pymdown-extensions==10.7
+pymdown-extensions==10.7.1
     # via
     #   mkdocs-material
     #   mkdocstrings
-pyparsing==3.1.1
+pyparsing==3.1.2
     # via imaginAIry (setup.py)
-pytest==7.4.4
+pyproject-hooks==1.0.0
+    # via
+    #   build
+    #   pip-tools
+pytest==8.1.1
     # via
     #   -r requirements-dev.in
     #   pytest-asyncio
     #   pytest-randomly
     #   pytest-sugar
-pytest-asyncio==0.23.3
+pytest-asyncio==0.23.5.post1
     # via -r requirements-dev.in
 pytest-randomly==3.15.0
     # via -r requirements-dev.in
-pytest-sugar==0.9.7
+pytest-sugar==1.0.0
     # via -r requirements-dev.in
-python-dateutil==2.8.2
+python-dateutil==2.9.0.post0
     # via ghp-import
 pyyaml==6.0.1
     # via
@@ -242,31 +256,30 @@ requests==2.31.0
     #   imaginAIry (setup.py)
     #   mkdocs-material
     #   responses
-    #   torchvision
     #   transformers
-responses==0.24.1
+responses==0.25.0
     # via -r requirements-dev.in
-ruff==0.1.11
+ruff==0.3.3
     # via -r requirements-dev.in
-safetensors==0.4.1
+safetensors==0.4.2
     # via
     #   diffusers
     #   imaginAIry (setup.py)
     #   timm
     #   transformers
-scipy==1.10.1
+scipy==1.12.0
     # via
     #   imaginAIry (setup.py)
     #   torchdiffeq
-sentencepiece==0.1.99
+sentencepiece==0.2.0
     # via open-clip-torch
 six==1.16.0
     # via python-dateutil
-sniffio==1.3.0
+sniffio==1.3.1
     # via
     #   anyio
     #   httpx
-starlette==0.32.0.post1
+starlette==0.36.3
     # via fastapi
 sympy==1.12
     # via torch
@@ -274,17 +287,20 @@ termcolor==2.4.0
     # via
     #   imaginAIry (setup.py)
     #   pytest-sugar
-timm==0.9.12
+timm==0.9.16
     # via
     #   imaginAIry (setup.py)
     #   open-clip-torch
-tokenizers==0.15.0
+tokenizers==0.15.2
     # via transformers
 tomli==2.0.1
     # via
+    #   build
     #   mypy
+    #   pip-tools
+    #   pyproject-hooks
     #   pytest
-torch==2.1.2
+torch==2.2.1
     # via
     #   imaginAIry (setup.py)
     #   kornia
@@ -294,52 +310,57 @@ torch==2.1.2
     #   torchvision
 torchdiffeq==0.2.3
     # via imaginAIry (setup.py)
-torchvision==0.16.2
+torchvision==0.17.1
     # via
     #   imaginAIry (setup.py)
     #   open-clip-torch
     #   timm
-tqdm==4.66.1
+tqdm==4.66.2
     # via
     #   huggingface-hub
     #   imaginAIry (setup.py)
     #   open-clip-torch
     #   transformers
-transformers==4.36.2
+transformers==4.38.2
     # via imaginAIry (setup.py)
 typeguard==2.13.3
     # via jaxtyping
-types-pillow==10.1.0.20240106
+types-pillow==10.2.0.20240311
     # via -r requirements-dev.in
-types-psutil==5.9.5.20240106
+types-psutil==5.9.5.20240311
     # via -r requirements-dev.in
-types-requests==2.31.0.20240106
+types-requests==2.31.0.20240311
     # via -r requirements-dev.in
 types-tqdm==4.66.0.20240106
     # via -r requirements-dev.in
-typing-extensions==4.9.0
+typing-extensions==4.10.0
     # via
     #   anyio
     #   fastapi
     #   huggingface-hub
     #   jaxtyping
     #   mypy
     #   pydantic
     #   pydantic-core
     #   torch
     #   uvicorn
-urllib3==2.1.0
+urllib3==2.2.1
     # via
     #   requests
     #   responses
     #   types-requests
-uvicorn==0.25.0
+uvicorn==0.28.0
     # via imaginAIry (setup.py)
-watchdog==3.0.0
+watchdog==4.0.0
     # via mkdocs
 wcwidth==0.2.13
     # via ftfy
-wheel==0.42.0
-    # via -r requirements-dev.in
-zipp==3.17.0
+wheel==0.43.0
+    # via
+    #   -r requirements-dev.in
+    #   pip-tools
+zipp==3.18.1
     # via importlib-metadata

+# The following packages are considered to be unsafe in a requirements file:
+# pip
+# setuptools

@@ -1,14 +1,16 @@
-extend-ignore = ["E501", "G004", "PT004", "PT005", "RET504", "SIM114", "TRY003", "TRY400", "TRY401", "RUF012", "RUF100", "ISC001"]
 extend-exclude = ["imaginairy/vendored", "downloads", "other"]

+[lint]
+extend-ignore = ["E501", "G004", "PT004", "PT005", "RET504", "SIM114", "TRY003", "TRY400", "TRY401", "RUF012", "RUF100", "ISC001"]
 extend-select = [
     "I", "E", "W", "UP", "ASYNC", "BLE", "A001", "A002",
     "C4", "DTZ", "T10", "EM", "ISC", "ICN", "G", "PIE", "PT",
     "Q", "SIM", "TID", "TCH", "PLC", "PLE", "TRY", "RUF"
 ]

-[isort]
+[lint.isort]
 combine-as-imports = true

-[flake8-errmsg]
+[lint.flake8-errmsg]
 max-string-length = 50
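Context for this last hunk: as of ruff's 0.2/0.3 releases, linter settings in the config file live under a `lint` namespace, hence `[lint]`, `[lint.isort]`, and `[lint.flake8-errmsg]`. Both layouts parse to the same settings, just re-namespaced; a quick illustration with the stdlib TOML parser (Python 3.11+):

import tomllib

old_cfg = tomllib.loads('extend-ignore = ["E501"]\n[isort]\ncombine-as-imports = true\n')
new_cfg = tomllib.loads('[lint]\nextend-ignore = ["E501"]\n[lint.isort]\ncombine-as-imports = true\n')

assert old_cfg["extend-ignore"] == new_cfg["lint"]["extend-ignore"]
assert old_cfg["isort"] == new_cfg["lint"]["isort"]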