build: unpin dependencies

Bryce 2023-01-08 20:45:58 -08:00 committed by Bryce Drennan
parent 9e0a9e2c29
commit e9a3e1a99b
11 changed files with 102 additions and 115 deletions

View File

@@ -59,7 +59,7 @@ run-dev: build-dev-image
docker run -it -v $$HOME/.cache/huggingface:/root/.cache/huggingface -v $$HOME/.cache/torch:/root/.cache/torch -v `pwd`/outputs:/outputs imaginairy-dev /bin/bash
requirements: ## Freeze the requirements.txt file
pip-compile setup.py requirements-dev.in --output-file=requirements-dev.txt --upgrade
pip-compile setup.py requirements-dev.in --output-file=requirements-dev.txt --upgrade --resolver=backtracking
require_pyenv:
@if ! [ -x "$$(command -v pyenv)" ]; then\
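(Note: --resolver=backtracking switches pip-compile to pip's newer backtracking dependency resolver, which can work through conflicting version constraints that the legacy resolver fails on.)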

View File

@@ -1,15 +1,14 @@
import gc
import glob
import logging
import os
import sys
import urllib.parse
import requests
import torch
from huggingface_hub import hf_hub_download, try_to_load_from_cache
from omegaconf import OmegaConf
from transformers import cached_path
from transformers.utils.hub import TRANSFORMERS_CACHE, HfFolder
from transformers.utils.hub import url_to_filename as tf_url_to_filename
from transformers.utils.hub import HfFolder
from imaginairy import config as iconfig
from imaginairy.config import MODEL_SHORT_NAMES
@@ -226,7 +225,8 @@ def get_cached_url_path(url):
"""
try:
return huggingface_cached_path(url)
if url.startswith("https://huggingface.co"):
return huggingface_cached_path(url)
except (OSError, ValueError):
pass
filename = url.split("/")[-1]
@@ -242,16 +242,6 @@ def get_cached_url_path(url):
return dest_path
def find_url_in_huggingface_cache(url):
huggingface_filename = os.path.join(TRANSFORMERS_CACHE, tf_url_to_filename(url))
for name in glob.glob(huggingface_filename + "*"):
if name.endswith((".json", ".lock")):
continue
return name
return None
def check_huggingface_url_authorized(url):
if not url.startswith("https://huggingface.co/"):
return None
@@ -272,9 +262,27 @@ def check_huggingface_url_authorized(url):
def huggingface_cached_path(url):
# bypass all the HEAD calls done by the default `cached_path`
dest_path = find_url_in_huggingface_cache(url)
repo, commit_hash, filepath = extract_huggingface_repo_commit_file_from_url(url)
dest_path = try_to_load_from_cache(
repo_id=repo, revision=commit_hash, filename=filepath
)
if not dest_path:
check_huggingface_url_authorized(url)
token = HfFolder.get_token()
dest_path = cached_path(url, use_auth_token=token)
logger.info(f"Downloading {url} from huggingface")
dest_path = hf_hub_download(
repo_id=repo, revision=commit_hash, filename=filepath, token=token
)
return dest_path
def extract_huggingface_repo_commit_file_from_url(url):
parsed_url = urllib.parse.urlparse(url)
path_components = parsed_url.path.strip("/").split("/")
repo = "/".join(path_components[0:2])
assert path_components[2] == "resolve"
commit_hash = path_components[3]
filepath = "/".join(path_components[4:])
return repo, commit_hash, filepath
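For reference, a minimal sketch of what the new parsing yields for a typical Hugging Face "resolve" URL (the repo, revision, and filename below are placeholders, not values from this commit):

    url = (
        "https://huggingface.co/some-org/some-model"
        "/resolve/abc123def/model.ckpt"
    )
    repo, commit_hash, filepath = extract_huggingface_repo_commit_file_from_url(url)
    assert repo == "some-org/some-model"
    assert commit_hash == "abc123def"
    assert filepath == "model.ckpt"

huggingface_cached_path then looks up that (repo, revision, file) triple with try_to_load_from_cache and only falls back to hf_hub_download on a cache miss, replacing the old HEAD-request probing done by transformers' cached_path.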

View File

@@ -49,7 +49,7 @@ class BLIP_Pretrain(nn.Module):
check_hash=True,
)
state_dict = checkpoint["model"]
msg = self.visual_encoder.load_state_dict(state_dict, strict=False)
self.visual_encoder.load_state_dict(state_dict, strict=False)
elif vit == "large":
from timm.models.helpers import load_custom_pretrained
from timm.models.vision_transformer import default_cfgs
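The msg assignment was dropped because the load report went unused; load_state_dict still returns it if it is ever needed again. A self-contained sketch with a stand-in module:

    import torch
    from torch import nn

    model = nn.Linear(4, 2)
    # strict=False tolerates missing/unexpected keys and reports them instead of raising.
    result = model.load_state_dict({"weight": torch.zeros(2, 4)}, strict=False)
    print(result.missing_keys)     # ['bias']
    print(result.unexpected_keys)  # []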

View File

@@ -199,7 +199,7 @@ class CLIPDenseBase(nn.Module):
with torch.no_grad():
inp_size = x_inp.shape[2:]
x_inp.shape[2:]
if self.n_tokens is not None:
stride2 = x_inp.shape[2] // self.n_tokens
@@ -382,7 +382,7 @@ def clip_load_untrained(version):
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(
{k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")}
{k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")}
)
return CLIP(
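The layer count above is inferred by collecting the distinct block indices appearing in checkpoint keys; a toy illustration with made-up keys:

    # Keys are illustrative; the block index is the third dot-separated component.
    state_dict = {
        "transformer.resblocks.0.attn.in_proj_weight": None,
        "transformer.resblocks.0.mlp.c_fc.weight": None,
        "transformer.resblocks.1.attn.in_proj_weight": None,
        "ln_final.weight": None,
    }
    layers = len(
        {k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")}
    )
    assert layers == 2  # blocks 0 and 1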
@@ -649,7 +649,6 @@ class CLIPDenseBaseline(CLIPDenseBase):
):
super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens)
device = "cpu"
# self.cond_layer = cond_layer
self.extract_layer = extract_layer

View File

@@ -437,7 +437,7 @@ class VQAutoEncoder(nn.Module):
)
logger.info(f"vqgan is loaded from: {model_path} [params]")
else:
raise ValueError(f"Wrong params!")
raise ValueError("Wrong params!")
def forward(self, x):
x = self.encoder(x)
@@ -506,7 +506,7 @@ class VQGANDiscriminator(nn.Module):
torch.load(model_path, map_location="cpu")["params"]
)
else:
raise ValueError(f"Wrong params!")
raise ValueError("Wrong params!")
def forward(self, x):
return self.main(x)

View File

@@ -27,9 +27,9 @@ class VDenoiser(nn.Module):
return (t * math.pi / 2).tan()
def loss(self, input, noise, sigma, **kwargs):
c_skip, c_out, c_in = [
c_skip, c_out, c_in = (
utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)
]
)
noised_input = input + noise * utils.append_dims(sigma, input.ndim)
model_output = self.inner_model(
noised_input * c_in, self.sigma_to_t(sigma), **kwargs
@@ -38,9 +38,9 @@ class VDenoiser(nn.Module):
return (model_output - target).pow(2).flatten(1).mean(1)
def forward(self, input, sigma, **kwargs):
c_skip, c_out, c_in = [
c_skip, c_out, c_in = (
utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)
]
)
return (
self.inner_model(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out
+ input * c_skip
@@ -116,17 +116,17 @@ class DiscreteEpsDDPMDenoiser(DiscreteSchedule):
return self.inner_model(*args, **kwargs)
def loss(self, input, noise, sigma, **kwargs):
c_out, c_in = [
c_out, c_in = (
utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)
]
)
noised_input = input + noise * utils.append_dims(sigma, input.ndim)
eps = self.get_eps(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
return (eps - noise).pow(2).flatten(1).mean(1)
def forward(self, input, sigma, **kwargs):
c_out, c_in = [
c_out, c_in = (
utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)
]
)
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
return input + eps * c_out
@@ -178,18 +178,18 @@ class DiscreteVDDPMDenoiser(DiscreteSchedule):
return self.inner_model(*args, **kwargs)
def loss(self, input, noise, sigma, **kwargs):
c_skip, c_out, c_in = [
c_skip, c_out, c_in = (
utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)
]
)
noised_input = input + noise * utils.append_dims(sigma, input.ndim)
model_output = self.get_v(noised_input * c_in, self.sigma_to_t(sigma), **kwargs)
target = (input - c_skip * noised_input) / c_out
return (model_output - target).pow(2).flatten(1).mean(1)
def forward(self, input, sigma, **kwargs):
c_skip, c_out, c_in = [
c_skip, c_out, c_in = (
utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)
]
)
return (
self.get_v(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out
+ input * c_skip
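The bracket-to-parenthesis edits in this file swap list comprehensions for generator expressions; tuple unpacking consumes either, so behavior is identical while the throwaway list is skipped. A standalone illustration:

    # Unpacking three values works the same from a generator as from a list.
    c_skip, c_out, c_in = (x * 2 for x in (0.5, 1.0, 2.0))
    assert (c_skip, c_out, c_in) == (1.0, 2.0, 4.0)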

View File

@@ -24,26 +24,26 @@ class Denoiser(nn.Module):
return c_skip, c_out, c_in
def loss(self, input, noise, sigma, **kwargs):
c_skip, c_out, c_in = [
c_skip, c_out, c_in = (
utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)
]
)
noised_input = input + noise * utils.append_dims(sigma, input.ndim)
model_output = self.inner_model(noised_input * c_in, sigma, **kwargs)
target = (input - c_skip * noised_input) / c_out
return (model_output - target).pow(2).flatten(1).mean(1)
def forward(self, input, sigma, **kwargs):
c_skip, c_out, c_in = [
c_skip, c_out, c_in = (
utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)
]
)
return self.inner_model(input * c_in, sigma, **kwargs) * c_out + input * c_skip
class DenoiserWithVariance(Denoiser):
def loss(self, input, noise, sigma, **kwargs):
c_skip, c_out, c_in = [
c_skip, c_out, c_in = (
utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)
]
)
noised_input = input + noise * utils.append_dims(sigma, input.ndim)
model_output, logvar = self.inner_model(
noised_input * c_in, sigma, return_variance=True, **kwargs
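Throughout these denoisers, utils.append_dims broadcasts the per-sample sigma up to the input's rank before the elementwise math; a sketch of the idea (not necessarily the vendored implementation):

    import torch

    def append_dims(x, target_dims):
        # Append trailing singleton dims until x has target_dims dimensions.
        return x[(...,) + (None,) * (target_dims - x.ndim)]

    sigma = torch.ones(8)          # one sigma per batch element
    x = torch.randn(8, 3, 64, 64)  # image batch
    assert append_dims(sigma, x.ndim).shape == (8, 1, 1, 1)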

View File

@@ -2,7 +2,6 @@ black
coverage
isort
ruff
pydocstyle
pylama
pylint
pytest

View File

@@ -1,22 +1,20 @@
#
# This file is autogenerated by pip-compile with python 3.10
# To update, run:
# This file is autogenerated by pip-compile with Python 3.10
# by the following command:
#
# pip-compile --output-file=requirements-dev.txt requirements-dev.in setup.py
#
absl-py==1.3.0
# via
# tb-nightly
# tensorboard
# via tb-nightly
addict==2.4.0
# via basicsr
aiohttp==3.8.3
# via fsspec
aiosignal==1.3.1
# via aiohttp
antlr4-python3-runtime==4.8
antlr4-python3-runtime==4.9.3
# via omegaconf
astroid==2.12.13
astroid==2.13.2
# via pylint
async-timeout==4.0.2
# via aiohttp
@@ -30,7 +28,7 @@ basicsr==1.4.2
# realesrgan
black==22.12.0
# via -r requirements-dev.in
cachetools==5.2.0
cachetools==5.2.1
# via google-auth
certifi==2022.12.7
# via requests
@@ -47,7 +45,7 @@ click-shell==2.1
# via imaginAIry (setup.py)
contourpy==1.0.6
# via matplotlib
coverage==7.0.2
coverage==7.0.4
# via -r requirements-dev.in
cycler==0.11.0
# via matplotlib
@@ -55,7 +53,7 @@ diffusers==0.11.1
# via imaginAIry (setup.py)
dill==0.3.6
# via pylint
einops==0.3.0
einops==0.6.0
# via imaginAIry (setup.py)
exceptiongroup==1.1.0
# via pytest
@@ -85,9 +83,7 @@ ftfy==6.1.1
# imaginAIry (setup.py)
# open-clip-torch
future==0.18.2
# via
# basicsr
# pytorch-lightning
# via basicsr
gfpgan==1.3.8
# via
# imaginAIry (setup.py)
@@ -96,15 +92,10 @@ google-auth==2.15.0
# via
# google-auth-oauthlib
# tb-nightly
# tensorboard
google-auth-oauthlib==0.4.6
# via
# tb-nightly
# tensorboard
# via tb-nightly
grpcio==1.51.1
# via
# tb-nightly
# tensorboard
# via tb-nightly
huggingface-hub==0.11.1
# via
# diffusers
@@ -115,13 +106,13 @@ idna==3.4
# via
# requests
# yarl
imageio==2.9.0
imageio==2.24.0
# via
# imaginAIry (setup.py)
# scikit-image
importlib-metadata==6.0.0
# via diffusers
iniconfig==1.1.1
iniconfig==2.0.0
# via pytest
isort==5.11.4
# via
@@ -129,10 +120,12 @@ isort==5.11.4
# pylint
kiwisolver==1.4.4
# via matplotlib
kornia==0.6
kornia==0.6.9
# via imaginAIry (setup.py)
lazy-object-proxy==1.8.0
lazy-object-proxy==1.9.0
# via astroid
lightning-utilities==0.5.0
# via pytorch-lightning
llvmlite==0.39.1
# via numba
lmdb==1.4.0
@@ -140,9 +133,7 @@ lmdb==1.4.0
# basicsr
# gfpgan
markdown==3.4.1
# via
# tb-nightly
# tensorboard
# via tb-nightly
markupsafe==2.1.1
# via werkzeug
matplotlib==3.6.2
@@ -157,7 +148,7 @@ multidict==6.0.4
# yarl
mypy-extensions==0.4.3
# via black
networkx==2.8.8
networkx==3.0
# via scikit-image
numba==0.56.4
# via facexlib
@@ -181,16 +172,16 @@ numpy==1.23.5
# scikit-image
# scipy
# tb-nightly
# tensorboard
# tensorboardx
# tifffile
# torchmetrics
# torchvision
# transformers
oauthlib==3.2.2
# via requests-oauthlib
omegaconf==2.1.1
omegaconf==2.3.0
# via imaginAIry (setup.py)
open-clip-torch==2.9.1
open-clip-torch==2.9.2
# via imaginAIry (setup.py)
opencv-python==4.7.0.68
# via
@@ -198,10 +189,11 @@ opencv-python==4.7.0.68
# facexlib
# gfpgan
# realesrgan
packaging==22.0
packaging==23.0
# via
# huggingface-hub
# kornia
# lightning-utilities
# matplotlib
# pytest
# pytest-sugar
@@ -228,12 +220,12 @@ platformdirs==2.6.2
# pylint
pluggy==1.0.0
# via pytest
protobuf==3.20.3
protobuf==3.20.1
# via
# imaginAIry (setup.py)
# open-clip-torch
# tb-nightly
# tensorboard
# tensorboardx
psutil==5.9.4
# via imaginAIry (setup.py)
pyasn1==0.4.8
@@ -244,12 +236,8 @@ pyasn1-modules==0.2.8
# via google-auth
pycodestyle==2.10.0
# via pylama
pydeprecate==0.3.1
# via pytorch-lightning
pydocstyle==6.1.1
# via
# -r requirements-dev.in
# pylama
pydocstyle==6.2.3
# via pylama
pyflakes==3.0.1
# via pylama
pylama==8.4.1
@@ -269,7 +257,7 @@ pytest-sugar==0.9.6
# via -r requirements-dev.in
python-dateutil==2.8.2
# via matplotlib
pytorch-lightning==1.4.2
pytorch-lightning==1.8.6
# via imaginAIry (setup.py)
pywavelets==1.4.1
# via scikit-image
@@ -299,7 +287,6 @@ requests==2.28.1
# requests-oauthlib
# responses
# tb-nightly
# tensorboard
# torchvision
# transformers
requests-oauthlib==1.3.1
@@ -308,13 +295,13 @@ responses==0.22.0
# via -r requirements-dev.in
rsa==4.9
# via google-auth
ruff==0.0.206
ruff==0.0.215
# via -r requirements-dev.in
safetensors==0.2.7
# via imaginAIry (setup.py)
scikit-image==0.19.3
# via basicsr
scipy==1.9.3
scipy==1.10.0
# via
# basicsr
# facexlib
@@ -330,27 +317,23 @@ six==1.16.0
# python-dateutil
snowballstemmer==2.2.0
# via pydocstyle
tb-nightly==2.12.0a20230102
tb-nightly==2.12.0a20230107
# via
# basicsr
# gfpgan
tensorboard==2.11.0
# via pytorch-lightning
tensorboard-data-server==0.6.1
# via
# tb-nightly
# tensorboard
# via tb-nightly
tensorboard-plugin-wit==1.8.1
# via
# tb-nightly
# tensorboard
termcolor==2.1.1
# via tb-nightly
tensorboardx==2.5.1
# via pytorch-lightning
termcolor==2.2.0
# via pytest-sugar
tifffile==2022.10.10
# via scikit-image
timm==0.6.12
# via imaginAIry (setup.py)
tokenizers==0.12.1
tokenizers==0.13.2
# via transformers
toml==0.10.2
# via responses
@@ -378,7 +361,7 @@ torch==1.13.1
# torchvision
torchdiffeq==0.2.3
# via imaginAIry (setup.py)
torchmetrics==0.6.0
torchmetrics==0.11.0
# via
# imaginAIry (setup.py)
# pytorch-lightning
@@ -402,13 +385,15 @@ tqdm==4.64.1
# pytorch-lightning
# realesrgan
# transformers
transformers==4.19.2
transformers==4.25.1
# via imaginAIry (setup.py)
types-toml==0.10.8.1
# via responses
typing-extensions==4.4.0
# via
# astroid
# huggingface-hub
# lightning-utilities
# pytorch-lightning
# torch
# torchvision
@@ -419,13 +404,9 @@ urllib3==1.26.13
wcwidth==0.2.5
# via ftfy
werkzeug==2.2.2
# via
# tb-nightly
# tensorboard
# via tb-nightly
wheel==0.38.4
# via
# tb-nightly
# tensorboard
# via tb-nightly
wrapt==1.14.1
# via astroid
yapf==0.32.0

View File

@@ -43,21 +43,21 @@ setup(
"numpy",
"tqdm",
"diffusers",
"imageio==2.9.0",
"imageio>=2.9.0",
"Pillow>=8.0.0",
"psutil",
"pytorch-lightning==1.4.2",
"omegaconf==2.1.1",
"pytorch-lightning>=1.4.2",
"omegaconf>=2.1.1",
"open-clip-torch",
"requests",
"einops==0.3.0",
"einops>=0.3.0",
"safetensors",
"timm>=0.4.12", # for vendored blip
"torchdiffeq",
"transformers==4.19.2",
"torchmetrics==0.6.0",
"transformers>=4.19.2",
"torchmetrics>=0.6.0",
"torchvision>=0.13.1",
"kornia==0.6",
"kornia>=0.6",
"realesrgan",
"gfpgan>=1.3.7",
],
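Switching == pins to >= floors is what actually unpins: a floor accepts any newer release while still guarding the minimum. A quick check with the packaging library (version numbers are just examples):

    from packaging.specifiers import SpecifierSet

    assert not SpecifierSet("==0.6.0").contains("0.11.0")
    assert SpecifierSet(">=0.6.0").contains("0.11.0")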

View File

@@ -9,13 +9,13 @@ filterwarnings =
[pylama]
format = pylint
skip = */.tox/*,*/.env/*,build/*,*/downloads/*,other/*,prolly_delete/*,downloads/*,imaginairy/vendored/*,testing_support/vastai_cli_official.py
linters = pylint,pycodestyle,pydocstyle,pyflakes,mypy
linters = pylint,pycodestyle,pyflakes,mypy
ignore =
Z999,C0103,C0301,C0302,C0114,C0115,C0116,
Z999,D100,D101,D102,D103,D105,D106,D107,D200,D202,D203,D205,D212,D400,D401,D406,D407,D413,D415,D417,
Z999,E203,E501,E1101,E1131,
Z999,R0901,R0902,R0903,R0904,R0193,R0912,R0913,R0914,R0915,R1702,
Z999,W0221,W0511,W0612,W0613,W1203
Z999,W0221,W0511,W0612,W0613,W0632,W1203
[pylama:tests/*]
ignore = C0104,C0114,C0116,D103,W0143,W0613