refactor: run import sorter

This commit is contained in:
Bryce 2022-09-11 13:58:14 -07:00
parent 043373ef17
commit 438c2868ad
15 changed files with 25 additions and 28 deletions

View File

@@ -2,5 +2,5 @@ import os
os.putenv("PYTORCH_ENABLE_MPS_FALLBACK", "1")
from .api import imagine_images, imagine_image_files # noqa
from .api import imagine_image_files, imagine_images # noqa
from .schema import ImaginePrompt, ImagineResult, WeightedPrompt # noqa

View File

@@ -5,13 +5,13 @@ import subprocess
from contextlib import nullcontext
from functools import lru_cache
import PIL
import numpy as np
import PIL
import torch
import torch.nn
from PIL import Image
from einops import rearrange
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from torch import autocast
from transformers import cached_path
@@ -21,9 +21,9 @@ from imaginairy.modules.diffusion.plms import PLMSSampler
from imaginairy.safety import is_nsfw
from imaginairy.schema import ImaginePrompt, ImagineResult
from imaginairy.utils import (
fix_torch_nn_layer_norm,
get_device,
instantiate_from_config,
fix_torch_nn_layer_norm,
)
LIB_PATH = os.path.dirname(__file__)

View File

@@ -59,6 +59,5 @@ setup_env()
from imaginairy.cmds import imagine_cmd # noqa
if __name__ == "__main__":
imagine_cmd() # noqa

View File

@@ -1,6 +1,7 @@
import logging.config
import click
from imaginairy.api import load_model
logger = logging.getLogger(__name__)

View File

@@ -1,12 +1,13 @@
from inspect import isfunction
import math
from inspect import isfunction
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from torch import einsum, nn
from imaginairy.modules.diffusion.util import checkpoint
from imaginairy.utils import get_device_name, get_device
from imaginairy.utils import get_device, get_device_name
def exists(val):

View File

@@ -6,7 +6,7 @@ import torch
import torch.nn as nn
from einops import rearrange
from imaginairy.modules.diffusion.model import Encoder, Decoder
from imaginairy.modules.diffusion.model import Decoder, Encoder
from imaginairy.modules.distributions import DiagonalGaussianDistribution
from imaginairy.utils import instantiate_from_config

View File

@@ -3,7 +3,7 @@ import kornia
import torch
import torch.nn as nn
from einops import repeat
from transformers import CLIPTokenizer, CLIPTextModel
from transformers import CLIPTextModel, CLIPTokenizer
from imaginairy.utils import get_device

View File

@@ -6,10 +6,10 @@ import torch
from tqdm import tqdm
from imaginairy.modules.diffusion.util import (
extract_into_tensor,
make_ddim_sampling_parameters,
make_ddim_timesteps,
noise_like,
extract_into_tensor,
)
from imaginairy.utils import get_device

View File

@@ -16,15 +16,10 @@ from einops import rearrange
from torchvision.utils import make_grid
from tqdm import tqdm
from imaginairy.modules.autoencoder import (
VQModelInterface,
)
from imaginairy.modules.diffusion.util import (
make_beta_schedule,
noise_like,
)
from imaginairy.modules.autoencoder import VQModelInterface
from imaginairy.modules.diffusion.util import make_beta_schedule, noise_like
from imaginairy.modules.distributions import DiagonalGaussianDistribution
from imaginairy.utils import log_params, instantiate_from_config
from imaginairy.utils import instantiate_from_config, log_params
logger = logging.getLogger(__name__)
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}

View File

@@ -10,7 +10,7 @@ from einops import rearrange
from imaginairy.modules.attention import LinearAttention
from imaginairy.modules.distributions import DiagonalGaussianDistribution
from imaginairy.utils import instantiate_from_config, get_device
from imaginairy.utils import get_device, instantiate_from_config
logger = logging.getLogger(__name__)

View File

@@ -1,21 +1,21 @@
from abc import abstractmethod
import math
from abc import abstractmethod
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from imaginairy.modules.attention import SpatialTransformer
from imaginairy.modules.diffusion.util import (
avg_pool_nd,
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
zero_module,
)
from imaginairy.modules.attention import SpatialTransformer
# dummy replace

View File

@@ -1,5 +1,5 @@
import torch
import numpy as np
import torch
class AbstractDistribution:

View File

@@ -51,7 +51,7 @@ def get_obj_from_str(string, reload=False):
return getattr(importlib.import_module(module, package=None), cls)
from torch.overrides import has_torch_function_variadic, handle_torch_function
from torch.overrides import handle_torch_function, has_torch_function_variadic
def _fixed_layer_norm(

View File

@@ -1,4 +1,4 @@
from setuptools import setup, find_packages
from setuptools import find_packages, setup
setup(
name="imaginairy",

View File

@@ -1,5 +1,6 @@
from imaginairy.api import imagine_images, imagine_image_files
from imaginairy.api import imagine_image_files, imagine_images
from imaginairy.schema import ImaginePrompt
from . import TESTS_FOLDER