From 1644e8938552349e636cea2b9f3a42d6ce0ae3a6 Mon Sep 17 00:00:00 2001
From: mike dupont
Date: Wed, 17 Apr 2024 12:56:48 -0400
Subject: [PATCH] reformat black

---
 src/petals/__init__.py                       | 2 +-
 src/petals/client/ptune.py                   | 2 +-
 src/petals/client/remote_forward_backward.py | 1 +
 src/petals/client/routing/spending_policy.py | 1 +
 src/petals/client/sequential_autograd.py     | 1 +
 src/petals/models/bloom/block.py             | 1 +
 src/petals/models/falcon/block.py            | 1 +
 src/petals/models/llama/block.py             | 1 +
 src/petals/server/block_functions.py         | 1 +
 src/petals/server/from_pretrained.py         | 1 +
 src/petals/server/memory_cache.py            | 1 +
 src/petals/utils/auto_config.py              | 2 +-
 src/petals/utils/convert_block.py            | 1 +
 src/petals/utils/dht.py                      | 1 +
 14 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/src/petals/__init__.py b/src/petals/__init__.py
index 52e5af1..68120bf 100644
--- a/src/petals/__init__.py
+++ b/src/petals/__init__.py
@@ -20,7 +20,7 @@ from petals.utils.logging import initialize_logs as _initialize_logs
 
 __version__ = "2.3.0.dev2"
 
-#if not os.getenv("PETALS_IGNORE_DEPENDENCY_VERSION"):
+# if not os.getenv("PETALS_IGNORE_DEPENDENCY_VERSION"):
 #     assert (
 #         version.parse("4.38.2") <= version.parse(transformers.__version__) < version.parse("4.39.0")
 #     ), "Please install a proper transformers version: pip install transformers>=4.37.1,<4.39.0"
diff --git a/src/petals/client/ptune.py b/src/petals/client/ptune.py
index f3995d6..ecd3543 100644
--- a/src/petals/client/ptune.py
+++ b/src/petals/client/ptune.py
@@ -51,7 +51,7 @@ class PTuneMixin:
                 batch_size,
                 self.pre_seq_len,
                 self.config.num_hidden_layers,
-                self.config.hidden_size
+                self.config.hidden_size,
                 # TODO: should be num_hidden_layers - 1
             )
             intermediate_prompts = intermediate_prompts.permute([2, 0, 1, 3])
diff --git a/src/petals/client/remote_forward_backward.py b/src/petals/client/remote_forward_backward.py
index 44abe26..d611067 100644
--- a/src/petals/client/remote_forward_backward.py
+++ b/src/petals/client/remote_forward_backward.py
@@ -1,6 +1,7 @@
 """
 Utility functions that call RPC forward or backward on a single remote server
 """
+
 import asyncio
 from typing import Iterable, List, Optional, Sequence, Tuple
 
diff --git a/src/petals/client/routing/spending_policy.py b/src/petals/client/routing/spending_policy.py
index 0af3db7..f4eddba 100644
--- a/src/petals/client/routing/spending_policy.py
+++ b/src/petals/client/routing/spending_policy.py
@@ -3,6 +3,7 @@ An interface for exchanging internal "BLOOM points" for higher priority compute
 The intent is to let Petals participants earn points by helping others while idle (e.g. at night), then use these
 points to run their own compute experiments faster. See Section 4 of https://arxiv.org/abs/2209.01188 for discussion.
 """
+
 from abc import ABC, abstractmethod
 
 
diff --git a/src/petals/client/sequential_autograd.py b/src/petals/client/sequential_autograd.py
index 9d965d2..c95f0a7 100644
--- a/src/petals/client/sequential_autograd.py
+++ b/src/petals/client/sequential_autograd.py
@@ -1,6 +1,7 @@
 """
 A PyTorch autograd function that runs forward/backward on a sequence of remote servers in a fault-tolerant manner
 """
+
 import asyncio
 import itertools
 from collections import deque
diff --git a/src/petals/models/bloom/block.py b/src/petals/models/bloom/block.py
index 439b9ca..d743186 100644
--- a/src/petals/models/bloom/block.py
+++ b/src/petals/models/bloom/block.py
@@ -3,6 +3,7 @@ Bloom intermediate layer
 Based on https://github.com/huggingface/transformers/commit/ca2a55e9dfb245527b5e1c954fec6ffbb7aef07b
 See commit history for authorship.
 """
+
 from typing import Optional, Tuple
 
 import torch
diff --git a/src/petals/models/falcon/block.py b/src/petals/models/falcon/block.py
index a510aba..761bd5d 100644
--- a/src/petals/models/falcon/block.py
+++ b/src/petals/models/falcon/block.py
@@ -3,6 +3,7 @@ Falcon intermediate layer
 Based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/falcon/modeling_falcon.py
 See commit history for authorship.
 """
+
 import math
 from functools import partial
 from typing import Optional, Tuple
diff --git a/src/petals/models/llama/block.py b/src/petals/models/llama/block.py
index 2eb8f73..bd6c8c8 100644
--- a/src/petals/models/llama/block.py
+++ b/src/petals/models/llama/block.py
@@ -3,6 +3,7 @@ LLaMA intermediate layer
 Based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
 See commit history for authorship.
 """
+
 import math
 from typing import Optional, Tuple
 
diff --git a/src/petals/server/block_functions.py b/src/petals/server/block_functions.py
index a79f05c..4c3cf9a 100644
--- a/src/petals/server/block_functions.py
+++ b/src/petals/server/block_functions.py
@@ -1,6 +1,7 @@
 """
 This module implements server-side computations on served blocks: forward, backward and inference; used by handler
 """
+
 from __future__ import annotations
 
 from typing import Any, AsyncIterator, Dict, Optional, Sequence, Tuple, Union
diff --git a/src/petals/server/from_pretrained.py b/src/petals/server/from_pretrained.py
index 4a3b150..bad0b2e 100644
--- a/src/petals/server/from_pretrained.py
+++ b/src/petals/server/from_pretrained.py
@@ -6,6 +6,7 @@ If necessary, one can rewrite this to implement a different behavior, such as:
 - fetch the weights over IPoAC, using a fleet of trained pigeons ( http://www.faqs.org/rfcs/rfc1149.html )
 
 """
+
 import json
 import time
 from contextlib import suppress
diff --git a/src/petals/server/memory_cache.py b/src/petals/server/memory_cache.py
index fa4db21..9e7abf7 100644
--- a/src/petals/server/memory_cache.py
+++ b/src/petals/server/memory_cache.py
@@ -4,6 +4,7 @@ A pytorch memory cache that can be allocated by ConnectionHandler (on cpu) and u
 For now, the only purpose of this code is to ensure that allocated memory will be deleted properly.
 
 """
+
 import asyncio
 import contextlib
 import ctypes
diff --git a/src/petals/utils/auto_config.py b/src/petals/utils/auto_config.py
index 9b09553..6043c7b 100644
--- a/src/petals/utils/auto_config.py
+++ b/src/petals/utils/auto_config.py
@@ -41,7 +41,7 @@ class _AutoDistributedBase:
             kwargs["use_auth_token"] = True
         kwargs["trust_remote_code"] = True
 
-#trust_remote_code=True
+        # trust_remote_code=True
         config = AutoConfig.from_pretrained(model_name_or_path, *args, **kwargs)
 
         if config.model_type not in _CLASS_MAPPING:
diff --git a/src/petals/utils/convert_block.py b/src/petals/utils/convert_block.py
index 94d3e29..26d6b7d 100644
--- a/src/petals/utils/convert_block.py
+++ b/src/petals/utils/convert_block.py
@@ -1,6 +1,7 @@
 """
 Tools for converting transformer blocks, applying quantization and/or tensor parallelism
 """
+
 import re
 from enum import Enum
 from typing import Optional, Sequence
diff --git a/src/petals/utils/dht.py b/src/petals/utils/dht.py
index 4faf74a..357cd98 100644
--- a/src/petals/utils/dht.py
+++ b/src/petals/utils/dht.py
@@ -1,6 +1,7 @@
 """
 Utilities for declaring and retrieving active model layers using a shared DHT.
 """
+
 from __future__ import annotations
 
 import math