Implement RemoteSequential slicing and extra repr, add tests (#30)
- finish renaming RemoteSequenceInfo -> RemoteSequenceManager (why: if it were an *Info, users would expect it to behave like a dataclass, whereas the class actually performs heavy network interactions on its own)
- implement RemoteSequenceManager.make_sequence (from https://pastebin.com/uXgy2U8B )
- make RemoteSequentialInferenceSession use RemoteSequenceManager.make_sequence
- make tests pass again
- make it possible to create an inference session without RemoteTransformerBlock
- make a standalone test for RemoteSequential
- rollback convert-model

Co-authored-by: Tim Dettmers <tim.dettmers@gmail.com>
parent 6ee942e915
commit f0c7383181
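
For context, a minimal usage sketch of the features this commit adds (slicing, extra repr, and running a RemoteSequential end to end), distilled from the new tests later in this diff; MODEL_NAME and INITIAL_PEERS come from environment variables, as in the test utilities at the end of the diff:

import torch
from hivemind import DHT

from src import RemoteSequential
from src.client.remote_model import DistributedBloomConfig

config = DistributedBloomConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
dht = DHT(initial_peers=config.initial_peers, client_mode=True, start=True)
sequential = RemoteSequential(config, dht)

first_half = sequential[: config.n_layer // 2]  # slicing, added in this commit
print(repr(first_half))  # extra repr, added in this commit
outputs = sequential(torch.randn(1, 5, config.hidden_size))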
@@ -1,4 +1,5 @@
-from src.client.remote_block import RemoteTransformerBlock, RemoteTransformerBlockInferenceSession
+from src.client.inference_session import RemoteSequentialInferenceSession, RemoteTransformerBlockInferenceSession
+from src.client.remote_block import RemoteTransformerBlock
 from src.client.remote_model import DistributedBloomConfig, DistributedBloomForCausalLM, DistributedBloomModel
 from src.client.remote_sequential import RemoteSequential
 from src.client.sequence_manager import RemoteSequenceManager
@@ -0,0 +1,173 @@
from __future__ import annotations

import asyncio
import contextlib
from typing import AsyncIterator, List, Optional

import torch
from hivemind import (
    P2P,
    anext,
    deserialize_torch_tensor,
    get_logger,
    nested_flatten,
    serialize_torch_tensor,
    use_hivemind_log_handler,
)
from hivemind.moe.client.remote_expert_worker import RemoteExpertWorker
from hivemind.p2p import StubBase
from hivemind.proto import runtime_pb2

from src.client.sequence_manager import RemoteSequenceManager
from src.data_structures import CHAIN_DELIMITER, ModuleUID, RemoteSpanInfo, RPCInfo
from src.server.handler import TransformerConnectionHandler

use_hivemind_log_handler("in_root_logger")
logger = get_logger(__file__)


class RemoteTransformerBlockInferenceSession:
    """
    An interface to a single multi-step *inference* session for a specific remote module on a specific server

    :note: this inference session is *not* fault-tolerant out of the box
    """

    def __init__(self, uid: ModuleUID, rpc_info: RPCInfo, inputs_queue: asyncio.Queue, outputs_aiter: AsyncIterator):
        self.uid, self.rpc_info = uid, rpc_info
        # warning: this code manages async objects that are only usable inside RemoteExpertWorker's background thread;
        # using them in any other EventLoop may cause side effects including headaches, diarrhea, and loss of sleep
        self._inputs_queue: asyncio.Queue[runtime_pb2.ExpertRequest] = inputs_queue
        self._outputs_stream: AsyncIterator[runtime_pb2.ExpertResponse] = outputs_aiter
        self.stepped = False
        self.closed = False

    @classmethod
    async def _create(
        cls, stub: StubBase, uid: ModuleUID, rpc_info: RPCInfo, timeout: Optional[float] = None
    ) -> RemoteTransformerBlockInferenceSession:
        """Create a new session for a given remote module. This code is meant to be run inside RemoteExpertWorker"""
        inputs_queue = asyncio.Queue()
        outputs_stream = await stub.rpc_inference(cls._read_inputs_from_queue(inputs_queue, timeout), timeout=timeout)
        return cls(uid, rpc_info, inputs_queue, outputs_stream)

    @staticmethod
    async def _read_inputs_from_queue(queue: asyncio.Queue, timeout: Optional[float]) -> AsyncIterator:
        while True:
            next_input_message = await asyncio.wait_for(queue.get(), timeout)
            yield next_input_message
            if not next_input_message.uid and not next_input_message.tensors:
                break  # this message means "done sending"

    def step(self, new_hidden_states: torch.Tensor):
        """Inference step: send a chunk of input tensors and receive a chunk of outputs"""
        if self.closed:
            raise Exception("Session is closed, cannot perform step")
        # serialize inputs and put them into the queue
        inputs = (new_hidden_states,)
        outputs_serialized = RemoteExpertWorker.run_coroutine(
            self._step(
                runtime_pb2.ExpertRequest(
                    uid=self.uid,
                    tensors=[
                        serialize_torch_tensor(tensor, proto.compression)
                        for tensor, proto in zip(inputs, nested_flatten(self.rpc_info["forward_schema"]))
                    ],
                )
            )
        )
        outputs = list(map(deserialize_torch_tensor, outputs_serialized.tensors))
        assert outputs[0].shape == inputs[0].shape, f"expected outputs[0] to be hidden states but got {outputs[0]}"
        return outputs[0]

    async def _step(self, inputs_serialized: runtime_pb2.ExpertRequest) -> runtime_pb2.ExpertResponse:
        """Inference step on serialized data. This code is meant to be run inside RemoteExpertWorker"""
        await self._inputs_queue.put(inputs_serialized)
        self.stepped = True
        return await anext(self._outputs_stream)

    def close(self):
        """Finish a given inference session, close the underlying connection"""
        if self._outputs_stream is None:
            return  # already closed
        RemoteExpertWorker.run_coroutine(self._aclose_stream())
        self._outputs_stream = self._inputs_queue = None
        self.closed = True

    async def _aclose_stream(self):
        """Close the inference session. This code is meant to be run inside RemoteExpertWorker"""
        if self._outputs_stream is None:
            return  # already closed
        if self.stepped:
            await self._inputs_queue.put(runtime_pb2.ExpertRequest())  # empty request will trigger end of session
            try:
                await anext(self._outputs_stream)
            except StopAsyncIteration:
                pass

    def __del__(self):
        self.close()

    def __enter__(self):
        assert not self.closed
        return self

    def __exit__(self, *exc_details):
        self.close()


class RemoteSequentialInferenceSession:
    """
    An interface to a multi-step *inference* session for a sequence of remote transformer blocks
    """

    def __init__(self, sequence_manager: RemoteSequenceManager, p2p: P2P, timeout: Optional[float] = None):
        self.sequence_manager = sequence_manager
        self.p2p = p2p
        self.closed = False
        self.chosen_spans: List[RemoteSpanInfo] = []
        self.stack = contextlib.ExitStack()
        self.inference_sessions: List[RemoteTransformerBlockInferenceSession] = []
        self.timeout = timeout

    def __enter__(self):
        assert not self.closed and not self.chosen_spans
        self.stack.__enter__()
        # TODO(yozh) replace this code with a fault-tolerant chain that can be reconstructed if some peers fail
        self.chosen_spans.extend(self.sequence_manager.make_sequence())

        for chosen_span in self.chosen_spans:
            stub = TransformerConnectionHandler.get_stub(self.p2p, chosen_span.peer_id)
            span_uids: str = CHAIN_DELIMITER.join(self.sequence_manager.block_uids[chosen_span.start : chosen_span.end])
            inference_session = RemoteExpertWorker.run_coroutine(
                RemoteTransformerBlockInferenceSession._create(
                    stub, span_uids, rpc_info=self.sequence_manager.rpc_info, timeout=self.timeout
                )
            )
            self.inference_sessions.append(inference_session)
            self.stack.enter_context(inference_session)

        return self

    def step(self, inputs: torch.Tensor):
        assert not self.closed
        if torch.is_grad_enabled():
            logger.warning("Running inference session with grad enabled. Gradients will *not* be propagated correctly.")
        for session in self.inference_sessions:
            outputs = session.step(inputs)
            assert outputs.shape == inputs.shape, f"expected {inputs.shape}, got {outputs.shape}"
            inputs = outputs
        return inputs

    def close(self, *exc_details):
        """Finish a given inference session, close the underlying connection"""
        if not self.closed:
            self.stack.__exit__(*exc_details or (None, None, None))
            self.inference_sessions.clear()
            self.closed = True

    def __exit__(self, *exc_details):
        self.close(*exc_details)

    def __del__(self):
        self.close()
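
For orientation, a minimal sketch of driving the sequential session defined above. Having `dht` and `sequence_manager` already constructed, and obtaining `p2p` via `dht.replicate_p2p()`, are assumptions based on surrounding code, not steps shown in this diff:

import torch
from hivemind.moe.client.remote_expert_worker import RemoteExpertWorker

# assumption: `dht` and `sequence_manager` were built elsewhere (see RemoteSequential)
p2p = RemoteExpertWorker.run_coroutine(dht.replicate_p2p())
hidden = torch.randn(1, 1, hidden_size)  # `hidden_size` is a placeholder for the model's hidden dim
with RemoteSequentialInferenceSession(sequence_manager, p2p) as session:
    for _ in range(3):
        hidden = session.step(hidden)  # each step routes the chunk through every chosen span in order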
@@ -0,0 +1,51 @@
import asyncio
import gc
from contextlib import suppress

import psutil
import pytest
from hivemind.utils.crypto import RSAPrivateKey
from hivemind.utils.logging import get_logger, use_hivemind_log_handler
from hivemind.utils.mpfuture import MPFuture

use_hivemind_log_handler("in_root_logger")
logger = get_logger(__name__)


@pytest.fixture
def event_loop():
    """
    This overrides the ``event_loop`` fixture from pytest-asyncio
    (e.g. to make it compatible with ``asyncio.subprocess``).

    This fixture is identical to the original one but does not call ``loop.close()`` in the end.
    Indeed, at this point, the loop is already stopped (i.e. the next tests are free to create new loops).
    However, finalizers of objects created in the current test may reference the current loop and fail if it is closed.
    For example, this happens while using ``asyncio.subprocess`` (the ``asyncio.subprocess.Process`` finalizer
    fails if the loop is closed, but works if the loop is only stopped).
    """

    yield asyncio.get_event_loop()


@pytest.fixture(autouse=True, scope="session")
def cleanup_children():
    yield

    with RSAPrivateKey._process_wide_key_lock:
        RSAPrivateKey._process_wide_key = None

    gc.collect()  # Call .__del__() for removed objects

    children = psutil.Process().children(recursive=True)
    if children:
        logger.info(f"Cleaning up {len(children)} leftover child processes")
        for child in children:
            with suppress(psutil.NoSuchProcess):
                child.terminate()
        psutil.wait_procs(children, timeout=1)
        for child in children:
            with suppress(psutil.NoSuchProcess):
                child.kill()

    MPFuture.reset_backend()
@@ -1,47 +1,39 @@
-# Note: this code is being actively modified by justheuristic. If you want to change anything about it, please warn me.
 import os
+import random
 
 import hivemind
 import pytest
 import torch
 import transformers
+from test_utils import *
 
 from src.bloom.from_pretrained import load_pretrained_block
 from src.client.remote_block import RemoteTransformerBlock
+from src.data_structures import UID_DELIMITER
 from src.dht_utils import get_remote_module
 
-INITIAL_PEERS = os.environ.get("INITIAL_PEERS")
-if not INITIAL_PEERS:
-    raise RuntimeError("Must specify INITIAL_PEERS environment variable with one or more peer ids")
-INITIAL_PEERS = INITIAL_PEERS.split()
-
-
-BLOCK_UID = os.environ.get("BLOCK_UID")
-if not BLOCK_UID:
-    raise RuntimeError("Must specify BLOCK_UID as an index of a transformer block to be tested")
-
-REF_NAME = os.environ.get("REF_NAME", "bigscience/test-bloomd-6b3")
-REF_INDEX = int(os.environ.get("REF_INDEX", BLOCK_UID.split(".")[-1]))
-
 
 @pytest.mark.forked
 def test_remote_block_exact_match(atol_forward=1e-5, atol_inference=1e-3):
     dht = hivemind.DHT(initial_peers=INITIAL_PEERS, client_mode=True, start=True)
+    config = transformers.AutoConfig.from_pretrained(MODEL_NAME)
 
-    remote_block = get_remote_module(dht, BLOCK_UID)
-    assert remote_block is not None, f"Could not find {BLOCK_UID} in DHT"
-    assert isinstance(remote_block, RemoteTransformerBlock)
-    ref_config = transformers.AutoConfig.from_pretrained(REF_NAME)
+    for block_index in random.sample(range(config.n_layer), 3):
+        block_uid = f"{MODEL_NAME}{UID_DELIMITER}{block_index}"
+        remote_block = get_remote_module(dht, block_uid)
+        assert remote_block is not None, f"Could not find {block_uid} in DHT"
+        assert isinstance(remote_block, RemoteTransformerBlock)
 
-    inputs = torch.randn(1, 8, ref_config.hidden_size)
-    (outputs_forward,) = remote_block(inputs)
+        inputs = torch.randn(1, 8, config.hidden_size)
+        (outputs_forward,) = remote_block(inputs)
 
-    outputs_inference = []
-    with remote_block.inference_session() as sess:
-        for i in range(inputs.shape[1]):
-            outputs_inference.append(sess.step(inputs[:, i : i + 1, :]))
-    outputs_inference = torch.cat(outputs_inference, dim=1)
+        outputs_inference = []
+        with remote_block.inference_session() as sess:
+            for i in range(inputs.shape[1]):
+                outputs_inference.append(sess.step(inputs[:, i : i + 1, :]))
+        outputs_inference = torch.cat(outputs_inference, dim=1)
 
-    ref_block = load_pretrained_block(REF_NAME, REF_INDEX, torch_dtype=torch.float32)
-    (outputs_local,) = ref_block(inputs)
+        ref_block = load_pretrained_block(MODEL_NAME, block_index, torch_dtype=torch.float32)
+        (outputs_local,) = ref_block(inputs)
 
-    assert torch.allclose(outputs_local, outputs_forward, rtol=0, atol=atol_forward)
-    assert torch.allclose(outputs_local, outputs_inference, rtol=0, atol=atol_inference)
+        assert torch.allclose(outputs_local, outputs_forward, rtol=0, atol=atol_forward)
+        assert torch.allclose(outputs_local, outputs_inference, rtol=0, atol=atol_inference)
@@ -0,0 +1,43 @@
import pytest
import torch
from hivemind import DHT, get_logger, use_hivemind_log_handler
from test_utils import *

from src import RemoteSequential
from src.client.remote_model import DistributedBloomConfig

use_hivemind_log_handler("in_root_logger")
logger = get_logger(__file__)


@pytest.mark.forked
def test_remote_sequential():
    config = DistributedBloomConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
    dht = DHT(initial_peers=config.initial_peers, client_mode=True, start=True)
    test_inputs = torch.randn(1, 5, config.hidden_size, requires_grad=True)
    grad_proj = torch.randn(1, 5, config.hidden_size)

    sequential = RemoteSequential(config, dht)

    full_outputs = sequential(test_inputs)
    (full_outputs * grad_proj).sum().backward()
    assert test_inputs.grad is not None
    full_grad = test_inputs.grad.clone()
    test_inputs.grad.data.zero_()

    first_half = sequential[: config.n_layer // 2]
    second_half = sequential[config.n_layer // 2 :]
    assert len(first_half) + len(second_half) == len(sequential)
    assert abs(len(first_half) - len(second_half)) == config.n_layer % 2
    for m in sequential, first_half, second_half:
        assert isinstance(repr(m), str)

    hidden = first_half(test_inputs)
    assert isinstance(hidden, torch.Tensor)
    assert hidden.shape == test_inputs.shape
    assert hidden.requires_grad
    second_half_outputs = second_half(hidden)
    assert torch.allclose(second_half_outputs, full_outputs)

    (second_half_outputs * grad_proj).sum().backward()
    assert torch.allclose(test_inputs.grad, full_grad)
@@ -0,0 +1,13 @@
import os

INITIAL_PEERS = os.environ.get("INITIAL_PEERS")
if not INITIAL_PEERS:
    raise RuntimeError("Must specify INITIAL_PEERS environment variable with one or more peer ids")
INITIAL_PEERS = INITIAL_PEERS.split()


MODEL_NAME = os.environ.get("MODEL_NAME")
if not MODEL_NAME:
    raise RuntimeError("Must specify MODEL_NAME with the name of the model to be tested")

REF_NAME = os.environ.get("REF_NAME")
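
A hedged example of providing these environment variables before running the tests; the multiaddr below is a placeholder, not a value from this commit, while the model name reuses the test model referenced elsewhere in this diff:

import os

os.environ["INITIAL_PEERS"] = "/ip4/127.0.0.1/tcp/31337/p2p/QmPlaceholderPeerID"  # placeholder multiaddr
os.environ["MODEL_NAME"] = "bigscience/test-bloomd-6b3"  # test model name, as used in the old test above
os.environ["REF_NAME"] = "bigscience/test-bloomd-6b3"  # optional: reference model for exact-match checks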