mirror of
https://github.com/bigscience-workshop/petals
synced 2024-10-31 09:20:41 +00:00
cb3f018f9f
This PR: 1. **Abolishes the model conversion procedure.** Now, models are downloaded directly from original repositories like https://huggingface.co/bigscience/bloom. Servers download only shards with blocks to be hosted, and clients download only shards with input/output embeddings and layernorms. - BLOOM is loaded from `bigscience/bloom`, but we use the DHT prefix `bigscience/bloom-petals` for backward compatibility. Same with smaller BLOOMs and BLOOMZ. - LLaMA can be loaded from any repo like `username/llama-65b-hf`, but we use the DHT prefix `llama-65b-hf` (without the username) to accommodate blocks from different repos (there are a few of them with minor differences, such as `Llama` vs. `LLaMA` in the class name). 2. **Refactors the client to generalize it for multiple models.** Now, we have `petals.models` packages that contain model-specific code (e.g. `petals.models.bloom`, `petals.models.llama`). General code (e.g. CPU-efficient LM head, p-tuning) is kept in `petals.client`. 3. **Introduces** `WrappedLlamaBlock`, `DistributedLlamaConfig`, `DistributedLlamaForCausalLM`, `DistributedLlamaForSequenceClassification`, and `DistributedLlamaModel` compatible with Petals functionality (p-tuning, adapters, etc.). 4. **Introduces** `AutoDistributedConfig` that automatically chooses the correct config class (`DistributedLlamaConfig` or `DistributedBloomConfig`). The refactored configs contain all model-specific info for both clients and servers. Upgrade instructions: - Remove disk caches for blocks in old (converted) format to save disk space. That is, remove `~/.cache/petals/model--bigscience--bloom-petals` and `~/.cache/petals/model--bigscience--bloomz-petals` directories (if present).
39 lines
1.6 KiB
Python
39 lines
1.6 KiB
Python
import random
|
|
|
|
import pytest
|
|
import torch
|
|
|
|
from petals import DistributedBloomConfig, RemoteSequential
|
|
from petals.server.from_pretrained import load_pretrained_block
|
|
from test_utils import *
|
|
|
|
|
|
@pytest.mark.forked
def test_remote_block_exact_match(atol_forward=1e-4, atol_inference=1e-3):
    """Verify that a remotely hosted block matches a locally loaded reference.

    For 3 randomly chosen layers, runs (a) a full forward pass and (b) a
    token-by-token inference session through the remote block, then compares
    both results against the same block loaded locally via
    ``load_pretrained_block`` within the given absolute tolerances.
    Also checks that stepping past ``max_length`` raises ``ValueError``.
    """
    config = DistributedBloomConfig.from_pretrained(MODEL_NAME, initial_peers=INITIAL_PEERS)
    sequence = RemoteSequential(config)

    for layer_idx in random.sample(range(config.num_hidden_layers), 3):
        block = sequence[layer_idx]

        dummy_inputs = torch.randn(1, 8, config.hidden_size)
        forward_outputs = block(dummy_inputs)

        seq_len = dummy_inputs.shape[1]
        with torch.inference_mode(), block.inference_session(max_length=seq_len) as sess:
            step_outputs = [sess.step(dummy_inputs[:, i : i + 1, :]) for i in range(seq_len)]

            # The session was opened with max_length == seq_len, so one more
            # step must be rejected.
            with pytest.raises(ValueError, match=r"Maximum length exceeded") as exc_info:
                sess.step(dummy_inputs[:, -1:, :])
            assert "Maximum length exceeded" in repr(exc_info.value)
        inference_outputs = torch.cat(step_outputs, dim=1)

        # Ground truth: the same transformer block, loaded locally in fp32.
        reference = load_pretrained_block(MODEL_NAME, layer_idx, torch_dtype=torch.float32)
        (reference_outputs,) = reference(dummy_inputs)

        assert torch.allclose(reference_outputs, forward_outputs, rtol=0, atol=atol_forward)
        assert torch.allclose(reference_outputs, inference_outputs, rtol=0, atol=atol_inference)