petals/.github/workflows/run-tests.yaml

name: Tests

on:
  push:
    branches: [ main ]
  pull_request:

jobs:
  convert-model:
    runs-on: ubuntu-latest
    env:
      BLOOM_TESTING_WRITE_TOKEN: ${{ secrets.BLOOM_TESTING_WRITE_TOKEN }}
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Check if the model is cached
        id: cache-model
        uses: actions/cache@v3
        with:
          path: ~/.dummy
          key: model-v1-${{ hashFiles('setup.cfg', 'src/petals/cli/convert_model.py') }}
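          # ~/.dummy is only a sentinel here: a cache hit on this key (derived from setup.cfg and the
          # conversion script) means a previous successful run already converted and pushed the test model
          # for this configuration, so every step below gated on `cache-hit != 'true'` is skipped.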
      - name: Set up Python
        if: steps.cache-model.outputs.cache-hit != 'true'
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Cache dependencies
        if: steps.cache-model.outputs.cache-hit != 'true'
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: Key-v1-3.9-${{ hashFiles('setup.cfg') }}
      - name: Install dependencies
        if: steps.cache-model.outputs.cache-hit != 'true'
        run: |
          python -m pip install --upgrade pip
          pip install .
      - name: Delete any test models older than 1 week
        if: steps.cache-model.outputs.cache-hit != 'true'
        run: |
          python tests/scripts/remove_old_models.py --author bloom-testing --use_auth_token $BLOOM_TESTING_WRITE_TOKEN
      - name: Delete previous version of this model, if exists
        if: steps.cache-model.outputs.cache-hit != 'true'
        run: |
          export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
          python -c "from huggingface_hub import delete_repo; delete_repo(token='$BLOOM_TESTING_WRITE_TOKEN', \
            repo_id='bloom-testing/test-bloomd-560m-$HF_TAG')" || true
      - name: Convert model and push to hub
        if: steps.cache-model.outputs.cache-hit != 'true'
        run: |
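          # The tag is the same hashFiles() expression as the model cache key above, so the pushed repo
          # name encodes the exact conversion configuration; the run-tests job recomputes the same tag
          # to locate this model.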
          export HF_TAG=${{ hashFiles('setup.cfg', 'src/petals/cli/convert_model.py') }}
          python -m petals.cli.convert_model --model bigscience/bloom-560m --output_path ./converted_model \
            --output_repo bloom-testing/test-bloomd-560m-$HF_TAG --use_auth_token $BLOOM_TESTING_WRITE_TOKEN \
            --resize_token_embeddings 50000

  run-tests:
    runs-on: ubuntu-latest
    needs: convert-model
    strategy:
      matrix:
        python-version: [ 3.7, 3.8, 3.9 ]
      fail-fast: false
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Cache dependencies
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: Key-v1-${{ matrix.python-version }}-${{ hashFiles('setup.cfg') }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[dev]
      - name: Test
        run: |
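          # HF_TAG must match the tag used by the convert-model job so that MODEL_NAME points at the
          # freshly pushed test model; REF_NAME is the original model the tests compare outputs against.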
          export HF_TAG=${{ hashFiles('setup.cfg', 'src/petals/cli/convert_model.py') }}
          export MODEL_NAME=bloom-testing/test-bloomd-560m-$HF_TAG
          export REF_NAME=bigscience/bloom-560m

          python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:12 \
            --new_swarm --identity tests/test.id --host_maddrs /ip4/127.0.0.1/tcp/31337 --throughput 1 \
            --torch_dtype float32 --compression NONE --attn_cache_size 0.2GiB &> server1.log &
          SERVER1_PID=$!

          sleep 5  # wait for the first server to initialize DHT

          export INITIAL_PEERS=/ip4/127.0.0.1/tcp/31337/p2p/QmS9KwZptnVdB9FFV7uGgaTq4sEKBwcYeKZDfSpyKDUd1g
          # ^-- server 1 multiaddr is determined by --identity and --host_maddrs

          python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 12:22 \
            --initial_peers $INITIAL_PEERS --throughput 1 --torch_dtype float32 &> server2.log &
          SERVER2_PID=$!

          sleep 10  # wait for initial servers to declare blocks, then let server decide which blocks to serve
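          # Servers 3-5 overlap with the block ranges above (or let the server pick blocks itself),
          # so the swarm covers the whole model with some redundancy.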
          python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:5 \
            --initial_peers $INITIAL_PEERS --throughput 1 --torch_dtype float32 &> server3.log &
          SERVER3_PID=$!

          python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 4:14 \
            --torch_dtype float32 --initial_peers $INITIAL_PEERS --throughput 1 &> server4.log &
          SERVER4_PID=$!

          python -m petals.cli.run_server --converted_model_name_or_path $MODEL_NAME --num_blocks 3 \
            --initial_peers $INITIAL_PEERS --throughput 1 --tensor_parallel_devices cpu cpu --torch_dtype float32 &> server5.log &
          SERVER5_PID=$!

          tail -n 100 -f server*.log &
          LOGGER_PID=$!
          sleep 30  # wait for servers to download layers

          kill -0 $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID  # ensure all servers survived init

          pytest tests --durations=0 --durations-min=1.0 -v

          kill -0 $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID  # ensure all servers survived tests
          kill -s SIGINT $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID $LOGGER_PID
          echo "Done!"