name: Tests

on:
  push:
    branches: [ main ]
  pull_request:

jobs:
  convert-model:
    runs-on: ubuntu-latest
    env:
      BLOOM_TESTING_WRITE_TOKEN: ${{ secrets.BLOOM_TESTING_WRITE_TOKEN }}
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Cache dependencies
        uses: actions/cache@v2
        with:
          path: ~/.cache/pip
          key: Key-v1-py3.9-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements-dev.txt') }}
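          # ^-- hashing both requirements files invalidates the pip cache whenever either file changes.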
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Delete any test models older than 1 week
        run: |
          python tests/scripts/remove_old_models.py --author bloom-testing --use_auth_token $BLOOM_TESTING_WRITE_TOKEN
      - name: Delete previous version of this model, if exists
        run: |
          export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
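          # ^-- HF_TAG is the source branch of the pull request (GITHUB_HEAD_REF, set on pull_request runs) or, on push
          #     events, the branch name (GITHUB_REF_NAME), so each branch gets its own test model repo on the Hub.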
          python -c "from huggingface_hub import delete_repo; delete_repo(token='$BLOOM_TESTING_WRITE_TOKEN', \
            name='test-bloomd-560m-$HF_TAG', organization='bloom-testing')" || true
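          # ^-- "|| true" keeps this step green when no previous test repo exists for this branch.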
      - name: Convert model and push to hub
        run: |
          export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
          python -m cli.convert_model --model bigscience/bloom-560m --output_path ./converted_model \
            --output_repo bloom-testing/test-bloomd-560m-$HF_TAG --use_auth_token $BLOOM_TESTING_WRITE_TOKEN \
            --resize_token_embeddings 50000
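          # ^-- resizing the token embeddings to 50000 presumably shrinks the test checkpoint so conversion and downloads stay fast in CI.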

  run-tests:
    runs-on: ubuntu-latest
    needs: convert-model
    strategy:
      matrix:
        python-version: [ 3.7, 3.8, 3.9 ]
      fail-fast: false
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Cache dependencies
        uses: actions/cache@v2
        with:
          path: ~/.cache/pip
          key: Key-v1-${{ matrix.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements-dev.txt') }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-dev.txt
      - name: Build bitsandbytes cpuonly
        run: |
          git clone https://github.com/TimDettmers/bitsandbytes.git
          cd bitsandbytes
          git checkout main
          make cpuonly
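          # ^-- CPU-only build, since GitHub-hosted runners presumably have no GPU available.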
          pip install .
          cd -
      - name: Test
        run: |
          export HF_TAG=$(python -c "import os; print(os.environ.get('GITHUB_HEAD_REF') or os.environ.get('GITHUB_REF_NAME'))")
          export MODEL_NAME=bloom-testing/test-bloomd-560m-$HF_TAG
          export REF_NAME=bigscience/bloom-560m
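          # ^-- MODEL_NAME is the test model pushed by the convert-model job; REF_NAME presumably points the tests at the
          #     original checkpoint to compare outputs against.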

          python -m cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:12 \
            --torch_dtype float32 --identity tests/test.id --host_maddrs /ip4/127.0.0.1/tcp/31337 \
            --throughput 1 --attn_cache_size 0.2GiB &> server1.log &
          SERVER1_PID=$!
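          # ^-- $! holds the PID of the server just launched in the background; the PIDs are checked and killed below.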

          sleep 5 # wait for the first server to initialize the DHT

          export INITIAL_PEERS=/ip4/127.0.0.1/tcp/31337/p2p/QmS9KwZptnVdB9FFV7uGgaTq4sEKBwcYeKZDfSpyKDUd1g
          # ^-- server 1 multiaddr is determined by --identity and --host_maddrs
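          #     Since tests/test.id is a fixed identity file, this peer ID is deterministic and can be hard-coded;
          #     the other servers join the same local DHT via --initial_peers.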

          python -m cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 12:22 \
            --torch_dtype float32 --initial_peers $INITIAL_PEERS --throughput 1 &> server2.log &
          SERVER2_PID=$!

          sleep 10 # wait for the initial servers to declare their blocks, so that later servers can decide which blocks to serve

          python -m cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 0:6 \
            --torch_dtype float32 --initial_peers $INITIAL_PEERS --throughput 1 &> server3.log &
          SERVER3_PID=$!

          python -m cli.run_server --converted_model_name_or_path $MODEL_NAME --block_indices 4:16 \
            --torch_dtype float32 --initial_peers $INITIAL_PEERS --throughput 1 &> server4.log &
          SERVER4_PID=$!

          python -m cli.run_server --converted_model_name_or_path $MODEL_NAME --num_blocks 3 \
            --torch_dtype float32 --initial_peers $INITIAL_PEERS --throughput 1 &> server5.log &
          SERVER5_PID=$!
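          # ^-- the five servers serve overlapping block ranges (0:12, 12:22, 0:6, 4:16, plus 3 auto-placed blocks),
          #     presumably to exercise both redundant serving of the same blocks and automatic block placement.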

          tail -n 100 -f server*.log &
          LOGGER_PID=$!
          sleep 30 # wait for servers to download layers

          kill -0 $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID # ensure all servers survived init
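          # ^-- kill -0 sends no signal; it exits non-zero (and fails the step) if any of these processes has already died.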

          PYTHONPATH=. pytest tests --durations=0 --durations-min=1.0 -v

          kill -0 $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID # ensure all servers survived tests

          kill -s SIGINT $SERVER1_PID $SERVER2_PID $SERVER3_PID $SERVER4_PID $SERVER5_PID $LOGGER_PID
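          # ^-- SIGINT lets the servers and the log tailer shut down cleanly before the job finishes.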
          echo "Done!"