mirror of https://github.com/sobolevn/git-secret
upgrade bats-core to v1.5.0, for #755
parent
626cfac075
commit
bc03f224a2
Binary file not shown.
@ -0,0 +1,12 @@
|
||||
diff --git a/CHANGELOG.md b/CHANGELOG.md
|
||||
index 6c83ea5..f8b676c 100644
|
||||
--- a/CHANGELOG.md
|
||||
+++ b/CHANGELOG.md
|
||||
@@ -11,6 +11,7 @@
|
||||
### Bugfixes
|
||||
|
||||
- Fix adding newlines to `.gitignore` entries (#643)
|
||||
+- Fix `cat` and `reveal` on named files while in repo subdir (#710)
|
||||
|
||||
### Misc
|
||||
|
@ -1,65 +0,0 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
release: { types: [published] }
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: 'Version to simulate for deploy'
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
version-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- run: |
|
||||
EXPECTED_VERSION=${{ github.event.inputs.version }}
|
||||
EXPECTED_VERSION=${EXPECTED_VERSION:-${GITHUB_REF/refs\/tags\//}}
|
||||
echo "EXPECTED_VERSION=$EXPECTED_VERSION" >> $GITHUB_ENV
|
||||
- name: Check tag version matches artifact versions
|
||||
run: |
|
||||
echo "Expected version: $EXPECTED_VERSION"
|
||||
# use double negation to see the result unless we get a match
|
||||
(./bin/bats --version | grep -F "$EXPECTED_VERSION") || (echo "Bats version check failed: "; ./bin/bats --version; exit -1)
|
||||
(npm view . version | grep -F "$EXPECTED_VERSION") || (echo "npm version check failed: "; npm view . version; exit -1)
|
||||
(grep '^Version:' 'contrib/rpm/bats.spec' | grep -F "$EXPECTED_VERSION") || (echo "debian package version check failed: "; grep '^Version:' 'contrib/rpm/bats.spec'; exit -1)
|
||||
|
||||
npmjs:
|
||||
runs-on: ubuntu-latest
|
||||
needs: version-check
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
registry-url: "https://registry.npmjs.org"
|
||||
- run: npm publish --ignore-scripts
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
|
||||
github-npm:
|
||||
runs-on: ubuntu-latest
|
||||
needs: version-check
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
registry-url: "https://npm.pkg.github.com"
|
||||
- name: scope package name as required by GHPR
|
||||
run: npm init -y --scope ${{ github.repository_owner }}
|
||||
- run: npm publish --ignore-scripts
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
dockerhub:
|
||||
runs-on: ubuntu-latest
|
||||
needs: version-check
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: docker/build-push-action@v1
|
||||
with:
|
||||
file: ./Dockerfile
|
||||
platforms: linux/amd64
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
tags: bats/bats:${GITHUB_REF/refs\/tags\//}
|
@ -1,91 +0,0 @@
|
||||
name: Tests
|
||||
|
||||
# Controls when the action will run.
|
||||
on: [push, pull_request, workflow_dispatch]
|
||||
|
||||
jobs:
|
||||
shellcheck:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Run shellcheck
|
||||
run: |
|
||||
sudo apt-get update -y
|
||||
sudo apt-get install shellcheck
|
||||
./shellcheck.sh
|
||||
|
||||
linux:
|
||||
strategy:
|
||||
matrix:
|
||||
os: ['ubuntu-20.04', 'ubuntu-18.04', 'ubuntu-16.04']
|
||||
env_vars:
|
||||
- ''
|
||||
# allow for some parallelity without GNU parallel, since it is not installed by default
|
||||
- 'BATS_NO_PARALLELIZE_ACROSS_FILES=1 BATS_NUMBER_OF_PARALLEL_JOBS=2'
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Run test on OS ${{ matrix.os }}
|
||||
shell: 'script -q -e -c "bash {0}"' # work around tty issues
|
||||
env:
|
||||
TERM: linux # fix tput for tty issue work around
|
||||
run: |
|
||||
bash --version
|
||||
bash -c "time ${{ matrix.env_vars }} bin/bats --formatter tap test"
|
||||
|
||||
windows:
|
||||
strategy:
|
||||
matrix:
|
||||
os: ['windows-2019']
|
||||
env_vars:
|
||||
- ''
|
||||
# allow for some parallelity without GNU parallel, since it is not installed by default
|
||||
- 'BATS_NO_PARALLELIZE_ACROSS_FILES=1 BATS_NUMBER_OF_PARALLEL_JOBS=2'
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Run test on OS ${{ matrix.os }}
|
||||
run: |
|
||||
bash --version
|
||||
bash -c "time ${{ matrix.env_vars }} bin/bats --formatter tap test"
|
||||
|
||||
macos:
|
||||
strategy:
|
||||
matrix:
|
||||
os: ['macos-10.15']
|
||||
env_vars:
|
||||
- ''
|
||||
# allow for some parallelity without GNU parallel, since it is not installed by default
|
||||
- 'BATS_NO_PARALLELIZE_ACROSS_FILES=1 BATS_NUMBER_OF_PARALLEL_JOBS=2'
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Install unbuffer via expect
|
||||
run: brew install expect
|
||||
- name: Run test on OS ${{ matrix.os }}
|
||||
shell: 'unbuffer bash {0}' # work around tty issues
|
||||
env:
|
||||
TERM: linux # fix tput for tty issue work around
|
||||
run: |
|
||||
bash --version
|
||||
bash -c "time ${{ matrix.env_vars }} bin/bats --formatter tap test"
|
||||
|
||||
bash-version:
|
||||
strategy:
|
||||
matrix:
|
||||
version: ['3.2', '4.0', '4.1', '4.2', '4.3', '4.4', '4', '5.0', '5.1', '5', 'latest']
|
||||
env_vars:
|
||||
- ''
|
||||
# also test running (recursively!) in parallel
|
||||
- '-e BATS_NUMBER_OF_PARALLEL_JOBS=2'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Run test on bash version ${{ matrix.version }}
|
||||
shell: 'script -q -e -c "bash {0}"' # work around tty issues
|
||||
run: |
|
||||
set -e
|
||||
docker build --build-arg bashver="${{ matrix.version }}" --tag "bats/bats:bash-${{ matrix.version }}" .
|
||||
docker run -it "bash:${{ matrix.version }}" --version
|
||||
time docker run -it ${{ matrix.env_vars }} "bats/bats:bash-${{ matrix.version }}" --tap /opt/bats/test
|
||||
|
@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
case ${1#linux/} in
|
||||
386)
|
||||
TINI_PLATFORM=i386
|
||||
;;
|
||||
arm/v7)
|
||||
TINI_PLATFORM=armhf
|
||||
;;
|
||||
arm/v6)
|
||||
TINI_PLATFORM=armel
|
||||
;;
|
||||
*)
|
||||
TINI_PLATFORM=${1#linux/}
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "Installing tini for $TINI_PLATFORM"
|
||||
|
||||
wget "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-static-${TINI_PLATFORM}" -O /tini
|
||||
wget "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-static-${TINI_PLATFORM}.asc" -O /tini.asc
|
||||
|
||||
chmod +x /tini
|
||||
|
||||
apk add gnupg
|
||||
gpg --import < /tmp/docker/tini.pubkey.gpg
|
||||
gpg --batch --verify /tini.asc /tini
|
||||
apk del gnupg
|
@ -0,0 +1,107 @@
|
||||
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
|
||||
mQINBFANDtsBEACpb69Ul0Ko7D4XxRIvPGnDMuGdocb8PxR+EGbnHe0uS2tCbsfj
|
||||
TOoWWUrjufrWYxGlKNqOxbEhzFA2wSQ6VD6xROPQT5dAdKaGnSCiaUg7XTzcb9u3
|
||||
a5Qbx99EDZWaYDNMnLZnIElDX+YmkkEyrrmjiML63m+1P88Bz7ag18hLkqpCiIVM
|
||||
TMRfQluBJVvndX7Stzm35utugN+xeTQryjLx74CO6TUWyC7hAjvQhR5IdAk4H0oT
|
||||
RsOKZ9OQmpO0CJ1XXpKkDdDc60WVrLp1jwq2M7fx/Nz+z13nTHa3fDw8j10+1k0+
|
||||
c2HafM+GLR5CHlXVMqveWJrimII1ZILxRj/86fFCEC8ZhVW1ym4j+mqEENrzP4I7
|
||||
L3OnyKLxNKIY9CFDhfzLhNAuNeuIp6KgynzuyxWnJO4q7m/B0zcRIBcjXPrpblIx
|
||||
QlT3qQ/vFdcylDDSdbgtjD+9URG6bFR9PVlRTllBDPGQEK8vjV44pxLCenm/TzdB
|
||||
Y4RlEePf+3y7wVrkjg+l4rIDH57Vl188RODuWVGeLZ3IYWqvRUnYxHmta27UH6zY
|
||||
7FNN5p7H2VqP6v9GFhiHOCTKdUbQhOoPLmUTyBas0WsC8sXdwpTy3mJthzfUwgVN
|
||||
2SIXPnndz7RcHwZtW1x9ZtVMDr6ll99kT63+sdZJHmUdlnDr+EGEd/L61QARAQAB
|
||||
tCBUaG9tYXMgT3JvemNvIDx0aG9tYXNAb3JvemNvLmZyPokCOAQTAQIAIgUCUA0O
|
||||
2wIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQmoQVnXABpOViYA//dDQt
|
||||
4f7NGbZNnQ0DgOGgBQjAabeaup6HX1UBVMBmU3OEKkUQoA62Aix8kOz19HJcuOl0
|
||||
Y3koZ7pOrf/39s+tlZKvVuX7F6RFiJpx1+8f+f4IfQEPJRiurwkEp2zwTQfEOlo4
|
||||
Jzv57pLUtmyJVnxMDi2vFeUue+j9BePp20Z1ZEaMBJDjmMZao3iVCVvSHZnS28vk
|
||||
D5+y8VUbyhH4dyaDtu8sb06vCu2uD7ZsMfDTtdkDWRqOLs8FdamZLf8P8AD6AX2c
|
||||
PhhASMWF7Ty2kRvhzHriuplzDUBqod38iLxoC2l5xoDjNMZsY0q9dOXBS5RsKws2
|
||||
qxrJLa/F672lpxlMA9Xnm0LMCNMcnXreldUhl18zVNofIDGex5yP20djOxl7WsQ6
|
||||
0VBAzbbiGLaKyfko5CuSmulzTJNOUNxHTiXjSi6Qz5+5lh0wYnHFulhiYE16EX25
|
||||
/rhvDzN5BgsT3Kdyk/XT/klIB5k4eLXK8PgMsRzp+FDVUL7HKM1HpHMY65gdSR8y
|
||||
ZuXzKXVC43MFPYJMrjdxoEJXJjnzhf+KO6jkyrna0OQoM1YTH6/5X9XoXzQz8TDe
|
||||
7pJt4S27PZCv1NhuqNv7nPhUANr7nFMRzrbx2DNHyxUrxPvuyNmYx/KkoAzw3yaT
|
||||
frbFXU6ccwsXWW6zdcyLEYI2ZqGDfLS7zkjmRsa5AQ0EUzNVzQEIAKxWZf55R676
|
||||
M3IURgx8Ovt8+F3M4Tj+ifePcpY5JHSj9sGkJRugW9Nye5WehuJjFYOEhKrljCTP
|
||||
/mjnE7iQqVTyYC1Ar+cTtNQpadWvQR2MW/UzimdZjBtZxdtGFCWR203jtnG+LGRs
|
||||
R7HyR6A8OBKLl0heTSHx92f+dJCGESZJeVgY49xNOlG4ILl8NseYhaD36QQZReLn
|
||||
Cazy2aOep2H/jz7vnzye38QMkdHcjaw/WfQHWDKzM5Wlf+5TLF/2VTPlKcmtn0QF
|
||||
Rw/vw6kjwsv9eCQ3ThIn/FV/ycdUkhOfz9Su2aIbwYC4C6Xi82RxJKbnqUGOExeC
|
||||
PG2luuWnyQcAEQEAAYkDRAQYAQIADwUCUzNVzQIbAgUJA8JnAAEpCRCahBWdcAGk
|
||||
5cBdIAQZAQIABgUCUzNVzQAKCRAqiuDav/LlpshzCACnQxndwXLZYWwiXOiYvmUI
|
||||
U9mPKynUW2vfTwaIpFBIYnqLAjVsPspwfx7zYCoMtQ+UK9d+Jhyts8vCaPwJHPqC
|
||||
CrBbP/y9K2zRdcDA1DYW/cuubwcstypmO6Eh6iC6xRxa5IHZogK6U+Trqf/EGfI/
|
||||
Y/sc3KBVYSdLkngASdVr5TQltVH9LsxDUo7Ba382Ci0g/VmKkUht/aFRkEfvMrrj
|
||||
ONEGbyCwk2hZuwK1aE6Yweq3tbzrByOF4wqIIe6e1pwPz7lchoGYm7XJ7uckpbGi
|
||||
lZojvu9ruxbGl8tirY/Bj0UZYjASDNCcXXRMvri2FZhDyWMMoeRLj7Mw8xpeWjr7
|
||||
NhoP/2AA16HHwRjfPQo5fDXe7eGXIebwWhbr1nAYQtL2PHFeHG/imEVozcftENNt
|
||||
tVwq/JcfjyiIcKA0niGZmroQCO0HRzRs7T85ITWfWcn/Hqaok6gmLh6QJ6daC7E9
|
||||
Mse07zYM52S2/cS4osfzZj4JEC2n82VuqK9fqzR58GCkPn9rshwljEkMgEfYfTvA
|
||||
k/tm8jACpSdH3bCpSP2rL6FBtm+RuL8zbAucg8BCaVPoOhtp3326MWCnxH7QdiHl
|
||||
1dT4Dgmq4UluXOvyMZY7HI1cq4RXW5eXG5mG0ATQh1JIhIJUaEPzdaEtP5LOVqKE
|
||||
4220MVCWWak/27O9mAFm+lBA5d8O8MWW2ERqpwyYMWg/DVvQP/pB/D6wO4Dp7CMD
|
||||
MLl1LszHab9oRo8vW2cWNVS6mQ/56jgS10nRe8o/VQROfd1De6sk5k9ypk2y2iC7
|
||||
pp7sime3MsnzjR0fKcxjCDAX1Sopi9QzkypM66WHO46Uamn0GeL7OhcwjcoURzJc
|
||||
y5ue1jeeKmmR7ATexlgE87rA4nuMZak1uVz0s6Fsb1ekYIKFEFqwEpqNnyKgBrvt
|
||||
GA8yn9Zp4wF+6luaKMqwptx0q1PmMZacN9E62XdOyGBaVXPq599F3R6049zo04oW
|
||||
WMW7s7V10Vn/pqRkB4bhhUzatiUHWVuamhpR8SligZ4kWPHZuQINBFANDtsBEADJ
|
||||
MvBhyWEBoLgi9nO9hgbbLxiLjKnotKzRpu5m79rhpmtqEN2k0APmoRdUUrE2Y8sd
|
||||
Z551jT0TE2O1j70qLV9c5puK6qyV3BZb9OgQ8wqyCFFjmIdPQAEgnD3K92SOh8Mu
|
||||
lqJW++EsxSqDBt2iVbLyzQuklbIJOg4nVK8ZgswIyewiHVeh3xgcGxJtVb3QjKSz
|
||||
xhj0GYM6X7mo5rjzxEFVyiJXd8ZH79y4FHUr+tJQEzJbvWAGJClPx9czh9Drw2r+
|
||||
Tgq+2v/EkAoyXdv3z7uChh/9s33oQtssJ5TVfAbC06QsPp55RJCkGbTrBQgRB8Xa
|
||||
idPJhG9sIbhVgrRejn5kmK5L/8ACngbSU4zO1NTaquLZGZMzI7Jbx/QeG2DbVqjV
|
||||
6BLA3X+WieM9IUa1vi/CXiKedzlOMv7+CX9N76y4Wls4zsTwXTIzmPCDoysG+75D
|
||||
AoFZW1XJlMeEOGPhBDlR6X47qdLGzOI14NJZELni14+Iih6VvxhvUWmJfTnT+zE+
|
||||
PWb03C0EHsF8uTYqBPag9LYw2cDqvP31bUSiV+Z5YR+4sDcfiVT40rXECd1752Ia
|
||||
yANeL7uG/gBvl+6LHzb3VhI5ILkknVummVY0ry8YpAA0r1GfBE/mFLE3jwTqYkMx
|
||||
nr/SDa3HIql7qBh9k9AhTMId8tXX6p3lO6jd6TeewQARAQABiQIfBBgBAgAJBQJQ
|
||||
DQ7bAhsMAAoJEJqEFZ1wAaTlTeIP/R6/0EAqKyISxnx/6+VCy2j5mj4v8b+KauTT
|
||||
deXJhP37i2EMsosGqUD1LMo8Wv9Az7XOSA/2lW4v8UHQolGwssLFm9L4DDTZBH8M
|
||||
gOFzB++wHNxgIoD2u9vC2eehhMfVlCcH+YWtzTSs94+5gI9xcy8rejkO+AHhXYDR
|
||||
0nr5MM076CWEjWtORgZdHbXWilmr/SdKnIdvkXDXvNcd7tC6izIfnDxN7/4beWOF
|
||||
nv/1s0JLzIFIutFtqqYDC8Y/4JxoMcALhmop+FyYk+RUHT5uGeDauJunjwdBS3VY
|
||||
5NrMcw4LxapV0OQExxu8RAMcYXx6FZmoBMlFI+J6R5ZMS6y1TKLKIpq46CsOSz2V
|
||||
X1SAactdOpXuNOXLe2cv0mYswWGAURebcmcmN49n9JEn1IN8hhawFgFuYdUWjpYx
|
||||
K2K6NZ0vFgRHkRnei9xrf2mW7ob1vKwzeBYGvZj/xIEu/Fv3kizS6t1IeMJKUKlp
|
||||
semAObW+sEO0jLOelL+ZfUO+fImL+0fFxQyzkfNKk5dpxztPlNmv3DY6KTddc1L1
|
||||
uOGCznAmsg8Jp0v0OmCB2Xl15WPwwnYi3CkLGbEcK9stFmu2pZuEeR2DuVGlz3Nr
|
||||
Bu1W/34cXeyudUTxveehuvfkjYBMXVfEM35BHEUqgCtA49b5ZM8SpYgqU3omVsNz
|
||||
7RjMAIe3uQINBFaoGQEBEACypQbnC5fMhpCft6augXnnVzmEh0Se2wBxUum3DMFl
|
||||
U48DJYNlEsKQYsgzEvaayTI0gA1ZyeDg3E4Fnk6ysQmzW9BJ/3Q2pa0GKIkvXOgL
|
||||
nwvSXSnTTqK3zCDuJI0Nj4u9gI8bX7d4PHqQyyFzPWjiIbg9tWHbhT8wwCaay1iG
|
||||
qCZsTa8Iwyve0WaV+7YtRJQeXEfY9Z6oEGqjis7QJNef1MKy1gS1Kq+4sqvdwgo3
|
||||
f3AFSNR14gagYv2myA30ehf4EzzIi8dh9DATc4T86CBuK3TszILLSUwFnrUVT00j
|
||||
3sr2WiqWrxIq/paky8zNEV3Q0vaf2kheLSZRSZsoVH1AfCsVHj4OoWH3BOAzdr5w
|
||||
sTPsV2HVR0O9K98NoA0f45eqfMFBiGVsFuJamBJatPsTVVXZ6fR9aDDKySSB540W
|
||||
egHoABwai+s4FhUtjjZarLPJo47/gFmxF7HHUyNIYYKj5j99h00zcM7XOtKeyHzH
|
||||
vfZvmu150u2DccY9XX5xdzDsWuBeQJQ7VdyVVTPoZGIBZ1Mw3EIfFyAzYvglTCKo
|
||||
MMHH6912PnsMLcbcmF/7pJ4rrehp8bkfS7R3BBNMglLWtoZIYBSWoK8Nl0OyBzQ5
|
||||
lsPvRdUHuMe6rxwA11gRnx4yLd91bWRw/xIBwxz6Jq/lk0ySgdum/hIcOmrNdg73
|
||||
swARAQABiQQ+BBgBAgAJBQJWqBkBAhsCAikJEJqEFZ1wAaTlwV0gBBkBAgAGBQJW
|
||||
qBkBAAoJEAtYjf8FJ6m3uPQP/jonQ5QrOEN+B7ddYmotAwj6wL9whs7rcgb+TJfI
|
||||
rJskyBQG+kHymWEBiODo/AcqkKglGzTNcaDG153l38/IQ/Lo9mxZmkNHtLD6Srkb
|
||||
GjNrBIksRSR+hWbr8UjA/WuqAmDQvFWebmyF4p6deEAf3Rsv2Ml/a0lvC6TyWRds
|
||||
dMyWlDPtlKYn9q2qHjCVX4e2m4uB38Vq3fo8+Ypags5KRC9KyGgZFEGy3F37p+4U
|
||||
/rDT+t/6oKuo5/8RWAixeWNWIli7DQ9DbA4w9qdwL4KGwcnOCcCClT9KqKA3MOds
|
||||
r+dcAF+LkNrG3yvnp+eZ7LHF6PVJAkXnSLOzuEZzXp90pA+1Sw7QWGWpyV/v6CSP
|
||||
sxZInLPjBPJmdVhjMrma2s9SGKs2k4eivDeK4Gr6oF3GRK3VQzsRPDYk9vr8wSbA
|
||||
BR4Qh6Fy/WiME1SyEdcs2UWc+eGZ0ya7QEkQqz7Cvjp6VezUtXWFgy28pVkgEORE
|
||||
sqogypjbSnybeis6Rzvxf0g681fWjWSTcMBGXKTvgW7XnLqG9XMWdx0n6pa6s5yL
|
||||
nkoM5zYcjL+V6wDVQwnF9uoallcO11PEjsEMeK3gz0Y81EmLuHN6411IK1cWfxAI
|
||||
HunwOoPe7nzGOH2EX1+IkCv4PPJqMloEcIIPlwP0ILxTaPuy17wed0pH3dvRZglv
|
||||
J7qQU/AP/3Nhh5AjUtMRI+mDgJpXpYuMy7tNdgKAvgGIL1bP+1LnLtP2+7uy1Pok
|
||||
3938/eAJwAT9sOhpG3qRPs15zj4tc20/Jsypbk4032EhRFWGL5TBtYf9LVl9doFY
|
||||
hGHUgAzREWcw8l7h0/p5VNPm9K0kElCw2fN3z7xwreSPa1WGVsqMdM3nc/B44ocZ
|
||||
HFqrd9kg6N4al6IxkbL7DYAUjfhjkhOQXZXU+MLPVA9lflF4694lbrr7F4GwnzfF
|
||||
8/SOw9LHVh9K3agno6jHB64P/Vqtka4a5+9AKcEkojWEHDoLU8BMUXDZZu9OkFSJ
|
||||
tzqByAzti0dsLE+8GGfas47b3/rjf6qyKvh2+iPw+I6zHH4k7LKpIiK+MkQ2+ZfM
|
||||
zB3jQ9NPNAc63AL/YbnF5XDSFxGBRkcFZo50rZJV5WHA3p+UMISsjoNizE7agLqI
|
||||
2JPnKBzB3aQFFpI3QiivWhKfsO02Vtzl+1TjqReWu4qS1uwJbSsLectAy+4K2WFn
|
||||
RNZ1m/r9/enzEhyKo8zOmUBUzVLhRR/GG7S1MmxB8FSOZh7aHg1xbTYVvq62AW6a
|
||||
0PKvEpn1g0wvPEoKQ+41/dc/ieWgnkZiKJJFxDshKGmxfDznWnZqnOESLz/3hbGy
|
||||
U41nk0UPzHlkUZwsLo9UBdr9fUVYvlYUylBp2dV0jEc5qL62gypA
|
||||
=4tCW
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
@ -0,0 +1,169 @@
|
||||
FAQ
|
||||
===
|
||||
|
||||
How do I set the working directory?
|
||||
-----------------------------------
|
||||
|
||||
The working directory is simply the directory where you started when executing bats.
|
||||
If you want to enforce a specific directory, you can use `cd` in the `setup_file`/`setup` functions.
|
||||
However, be aware that code outside any function will run before any of these setup functions and my interfere with bats' internals.
|
||||
|
||||
|
||||
How do I see the output of the command under `run` when a test fails?
|
||||
---------------------------------------------------------------------
|
||||
|
||||
`run` captures stdout and stderr of its command and stores it in the `$output` and `${lines[@]}` variables.
|
||||
If you want to see this output, you need to print it yourself, or use functions like `assert_output` that will reproduce it on failure.
|
||||
|
||||
Can I use `--filter` to exclude files/tests?
|
||||
--------------------------------------------
|
||||
|
||||
No, not directly. `--filter` uses a regex to match against test names. So you could try to invert the regex.
|
||||
The filename won't be part of the strings that are tested, so you cannot filter against files.
|
||||
|
||||
How can I exclude a single test from a test run?
|
||||
------------------------------------------------
|
||||
|
||||
If you want to exclude only few tests from a run, you can either `skip` them:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@test "Testname" {
|
||||
# yadayada
|
||||
}
|
||||
|
||||
becomes
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@test "Testname" {
|
||||
skip 'Optional skip message'
|
||||
# yadayada
|
||||
}
|
||||
|
||||
or comment them out, e.g.:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@test "Testname" {
|
||||
|
||||
becomes
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
disabled() { # @test "Testname" {
|
||||
|
||||
For multiple tests or all tests of a file, this becomes tedious, so read on.
|
||||
|
||||
How can I exclude all tests of a file from a test run?
|
||||
--------------------------------------------------------
|
||||
|
||||
If you run your test suite by naming individual files like:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ bats test/a.bats test/b.bats ...
|
||||
|
||||
you can simply omit your file. When running a folder like
|
||||
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ bats test/
|
||||
|
||||
you can prevent test files from being picked up by changing their extension to something other than `.bats`.
|
||||
|
||||
It is also possible to `skip` in `setup_file`/`setup` which will skip all tests in the file.
|
||||
|
||||
How can I include my own `.sh` files for testing?
|
||||
-------------------------------------------------
|
||||
|
||||
You can simply `source <your>.sh` files. However, be aware that `source`ing files with errors outside of any function (or inside `setup_file`) will trip up bats
|
||||
and lead to hard to diagnose errors.
|
||||
Therefore, it is safest to only `source` inside `setup` or the test functions themselves.
|
||||
|
||||
How can I debug a failing test?
|
||||
-------------------------------
|
||||
|
||||
Short of using a bash debugger you should make sure to use appropriate asserts for your task instead of raw bash comparisons, e.g.:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@test test {
|
||||
run echo test failed
|
||||
assert_output "test"
|
||||
# instead of
|
||||
[ "$output" = "test" ]
|
||||
}
|
||||
|
||||
Because the former will print the output when the test fails while the latter won't.
|
||||
Similarly, you should use `assert_success`/`assert_failure` instead of `[ "$status" -eq 0 ]` for return code checks.
|
||||
|
||||
Is there a mechanism to add file/test specific functionality to a common setup function?
|
||||
----------------------------------------------------------------------------------------
|
||||
|
||||
Often the setup consists of parts that are common between different files of a test suite and parts that are specific to each file.
|
||||
There is no suite wide setup functionality yet, so you should extract these common setup steps into their own file (e.g. `common-test-setup.sh`) and function (e.g. `commonSetup() {}`),
|
||||
which can be `source`d or `load`ed and call it in `setup_file` or `setup`.
|
||||
|
||||
How can I use helper libraries like bats-assert?
|
||||
------------------------------------------------
|
||||
|
||||
This is a short reproduction of https://github.com/ztombol/bats-docs.
|
||||
|
||||
At first, you should make sure the library is installed. This is usually done in the `test_helper/` folders alongside the `.bats` files, giving you a filesystem layout like this:
|
||||
|
||||
.. code-block::
|
||||
|
||||
test/
|
||||
test.bats
|
||||
test_helper/
|
||||
bats-support/
|
||||
bats-assert/
|
||||
|
||||
Next, you should load those helper libraries:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
setup() {
|
||||
load 'test_helper/bats-support/load' # this is required by bats-assert!
|
||||
load 'test_helper/bats-assert/load'
|
||||
}
|
||||
|
||||
Now, you should be able to use the functions from these helpers inside your tests, e.g.:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@test "test" {
|
||||
run echo test
|
||||
assert_output "test"
|
||||
}
|
||||
|
||||
Note that you obviously need to load the library before using it.
|
||||
If you need the library inside `setup_file` or `teardown_file` you need to load it in `setup_file`.
|
||||
|
||||
How to set a test timeout in bats?
|
||||
----------------------------------
|
||||
|
||||
Unfortunately, this is not possible yet. Please contribute to issue `#396 <https://github.com/bats-core/bats-core/issues/396>`_ for further progress.
|
||||
|
||||
How can I lint/shell-format my bats tests?
|
||||
------------------------------------------
|
||||
|
||||
Due to their custom syntax (`@test`), `.bats` files are not standard bash. This prevents most tools from working with bats.
|
||||
However, there is an alternative syntax `function_name { # @test` to declare tests in a bash compliant manner.
|
||||
|
||||
- shellcheck support since version 0.7
|
||||
- shfmt support since version 3.2.0 (using `-ln bats`)
|
||||
|
||||
|
||||
How can I check if a test failed/succeeded during teardown?
|
||||
-----------------------------------------------------------
|
||||
|
||||
You can check `BATS_TEST_COMPLETED` which will be set to 1 if the test was successful or empty if it was not.
|
||||
There is also `BATS_TEST_SKIPPED` which will be non-empty (contains the skip message or -1) when `skip` was called.
|
||||
|
||||
How can I setup/cleanup before/after all tests?
|
||||
-----------------------------------------------
|
||||
|
||||
Currently, this is not supported. Please contribute your usecase to issue `#39 <https://github.com/bats-core/bats-core/issues/39>`_.
|
@ -0,0 +1,120 @@
|
||||
Gotchas
|
||||
=======
|
||||
|
||||
My test fails although I return true?
|
||||
-------------------------------------
|
||||
|
||||
Using `return 1` to signify `true` for a success as is done often in other languages does not mesh well with Bash's
|
||||
convention of using return code 0 to signify success and everything non-zero to indicate a failure.
|
||||
|
||||
Please adhere to this idiom while using bats, or you will constantly work against your environment.
|
||||
|
||||
My negated statement (e.g. ! true) does not fail the test, even when it should.
|
||||
-------------------------------------------------------------------------------
|
||||
|
||||
Bash deliberately excludes negated return values from causing a pipeline to exit (see bash's `-e` option). You'll need to use the form `! x || false` or (recommended) use `run` and check for `[ $status != 0 ]`.
|
||||
|
||||
If the negated command is the final statement in a test, that final statement's (negated) exit status will propagate through to the test's return code as usual.
|
||||
Negated statements of the form `! x || false` will explicitly fail the test when the pipeline returns true, regardless of where they occur in the test.
|
||||
|
||||
I cannot register a test multiple times via for loop.
|
||||
-----------------------------------------------------
|
||||
|
||||
The usual bats tests (`@test`) are preprocessed into functions.
|
||||
Wrapping them into a for loop only redeclares this function.
|
||||
|
||||
If you are interested in registering multiple calls to the same function, contribute your wishes to issue `#306 <https://github.com/bats-core/bats-core/issues/306>`_.
|
||||
|
||||
I cannot pass parameters to test or .bats files.
|
||||
------------------------------------------------
|
||||
|
||||
Especially while using bats via shebang:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
#!/usr/bin/env bats
|
||||
|
||||
@test "test" {
|
||||
# ...
|
||||
}
|
||||
|
||||
You could be tempted to pass parameters to the test invocation like `./test.bats param1 param2`.
|
||||
However, bats does not support passing parameters to files or tests.
|
||||
If you need such a feature, please let us know about your usecase.
|
||||
|
||||
As a workaround you can use environment variables to pass parameters.
|
||||
|
||||
Why can't my function return results via a variable when using `run`?
|
||||
---------------------------------------------------------------------
|
||||
|
||||
The `run` function executes its command in a subshell which means the changes to variables won't be available in the calling shell.
|
||||
|
||||
If you want to test these functions, you should call them without `run`.
|
||||
|
||||
`run` doesn't fail, although the same command without `run` does.
|
||||
-----------------------------------------------------------------
|
||||
|
||||
`run` is a wrapper that always succeeds. The wrapped command's exit code is stored in `$status` and the stdout/stderr in `$output`.
|
||||
If you want to fail the test, you should explicitly check `$status` or omit `run`. See also `when not to use run <writing-tests.html#when-not-to-use-run>`_.
|
||||
|
||||
`load` won't load my `.sh` files.
|
||||
---------------------------------
|
||||
|
||||
`load` is intended as an internal helper function that always loads `.bash` files (by appending this suffix).
|
||||
If you want to load an `.sh` file, you can simple `source` it.
|
||||
|
||||
I can't lint/shell-format my bats tests.
|
||||
----------------------------------------
|
||||
|
||||
Bats uses a custom syntax for annotating tests (`@test`) that is not bash compliant.
|
||||
Therefore, standard bash tooling won't be able to interact directly with `.bats` files.
|
||||
Shellcheck supports bats' native syntax as of version 0.7.
|
||||
|
||||
Additionally, there is bash compatible syntax for tests:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
function bash_compliant_function_name_as_test_name { # @test
|
||||
# your code
|
||||
}
|
||||
|
||||
|
||||
The output (stdout/err) from commands under `run` is not visible in failed tests.
|
||||
---------------------------------------------------------------------------------
|
||||
|
||||
By default, `run` only stores stdout/stderr in `$output` (and `${lines[@]}`).
|
||||
If you want to see this output, you either should use bat-assert's assertions or have to print `$output` before the check that fails.
|
||||
|
||||
My piped command does not work under run.
|
||||
-----------------------------------------
|
||||
|
||||
Be careful with using pipes and with `run`. While your mind model of `run` might wrap the whole command behind it, bash's parser won't
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
run echo foo | grep bar
|
||||
|
||||
Won't `run (echo foo | grep bar)` but will `(run echo foo) | grep bar`. If you need to incorporate pipes, you either should do
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
run bash -c 'echo foo | grep bar'
|
||||
|
||||
or use a function to wrap the pipe in:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
fun_with_pipes() {
|
||||
echo foo | grep bar
|
||||
}
|
||||
|
||||
run fun_with_pipes
|
||||
|
||||
`[[ ]]` (or `(( ))` did not fail my test
|
||||
----------------------------------------
|
||||
|
||||
The `set -e` handling of `[[ ]]` and `(( ))` changed in Bash 4.1. Older versions, like 3.2 on MacOS,
|
||||
don't abort the test when they fail, unless they are the last command before the (test) function returns,
|
||||
making their exit code the return code.
|
||||
|
||||
`[ ]` does not suffer from this, but is no replacement for all `[[ ]]` usecases. Appending ` || false` will work in all cases.
|
@ -0,0 +1,661 @@
|
||||
Tutorial
|
||||
========
|
||||
|
||||
This tutorial is intended for beginners with bats and possibly bash.
|
||||
Make sure to also read the list of gotchas and the faq.
|
||||
|
||||
For this tutorial we are assuming you already have a project in a git repository and want to add tests.
|
||||
Ultimately they should run in the CI environment but will also be started locally during development.
|
||||
|
||||
..
|
||||
TODO: link to example repository?
|
||||
|
||||
Quick installation
|
||||
------------------
|
||||
|
||||
Since we already have an existing git repository, it is very easy to include bats and its libraries as submodules.
|
||||
We are aiming for following filesystem structure:
|
||||
|
||||
.. code-block::
|
||||
|
||||
src/
|
||||
project.sh
|
||||
...
|
||||
test/
|
||||
bats/ <- submodule
|
||||
test_helper/
|
||||
bats-support/ <- submodule
|
||||
bats-assert/ <- submodule
|
||||
test.bats
|
||||
...
|
||||
|
||||
So we start from the project root:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
git submodule add https://github.com/bats-core/bats-core.git test/bats
|
||||
git submodule add https://github.com/bats-core/bats-support.git test/test_helper/bats-support
|
||||
git submodule add https://github.com/bats-core/bats-assert.git test/test_helper/bats-assert
|
||||
|
||||
Your first test
|
||||
---------------
|
||||
|
||||
Now we want to add our first test.
|
||||
|
||||
In the tutorial repository, we want to build up our project in a TDD fashion.
|
||||
Thus, we start with an empty project and our first test is to just run our (non existing) shell script.
|
||||
|
||||
We start by creating a new test file `test/test.bats`
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@test "can run our script" {
|
||||
./project.sh
|
||||
}
|
||||
|
||||
and run it by
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/test.bats
|
||||
✗ can run our script
|
||||
(in test file test/test.bats, line 2)
|
||||
`./project.sh' failed with status 127
|
||||
/tmp/bats-run-19605/bats.19627.src: line 2: ./project.sh: No such file or directory
|
||||
|
||||
1 test, 1 failure
|
||||
|
||||
Okay, our test is red. Obviously, the project.sh doesn't exist, so we create the file `src/project.sh`:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
mkdir src/
|
||||
echo '#!/usr/bin/env bash' > src/project.sh
|
||||
chmod a+x src/project.sh
|
||||
|
||||
A new test run gives us
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/test.bats
|
||||
✗ can run our script
|
||||
(in test file test/test.bats, line 2)
|
||||
`./project.sh' failed with status 127
|
||||
/tmp/bats-run-19605/bats.19627.src: line 2: ./project.sh: No such file or directory
|
||||
|
||||
1 test, 1 failure
|
||||
|
||||
Oh, we still used the wrong path. No problem, we just need to use the correct path to `project.sh`.
|
||||
Since we're still in the same directory as when we started `bats`, we can simply do:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@test "can run our script" {
|
||||
./src/project.sh
|
||||
}
|
||||
|
||||
and get:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/test.bats
|
||||
✓ can run our script
|
||||
|
||||
1 test, 0 failures
|
||||
|
||||
Yesss! But that victory feels shallow: What if somebody less competent than us starts bats from another directory?
|
||||
|
||||
Let's do some setup
|
||||
-------------------
|
||||
|
||||
The obvious solution to becoming independent of `$PWD` is using some fixed anchor point in the filesystem.
|
||||
We can use the path to the test file itself as an anchor and rely on the internal project structure.
|
||||
Since we are lazy people and want to treat our project's files as first class citizens in the executable world, we will also put them on the `$PATH`.
|
||||
Our new `test/test.bats` now looks like this:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
setup() {
|
||||
# get the containing directory of this file
|
||||
# use $BATS_TEST_FILENAME instead of ${BASH_SOURCE[0]} or $0,
|
||||
# as those will point to the bats executable's location or the preprocessed file respectively
|
||||
DIR="$( cd "$( dirname "$BATS_TEST_FILENAME" )" >/dev/null 2>&1 && pwd )"
|
||||
# make executables in src/ visible to PATH
|
||||
PATH="$DIR/../src:$PATH"
|
||||
}
|
||||
|
||||
@test "can run our script" {
|
||||
# notice the missing ./
|
||||
# As we added src/ to $PATH, we can omit the relative path to `src/project.sh`.
|
||||
project.sh
|
||||
}
|
||||
|
||||
still giving us:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/test.bats
|
||||
✓ can run our script
|
||||
|
||||
1 test, 0 failures
|
||||
|
||||
It still works as expected. This is because the newly added `setup` function put the absolute path to `src/` onto `$PATH`.
|
||||
This setup function is automatically called before each test.
|
||||
Therefore, our test could execute `project.sh` directly, without using a (relative) path.
|
||||
|
||||
.. important::
|
||||
|
||||
The `setup` function will be called before each individual test in the file.
|
||||
Each file can only define one setup function for all tests in the file.
|
||||
However, the setup functions can differ between different files.
|
||||
|
||||
Dealing with output
|
||||
-------------------
|
||||
|
||||
Okay, we have a green test but our executable does not do anything useful.
|
||||
To keep things simple, let us start with an error message. Our new `src/project.sh` now reads:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
#!/usr/bin/env bash
|
||||
|
||||
echo "Welcome to our project!"
|
||||
|
||||
echo "NOT IMPLEMENTED!" >&2
|
||||
exit 1
|
||||
|
||||
And gives us this test output:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/test.bats
|
||||
✗ can run our script
|
||||
(in test file test/test.bats, line 11)
|
||||
`project.sh' failed
|
||||
Welcome to our project!
|
||||
NOT IMPLEMENTED!
|
||||
|
||||
1 test, 1 failure
|
||||
|
||||
Okay, our test failed, because we now exit with 1 instead of 0.
|
||||
Additionally, we see the stdout and stderr of the failing program.
|
||||
|
||||
Our goal now is to retarget our test and check that we get the welcome message.
|
||||
bats-assert gives us some help with this, so we should now load it (and its dependency bats-support),
|
||||
so we change `test/test.bats` to
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
setup() {
|
||||
load 'test_helper/bats-support/load'
|
||||
load 'test_helper/bats-assert/load'
|
||||
# ... the remaining setup is unchanged
|
||||
|
||||
# get the containing directory of this file
|
||||
# use $BATS_TEST_FILENAME instead of ${BASH_SOURCE[0]} or $0,
|
||||
# as those will point to the bats executable's location or the preprocessed file respectively
|
||||
DIR="$( cd "$( dirname "$BATS_TEST_FILENAME" )" >/dev/null 2>&1 && pwd )"
|
||||
# make executables in src/ visible to PATH
|
||||
PATH="$DIR/../src:$PATH"
|
||||
}
|
||||
|
||||
@test "can run our script" {
|
||||
run project.sh # notice `run`!
|
||||
assert_output 'Welcome to our project!'
|
||||
}
|
||||
|
||||
which gives us the following test output:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ LANG=C ./test/bats/bin/bats test/test.bats
|
||||
✗ can run our script
|
||||
(from function `assert_output' in file test/test_helper/bats-assert/src/assert_output.bash, line 194,
|
||||
in test file test/test.bats, line 14)
|
||||
`assert_output 'Welcome to our project!'' failed
|
||||
|
||||
-- output differs --
|
||||
expected (1 lines):
|
||||
Welcome to our project!
|
||||
actual (2 lines):
|
||||
Welcome to our project!
|
||||
NOT IMPLEMENTED!
|
||||
--
|
||||
|
||||
|
||||
1 test, 1 failure
|
||||
|
||||
The first change in this output is the failure description. We now fail on assert_output instead of the call itself.
|
||||
We prefixed our call to `project.sh` with `run`, which is a function provided by bats that executes the command it gets passed as parameters.
|
||||
Then, `run` sucks up the stdout and stderr of the command it ran and stores it in `$output`, stores the exit code in `$status` and returns 0.
|
||||
This means `run` never fails the test and won't generate any context/output in the log of a failed test on its own.
|
||||
|
||||
Marking the test as failed and printing context information is up to the consumers of `$status` and `$output`.
|
||||
`assert_output` is such a consumer, it compares `$output` to the parameter it got and tells us quite succinctly that it did not match in this case.
|
||||
|
||||
For our current test we don't care about any other output or the error message, so we want it gone.
|
||||
`grep` is always at our fingertips, so we tape together this ramshackle construct
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
run project.sh 2>&1 | grep Welcome
|
||||
|
||||
which gives us the following test result:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/test.bats
|
||||
✗ can run our script
|
||||
(in test file test/test.bats, line 13)
|
||||
`run project.sh | grep Welcome' failed
|
||||
|
||||
1 test, 1 failure
|
||||
|
||||
Huh, what is going on? Why does it fail the `run` line again?
|
||||
|
||||
This is a common mistake that can happen when our mind parses the file differently than the bash parser.
|
||||
`run` is just a function, so the pipe won't actually be forwarded into the function. Bash reads this as `(run project.sh) | grep Welcome`,
|
||||
instead of our intended `run (project.sh | grep Welcome)`.
|
||||
|
||||
Unfortunately, the latter is no valid bash syntax, so we have to work around it, e.g. by using a function:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
get_projectsh_welcome_message() {
|
||||
project.sh 2>&1 | grep Welcome
|
||||
}
|
||||
|
||||
@test "Check welcome message" {
|
||||
run get_projectsh_welcome_message
|
||||
assert_output 'Welcome to our project!'
|
||||
}
|
||||
|
||||
Now our test passes again but having to write a function each time we want only a partial match does not accommodate our laziness.
|
||||
Isn't there an app for that? Maybe we should look at the documentation?
|
||||
|
||||
Partial matching can be enabled with the --partial option (-p for short). When used, the assertion fails if the expected substring is not found in $output.
|
||||
|
||||
-- the documentation for `assert_output <https://github.com/bats-core/bats-assert#partial-matching>`_
|
||||
|
||||
Okay, so maybe we should try that:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@test "Check welcome message" {
|
||||
run project.sh
|
||||
assert_output --partial 'Welcome to our project!'
|
||||
}
|
||||
|
||||
Aaannnd ... the test stays green. Yay!
|
||||
|
||||
There are many other asserts and options but this is not the place for all of them.
|
||||
Skimming the documentation of `bats-assert <https://github.com/bats-core/bats-assert>`_ will give you a good idea what you can do.
|
||||
You should also have a look at the other helper libraries `here <https://github.com/bats-core>`_ like `bats-file <https://github.com/bats-core/bats-file>`_,
|
||||
to avoid reinventing the wheel.
|
||||
|
||||
|
||||
Cleaning up your mess
|
||||
---------------------
|
||||
|
||||
Often our setup or tests leave behind some artifacts that clutter our test environment.
|
||||
You can define a `teardown` function which will be called after each test, regardless whether it failed or not.
|
||||
|
||||
For example, we now want our project.sh to only show the welcome message on the first invocation.
|
||||
So we change our test to this:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
@test "Show welcome message on first invocation" {
|
||||
run project.sh
|
||||
assert_output --partial 'Welcome to our project!'
|
||||
|
||||
run project.sh
|
||||
refute_output --partial 'Welcome to our project!'
|
||||
}
|
||||
|
||||
This test fails as expected:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/test.bats
|
||||
✗ Show welcome message on first invocation
|
||||
(from function `refute_output' in file test/test_helper/bats-assert/src/refute_output.bash, line 189,
|
||||
in test file test/test.bats, line 17)
|
||||
`refute_output --partial 'Welcome to our project!'' failed
|
||||
|
||||
-- output should not contain substring --
|
||||
substring (1 lines):
|
||||
Welcome to our project!
|
||||
output (2 lines):
|
||||
Welcome to our project!
|
||||
NOT IMPLEMENTED!
|
||||
--
|
||||
|
||||
|
||||
1 test, 1 failure
|
||||
|
||||
Now, to get the test green again, we want to store the information that we already ran in the file `/tmp/bats-tutorial-project-ran`,
|
||||
so our `src/project.sh` becomes:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
#!/usr/bin/env bash
|
||||
|
||||
FIRST_RUN_FILE=/tmp/bats-tutorial-project-ran
|
||||
|
||||
if [[ ! -e "$FIRST_RUN_FILE" ]]; then
|
||||
echo "Welcome to our project!"
|
||||
touch "$FIRST_RUN_FILE"
|
||||
fi
|
||||
|
||||
echo "NOT IMPLEMENTED!" >&2
|
||||
exit 1
|
||||
|
||||
And our test says:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/test.bats
|
||||
✓ Show welcome message on first invocation
|
||||
|
||||
1 test, 0 failures
|
||||
|
||||
Nice, we're done, or are we? Running the test again now gives:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/test.bats
|
||||
✗ Show welcome message on first invocation
|
||||
(from function `assert_output' in file test/test_helper/bats-assert/src/assert_output.bash, line 186,
|
||||
in test file test/test.bats, line 14)
|
||||
`assert_output --partial 'Welcome to our project!'' failed
|
||||
|
||||
-- output does not contain substring --
|
||||
substring : Welcome to our project!
|
||||
output : NOT IMPLEMENTED!
|
||||
--
|
||||
|
||||
|
||||
1 test, 1 failure
|
||||
|
||||
Now the first assert failed, because of the leftover `$FIRST_RUN_FILE` from the last test run.
|
||||
|
||||
Luckily, bats offers the `teardown` function, which can take care of that, we add the following code to `test/test.bats`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
teardown() {
|
||||
rm -f /tmp/bats-tutorial-project-ran
|
||||
}
|
||||
|
||||
Now running the test again first gives us the same error, as the teardown has not run yet.
|
||||
On the second try we get a clean `/tmp` folder again and our test passes consistently now.
|
||||
|
||||
It is worth noting that we could do this `rm` in the test code itself but it would get skipped on failures.
|
||||
|
||||
.. important::
|
||||
|
||||
A test ends at its first failure. None of the subsequent commands in this test will be executed.
|
||||
The `teardown` function runs after each individual test in a file, regardless of test success or failure.
|
||||
Similarly to `setup`, each `.bats` file can have its own `teardown` function which will be the same for all tests in the file.
|
||||
|
||||
Test what you can
|
||||
-----------------
|
||||
|
||||
Sometimes tests rely on the environment to provide infrastructure that is needed for the test.
|
||||
If not all test environments provide this infrastructure but we still want to test on them,
|
||||
it would be unhelpful to get errors on parts that are not testable.
|
||||
|
||||
Bats provides you with the `skip` command which can be used in `setup` and `test`.
|
||||
|
||||
.. tip::
|
||||
|
||||
You should `skip` as early as you know it does not make sense to continue.
|
||||
|
||||
In our example project we rewrite the welcome message test to `skip` instead of doing cleanup:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
teardown() {
|
||||
: # Look Ma! No cleanup!
|
||||
}
|
||||
|
||||
@test "Show welcome message on first invocation" {
|
||||
if [[ -e /tmp/bats-tutorial-project-ran ]]; then
|
||||
skip 'The FIRST_RUN_FILE already exists'
|
||||
fi
|
||||
|
||||
run project.sh
|
||||
assert_output --partial 'Welcome to our project!'
|
||||
|
||||
run project.sh
|
||||
refute_output --partial 'Welcome to our project!'
|
||||
}
|
||||
|
||||
The first test run still works due to the cleanup from the last round. However, our second run gives us:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/test.bats
|
||||
- Show welcome message on first invocation (skipped: The FIRST_RUN_FILE already exists)
|
||||
|
||||
1 test, 0 failures, 1 skipped
|
||||
|
||||
.. important::
|
||||
|
||||
Skipped tests won't fail a test suite and are counted separately.
|
||||
No test command after `skip` will be executed. If an error occurs before `skip`, the test will fail.
|
||||
An optional reason can be passed to `skip` and will be printed in the test output.
|
||||
|
||||
Setting up a multifile test suite
|
||||
---------------------------------
|
||||
|
||||
With a growing project, putting all tests into one file becomes unwieldy.
|
||||
For our example project, we will extract functionality into the additional file `src/helper.sh`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
#!/usr/bin/env bash
|
||||
|
||||
_is_first_run() {
|
||||
local FIRST_RUN_FILE=/tmp/bats-tutorial-project-ran
|
||||
if [[ ! -e "$FIRST_RUN_FILE" ]]; then
|
||||
touch "$FIRST_RUN_FILE"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
This allows for testing it separately in a new file `test/helper.bats`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
setup() {
|
||||
load 'test_helper/common-setup'
|
||||
_common_setup
|
||||
|
||||
source "$PROJECT_ROOT/src/helper.sh"
|
||||
}
|
||||
|
||||
teardown() {
|
||||
rm -f "$NON_EXISTANT_FIRST_RUN_FILE"
|
||||
rm -f "$EXISTING_FIRST_RUN_FILE"
|
||||
}
|
||||
|
||||
@test "Check first run" {
|
||||
NON_EXISTANT_FIRST_RUN_FILE=$(mktemp -u) # only create the name, not the file itself
|
||||
|
||||
assert _is_first_run
|
||||
refute _is_first_run
|
||||
refute _is_first_run
|
||||
|
||||
EXISTING_FIRST_RUN_FILE=$(mktemp)
|
||||
refute _is_first_run
|
||||
refute _is_first_run
|
||||
}
|
||||
|
||||
Since the setup function would have duplicated much of the other files', we split that out into the file `test/test_helper/common-setup.bash`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
#!/usr/bin/env bash
|
||||
|
||||
_common_setup() {
|
||||
load 'test_helper/bats-support/load'
|
||||
load 'test_helper/bats-assert/load'
|
||||
# get the containing directory of this file
|
||||
# use $BATS_TEST_FILENAME instead of ${BASH_SOURCE[0]} or $0,
|
||||
# as those will point to the bats executable's location or the preprocessed file respectively
|
||||
PROJECT_ROOT="$( cd "$( dirname "$BATS_TEST_FILENAME" )/.." >/dev/null 2>&1 && pwd )"
|
||||
# make executables in src/ visible to PATH
|
||||
PATH="$PROJECT_ROOT/src:$PATH"
|
||||
}
|
||||
|
||||
with the following `setup` in `test/test.bats`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
setup() {
|
||||
load 'test_helper/common-setup'
|
||||
_common_setup
|
||||
}
|
||||
|
||||
Please note, that we gave our helper the extension `.bash`, which is automatically appended by `load`.
|
||||
|
||||
.. important::
|
||||
|
||||
`load` automatically tries to append `.bash` to its argument.
|
||||
|
||||
In our new `test/helper.bats` we can see, that loading `.sh` is simply done via `source`.
|
||||
|
||||
.. tip::
|
||||
|
||||
Avoid using `load` and `source` outside of any functions.
|
||||
If there is an error in the test file's "free code", the diagnostics are much worse than for code in `setup` or `@test`.
|
||||
|
||||
With the new changes in place, we can run our tests again. However, our previous run command does not include the new file.
|
||||
You could add the new file to the parameter list, e.g. by running `./test/bats/bin/bats test/*.bats`.
|
||||
However, bats also can handle directories:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/
|
||||
✓ Check first run
|
||||
- Show welcome message on first invocation (skipped: The FIRST_RUN_FILE already exists)
|
||||
|
||||
2 tests, 0 failures, 1 skipped
|
||||
|
||||
In this mode, bats will pick up all `.bats` files in the directory it was given. There is an additional `-r` switch that will recursively search for more `.bats` files.
|
||||
However, in our project layout this would pick up the test files of bats itself from `test/bats/test`. We don't have test subfolders anyways, so we can do without `-r`.
|
||||
|
||||
|
||||
Avoiding costly repeated setups
|
||||
-------------------------------
|
||||
|
||||
We already have seen the `setup` function in use, which is called before each test.
|
||||
Sometimes our setup is very costly, such as booting up a service just for testing.
|
||||
If we can reuse the same setup across multiple tests, we might want to do only one setup before all these tests.
|
||||
|
||||
This usecase is exactly what the `setup_file` function was created for.
|
||||
It can be defined per file and will run before all tests of the respective file.
|
||||
Similarly, we have `teardown_file`, which will run after all tests of the file, even when you abort a test run or a test failed.
|
||||
|
||||
As an example, we want to add an echo server capability to our project. First, we add the following `server.bats` to our suite:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
setup_file() {
|
||||
load 'test_helper/common-setup'
|
||||
_common_setup
|
||||
PORT=$(project.sh start-echo-server >/dev/null 2>&1)
|
||||
export PORT
|
||||
}
|
||||
|
||||
@test "server is reachable" {
|
||||
nc -z localhost "$PORT"
|
||||
}
|
||||
|
||||
Which will obviously fail:
|
||||
|
||||
Note that we `export PORT` to make it visible to the test!
|
||||
Running this gives us:
|
||||
|
||||
..
|
||||
TODO: Update this example with fixed test name reporting from setup_file? (instead of "✗ ")
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/server.bats
|
||||
✗
|
||||
(from function `setup_file' in test file test/server.bats, line 4)
|
||||
`PORT=$(project.sh start-echo-server >/dev/null 2>&1)' failed
|
||||
|
||||
1 test, 1 failure
|
||||
|
||||
Now that we got our red test, we need to get it green again.
|
||||
Our new `project.sh` now ends with:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
case $1 in
|
||||
start-echo-server)
|
||||
echo "Starting echo server"
|
||||
PORT=2000
|
||||
ncat -l $PORT -k -c 'xargs -n1 echo' 2>/dev/null & # don't keep open this script's stderr
|
||||
echo $! > /tmp/project-echo-server.pid
|
||||
echo "$PORT" >&2
|
||||
;;
|
||||
*)
|
||||
echo "NOT IMPLEMENTED!" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
and the tests now say
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ LANG=C ./test/bats/bin/bats test/server.bats
|
||||
✓ server is reachable
|
||||
|
||||
1 test, 0 failures
|
||||
|
||||
However, running this a second time gives:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
$ ./test/bats/bin/bats test/server.bats
|
||||
✗ server is reachable
|
||||
(in test file test/server.bats, line 14)
|
||||
`nc -z -w 2 localhost "$PORT"' failed
|
||||
2000
|
||||
Ncat: bind to :::2000: Address already in use. QUITTING.
|
||||
nc: port number invalid: 2000
|
||||
Ncat: bind to :::2000: Address already in use. QUITTING.
|
||||
|
||||
1 test, 1 failure
|
||||
|
||||
Obviously, we did not turn off our server after testing.
|
||||
This is a task for `teardown_file` in `server.bats`:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
teardown_file() {
|
||||
project.sh stop-echo-server
|
||||
}
|
||||
|
||||
Our `project.sh` should also get the new command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
stop-echo-server)
|
||||
kill "$(< "/tmp/project-echo-server.pid")"
|
||||
rm /tmp/project-echo-server.pid
|
||||
;;
|
||||
|
||||
Now starting our tests again will overwrite the .pid file with the new instance's, so we have to do manual cleanup once.
|
||||
From now on, our test should clean up after itself.
|
||||
|
||||
.. note::
|
||||
|
||||
`teardown_file` will run regardless of tests failing or succeeding.
|
@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
bats_prefix_lines_for_tap_output() {
|
||||
while IFS= read -r line; do
|
||||
printf '# %s\n' "$line" || break # avoid feedback loop when errors are redirected into BATS_OUT (see #353)
|
||||
done
|
||||
if [[ -n "$line" ]]; then
|
||||
printf '# %s\n' "$line"
|
||||
fi
|
||||
}
|
@ -1,4 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
trap '' INT
|
||||
|
||||
cat
|
||||
|
@ -0,0 +1,66 @@
|
||||
# block until at least <barrier-size> processes of this barrier group entered the barrier
|
||||
# once this happened, all latecomers will go through immediately!
|
||||
# WARNING: a barrier group consists of all processes with the same barrier name *and* size!
|
||||
single-use-barrier() { # <barrier-name> <barrier-size> [<timeout-in-seconds> [<sleep-cycle-time>]]
|
||||
local barrier_name="$1"
|
||||
local barrier_size="$2"
|
||||
local timeout_in_seconds="${3:-0}"
|
||||
local sleep_cycle_time="${4:-1}"
|
||||
# use name and size to distinguish between invocations
|
||||
# this will block inconsistent sizes on the same name!
|
||||
local BARRIER_SUFFIX=${barrier_name//\//_}-$barrier_size
|
||||
local BARRIER_FILE="$BATS_SUITE_TMPDIR/barrier-$BARRIER_SUFFIX"
|
||||
# mark our entry for all others
|
||||
# concurrent writes may interleave but should not lose their newlines
|
||||
echo "in-$$" >> "$BARRIER_FILE"
|
||||
local start="$SECONDS"
|
||||
# wait for others to enter
|
||||
while [[ $(wc -l <"$BARRIER_FILE" ) -lt $barrier_size ]]; do
|
||||
if [[ $timeout_in_seconds -ne 0 && $(( SECONDS - start )) -gt $timeout_in_seconds ]]; then
|
||||
mv "$BARRIER_FILE" "$BARRIER_FILE-timeout"
|
||||
printf "ERROR: single-use-barrier %s timed out\n" "$BARRIER_SUFFIX" >&2
|
||||
return 1
|
||||
fi
|
||||
sleep "$sleep_cycle_time"
|
||||
done
|
||||
# mark our exit
|
||||
echo "out-$$" >> "$BARRIER_FILE"
|
||||
}
|
||||
|
||||
# block until at least <latch-size> signalling threads have passed the latch
|
||||
# SINGLE_USE_LATCH_DIR must be exported!
|
||||
single-use-latch::wait() { # <latch-name> <latch-size> [<timeout-in-seconds> [<sleep-cycle-time>]]
|
||||
local latch_name="$1"
|
||||
local latch_size="$2"
|
||||
local timeout_in_seconds="${3:-0}"
|
||||
local sleep_cycle_time="${4:-1}"
|
||||
|
||||
local LATCH_FILE
|
||||
LATCH_FILE="$(single-use-latch::_filename "$latch_name")"
|
||||
local start="$SECONDS"
|
||||
while [[ (! -e "$LATCH_FILE") || $(wc -l <"$LATCH_FILE" ) -lt $latch_size ]]; do
|
||||
if [[ $timeout_in_seconds -ne 0 && $(( SECONDS - start )) -gt $timeout_in_seconds ]]; then
|
||||
printf "ERROR: single-use-latch %s timed out\n" "$latch_name" >&2
|
||||
mv "$LATCH_FILE" "$LATCH_FILE-timeout"
|
||||
return 1
|
||||
fi
|
||||
sleep "$sleep_cycle_time"
|
||||
done
|
||||
}
|
||||
|
||||
# signal the waiting process that the latch was passed
|
||||
# this does not block
|
||||
# SINGLE_USE_LATCH_DIR must be exported!
|
||||
single-use-latch::signal() { # <latch-name>
|
||||
local latch_name="$1"
|
||||
local LATCH_FILE
|
||||
LATCH_FILE="$(single-use-latch::_filename "$latch_name")"
|
||||
# mark our passing
|
||||
# concurrent process might interleave but will still post their newline
|
||||
echo "passed-$$" >> "$LATCH_FILE"
|
||||
echo "passed-$$ >> $LATCH_FILE" >> /tmp/latch
|
||||
}
|
||||
|
||||
single-use-latch::_filename() { # <latch-name>
|
||||
printf "%s\n" "${SINGLE_USE_LATCH_DIR?}/latch-${1//\//_}"
|
||||
}
|
@ -0,0 +1,8 @@
|
||||
@test "BATS_TMPDIR is set" {
|
||||
[ "${BATS_TMPDIR}" == "${expected:-}" ]
|
||||
}
|
||||
|
||||
@test "BATS_RUN_TMPDIR has BATS_TMPDIR as a prefix" {
|
||||
local regex="^${BATS_TMPDIR}/.+"
|
||||
[[ ${BATS_RUN_TMPDIR} =~ ${regex} ]]
|
||||
}
|
@ -1,8 +1,10 @@
|
||||
@test "setting a variable" {
|
||||
# shellcheck disable=SC2030
|
||||
variable=1
|
||||
[ $variable -eq 1 ]
|
||||
}
|
||||
|
||||
@test "variables do not persist across tests" {
|
||||
# shellcheck disable=SC2031
|
||||
[ -z "$variable" ]
|
||||
}
|
||||
|
@ -0,0 +1,15 @@
|
||||
setup() {
|
||||
true
|
||||
}
|
||||
|
||||
teardown() {
|
||||
true
|
||||
}
|
||||
|
||||
setup_file() {
|
||||
true
|
||||
}
|
||||
|
||||
teardown_file() {
|
||||
true
|
||||
}
|
@ -0,0 +1,5 @@
|
||||
load external_functions
|
||||
|
||||
@test test {
|
||||
true
|
||||
}
|
@ -0,0 +1,11 @@
|
||||
|
||||
|
||||
helper() {
|
||||
false
|
||||
}
|
||||
|
||||
helper
|
||||
|
||||
@test "everything is ok" {
|
||||
true
|
||||
}
|
@ -0,0 +1,8 @@
|
||||
setup() {
|
||||
load '../../concurrent-coordination'
|
||||
}
|
||||
|
||||
@test "test" {
|
||||
single-use-latch::signal hang_in_run
|
||||
run sleep 10
|
||||
}
|
@ -0,0 +1,9 @@
|
||||
setup_file() {
|
||||
load '../../concurrent-coordination'
|
||||
single-use-latch::signal hang_in_setup_file
|
||||
sleep 10
|
||||
}
|
||||
|
||||
@test "empty" {
|
||||
:
|
||||
}
|
@ -0,0 +1,9 @@
|
||||
teardown() {
|
||||
load '../../concurrent-coordination'
|
||||
single-use-latch::signal hang_in_teardown
|
||||
sleep 10
|
||||
}
|
||||
|
||||
@test "empty" {
|
||||
:
|
||||
}
|
@ -0,0 +1,9 @@
|
||||
teardown_file() {
|
||||
load '../../concurrent-coordination'
|
||||
single-use-latch::signal hang_in_teardown_file
|
||||
sleep 10
|
||||
}
|
||||
|
||||
@test "empty" {
|
||||
:
|
||||
}
|
@ -0,0 +1,8 @@
|
||||
setup() {
|
||||
load '../../concurrent-coordination'
|
||||
}
|
||||
|
||||
@test "test" {
|
||||
single-use-latch::signal hang_in_test
|
||||
sleep 10
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue