Hack up a really simple migration testing server
parent
8c2c2ed76b
commit
8bfe371045
@ -1,63 +0,0 @@
|
||||
version: 2
|
||||
jobs:
|
||||
build:
|
||||
docker:
|
||||
- image: circleci/python
|
||||
steps:
|
||||
- checkout
|
||||
- setup_remote_docker
|
||||
|
||||
- run:
|
||||
name: Create version.json
|
||||
command: |
|
||||
printf '{"commit":"%s","version":"%s","source":"https://github.com/%s/%s","build":"%s"}\n' \
|
||||
"$CIRCLE_SHA1" \
|
||||
"$CIRCLE_TAG" \
|
||||
"$CIRCLE_PROJECT_USERNAME" \
|
||||
"$CIRCLE_PROJECT_REPONAME" \
|
||||
"$CIRCLE_BUILD_URL" | tee version.json
|
||||
- store_artifacts:
|
||||
path: version.json
|
||||
|
||||
- run:
|
||||
name: Build deployment container image
|
||||
command: docker build -t app:build .
|
||||
- run:
|
||||
name: Test flake8
|
||||
command: docker run -it app:build test_flake8
|
||||
- run:
|
||||
name: Test nose
|
||||
command: docker run -it app:build test_nose
|
||||
- run:
|
||||
name: Functional tests
|
||||
command: docker run -it app:build test_functional
|
||||
- run:
|
||||
name: Push to Dockerhub
|
||||
command: |
|
||||
if [ "${CIRCLE_BRANCH}" == "master" ]; then
|
||||
bin/ci/deploy-dockerhub.sh latest
|
||||
fi
|
||||
if [[ "${CIRCLE_BRANCH}" == feature* ]] || [[ "${CIRCLE_BRANCH}" == dockerpush* ]]; then
|
||||
bin/ci/deploy-dockerhub.sh "$CIRCLE_BRANCH"
|
||||
fi
|
||||
if [ -n "${CIRCLE_TAG}" ]; then
|
||||
bin/ci/deploy-dockerhub.sh "$CIRCLE_TAG"
|
||||
fi
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
|
||||
# workflow jobs are _not_ run in tag builds by default
|
||||
# we use filters to whitelist jobs that should be run for tags
|
||||
|
||||
# workflow jobs are run in _all_ branch builds by default
|
||||
# we use filters to blacklist jobs that shouldn't be run for a branch
|
||||
|
||||
# see: https://circleci.com/docs/2.0/workflows/#git-tag-job-execution
|
||||
|
||||
build-test-push:
|
||||
jobs:
|
||||
- build:
|
||||
filters:
|
||||
tags:
|
||||
only: /.*/
|
@ -1,8 +0,0 @@
|
||||
*.pyc
|
||||
local
|
||||
*.egg-info
|
||||
*.swp
|
||||
\.coverage
|
||||
*~
|
||||
nosetests.xml
|
||||
syncserver.db
|
@ -1,27 +0,0 @@
|
||||
language: python
|
||||
|
||||
python:
|
||||
- "2.7"
|
||||
# The way the selection of the Python version is currently made in Makefile
|
||||
# leads to travis always picking up Python 2 for the task.
|
||||
# All versions of Python are appearantly present in a travis environment.
|
||||
# Once the makefile has been adjusted the following lines should be enabled.
|
||||
# - "3.5"
|
||||
# - "3.6"
|
||||
# - "3.7-dev"
|
||||
|
||||
notifications:
|
||||
email:
|
||||
- rfkelly@mozilla.com
|
||||
irc:
|
||||
channels:
|
||||
- "irc.mozilla.org#services-dev"
|
||||
use_notice: false
|
||||
skip_join: false
|
||||
|
||||
install:
|
||||
- pip install virtualenv
|
||||
- make build
|
||||
|
||||
script:
|
||||
- make test
|
@ -1,28 +0,0 @@
|
||||
FROM python:2.7-alpine
|
||||
|
||||
RUN addgroup -g 1001 app \
|
||||
&& adduser -u 1001 -S -D -G app -s /usr/sbin/nologin app
|
||||
|
||||
ENV LANG C.UTF-8
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# install syncserver dependencies
|
||||
COPY ./requirements.txt /app/requirements.txt
|
||||
COPY ./dev-requirements.txt /app/dev-requirements.txt
|
||||
RUN apk --no-cache update \
|
||||
&& apk add dumb-init libstdc++ libffi-dev openssl-dev g++ \
|
||||
&& pip install --upgrade pip \
|
||||
&& pip install --upgrade --no-cache-dir -r requirements.txt \
|
||||
&& pip install --upgrade --no-cache-dir -r dev-requirements.txt \
|
||||
&& apk del g++
|
||||
|
||||
COPY . /app
|
||||
RUN python ./setup.py develop
|
||||
|
||||
# run as non priviledged user
|
||||
USER app
|
||||
|
||||
# run the server by default
|
||||
ENTRYPOINT ["/usr/bin/dumb-init", "/app/docker-entrypoint.sh"]
|
||||
CMD ["server"]
|
@ -1,3 +1,2 @@
|
||||
include syncserver.ini
|
||||
include syncserver.wsgi
|
||||
include syncserver/tests.ini
|
||||
include syncserver/management.html
|
@ -1,179 +1,13 @@
|
||||
Run-Your-Own Firefox Sync Server
|
||||
================================
|
||||
The Very Hacky Migration-Testing Sync Server
|
||||
============================================
|
||||
|
||||
.. image:: https://circleci.com/gh/mozilla-services/syncserver/tree/master.svg?style=svg
|
||||
:target: https://circleci.com/gh/mozilla-services/syncserver/tree/master
|
||||
This is a hacked-up sync server designed to help test client behaviour
|
||||
during the migration to old MySQL-backed sync storage nodes to the new
|
||||
Spanner-backed durable mega-node.
|
||||
|
||||
.. image:: https://img.shields.io/docker/automated/mozilla-services/syncserver.svg?style=flat-square
|
||||
:target: https://hub.docker.com/r/mozilla/syncserver/
|
||||
Run the server using `make server`, and it'll bind to http://localhost:5000/.
|
||||
Open up that URL for an incredibly bare-bones management interface.
|
||||
|
||||
This is an all-in-one package for running a self-hosted Firefox Sync server.
|
||||
It bundles the "tokenserver" project for authentication and the "syncstorage"
|
||||
project for storage, to produce a single stand-alone webapp.
|
||||
|
||||
Complete installation instructions are available at:
|
||||
|
||||
https://mozilla-services.readthedocs.io/en/latest/howtos/run-sync-1.5.html
|
||||
|
||||
|
||||
Quickstart
|
||||
----------
|
||||
|
||||
The Sync Server software runs using **python 2.7**, and the build
|
||||
process requires **make** and **virtualenv**. You will need to have the
|
||||
following packages (or similar, depending on your operating system) installed:
|
||||
|
||||
- python2.7
|
||||
- python2.7-dev
|
||||
- python-virtualenv
|
||||
- gcc and g++
|
||||
- make
|
||||
|
||||
Take a checkout of this repository, then run "make build" to pull in the
|
||||
necessary python package dependencies::
|
||||
|
||||
$ git clone https://github.com/mozilla-services/syncserver
|
||||
$ cd syncserver
|
||||
$ make build
|
||||
|
||||
To sanity-check that things got installed correctly, do the following::
|
||||
|
||||
$ make test
|
||||
|
||||
Now you can run the server::
|
||||
|
||||
$ make serve
|
||||
|
||||
This should start a server on http://localhost:5000/.
|
||||
|
||||
Now go into Firefox's `about:config` page, search for a setting named
|
||||
"tokenServerURI", and change it to point to your server::
|
||||
|
||||
identity.sync.tokenserver.uri: http://localhost:5000/token/1.0/sync/1.5
|
||||
|
||||
(Prior to Firefox 42, the TokenServer preference name for Firefox Desktop was
|
||||
"services.sync.tokenServerURI". While the old preference name will work in
|
||||
Firefox 42 and later, the new preference is recommended as the old preference
|
||||
name will be reset when the user signs out from Sync causing potential
|
||||
confusion.)
|
||||
|
||||
Firefox should now sync against your local server rather than the default
|
||||
Mozilla-hosted servers.
|
||||
|
||||
For more details on setting up a stable deployment, see:
|
||||
|
||||
https://mozilla-services.readthedocs.io/en/latest/howtos/run-sync-1.5.html
|
||||
|
||||
|
||||
Customization
|
||||
-------------
|
||||
|
||||
All customization of the server can be done by editing the file
|
||||
"syncserver.ini", which contains lots of comments to help you on
|
||||
your way. Things you might like to change include:
|
||||
|
||||
* The client-visible hostname for your server. Edit the "public_url"
|
||||
key under the [syncerver] section.
|
||||
|
||||
* The database in which to store sync data. Edit the "sqluri" setting
|
||||
under the [syncserver] section.
|
||||
|
||||
* The secret key to use for signing auth tokens. Find the "secret"
|
||||
entry under the [syncserver] section and follow the instructions
|
||||
in the comment to replace it with a strong random key.
|
||||
|
||||
|
||||
Database Backend Modules
|
||||
------------------------
|
||||
|
||||
If your python installation doesn't provide the "sqlite" module by default,
|
||||
you may need to install it as a separate package::
|
||||
|
||||
$ ./local/bin/pip install pysqlite2
|
||||
|
||||
Similarly, if you want to use a different database backend you will need
|
||||
to install an appropriate python module, e.g::
|
||||
|
||||
$ ./local/bin/pip install PyMySQL
|
||||
$ ./local/bin/pip install psycopg2
|
||||
|
||||
|
||||
Runner under Docker
|
||||
-------------------
|
||||
|
||||
[Dockerhub Page](https://hub.docker.com/r/mozilla/syncserver)
|
||||
|
||||
There is experimental support for running the server inside a Docker
|
||||
container. The docker image runs with UID/GID 1001/1001.
|
||||
Build the image like this::
|
||||
|
||||
$ docker build -t syncserver:latest .
|
||||
|
||||
Then you can run the server by passing in configuration options as
|
||||
environmet variables, like this::
|
||||
|
||||
$ docker run --rm \
|
||||
-p 5000:5000 \
|
||||
-e SYNCSERVER_PUBLIC_URL=http://localhost:5000 \
|
||||
-e SYNCSERVER_SECRET=<PUT YOUR SECRET KEY HERE> \
|
||||
-e SYNCSERVER_SQLURI=sqlite:////tmp/syncserver.db \
|
||||
-e SYNCSERVER_BATCH_UPLOAD_ENABLED=true \
|
||||
-e SYNCSERVER_FORCE_WSGI_ENVIRON=false \
|
||||
-e PORT=5000 \
|
||||
mozilla/syncserver:latest
|
||||
|
||||
or
|
||||
|
||||
$ docker run --rm \
|
||||
-p 5000:5000 \
|
||||
-e SYNCSERVER_PUBLIC_URL=http://localhost:5000 \
|
||||
-e SYNCSERVER_SECRET_FILE=<PUT YOUR SECRET KEY FILE LOCATION HERE> \
|
||||
-e SYNCSERVER_SQLURI=sqlite:////tmp/syncserver.db \
|
||||
-e SYNCSERVER_BATCH_UPLOAD_ENABLED=true \
|
||||
-e SYNCSERVER_FORCE_WSGI_ENVIRON=false \
|
||||
-e PORT=5000 \
|
||||
-v /secret/file/at/host:<PUT YOUR SECRET KEY FILE LOCATION HERE> \
|
||||
mozilla/syncserver:latest
|
||||
|
||||
Don't forget to `generate a random secret key <https://mozilla-services.readthedocs.io/en/latest/howtos/run-sync-1.5.html#further-configuration>`_
|
||||
to use in the `SYNCSERVER_SECRET` environment variable or mount your secret key file!
|
||||
|
||||
And you can test whether it's running correctly by using the builtin
|
||||
function test suite, like so::
|
||||
|
||||
$ /usr/local/bin/python -m syncstorage.tests.functional.test_storage \
|
||||
--use-token-server http://localhost:5000/token/1.0/sync/1.5
|
||||
|
||||
If you'd like a persistent setup, you can mount a volume as well::
|
||||
|
||||
$ docker run -d \
|
||||
-v /syncserver:/data \
|
||||
-p 5000:5000 \
|
||||
-e SYNCSERVER_PUBLIC_URL=http://localhost:5000 \
|
||||
-e SYNCSERVER_SECRET=<PUT YOUR SECRET KEY HERE> \
|
||||
-e SYNCSERVER_SQLURI=sqlite:////data/syncserver.db \
|
||||
-e SYNCSERVER_BATCH_UPLOAD_ENABLED=true \
|
||||
-e SYNCSERVER_FORCE_WSGI_ENVIRON=false \
|
||||
-e PORT=5000 \
|
||||
mozilla/syncserver:latest
|
||||
|
||||
Make sure that /syncserver is owned by 1001:1001
|
||||
|
||||
|
||||
Removing Mozilla-hosted data
|
||||
----------------------------
|
||||
|
||||
If you have previously uploaded Firefox Sync data
|
||||
to the Mozilla-hosted storage service
|
||||
and would like to remove it,
|
||||
you can use the following script to do so::
|
||||
|
||||
$ pip install PyFxA
|
||||
$ python ./bin/delete_user_data.py user@example.com
|
||||
|
||||
|
||||
Questions, Feedback
|
||||
-------------------
|
||||
|
||||
- IRC channel: #sync. See http://irc.mozilla.org/
|
||||
- Mailing list: https://mail.mozilla.org/listinfo/services-dev
|
||||
Configure your browser to use "http://localhost/token/1.0/sync/1.5" for its
|
||||
tokenserver URL. Sync it. Use the managment interface to trigger migration
|
||||
events. See what happens. It'll be fun!
|
@ -1,35 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# THIS IS MEANT TO BE RUN BY CI
|
||||
|
||||
set -e
|
||||
|
||||
# Usage: retry MAX CMD...
|
||||
# Retry CMD up to MAX times. If it fails MAX times, returns failure.
|
||||
# Example: retry 3 docker push "$DOCKERHUB_REPO:$TAG"
|
||||
function retry() {
|
||||
max=$1
|
||||
shift
|
||||
count=1
|
||||
until "$@"; do
|
||||
count=$((count + 1))
|
||||
if [[ $count -gt $max ]]; then
|
||||
return 1
|
||||
fi
|
||||
echo "$count / $max"
|
||||
done
|
||||
return 0
|
||||
}
|
||||
|
||||
# configure docker creds
|
||||
retry 3 echo "$DOCKER_PASS" | docker login -u="$DOCKER_USER" --password-stdin
|
||||
|
||||
# docker tag and push git branch to dockerhub
|
||||
if [ -n "$1" ]; then
|
||||
[ "$1" == master ] && TAG=latest || TAG="$1"
|
||||
docker tag app:build "$DOCKERHUB_REPO:$TAG" ||
|
||||
(echo "Couldn't tag app:build as $DOCKERHUB_REPO:$TAG" && false)
|
||||
retry 3 docker push "$DOCKERHUB_REPO:$TAG" ||
|
||||
(echo "Couldn't push $DOCKERHUB_REPO:$TAG" && false)
|
||||
echo "Pushed $DOCKERHUB_REPO:$TAG"
|
||||
fi
|
@ -1,91 +0,0 @@
|
||||
#
|
||||
# A helper script to delete user data from a Sync storage server.
|
||||
#
|
||||
# You can use this script to explicitly delete stored sync data
|
||||
# for a user, without having to connect a Firefox profile and
|
||||
# without having to reset their password. It may be useful if
|
||||
# you've started running a self-hosted storage server and want
|
||||
# to delete data that was previously stored on the Mozilla-hosted
|
||||
# servers.
|
||||
#
|
||||
# Use it like so:
|
||||
#
|
||||
# $> pip install PyFxA
|
||||
# $> python delete_user_data.py user@example.com
|
||||
#
|
||||
# The script makes a best-effort attempt to sign in to the user's
|
||||
# account, authenticate to the Firefox Sync Tokenserver, and delete
|
||||
# the user's stored sync data. The login process might fail due to
|
||||
# things like rate-limiting, server-side security measures, or API
|
||||
# changes in the login process.
|
||||
#
|
||||
|
||||
import sys
|
||||
import getpass
|
||||
import hashlib
|
||||
import argparse
|
||||
import urlparse
|
||||
|
||||
import requests
|
||||
import hawkauthlib
|
||||
import fxa.core
|
||||
|
||||
DEFAULT_FXA_URI = "https://api.accounts.firefox.com"
|
||||
DEFAULT_TOKENSERVER_URI = "https://token.services.mozilla.com"
|
||||
|
||||
def main(argv):
|
||||
parser = argparse.ArgumentParser(description="Delete Firefox Sync data")
|
||||
parser.add_argument("email",
|
||||
help="Email of the account for which to delete data")
|
||||
parser.add_argument("--accounts-uri", default=DEFAULT_FXA_URI,
|
||||
help="URI of the Firefox Accounts API server")
|
||||
parser.add_argument("--tokenserver-uri", default=DEFAULT_TOKENSERVER_URI,
|
||||
help="URI of the Firefox Sync tokenserver")
|
||||
|
||||
args = parser.parse_args(argv)
|
||||
|
||||
# Sign in to the account.
|
||||
c = fxa.core.Client(args.accounts_uri)
|
||||
password = getpass.getpass("Password for {}: ".format(args.email))
|
||||
s = c.login(args.email, password, keys=True)
|
||||
try:
|
||||
# Verify the session if necessary.
|
||||
# TODO: this won't work if the user has enabled two-step auth.
|
||||
status = s.get_email_status()
|
||||
if not status["sessionVerified"]:
|
||||
code = raw_input("Enter verification link or code: ")
|
||||
if "?" in code:
|
||||
# They copy-pasted the full URL.
|
||||
code_url = urlparse.urlparse(code)
|
||||
code = urlparse.parse_qs(code_url.query)["code"][0]
|
||||
s.verify_email_code(code)
|
||||
|
||||
# Prepare authentication details for tokenserver.
|
||||
(_, kB) = s.fetch_keys()
|
||||
xcs = hashlib.sha256(kB).hexdigest()[:32]
|
||||
auth = s.get_identity_assertion(args.tokenserver_uri)
|
||||
|
||||
# Auth to tokenserver, find sync storage node.
|
||||
token_uri = urlparse.urljoin(args.tokenserver_uri, "1.0/sync/1.5")
|
||||
r = requests.get(token_uri, headers={
|
||||
"Authorization": "BrowserID " + auth,
|
||||
"X-Client-State": xcs,
|
||||
})
|
||||
r.raise_for_status()
|
||||
|
||||
node = r.json()
|
||||
api_endpoint = node["api_endpoint"]
|
||||
hawk_id = node["id"].encode("ascii")
|
||||
hawk_key = node["key"].encode("ascii")
|
||||
print "Deleting from", api_endpoint
|
||||
req = requests.Request("DELETE", api_endpoint).prepare()
|
||||
hawkauthlib.sign_request(req, hawk_id, hawk_key)
|
||||
r = requests.session().send(req)
|
||||
r.raise_for_status()
|
||||
print r
|
||||
finally:
|
||||
s.destroy_session()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv[1:])
|
@ -1,2 +0,0 @@
|
||||
flake8==3.3
|
||||
nose==1.3.7
|
@ -1,50 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
cd $(dirname $0)
|
||||
case "$1" in
|
||||
server)
|
||||
export SYNCSERVER_SQLURI="${SYNCSERVER_SQLURI:-sqlite:///tmp/syncserver.db}"
|
||||
exec gunicorn \
|
||||
--bind ${HOST-0.0.0.0}:${PORT-5000} \
|
||||
--forwarded-allow-ips="${SYNCSERVER_FORWARDED_ALLOW_IPS:-127.0.0.1,172.17.0.1}" \
|
||||
syncserver.wsgi_app
|
||||
;;
|
||||
|
||||
test_all)
|
||||
$0 test_flake8
|
||||
$0 test_nose
|
||||
$0 test_functional
|
||||
;;
|
||||
|
||||
test_flake8)
|
||||
echo "test - flake8"
|
||||
flake8 syncserver
|
||||
;;
|
||||
|
||||
test_nose)
|
||||
echo "test - nose"
|
||||
nosetests --verbose --nocapture syncstorage.tests
|
||||
;;
|
||||
|
||||
test_functional)
|
||||
echo "test - functional"
|
||||
# run functional tests
|
||||
gunicorn --paste ./syncserver/tests.ini &
|
||||
SERVER_PID=$!
|
||||
sleep 2
|
||||
|
||||
$0 test_endpoint http://localhost:5000
|
||||
|
||||
kill $SERVER_PID
|
||||
;;
|
||||
|
||||
test_endpoint)
|
||||
exec python -m syncstorage.tests.functional.test_storage \
|
||||
--use-token-server $2/token/1.0/sync/1.5
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Unknown CMD, $1"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
@ -1,43 +0,0 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import site
|
||||
from logging.config import fileConfig
|
||||
try:
|
||||
from ConfigParser import NoSectionError
|
||||
except ImportError:
|
||||
from configparser import NoSectionError
|
||||
|
||||
# detecting if virtualenv was used in this dir
|
||||
_CURDIR = os.path.dirname(os.path.abspath(__file__))
|
||||
_PY_VER = sys.version.split()[0][:3]
|
||||
_SITE_PKG = os.path.join(_CURDIR, 'local', 'lib', 'python' + _PY_VER, 'site-packages')
|
||||
|
||||
# adding virtualenv's site-package and ordering paths
|
||||
saved = sys.path[:]
|
||||
|
||||
if os.path.exists(_SITE_PKG):
|
||||
site.addsitedir(_SITE_PKG)
|
||||
|
||||
for path in sys.path:
|
||||
if path not in saved:
|
||||
saved.insert(0, path)
|
||||
|
||||
sys.path[:] = saved
|
||||
|
||||
# setting up the egg cache to a place where apache can write
|
||||
os.environ['PYTHON_EGG_CACHE'] = '/tmp/python-eggs'
|
||||
|
||||
# setting up logging
|
||||
ini_file = os.path.join(_CURDIR, 'syncserver.ini')
|
||||
try:
|
||||
fileConfig(ini_file)
|
||||
except NoSectionError:
|
||||
pass
|
||||
|
||||
# running the app using Paste
|
||||
from paste.deploy import loadapp
|
||||
application = loadapp('config:%s'% ini_file)
|
@ -0,0 +1,19 @@
|
||||
<html>
|
||||
<head><title>Super Cool Migration Testing Management Interface 2.0</title></head>
|
||||
<body>
|
||||
<h1>Migration Testing Server Management Interface</h1>
|
||||
<p><b>Current Migration State:</b> {migration_state}</p>
|
||||
<form action="/" method="POST">
|
||||
<input type="hidden" name="cmd" value="begin_migration" />
|
||||
<input type="submit" value="Begin Migration" />
|
||||
</form>
|
||||
<form action="/" method="POST">
|
||||
<input type="hidden" name="cmd" value="complete_migration" />
|
||||
<input type="submit" value="Complete Migration" />
|
||||
</form>
|
||||
<form action="/" method="POST">
|
||||
<input type="hidden" name="cmd" value="reset" />
|
||||
<input type="submit" value="Reset" />
|
||||
</form>
|
||||
</body>
|
||||
</html>
|
@ -0,0 +1,113 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
"""
|
||||
Helpers for fiddling with the migration state of the server.
|
||||
We allow the server to be in one of three states:
|
||||
|
||||
* Pre-migration: the user is syncing as normal to the old backend.
|
||||
|
||||
* Migrating: we're actively moving the user's data to the new backend.
|
||||
|
||||
* Post-migration: we've finished moving the user's data to the new backend.
|
||||
|
||||
You can use the functions exposed by this module to move the server between
|
||||
the different states.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer
|
||||
from sqlalchemy import create_engine, Table, MetaData
|
||||
from sqlalchemy.sql import text as sqltext
|
||||
|
||||
PRE_MIGRATION = 1
|
||||
MIGRATING = 2
|
||||
POST_MIGRATION = 3
|
||||
|
||||
# A very simple db table in which to store the migration state.
|
||||
|
||||
_metadata = MetaData()
|
||||
|
||||
_migration = Table(
|
||||
"migration",
|
||||
_metadata,
|
||||
Column("state", Integer(), nullable=False),
|
||||
)
|
||||
|
||||
|
||||
class MigrationStateManager(object):
|
||||
|
||||
def __init__(self, sqluri):
|
||||
self.sqluri = sqluri
|
||||
self._engine = create_engine(sqluri, pool_reset_on_return=True)
|
||||
_migration.create(self._engine, checkfirst=True)
|
||||
|
||||
def _query(self, q, **kwds):
|
||||
return self._engine.execute(sqltext(q), **kwds)
|
||||
|
||||
def current_state(self):
|
||||
row = self._query("""
|
||||
SELECT state FROM migration
|
||||
""").fetchone()
|
||||
if row is None:
|
||||
return PRE_MIGRATION
|
||||
return row[0]
|
||||
|
||||
def current_state_name(self):
|
||||
state = self.current_state()
|
||||
if state == PRE_MIGRATION:
|
||||
return "PRE_MIGRATION"
|
||||
if state == MIGRATING:
|
||||
return "MIGRATING"
|
||||
if state == POST_MIGRATION:
|
||||
return "POST_MIGRATION"
|
||||
return "WTF?"
|
||||
|
||||
def begin_migration(self):
|
||||
self._set_current_state(MIGRATING)
|
||||
|
||||
def complete_migration(self):
|
||||
self._migrate_data()
|
||||
self._set_current_state(POST_MIGRATION)
|
||||
|
||||
def reset_to_pre_migration_state(self):
|
||||
self._clear_storage()
|
||||
self._set_current_state(PRE_MIGRATION)
|
||||
|
||||
def _set_current_state(self, state):
|
||||
r = self._query("""
|
||||
UPDATE migration SET state=:state
|
||||
""", state=state)
|
||||
if r.rowcount == 0:
|
||||
self._query("""
|
||||
INSERT INTO migration (state) VALUES (:state)
|
||||
""", state=state)
|
||||
|
||||
def _migrate_data(self):
|
||||
# Migrating data is remarkably easy when it's already in the same db!
|
||||
self._query("""
|
||||
UPDATE bso SET userid=2 WHERE userid=1;
|
||||
""")
|
||||
self._query("""
|
||||
UPDATE user_collections SET userid=2 WHERE userid=1;
|
||||
""")
|
||||
|
||||
def _clear_storage(self):
|
||||
self._query("""
|
||||
DELETE FROM batch_upload_items;
|
||||
""")
|
||||
self._query("""
|
||||
DELETE FROM batch_uploads;
|
||||
""")
|
||||
self._query("""
|
||||
DELETE FROM bso;
|
||||
""")
|
||||
self._query("""
|
||||
DELETE FROM collections;
|
||||
""")
|
||||
self._query("""
|
||||
DELETE FROM user_collections;
|
||||
""")
|
||||
|
||||
def includeme(config):
|
||||
sqluri = config.registry.settings["sqluri"]
|
||||
config.registry["MigrationStateManager"] = MigrationStateManager(sqluri)
|
@ -1,240 +0,0 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
"""
|
||||
Simple node-assignment backend using a single, static node.
|
||||
|
||||
This is a greatly-simplified node-assignment backend. It keeps user records
|
||||
in an SQL database, but does not attempt to do any node management. All users
|
||||
are implicitly assigned to a single, static node.
|
||||
|
||||
"""
|
||||
import time
|
||||
|
||||
try:
|
||||
from urlparse import urlparse
|
||||
except ImportError:
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from mozsvc.exceptions import BackendError
|
||||
|
||||
from sqlalchemy import Column, Integer, String, BigInteger, Index
|
||||
from sqlalchemy import create_engine, Table, MetaData
|
||||
from sqlalchemy.pool import QueuePool
|
||||
from sqlalchemy.sql import text as sqltext
|
||||
|
||||
from tokenserver.assignment import INodeAssignment
|
||||
from zope.interface import implements
|
||||
|
||||
|
||||
metadata = MetaData()
|
||||
|
||||
|
||||
users = Table(
|
||||
"users",
|
||||
metadata,
|
||||
Column("uid", Integer(), primary_key=True, autoincrement=True,
|
||||
nullable=False),
|
||||
Column("service", String(32), nullable=False),
|
||||
Column("email", String(255), nullable=False),
|
||||
Column("generation", BigInteger(), nullable=False),
|
||||
Column("client_state", String(32), nullable=False),
|
||||
Column("created_at", BigInteger(), nullable=False),
|
||||
Column("replaced_at", BigInteger(), nullable=True),
|
||||
Column("keys_changed_at", BigInteger(), nullable=True),
|
||||
Column("node", String(255), nullable=True),
|
||||
Index('lookup_idx', 'email', 'service', 'created_at'),
|
||||
)
|
||||
|
||||
|
||||
_GET_USER_RECORDS = sqltext("""\
|
||||
select
|
||||
uid, generation, client_state, created_at, replaced_at,
|
||||
keys_changed_at, node
|
||||
from
|
||||
users
|
||||
where
|
||||
email = :email
|
||||
and
|
||||
service = :service
|
||||
order by
|
||||
created_at desc, uid desc
|
||||
limit
|
||||
20
|
||||
""")
|
||||
|
||||
|
||||
_CREATE_USER_RECORD = sqltext("""\
|
||||
insert into
|
||||
users
|
||||
(service, email, generation, client_state, created_at, replaced_at,
|
||||
keys_changed_at, node)
|
||||
values
|
||||
(:service, :email, :generation, :client_state, :timestamp, NULL,
|
||||
:keys_changed_at, :node)
|
||||
""")
|
||||
|
||||
|
||||
_UPDATE_GENERATION_NUMBER = sqltext("""\
|
||||
update
|
||||
users
|
||||
set
|
||||
generation = :generation
|
||||
where
|
||||
service = :service and email = :email and
|
||||
generation < :generation and replaced_at is null
|
||||
""")
|
||||
|
||||
|
||||
_REPLACE_USER_RECORDS = sqltext("""\
|
||||
update
|
||||
users
|
||||
set
|
||||
replaced_at = :timestamp
|
||||
where
|
||||
service = :service and email = :email
|
||||
and replaced_at is null and created_at < :timestamp
|
||||
""")
|
||||
|
||||
|
||||
def get_timestamp():
|
||||
"""Get current timestamp in milliseconds."""
|
||||
return int(time.time() * 1000)
|
||||
|
||||
|
||||
class StaticNodeAssignment(object):
|
||||
implements(INodeAssignment)
|
||||
|
||||
def __init__(self, sqluri, node_url, **kw):
|
||||
self.sqluri = sqluri
|
||||
self.node_url = node_url
|
||||
self.driver = urlparse(sqluri).scheme.lower()
|
||||
sqlkw = {
|
||||
"logging_name": "syncserver",
|
||||
"connect_args": {},
|
||||
"poolclass": QueuePool,
|
||||
"pool_reset_on_return": True,
|
||||
}
|
||||
if self.driver == "sqlite":
|
||||
# We must mark it as safe to share sqlite connections between
|
||||
# threads. The pool will ensure there's no race conditions.
|
||||
sqlkw["connect_args"]["check_same_thread"] = False
|
||||
# If using a :memory: database, we must use a QueuePool of
|
||||
# size 1 so that a single connection is shared by all threads.
|
||||
if urlparse(sqluri).path.lower() in ("/", "/:memory:"):
|
||||
sqlkw["pool_size"] = 1
|
||||
sqlkw["max_overflow"] = 0
|
||||
if "mysql" in self.driver:
|
||||
# Guard against the db closing idle conections.
|
||||
sqlkw["pool_recycle"] = kw.get("pool_recycle", 3600)
|
||||
self._engine = create_engine(sqluri, **sqlkw)
|
||||
users.create(self._engine, checkfirst=True)
|
||||
|
||||
def get_user(self, service, email, **kw):
|
||||
params = {'service': service, 'email': email}
|
||||
res = self._engine.execute(_GET_USER_RECORDS, **params)
|
||||
try:
|
||||
row = res.fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
# The first row is the most up-to-date user record.
|
||||
user = {
|
||||
'email': email,
|
||||
'uid': row.uid,
|
||||
'node': self.node_url,
|
||||
'generation': row.generation,
|
||||
'client_state': row.client_state,
|
||||
'first_seen_at': row.created_at,
|
||||
'old_client_states': {},
|
||||
'keys_changed_at': row.keys_changed_at,
|
||||
}
|
||||
# Any subsequent rows are due to old client-state values.
|
||||
old_row = res.fetchone()
|
||||
update_replaced_at = False
|
||||
while old_row is not None:
|
||||
if old_row.client_state != user['client_state']:
|
||||
user['old_client_states'][old_row.client_state] = True
|
||||
# Make sure each old row is marked as replaced.
|
||||
# They might not be, due to races in row creation.
|
||||
if old_row.replaced_at is None:
|
||||
update_replaced_at = True
|
||||
old_row = res.fetchone()
|
||||
if update_replaced_at:
|
||||
self._engine.execute(_REPLACE_USER_RECORDS, {
|
||||
'service': service,
|
||||
'email': user['email'],
|
||||
'timestamp': row.created_at,
|
||||
}).close()
|
||||
return user
|
||||
finally:
|
||||
res.close()
|
||||
|
||||
def allocate_user(self, service, email, generation=0, client_state='',
|
||||
keys_changed_at=0, **kw):
|
||||
now = get_timestamp()
|
||||
params = {
|
||||
'service': service, 'email': email, 'generation': generation,
|
||||
'client_state': client_state, 'timestamp': now,
|
||||
'keys_changed_at': keys_changed_at, 'node': self.node_url,
|
||||
}
|
||||
res = self._engine.execute(_CREATE_USER_RECORD, **params)
|
||||
res.close()
|
||||
return {
|
||||
'email': email,
|
||||
'uid': res.lastrowid,
|
||||
'node': self.node_url,
|
||||
'generation': generation,
|
||||
'client_state': client_state,
|
||||
'first_seen_at': now,
|
||||
'old_client_states': {},
|
||||
'keys_changed_at': keys_changed_at,
|
||||
}
|
||||
|
||||
def update_user(self, service, user, generation=None, client_state=None,
|
||||
keys_changed_at=0, node=None, **kw):
|
||||
if client_state is None:
|
||||
# uid can stay the same, just update the generation number.
|
||||
if generation is not None:
|
||||
params = {
|
||||
'service': service,
|
||||
'email': user['email'],
|
||||
'generation': generation,
|
||||
}
|
||||
res = self._engine.execute(_UPDATE_GENERATION_NUMBER, **params)
|
||||
res.close()
|
||||
user['generation'] = max(generation, user['generation'])
|
||||
else:
|
||||
# reject previously-seen client-state strings.
|
||||
if client_state == user['client_state']:
|
||||
raise BackendError('previously seen client-state string')
|
||||
if client_state in user['old_client_states']:
|
||||
raise BackendError('previously seen client-state string')
|
||||
# need to create a new record for new client_state.
|
||||
if generation is not None:
|
||||
generation = max(user['generation'], generation)
|
||||
else:
|
||||
generation = user['generation']
|
||||
now = get_timestamp()
|
||||
params = {
|
||||
'service': service, 'email': user['email'],
|
||||
'generation': generation, 'client_state': client_state,
|
||||
'timestamp': now,
|
||||
'keys_changed_at': keys_changed_at, 'node': node,
|
||||
}
|
||||
res = self._engine.execute(_CREATE_USER_RECORD, **params)
|
||||
res.close()
|
||||
user['uid'] = res.lastrowid
|
||||
user['generation'] = generation
|
||||
user['old_client_states'][user['client_state']] = True
|
||||
user['client_state'] = client_state
|
||||
user['keys_changed_at'] = keys_changed_at
|
||||
user['node'] = node
|
||||
# Mark old records as having been replaced.
|
||||
# If we crash here, they are unmarked and we may fail to
|
||||
# garbage collect them for a while, but the active state
|
||||
# will be undamaged.
|
||||
params = {
|
||||
'service': service, 'email': user['email'], 'timestamp': now
|
||||
}
|
||||
res = self._engine.execute(_REPLACE_USER_RECORDS, **params)
|
||||
res.close()
|
@ -0,0 +1,42 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
from pyramid import httpexceptions
|
||||
|
||||
import syncserver.migration
|
||||
|
||||
|
||||
def interpose_migration_state_errors(handler, registry):
    """Tween factory that errors-out storage requests based on migration state.

    It's a bit weird to do this in a tween, but it means we can use the
    existing syncstorage app without any changes, by just interposing a
    bit of code in front of it.
    """

    def tween(request):
        # Anything that isn't a storage request passes straight through.
        if not request.path.startswith("/storage/"):
            return handler(request)
        state = request.registry["MigrationStateManager"].current_state()
        if state == syncserver.migration.MIGRATING:
            # The storage node is unavailable while migration is in progress.
            return httpexceptions.HTTPServiceUnavailable(body="0")
        if state == syncserver.migration.POST_MIGRATION:
            # After migration, the old storage node (uid 1) gives a 401.
            if request.path.startswith("/storage/1.5/1/"):
                return httpexceptions.HTTPUnauthorized(body="0")
        elif state == syncserver.migration.PRE_MIGRATION:
            # We won't do this in production, but for testing locally,
            # 401-inate the new storage node (uid 2) if we haven't migrated
            # yet; this forces clients to refresh their token and go back
            # to the old node.
            if request.path.startswith("/storage/1.5/2/"):
                return httpexceptions.HTTPUnauthorized(body="0")
        return handler(request)

    return tween
|
||||
|
||||
|
||||
def includeme(config):
    """Register all the SyncServer tweens with the given Pyramid config."""
    tween_path = "syncserver.tweens.interpose_migration_state_errors"
    config.add_tween(tween_path)
|
@ -0,0 +1,100 @@
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
# You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
"""
|
||||
|
||||
Fake node-assignment backend and control interface.
|
||||
|
||||
In addition to hosting a simple storage node, this server hosts a fake tokenserver
|
||||
node-assignment interface and a little management page that can toggle its behaviour
|
||||
to simulate the storage node migration. It supports the following states:
|
||||
|
||||
* Pre-migration: all requests to the tokenserver endpoint are assigned uid 1 and
|
||||
are allowed to proceed through to accessing the storage backend.
|
||||
|
||||
* Migrating: all requests to the tokenserver endpoint are assigned uid 1, but when
|
||||
  they try to access the storage node they get a 503 error.
|
||||
|
||||
* Post-migration: all requests to the tokenserver endpoint are assigned uid 2 and
|
||||
are allowed to proceed through to accessing the storage backend;
|
||||
a tween enforces that storage requests for uid 1 will receive a
|
||||
401 error in this state.
|
||||
|
||||
This broadly simulates the different states we expect to move the servers through
|
||||
during the production deployment.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from cornice import Service
|
||||
from pyramid import httpexceptions
|
||||
from pyramid.response import Response
|
||||
from pyramid.interfaces import IAuthenticationPolicy
|
||||
|
||||
import syncserver.migration
|
||||
|
||||
# A GET on / returns a simple management interface,
# while POST requests control the state of the server.

# Cornice service routing both GET and POST on the site root.
management = Service(name='management', path='/')
|
||||
|
||||
@management.get(renderer="string")
def _management(request):
    """Render the HTML page for the server management interface."""
    template_path = os.path.join(os.path.dirname(__file__), 'management.html')
    with open(template_path) as template_file:
        template = template_file.read()
    # The template carries a single {migration_state} placeholder.
    state_name = request.registry["MigrationStateManager"].current_state_name()
    return Response(template.format(migration_state=state_name),
                    content_type="text/html")
|
||||
|
||||
@management.post()
def _management_post(request):
    """Command handler for the server management interface.

    Reads a "cmd" form field naming a migration-state transition, applies
    it to the MigrationStateManager, and redirects back to the management
    page; unknown commands receive a 400 response.

    Renamed from ``_management`` to avoid redefining (and shadowing) the
    GET view of the same name above (flake8 F811); cornice registration
    via the decorator is unaffected by the function name.
    """
    mgr = request.registry["MigrationStateManager"]
    cmd = request.POST["cmd"]
    if cmd == "begin_migration":
        mgr.begin_migration()
    elif cmd == "complete_migration":
        mgr.complete_migration()
    elif cmd == "reset":
        mgr.reset_to_pre_migration_state()
    else:
        return httpexceptions.HTTPBadRequest(body="Unknown cmd: {}".format(cmd))
    # Redirect so a browser refresh doesn't re-submit the command.
    return httpexceptions.HTTPFound(request.relative_url("/", to_application=True))
|
||||
|
||||
|
||||
# The fake tokenserver endpoint is hosted at /token/1.0/sync/1.5

# Cornice service mimicking the tokenserver node-assignment API.
token = Service(name='token', path='/token/1.0/sync/1.5')
|
||||
|
||||
@token.get()
def _token(request):
    """Fake tokenserver endpoint.

    This endpoint ignores all auth and just assigns the caller a uid of 1
    or 2 depending on what state the server is currently in: uid 1 before
    and during migration, uid 2 once migration is complete.
    """
    migration_state = request.registry["MigrationStateManager"].current_state()
    if migration_state != syncserver.migration.POST_MIGRATION:
        uid = 1
    else:
        uid = 2

    endpoint = request.relative_url("/storage/1.5/{}".format(uid), to_application=True)

    # Sign a token using the fixed uid, for the storage backend to accept.
    # Use a distinct local name ("token_id") so we don't shadow the
    # module-level `token` Service object.
    auth_policy = request.registry.getUtility(IAuthenticationPolicy)
    token_id, key = auth_policy.encode_hawk_id(request, uid)

    return {
        'id': token_id,
        'key': key,
        'uid': uid,
        'api_endpoint': endpoint,
        'duration': 60,
        'hashalg': 'sha256',
        'hashed_fxa_uid': '0' * 64,
    }
|
@ -1,2 +0,0 @@
|
||||
import syncserver

# WSGI entry point: servers look up the module-level "application" object.
application = syncserver.main()
|
Loading…
Reference in New Issue