Hack up a really simple migration testing server

migration-testing
Ryan Kelly 4 years ago
parent 8c2c2ed76b
commit 8bfe371045
No known key found for this signature in database
GPG Key ID: FB70C973A037D258

.circleci/config.yml
@@ -1,63 +0,0 @@
version: 2
jobs:
build:
docker:
- image: circleci/python
steps:
- checkout
- setup_remote_docker
- run:
name: Create version.json
command: |
printf '{"commit":"%s","version":"%s","source":"https://github.com/%s/%s","build":"%s"}\n' \
"$CIRCLE_SHA1" \
"$CIRCLE_TAG" \
"$CIRCLE_PROJECT_USERNAME" \
"$CIRCLE_PROJECT_REPONAME" \
"$CIRCLE_BUILD_URL" | tee version.json
- store_artifacts:
path: version.json
- run:
name: Build deployment container image
command: docker build -t app:build .
- run:
name: Test flake8
command: docker run -it app:build test_flake8
- run:
name: Test nose
command: docker run -it app:build test_nose
- run:
name: Functional tests
command: docker run -it app:build test_functional
- run:
name: Push to Dockerhub
command: |
if [ "${CIRCLE_BRANCH}" == "master" ]; then
bin/ci/deploy-dockerhub.sh latest
fi
if [[ "${CIRCLE_BRANCH}" == feature* ]] || [[ "${CIRCLE_BRANCH}" == dockerpush* ]]; then
bin/ci/deploy-dockerhub.sh "$CIRCLE_BRANCH"
fi
if [ -n "${CIRCLE_TAG}" ]; then
bin/ci/deploy-dockerhub.sh "$CIRCLE_TAG"
fi
workflows:
version: 2
# workflow jobs are _not_ run in tag builds by default
# we use filters to whitelist jobs that should be run for tags
# workflow jobs are run in _all_ branch builds by default
# we use filters to blacklist jobs that shouldn't be run for a branch
# see: https://circleci.com/docs/2.0/workflows/#git-tag-job-execution
build-test-push:
jobs:
- build:
filters:
tags:
only: /.*/

.gitignore
@@ -1,8 +0,0 @@
*.pyc
local
*.egg-info
*.swp
\.coverage
*~
nosetests.xml
syncserver.db

.travis.yml
@@ -1,27 +0,0 @@
language: python
python:
- "2.7"
# The way the selection of the Python version is currently made in Makefile
# leads to travis always picking up Python 2 for the task.
# All versions of Python are apparently present in a travis environment.
# Once the makefile has been adjusted the following lines should be enabled.
# - "3.5"
# - "3.6"
# - "3.7-dev"
notifications:
email:
- rfkelly@mozilla.com
irc:
channels:
- "irc.mozilla.org#services-dev"
use_notice: false
skip_join: false
install:
- pip install virtualenv
- make build
script:
- make test

Dockerfile
@@ -1,28 +0,0 @@
FROM python:2.7-alpine
RUN addgroup -g 1001 app \
&& adduser -u 1001 -S -D -G app -s /usr/sbin/nologin app
ENV LANG C.UTF-8
WORKDIR /app
# install syncserver dependencies
COPY ./requirements.txt /app/requirements.txt
COPY ./dev-requirements.txt /app/dev-requirements.txt
RUN apk --no-cache update \
&& apk add dumb-init libstdc++ libffi-dev openssl-dev g++ \
&& pip install --upgrade pip \
&& pip install --upgrade --no-cache-dir -r requirements.txt \
&& pip install --upgrade --no-cache-dir -r dev-requirements.txt \
&& apk del g++
COPY . /app
RUN python ./setup.py develop
# run as non-privileged user
USER app
# run the server by default
ENTRYPOINT ["/usr/bin/dumb-init", "/app/docker-entrypoint.sh"]
CMD ["server"]

MANIFEST.in
@@ -1,3 +1,2 @@
include syncserver.ini
include syncserver.wsgi
include syncserver/tests.ini
include syncserver/management.html

Makefile
@@ -17,7 +17,6 @@ CFLAGS = "-Wno-error -Wno-error=format-security"
INSTALL = CFLAGS=$(CFLAGS) ARCHFLAGS=$(ARCHFLAGS) $(ENV)/bin/pip install
.PHONY: all
all: build
@@ -30,22 +29,6 @@ $(ENV)/COMPLETE: requirements.txt
$(ENV)/bin/python ./setup.py develop
touch $(ENV)/COMPLETE
.PHONY: test
test: | $(TOOLS)
$(ENV)/bin/flake8 ./syncserver
$(ENV)/bin/nosetests -s syncstorage.tests
# Tokenserver tests currently broken due to incorrect file paths
# $(ENV)/bin/nosetests -s tokenserver.tests
# Test against a running server.
$(ENV)/bin/gunicorn --paste syncserver/tests.ini 2> /dev/null & SERVER_PID=$$!; \
sleep 2; \
$(ENV)/bin/python -m syncstorage.tests.functional.test_storage \
--use-token-server http://localhost:5000/token/1.0/sync/1.5; \
kill $$SERVER_PID
$(TOOLS): | $(ENV)/COMPLETE
$(INSTALL) -r dev-requirements.txt
.PHONY: serve
serve: | $(ENV)/COMPLETE

README.rst
@@ -1,179 +1,13 @@
Run-Your-Own Firefox Sync Server
================================
The Very Hacky Migration-Testing Sync Server
============================================
.. image:: https://circleci.com/gh/mozilla-services/syncserver/tree/master.svg?style=svg
:target: https://circleci.com/gh/mozilla-services/syncserver/tree/master
This is a hacked-up sync server designed to help test client behaviour
during the migration from old MySQL-backed sync storage nodes to the new
Spanner-backed durable mega-node.
.. image:: https://img.shields.io/docker/automated/mozilla-services/syncserver.svg?style=flat-square
:target: https://hub.docker.com/r/mozilla/syncserver/
Run the server using `make serve`, and it'll bind to http://localhost:5000/.
Open up that URL for an incredibly bare-bones management interface.
This is an all-in-one package for running a self-hosted Firefox Sync server.
It bundles the "tokenserver" project for authentication and the "syncstorage"
project for storage, to produce a single stand-alone webapp.
Complete installation instructions are available at:
https://mozilla-services.readthedocs.io/en/latest/howtos/run-sync-1.5.html
Quickstart
----------
The Sync Server software runs using **python 2.7**, and the build
process requires **make** and **virtualenv**. You will need to have the
following packages (or similar, depending on your operating system) installed:
- python2.7
- python2.7-dev
- python-virtualenv
- gcc and g++
- make
Take a checkout of this repository, then run "make build" to pull in the
necessary python package dependencies::
$ git clone https://github.com/mozilla-services/syncserver
$ cd syncserver
$ make build
To sanity-check that things got installed correctly, do the following::
$ make test
Now you can run the server::
$ make serve
This should start a server on http://localhost:5000/.
Now go into Firefox's `about:config` page, search for a setting named
"tokenServerURI", and change it to point to your server::
identity.sync.tokenserver.uri: http://localhost:5000/token/1.0/sync/1.5
(Prior to Firefox 42, the TokenServer preference name for Firefox Desktop was
"services.sync.tokenServerURI". While the old preference name will work in
Firefox 42 and later, the new preference is recommended as the old preference
name will be reset when the user signs out from Sync causing potential
confusion.)
Firefox should now sync against your local server rather than the default
Mozilla-hosted servers.
For more details on setting up a stable deployment, see:
https://mozilla-services.readthedocs.io/en/latest/howtos/run-sync-1.5.html
Customization
-------------
All customization of the server can be done by editing the file
"syncserver.ini", which contains lots of comments to help you on
your way. Things you might like to change include:
* The client-visible hostname for your server. Edit the "public_url"
key under the [syncserver] section.
* The database in which to store sync data. Edit the "sqluri" setting
under the [syncserver] section.
* The secret key to use for signing auth tokens. Find the "secret"
entry under the [syncserver] section and follow the instructions
in the comment to replace it with a strong random key.
Database Backend Modules
------------------------
If your python installation doesn't provide the "sqlite" module by default,
you may need to install it as a separate package::
$ ./local/bin/pip install pysqlite2
Similarly, if you want to use a different database backend you will need
to install an appropriate python module, e.g::
$ ./local/bin/pip install PyMySQL
$ ./local/bin/pip install psycopg2
Running under Docker
--------------------
`Dockerhub Page <https://hub.docker.com/r/mozilla/syncserver>`_
There is experimental support for running the server inside a Docker
container. The docker image runs with UID/GID 1001/1001.
Build the image like this::
$ docker build -t syncserver:latest .
Then you can run the server by passing in configuration options as
environment variables, like this::
$ docker run --rm \
-p 5000:5000 \
-e SYNCSERVER_PUBLIC_URL=http://localhost:5000 \
-e SYNCSERVER_SECRET=<PUT YOUR SECRET KEY HERE> \
-e SYNCSERVER_SQLURI=sqlite:////tmp/syncserver.db \
-e SYNCSERVER_BATCH_UPLOAD_ENABLED=true \
-e SYNCSERVER_FORCE_WSGI_ENVIRON=false \
-e PORT=5000 \
mozilla/syncserver:latest
or
$ docker run --rm \
-p 5000:5000 \
-e SYNCSERVER_PUBLIC_URL=http://localhost:5000 \
-e SYNCSERVER_SECRET_FILE=<PUT YOUR SECRET KEY FILE LOCATION HERE> \
-e SYNCSERVER_SQLURI=sqlite:////tmp/syncserver.db \
-e SYNCSERVER_BATCH_UPLOAD_ENABLED=true \
-e SYNCSERVER_FORCE_WSGI_ENVIRON=false \
-e PORT=5000 \
-v /secret/file/at/host:<PUT YOUR SECRET KEY FILE LOCATION HERE> \
mozilla/syncserver:latest
Don't forget to `generate a random secret key <https://mozilla-services.readthedocs.io/en/latest/howtos/run-sync-1.5.html#further-configuration>`_
to use in the `SYNCSERVER_SECRET` environment variable or mount your secret key file!
And you can test whether it's running correctly by using the built-in
functional test suite, like so::
$ /usr/local/bin/python -m syncstorage.tests.functional.test_storage \
--use-token-server http://localhost:5000/token/1.0/sync/1.5
If you'd like a persistent setup, you can mount a volume as well::
$ docker run -d \
-v /syncserver:/data \
-p 5000:5000 \
-e SYNCSERVER_PUBLIC_URL=http://localhost:5000 \
-e SYNCSERVER_SECRET=<PUT YOUR SECRET KEY HERE> \
-e SYNCSERVER_SQLURI=sqlite:////data/syncserver.db \
-e SYNCSERVER_BATCH_UPLOAD_ENABLED=true \
-e SYNCSERVER_FORCE_WSGI_ENVIRON=false \
-e PORT=5000 \
mozilla/syncserver:latest
Make sure that /syncserver is owned by 1001:1001
Removing Mozilla-hosted data
----------------------------
If you have previously uploaded Firefox Sync data
to the Mozilla-hosted storage service
and would like to remove it,
you can use the following script to do so::
$ pip install PyFxA
$ python ./bin/delete_user_data.py user@example.com
Questions, Feedback
-------------------
- IRC channel: #sync. See http://irc.mozilla.org/
- Mailing list: https://mail.mozilla.org/listinfo/services-dev
Configure your browser to use "http://localhost:5000/token/1.0/sync/1.5" for
its tokenserver URL. Sync it. Use the management interface to trigger
migration events. See what happens. It'll be fun!
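
For scripted testing, the same management commands can be driven with plain
HTTP POSTs. Here's a minimal sketch (not part of this repo) using the
`requests` package, with `cmd` values taken from the forms in
`syncserver/management.html`::

    import requests

    SERVER = "http://localhost:5000/"

    # Walk the server through a full migration cycle.
    for cmd in ("begin_migration", "complete_migration", "reset"):
        r = requests.post(SERVER, data={"cmd": cmd})
        r.raise_for_status()
        print("{} -> {}".format(cmd, r.status_code))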

bin/ci/deploy-dockerhub.sh
@@ -1,35 +0,0 @@
#!/bin/bash
# THIS IS MEANT TO BE RUN BY CI
set -e
# Usage: retry MAX CMD...
# Retry CMD up to MAX times. If it fails MAX times, returns failure.
# Example: retry 3 docker push "$DOCKERHUB_REPO:$TAG"
function retry() {
max=$1
shift
count=1
until "$@"; do
count=$((count + 1))
if [[ $count -gt $max ]]; then
return 1
fi
echo "$count / $max"
done
return 0
}
# configure docker creds
retry 3 echo "$DOCKER_PASS" | docker login -u="$DOCKER_USER" --password-stdin
# docker tag and push git branch to dockerhub
if [ -n "$1" ]; then
[ "$1" == master ] && TAG=latest || TAG="$1"
docker tag app:build "$DOCKERHUB_REPO:$TAG" ||
(echo "Couldn't tag app:build as $DOCKERHUB_REPO:$TAG" && false)
retry 3 docker push "$DOCKERHUB_REPO:$TAG" ||
(echo "Couldn't push $DOCKERHUB_REPO:$TAG" && false)
echo "Pushed $DOCKERHUB_REPO:$TAG"
fi

bin/delete_user_data.py
@@ -1,91 +0,0 @@
#
# A helper script to delete user data from a Sync storage server.
#
# You can use this script to explicitly delete stored sync data
# for a user, without having to connect a Firefox profile and
# without having to reset their password. It may be useful if
# you've started running a self-hosted storage server and want
# to delete data that was previously stored on the Mozilla-hosted
# servers.
#
# Use it like so:
#
# $> pip install PyFxA
# $> python delete_user_data.py user@example.com
#
# The script makes a best-effort attempt to sign in to the user's
# account, authenticate to the Firefox Sync Tokenserver, and delete
# the user's stored sync data. The login process might fail due to
# things like rate-limiting, server-side security measures, or API
# changes in the login process.
#
import sys
import getpass
import hashlib
import argparse
import urlparse
import requests
import hawkauthlib
import fxa.core
DEFAULT_FXA_URI = "https://api.accounts.firefox.com"
DEFAULT_TOKENSERVER_URI = "https://token.services.mozilla.com"
def main(argv):
parser = argparse.ArgumentParser(description="Delete Firefox Sync data")
parser.add_argument("email",
help="Email of the account for which to delete data")
parser.add_argument("--accounts-uri", default=DEFAULT_FXA_URI,
help="URI of the Firefox Accounts API server")
parser.add_argument("--tokenserver-uri", default=DEFAULT_TOKENSERVER_URI,
help="URI of the Firefox Sync tokenserver")
args = parser.parse_args(argv)
# Sign in to the account.
c = fxa.core.Client(args.accounts_uri)
password = getpass.getpass("Password for {}: ".format(args.email))
s = c.login(args.email, password, keys=True)
try:
# Verify the session if necessary.
# TODO: this won't work if the user has enabled two-step auth.
status = s.get_email_status()
if not status["sessionVerified"]:
code = raw_input("Enter verification link or code: ")
if "?" in code:
# They copy-pasted the full URL.
code_url = urlparse.urlparse(code)
code = urlparse.parse_qs(code_url.query)["code"][0]
s.verify_email_code(code)
# Prepare authentication details for tokenserver.
(_, kB) = s.fetch_keys()
xcs = hashlib.sha256(kB).hexdigest()[:32]
auth = s.get_identity_assertion(args.tokenserver_uri)
# Auth to tokenserver, find sync storage node.
token_uri = urlparse.urljoin(args.tokenserver_uri, "1.0/sync/1.5")
r = requests.get(token_uri, headers={
"Authorization": "BrowserID " + auth,
"X-Client-State": xcs,
})
r.raise_for_status()
node = r.json()
api_endpoint = node["api_endpoint"]
hawk_id = node["id"].encode("ascii")
hawk_key = node["key"].encode("ascii")
print "Deleting from", api_endpoint
req = requests.Request("DELETE", api_endpoint).prepare()
hawkauthlib.sign_request(req, hawk_id, hawk_key)
r = requests.session().send(req)
r.raise_for_status()
print r
finally:
s.destroy_session()
if __name__ == "__main__":
main(sys.argv[1:])

dev-requirements.txt
@@ -1,2 +0,0 @@
flake8==3.3
nose==1.3.7

docker-entrypoint.sh
@@ -1,50 +0,0 @@
#!/bin/sh
cd $(dirname $0)
case "$1" in
server)
export SYNCSERVER_SQLURI="${SYNCSERVER_SQLURI:-sqlite:///tmp/syncserver.db}"
exec gunicorn \
--bind ${HOST-0.0.0.0}:${PORT-5000} \
--forwarded-allow-ips="${SYNCSERVER_FORWARDED_ALLOW_IPS:-127.0.0.1,172.17.0.1}" \
syncserver.wsgi_app
;;
test_all)
$0 test_flake8
$0 test_nose
$0 test_functional
;;
test_flake8)
echo "test - flake8"
flake8 syncserver
;;
test_nose)
echo "test - nose"
nosetests --verbose --nocapture syncstorage.tests
;;
test_functional)
echo "test - functional"
# run functional tests
gunicorn --paste ./syncserver/tests.ini &
SERVER_PID=$!
sleep 2
$0 test_endpoint http://localhost:5000
kill $SERVER_PID
;;
test_endpoint)
exec python -m syncstorage.tests.functional.test_storage \
--use-token-server $2/token/1.0/sync/1.5
;;
*)
echo "Unknown CMD, $1"
exit 1
;;
esac

requirements.txt
@@ -9,5 +9,4 @@ zope.component==4.2.1
configparser==3.5
mozsvc==0.9
futures==3.0
https://github.com/mozilla-services/tokenserver/archive/1.4.5.zip
https://github.com/mozilla-services/server-syncstorage/archive/1.6.14.zip

syncserver.ini
@@ -7,52 +7,3 @@ timeout = 30
[app:main]
use = egg:syncserver
[syncserver]
# This must be edited to point to the public URL of your server,
# i.e. the URL as seen by Firefox.
public_url = http://localhost:5000/
# By default, syncserver will accept identity assertions issued by
# any BrowserID issuer. The line below restricts it to accept assertions
# from just the production Firefox Account servers. If you are hosting
# your own account server, put its public URL here instead.
identity_provider = https://accounts.firefox.com/
# This defines the database in which to store all server data.
#sqluri = sqlite:////tmp/syncserver.db
#sqluri = pymysql://sample_user:sample_password@127.0.0.1/syncstorage
# This is a secret key used for signing authentication tokens.
# It should be long and randomly-generated.
# The following command will give a suitable value on *nix systems:
#
# head -c 20 /dev/urandom | sha1sum
#
# If not specified then the server will generate a temporary one at startup.
#secret = INSERT_SECRET_KEY_HERE
# Set this to "false" to disable new-user signups on the server.
# Only requests by existing accounts will be honoured.
# allow_new_users = false
# Set this to "true" to work around a mismatch between public_url and
# the application URL as seen by python, which can happen in certain reverse-
# proxy hosting setups. It will overwrite the WSGI environ dict with the
# details from public_url. This could have security implications if e.g.
# you tell the app that it's on HTTPS but it's really on HTTP, so it should
# only be used as a last resort and after careful checking of server config.
force_wsgi_environ = false
[tokenserver]
# Use a custom MySQL based syncstorage node hosted at http://localhost:8000
# node_url = http://localhost:8000
# sqluri = pymysql://sample_user:sample_password@127.0.0.1/syncstorage_rs
[endpoints]
# Replace syncserver endpoints with an alternate server implementation, e.g.:
# MySQL based syncstorage-rs 1.5 server hosted at http://localhost:8000/1.5
# "{node}/1.5/{uid}"
# sync-1.5 = "http://localhost:8000/1.5/{uid}"

syncserver.wsgi
@@ -1,43 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
import site
from logging.config import fileConfig
try:
from ConfigParser import NoSectionError
except ImportError:
from configparser import NoSectionError
# detecting if virtualenv was used in this dir
_CURDIR = os.path.dirname(os.path.abspath(__file__))
_PY_VER = sys.version.split()[0][:3]
_SITE_PKG = os.path.join(_CURDIR, 'local', 'lib', 'python' + _PY_VER, 'site-packages')
# adding virtualenv's site-package and ordering paths
saved = sys.path[:]
if os.path.exists(_SITE_PKG):
site.addsitedir(_SITE_PKG)
for path in sys.path:
if path not in saved:
saved.insert(0, path)
sys.path[:] = saved
# setting up the egg cache to a place where apache can write
os.environ['PYTHON_EGG_CACHE'] = '/tmp/python-eggs'
# setting up logging
ini_file = os.path.join(_CURDIR, 'syncserver.ini')
try:
fileConfig(ini_file)
except NoSectionError:
pass
# running the app using Paste
from paste.deploy import loadapp
application = loadapp('config:%s' % ini_file)

syncserver/__init__.py
@@ -13,95 +13,28 @@ except ImportError:
import requests
from pyramid.response import Response
from pyramid.events import NewRequest, subscriber
try:
import requests.packages.urllib3.contrib.pyopenssl
HAS_PYOPENSSL = True
except ImportError:
HAS_PYOPENSSL = False
import mozsvc.config
from tokenserver.util import _JSONError
logger = logging.getLogger("syncserver")
DEFAULT_TOKENSERVER_BACKEND = "syncserver.staticnode.StaticNodeAssignment"
def includeme(config):
"""Install SyncServer application into the given Pyramid configurator."""
# Set the umask so that files are created with secure permissions.
# Necessary for e.g. created-on-demand sqlite database files.
os.umask(0o077)
# If PyOpenSSL is available, configure requests to use it.
# This helps improve security on older python versions.
if HAS_PYOPENSSL:
requests.packages.urllib3.contrib.pyopenssl.inject_into_urllib3()
settings = config.registry.settings
import_settings_from_environment_variables(settings)
# Sanity-check the deployment settings and provide sensible defaults.
public_url = settings.get("syncserver.public_url")
if public_url is None:
raise RuntimeError("you must configure syncserver.public_url")
public_url = public_url.rstrip("/")
settings["syncserver.public_url"] = public_url
secret = settings.get("syncserver.secret")
if secret is None:
secret = generate_random_hex_key(64)
sqluri = settings.get("syncserver.sqluri")
if sqluri is None:
rootdir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sqluri = "sqlite:///" + os.path.join(rootdir, "syncserver.db")
settings["sqluri"] = sqluri
# Automagically configure from IdP if one is given.
idp = settings.get("syncserver.identity_provider")
if idp is not None:
r = requests.get(urljoin(idp, '/.well-known/fxa-client-configuration'))
r.raise_for_status()
idp_config = r.json()
idp_issuer = urlparse(idp_config["auth_server_base_url"]).netloc
# Configure app-specific defaults based on top-level configuration.
settings.pop("config", None)
if "tokenserver.backend" not in settings:
# Default to our simple static node-assignment backend
settings["tokenserver.backend"] = DEFAULT_TOKENSERVER_BACKEND
if settings["tokenserver.backend"] == DEFAULT_TOKENSERVER_BACKEND:
# Provide some additional defaults for the default backend,
# unless overridden in the config.
if "tokenserver.sqluri" not in settings:
settings["tokenserver.sqluri"] = sqluri
if "tokenserver.node_url" not in settings:
settings["tokenserver.node_url"] = public_url
if "endpoints.sync-1.5" not in settings:
settings["endpoints.sync-1.5"] = "{node}/storage/1.5/{uid}"
if "tokenserver.monkey_patch_gevent" not in settings:
# Default to no gevent monkey-patching
settings["tokenserver.monkey_patch_gevent"] = False
if "tokenserver.applications" not in settings:
# Default to just the sync-1.5 application
settings["tokenserver.applications"] = "sync-1.5"
if "tokenserver.secrets.backend" not in settings:
# Default to a single fixed signing secret
settings["tokenserver.secrets.backend"] = "mozsvc.secrets.FixedSecrets"
settings["tokenserver.secrets.secrets"] = [secret]
if "tokenserver.allow_new_users" not in settings:
allow_new_users = settings.get("syncserver.allow_new_users")
if allow_new_users is not None:
settings["tokenserver.allow_new_users"] = allow_new_users
if "hawkauth.secrets.backend" not in settings:
# Default to the same secrets backend as the tokenserver
for key in settings.keys():
if key.startswith("tokenserver.secrets."):
newkey = "hawkauth" + key[len("tokenserver"):]
settings[newkey] = settings[key]
if "storage.backend" not in settings:
# Default to sql syncstorage backend
settings["storage.backend"] = "syncstorage.storage.sql.SQLStorage"
@@ -109,151 +42,27 @@ def includeme(config):
settings["storage.create_tables"] = True
if "storage.batch_upload_enabled" not in settings:
settings["storage.batch_upload_enabled"] = True
if "browserid.backend" not in settings:
# Default to local verifier to reduce external dependencies,
# unless an explicit verifier URL has been configured.
verifier_url = settings.get("syncserver.browserid_verifier")
if not verifier_url:
settings["browserid.backend"] = \
"tokenserver.verifiers.LocalBrowserIdVerifier"
else:
settings["browserid.backend"] = \
"tokenserver.verifiers.RemoteBrowserIdVerifier"
settings["browserid.verifier_url"] = verifier_url
# Use base of public_url as only audience
audience = urlunparse(urlparse(public_url)._replace(path=""))
settings["browserid.audiences"] = audience
# If an IdP was specified, allow it and only it as issuer.
if idp is not None:
settings["browserid.trusted_issuers"] = [idp_issuer]
settings["browserid.allowed_issuers"] = [idp_issuer]
if "oauth.backend" not in settings:
settings["oauth.backend"] = "tokenserver.verifiers.RemoteOAuthVerifier"
# If an IdP was specified, use it for oauth verification.
if idp is not None:
settings["oauth.server_url"] = idp_config["oauth_server_base_url"]
settings["oauth.default_issuer"] = idp_issuer
if "loggers" not in settings:
# Default to basic logging config.
root_logger = logging.getLogger("")
if not root_logger.handlers:
logging.basicConfig(level=logging.WARN)
if "fxa.metrics_uid_secret_key" not in settings:
# Default to a randomly-generated secret.
# This setting isn't useful in a self-hosted setup
# and setting a default avoids scary-sounding warnings.
settings["fxa.metrics_uid_secret_key"] = generate_random_hex_key(32)
logging.basicConfig(level=logging.INFO)
# Include the relevant sub-packages.
config.scan("syncserver", ignore=["syncserver.wsgi_app"])
if "hawkauth.secrets.backend" not in settings:
# Default to a secret that's not really that secret...
settings["hawkauth.secrets.backend"] = "mozsvc.secrets.FixedSecrets"
settings["hawkauth.secrets.secrets"] = ["secret!!"]
config.include("cornice")
config.include("syncserver.migration")
config.include("syncserver.tweens")
config.scan("syncserver.views")
config.include("syncstorage", route_prefix="/storage")
config.include("tokenserver", route_prefix="/token")
# Add a top-level "it works!" view.
def itworks(request):
return Response("it works!")
config.add_route('itworks', '/')
config.add_view(itworks, route_name='itworks')
def import_settings_from_environment_variables(settings, environ=None):
"""Helper function to import settings from environment variables.
This helper exists to allow the most commonly-changed settings to be
configured via environment variables, which is useful when deploying
with docker. For more complex configuration needs you should write
a .ini config file.
"""
if environ is None:
environ = os.environ
SETTINGS_FROM_ENVIRON = (
("SYNCSERVER_PUBLIC_URL", "syncserver.public_url", str),
("SYNCSERVER_SECRET", "syncserver.secret", str),
("SYNCSERVER_SQLURI", "syncserver.sqluri", str),
("SYNCSERVER_IDENTITY_PROVIDER", "syncserver.identity_provider", str),
("SYNCSERVER_BROWSERID_VERIFIER",
"syncserver.browserid_verifier",
str),
("SYNCSERVER_ALLOW_NEW_USERS",
"syncserver.allow_new_users",
str_to_bool),
("SYNCSERVER_FORCE_WSGI_ENVIRON",
"syncserver.force_wsgi_environ",
str_to_bool),
("SYNCSERVER_BATCH_UPLOAD_ENABLED",
"storage.batch_upload_enabled",
str_to_bool),
)
if "SYNCSERVER_SECRET_FILE" in environ:
settings["syncserver.secret"] = \
open(environ["SYNCSERVER_SECRET_FILE"]).read().strip()
for key, name, convert in SETTINGS_FROM_ENVIRON:
try:
settings[name] = convert(environ[key])
except KeyError:
pass
def str_to_bool(value):
"""Helper to convert textual boolean strings to actual booleans."""
if value.lower() in ("true", "on", "1", "yes"):
return True
if value.lower() in ("false", "off", "0", "no"):
return False
raise ValueError("unable to parse boolean from %r" % (value,))
def generate_random_hex_key(length):
return binascii.hexlify(os.urandom(length // 2))
@subscriber(NewRequest)
def reconcile_wsgi_environ_with_public_url(event):
"""Event-listener that checks and tweaks WSGI environ based on public_url.
This is a simple trick to help ensure that the configured public_url
matches the actual deployed address. It fixes parts of the WSGI
environ where it makes sense (e.g. SCRIPT_NAME) and warns about any parts
that seem obviously mis-configured (e.g. http:// versus https://).
It's very important to get public_url and WSGI environ matching exactly,
since they're used for browserid audience checking and HAWK signature
validation, so mismatches can easily cause strange and cryptic errors.
"""
request = event.request
public_url = request.registry.settings["syncserver.public_url"]
p_public_url = urlparse(public_url)
# If we don't have a SCRIPT_NAME, take it from the public_url.
# This is often the case if we're behind e.g. an nginx proxy that
# is serving us at some sub-path.
if not request.script_name:
request.script_name = p_public_url.path.rstrip("/")
# If the environ does not match public_url, requests are almost certainly
# going to fail due to auth errors. We can either bail out early, or we
# can forcibly clobber the WSGI environ with the values from public_url.
# This is a security risk if you've e.g. mis-configured the server, so
# it's not enabled by default.
application_url = request.application_url
if public_url != application_url:
if not request.registry.settings.get("syncserver.force_wsgi_environ"):
msg = "\n".join((
"The public_url setting doesn't match the application url.",
"This will almost certainly cause authentication failures!",
" public_url setting is: %s" % (public_url,),
" application url is: %s" % (application_url,),
"You can disable this check by setting the force_wsgi_environ",
"option in your config file, but do so at your own risk.",
))
logger.error(msg)
raise _JSONError([msg], status_code=500)
request.scheme = p_public_url.scheme
request.host = p_public_url.netloc
request.script_name = p_public_url.path.rstrip("/")
def get_configurator(global_config, **settings):
"""Load a SyncStorge configurator object from deployment settings."""
"""Load a mozsvc configurator object from deployment settings."""
config = mozsvc.config.get_configurator(global_config, **settings)
config.begin()
try:

syncserver/management.html
@@ -0,0 +1,19 @@
<html>
<head><title>Super Cool Migration Testing Management Interface 2.0</title></head>
<body>
<h1>Migration Testing Server Management Interface</h1>
<p><b>Current Migration State:</b> {migration_state}</p>
<form action="/" method="POST">
<input type="hidden" name="cmd" value="begin_migration" />
<input type="submit" value="Begin Migration" />
</form>
<form action="/" method="POST">
<input type="hidden" name="cmd" value="complete_migration" />
<input type="submit" value="Complete Migration" />
</form>
<form action="/" method="POST">
<input type="hidden" name="cmd" value="reset" />
<input type="submit" value="Reset" />
</form>
</body>
</html>

syncserver/migration.py
@@ -0,0 +1,113 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Helpers for fiddling with the migration state of the server.
We allow the server to be in one of three states:
* Pre-migration: the user is syncing as normal to the old backend.
* Migrating: we're actively moving the user's data to the new backend.
* Post-migration: we've finished moving the user's data to the new backend.
You can use the functions exposed by this module to move the server between
the different states.
"""
from sqlalchemy import Column, Integer
from sqlalchemy import create_engine, Table, MetaData
from sqlalchemy.sql import text as sqltext
PRE_MIGRATION = 1
MIGRATING = 2
POST_MIGRATION = 3
# A very simple db table in which to store the migration state.
_metadata = MetaData()
_migration = Table(
"migration",
_metadata,
Column("state", Integer(), nullable=False),
)
class MigrationStateManager(object):
def __init__(self, sqluri):
self.sqluri = sqluri
self._engine = create_engine(sqluri, pool_reset_on_return=True)
_migration.create(self._engine, checkfirst=True)
def _query(self, q, **kwds):
return self._engine.execute(sqltext(q), **kwds)
def current_state(self):
row = self._query("""
SELECT state FROM migration
""").fetchone()
if row is None:
return PRE_MIGRATION
return row[0]
def current_state_name(self):
state = self.current_state()
if state == PRE_MIGRATION:
return "PRE_MIGRATION"
if state == MIGRATING:
return "MIGRATING"
if state == POST_MIGRATION:
return "POST_MIGRATION"
return "WTF?"
def begin_migration(self):
self._set_current_state(MIGRATING)
def complete_migration(self):
self._migrate_data()
self._set_current_state(POST_MIGRATION)
def reset_to_pre_migration_state(self):
self._clear_storage()
self._set_current_state(PRE_MIGRATION)
def _set_current_state(self, state):
r = self._query("""
UPDATE migration SET state=:state
""", state=state)
if r.rowcount == 0:
self._query("""
INSERT INTO migration (state) VALUES (:state)
""", state=state)
def _migrate_data(self):
# Migrating data is remarkably easy when it's already in the same db!
self._query("""
UPDATE bso SET userid=2 WHERE userid=1;
""")
self._query("""
UPDATE user_collections SET userid=2 WHERE userid=1;
""")
def _clear_storage(self):
self._query("""
DELETE FROM batch_upload_items;
""")
self._query("""
DELETE FROM batch_uploads;
""")
self._query("""
DELETE FROM bso;
""")
self._query("""
DELETE FROM collections;
""")
self._query("""
DELETE FROM user_collections;
""")
def includeme(config):
sqluri = config.registry.settings["sqluri"]
config.registry["MigrationStateManager"] = MigrationStateManager(sqluri)
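
# For reference, a quick usage sketch (not part of this commit) of the state
# machine above, against a throwaway SQLite database. It sticks to the
# state-tracking methods; complete_migration() and
# reset_to_pre_migration_state() also touch the syncstorage tables
# (bso, user_collections, ...), which would need to exist.
if __name__ == "__main__":
    mgr = MigrationStateManager("sqlite:////tmp/migration-state.db")
    assert mgr.current_state() == PRE_MIGRATION  # empty table
    mgr.begin_migration()
    assert mgr.current_state() == MIGRATING
    print(mgr.current_state_name())  # prints "MIGRATING"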

syncserver/staticnode.py
@@ -1,240 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Simple node-assignment backend using a single, static node.
This is a greatly-simplified node-assignment backend. It keeps user records
in an SQL database, but does not attempt to do any node management. All users
are implicitly assigned to a single, static node.
"""
import time
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from mozsvc.exceptions import BackendError
from sqlalchemy import Column, Integer, String, BigInteger, Index
from sqlalchemy import create_engine, Table, MetaData
from sqlalchemy.pool import QueuePool
from sqlalchemy.sql import text as sqltext
from tokenserver.assignment import INodeAssignment
from zope.interface import implements
metadata = MetaData()
users = Table(
"users",
metadata,
Column("uid", Integer(), primary_key=True, autoincrement=True,
nullable=False),
Column("service", String(32), nullable=False),
Column("email", String(255), nullable=False),
Column("generation", BigInteger(), nullable=False),
Column("client_state", String(32), nullable=False),
Column("created_at", BigInteger(), nullable=False),
Column("replaced_at", BigInteger(), nullable=True),
Column("keys_changed_at", BigInteger(), nullable=True),
Column("node", String(255), nullable=True),
Index('lookup_idx', 'email', 'service', 'created_at'),
)
_GET_USER_RECORDS = sqltext("""\
select
uid, generation, client_state, created_at, replaced_at,
keys_changed_at, node
from
users
where
email = :email
and
service = :service
order by
created_at desc, uid desc
limit
20
""")
_CREATE_USER_RECORD = sqltext("""\
insert into
users
(service, email, generation, client_state, created_at, replaced_at,
keys_changed_at, node)
values
(:service, :email, :generation, :client_state, :timestamp, NULL,
:keys_changed_at, :node)
""")
_UPDATE_GENERATION_NUMBER = sqltext("""\
update
users
set
generation = :generation
where
service = :service and email = :email and
generation < :generation and replaced_at is null
""")
_REPLACE_USER_RECORDS = sqltext("""\
update
users
set
replaced_at = :timestamp
where
service = :service and email = :email
and replaced_at is null and created_at < :timestamp
""")
def get_timestamp():
"""Get current timestamp in milliseconds."""
return int(time.time() * 1000)
class StaticNodeAssignment(object):
implements(INodeAssignment)
def __init__(self, sqluri, node_url, **kw):
self.sqluri = sqluri
self.node_url = node_url
self.driver = urlparse(sqluri).scheme.lower()
sqlkw = {
"logging_name": "syncserver",
"connect_args": {},
"poolclass": QueuePool,
"pool_reset_on_return": True,
}
if self.driver == "sqlite":
# We must mark it as safe to share sqlite connections between
# threads. The pool will ensure there are no race conditions.
sqlkw["connect_args"]["check_same_thread"] = False
# If using a :memory: database, we must use a QueuePool of
# size 1 so that a single connection is shared by all threads.
if urlparse(sqluri).path.lower() in ("/", "/:memory:"):
sqlkw["pool_size"] = 1
sqlkw["max_overflow"] = 0
if "mysql" in self.driver:
# Guard against the db closing idle connections.
sqlkw["pool_recycle"] = kw.get("pool_recycle", 3600)
self._engine = create_engine(sqluri, **sqlkw)
users.create(self._engine, checkfirst=True)
def get_user(self, service, email, **kw):
params = {'service': service, 'email': email}
res = self._engine.execute(_GET_USER_RECORDS, **params)
try:
row = res.fetchone()
if row is None:
return None
# The first row is the most up-to-date user record.
user = {
'email': email,
'uid': row.uid,
'node': self.node_url,
'generation': row.generation,
'client_state': row.client_state,
'first_seen_at': row.created_at,
'old_client_states': {},
'keys_changed_at': row.keys_changed_at,
}
# Any subsequent rows are due to old client-state values.
old_row = res.fetchone()
update_replaced_at = False
while old_row is not None:
if old_row.client_state != user['client_state']:
user['old_client_states'][old_row.client_state] = True
# Make sure each old row is marked as replaced.
# They might not be, due to races in row creation.
if old_row.replaced_at is None:
update_replaced_at = True
old_row = res.fetchone()
if update_replaced_at:
self._engine.execute(_REPLACE_USER_RECORDS, {
'service': service,
'email': user['email'],
'timestamp': row.created_at,
}).close()
return user
finally:
res.close()
def allocate_user(self, service, email, generation=0, client_state='',
keys_changed_at=0, **kw):
now = get_timestamp()
params = {
'service': service, 'email': email, 'generation': generation,
'client_state': client_state, 'timestamp': now,
'keys_changed_at': keys_changed_at, 'node': self.node_url,
}
res = self._engine.execute(_CREATE_USER_RECORD, **params)
res.close()
return {
'email': email,
'uid': res.lastrowid,
'node': self.node_url,
'generation': generation,
'client_state': client_state,
'first_seen_at': now,
'old_client_states': {},
'keys_changed_at': keys_changed_at,
}
def update_user(self, service, user, generation=None, client_state=None,
keys_changed_at=0, node=None, **kw):
if client_state is None:
# uid can stay the same, just update the generation number.
if generation is not None:
params = {
'service': service,
'email': user['email'],
'generation': generation,
}
res = self._engine.execute(_UPDATE_GENERATION_NUMBER, **params)
res.close()
user['generation'] = max(generation, user['generation'])
else:
# reject previously-seen client-state strings.
if client_state == user['client_state']:
raise BackendError('previously seen client-state string')
if client_state in user['old_client_states']:
raise BackendError('previously seen client-state string')
# need to create a new record for new client_state.
if generation is not None:
generation = max(user['generation'], generation)
else:
generation = user['generation']
now = get_timestamp()
params = {
'service': service, 'email': user['email'],
'generation': generation, 'client_state': client_state,
'timestamp': now,
'keys_changed_at': keys_changed_at, 'node': node,
}
res = self._engine.execute(_CREATE_USER_RECORD, **params)
res.close()
user['uid'] = res.lastrowid
user['generation'] = generation
user['old_client_states'][user['client_state']] = True
user['client_state'] = client_state
user['keys_changed_at'] = keys_changed_at
user['node'] = node
# Mark old records as having been replaced.
# If we crash here, they are unmarked and we may fail to
# garbage collect them for a while, but the active state
# will be undamaged.
params = {
'service': service, 'email': user['email'], 'timestamp': now
}
res = self._engine.execute(_REPLACE_USER_RECORDS, **params)
res.close()

syncserver/tweens.py
@@ -0,0 +1,42 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from pyramid import httpexceptions
import syncserver.migration
def interpose_migration_state_errors(handler, registry):
"""Tween to send errors from storage endpoint based on migration state.
It's a bit weird to do this in a tween, but it means we can use the existing
syncstorage app without any changes, by just interposing a bit of code in
front of it.
"""
def interpose_migration_state_errors_tween(request):
if request.path.startswith("/storage/"):
migration_state = request.registry["MigrationStateManager"].current_state()
if migration_state == syncserver.migration.MIGRATING:
# We 503-inate the storage node while migration is in progress.
return httpexceptions.HTTPServiceUnavailable(body="0")
elif migration_state == syncserver.migration.POST_MIGRATION:
# We 401-inate the old storage node after migration.
if request.path.startswith("/storage/1.5/1/"):
return httpexceptions.HTTPUnauthorized(body="0")
elif migration_state == syncserver.migration.PRE_MIGRATION:
# We won't do this in production, but for testing locally,
# 401-inate the new storage node if we haven't migrated yet.
# This will force clients to refresh their token and go back
# to the old node.
if request.path.startswith("/storage/1.5/2/"):
return httpexceptions.HTTPUnauthorized(body="0")
return handler(request)
return interpose_migration_state_errors_tween
def includeme(config):
"""Include all the SyncServer tweens into the given config."""
config.add_tween("syncserver.tweens.interpose_migration_state_errors")
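
# A rough probe sketch (not part of this commit) for the short-circuits
# above. Requests that get past the tween still hit normal syncstorage
# auth, so an unauthenticated probe may also see 401 for that reason.
#
# Expected, per interpose_migration_state_errors:
#   PRE_MIGRATION:   /storage/1.5/2/...  -> 401 (push clients to old node)
#   MIGRATING:       any /storage/...    -> 503
#   POST_MIGRATION:  /storage/1.5/1/...  -> 401 (old node rejects the user)
if __name__ == "__main__":
    import requests
    for uid in (1, 2):
        url = "http://localhost:5000/storage/1.5/{}/info/collections".format(uid)
        print("uid {}: {}".format(uid, requests.get(url).status_code))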

syncserver/views.py
@@ -0,0 +1,100 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Fake node-assignment backend and control interface.
In addition to hosting a simple storage node, this server hosts a fake tokenserver
node-assignment interface and a little management page that can toggle its behaviour
to simulate the storage node migration. It supports the following states:
* Pre-migration: all requests to the tokenserver endpoint are assigned uid 1 and
are allowed to proceed through to accessing the storage backend.
* Migrating: all requests to the tokenserver endpoint are assigned uid 1, but when
they try to access the storage node they get a 503 error.
* Post-migration: all requests to the tokenserver endpoint are assigned uid 2 and
are allowed to proceed through to accessing the storage backend;
a tween enforces that storage requests for uid 1 will receive a
401 error in this state.
This broadly simulates the different states we expect to move the servers through
during the production deployment.
"""
import os
from cornice import Service
from pyramid import httpexceptions
from pyramid.response import Response
from pyramid.interfaces import IAuthenticationPolicy
import syncserver.migration
# A GET on / returns a simple management interface,
# while POST requests control the state of the server.
management = Service(name='management', path='/')
@management.get(renderer="string")
def _management(request):
"""HTML for the server management interface."""
src = os.path.join(os.path.dirname(__file__), 'management.html')
with open(src) as f:
content = f.read()
content = content.format(
migration_state=request.registry["MigrationStateManager"].current_state_name()
)
return Response(content, content_type="text/html")
@management.post()
def _management_post(request):
"""Command handler for the server management interface."""
mgr = request.registry["MigrationStateManager"]
cmd = request.POST["cmd"]
if cmd == "begin_migration":
mgr.begin_migration()
elif cmd == "complete_migration":
mgr.complete_migration()
elif cmd == "reset":
mgr.reset_to_pre_migration_state()
else:
return httpexceptions.HTTPBadRequest(body="Unknown cmd: {}".format(cmd))
return httpexceptions.HTTPFound(request.relative_url("/", to_application=True))
# The fake tokenserver endpoint is hosted at /token/1.0/sync/1.5
token = Service(name='token', path='/token/1.0/sync/1.5')
@token.get()
def _token(request):
"""Fake tokenserver endpoint.
This endpoint ignores all auth and just assigns the caller a uid of 1 or 2
depending on what state the server is currently in.
"""
migration_state = request.registry["MigrationStateManager"].current_state()
if migration_state != syncserver.migration.POST_MIGRATION:
uid = 1
else:
uid = 2
endpoint = request.relative_url("/storage/1.5/{}".format(uid), to_application=True)
# Sign a token using the fixed uid, for the storage backend to accept.
auth_policy = request.registry.getUtility(IAuthenticationPolicy)
token, key = auth_policy.encode_hawk_id(request, uid)
return {
'id': token,
'key': key,
'uid': uid,
'api_endpoint': endpoint,
'duration': 60,
'hashalg': 'sha256',
'hashed_fxa_uid': '0' * 64,
}

@@ -1,2 +0,0 @@
import syncserver
application = syncserver.main()