diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..845da41 --- /dev/null +++ b/.gitignore @@ -0,0 +1,11 @@ +.ropeproject +*~ +semantic.cache +*.pyc +*.pyo +deploy/openmanage* +docs/_build* +*.key* +*.crt* +*.csr* +*.tar.bz2 \ No newline at end of file diff --git a/bin/backup_omva.sh b/bin/backup_omva.sh new file mode 100755 index 0000000..5a02d24 --- /dev/null +++ b/bin/backup_omva.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Backup script for the OpenManage Virtual Appliance. +# Today: Generates a tarball with the important-to-backup data. +# Tomorrow: This will eventually send the tarball automatically for secure offsite backup. + +. /etc/default/openmanage + +backup_workspace=$HOME/omva-backup +backup_date=`date -u +%Y%m%d_%H%M` +# Stage one: prepare the destination +mkdir -p $backup_workspace + +# Stage two: Collect the trivial stuff. +cp $OPENMANAGE_CONFIGDIR/agent_config.json $backup_workspace +cp -r $SPIDEROAK_ESCROW_KEYS_PATH $backup_workspace +cp -r $SPIDEROAK_ESCROW_LAYERS_PATH $backup_workspace + +# Stage three: collect the DB contents. +su postgres -c "pg_dump openmanage" > $backup_workspace/db_dump.sql + +pushd $HOME +tar czf $HOME/omva-backup-$backup_date.tar.gz ./omva-backup +rm -r $backup_workspace +popd diff --git a/bin/directory_agent_main.py b/bin/directory_agent_main.py new file mode 100644 index 0000000..03cc01e --- /dev/null +++ b/bin/directory_agent_main.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python + +''' +directory_agent_main.py + +Directory Agent main program. + +(c) 2011 SpiderOak, Inc. +''' +import fcntl +import json +import logging +from optparse import OptionParser, OptionGroup +import os +import psycopg2 +import sys + +from common import DATA_DIR, read_config_file, merge_config, set_config +from account_mgr.user_source import group_manager + +class StartupException(Exception): + pass + +def _initialize_logging(): + handler = logging.FileHandler("%s/directory_agent" % + (os.environ['OPENMANAGE_LOGS'],)) + formatter = logging.Formatter( + '%(asctime)s %(levelname)-8s %(name)-20s: %(message)s') + handler.setFormatter(formatter) + logging.root.addHandler(handler) + logging.root.setLevel(logging.DEBUG) + +def parse_cmdline(): + parser = OptionParser() + + parser.add_option("--config", dest="config_file", default=None, + help="The location of the JSON configuration file.", + metavar="FILE") + parser.add_option("--dir-uri", dest="dir_uri", + help="The LDAP URI to the directory.", + metavar="URI") + parser.add_option("--dir-base-dn", dest="dir_base_dn", + help="The LDAP base DN to use for searches.", + metavar="DN") + parser.add_option("--dir-user", dest="dir_user", + help="The user to bind to LDAP as.", + metavar="USER") + parser.add_option("--api-root", dest="api_root", + help="API Root for SpiderOak.", + metavar="API_ROOT") + parser.add_option("--api-code", dest="promo_code", + help="Promo code for SpiderOak plans.", + metavar="API_CODE") + + dangerous = OptionGroup(parser, "Dangerous Repair Commands", + "These commands should only be used to repair a broken instance, and should never be used normally. Refer to documentation!") + dangerous.add_option("--rebuild-db", dest="rebuild_database", default=False, + action="store_true", + help="Rebuild the local user DB.") + parser.add_option_group(dangerous) + options, _ = parser.parse_args() + + # Prune it up a bit and return it as a dict. 
+ optdict = vars(options) + for key in optdict.keys(): + if optdict[key] is None: + del optdict[key] + + return optdict + +def process_config(): + cmdline_opts = parse_cmdline() + + config = read_config_file(cmdline_opts.get('config_file', None)) + config = merge_config(config, cmdline_opts) + + if 'groups' not in config: + raise StartupException("Lacking an LDAP mapping group in the config file. Check your docs!") + + log = logging.getLogger('process_config') + log.debug('%s' % config['api_root']) + return config + +def get_lock(): + lockfile = open(os.path.join("%s/lock" % (DATA_DIR,)), 'w') + fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) + return lockfile + +def release_lock(lockfile): + if lockfile is not None: + fcntl.flock(lockfile, fcntl.LOCK_UN) + lockfile.close() + + +def main(): + _initialize_logging() + # Read our configuration, and process errors from it. + log = logging.getLogger('main') + try: + config = process_config() + except (IOError, ValueError,): + return '''Cannot find, open, or understand your config file. Lacking options +otherwise, it should be at: + +/home/openmanage/openmanage/conf/agent_config.json + +Run %s -h for help.''' % (sys.argv[0],) + except StartupException as e: + return str(e) + + set_config(config) + lockfile = get_lock() + # Moving along, open the database + db_conn = psycopg2.connect(database=config['db_db'], + user=config['db_user'], + password=config['db_pass'], + host=config['db_host']) + + if config['rebuild_database']: + log.info("DB repair requested, beginning rebuild") + group_manager.run_db_repair(config, db_conn) + log.info("DB repair complete") + + + log.info("LDAP -> SpiderOak sync starting") + group_manager.run_group_management(config, db_conn) + + release_lock(lockfile) + return 0 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/bin/finish_setup.sh b/bin/finish_setup.sh new file mode 100755 index 0000000..31738ec --- /dev/null +++ b/bin/finish_setup.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Openmanage service finalization script. +# Running this will start your services, so make sure you're configured! +# (c) 2011 SpiderOak, Inc. + +. /etc/default/openmanage + +if [ -e $OPENMANAGE_ROOT/netkes/account_mgr/user_source ]; then + sudo ln -s $OPENMANAGE_ROOT/bin/run_openmanage.sh /etc/cron.hourly/run_openmanage || exit 1 +fi + +sudo mkdir -p /etc/service/openmanage/supervise +sudo ln -s $OPENMANAGE_ROOT/etc/service/openmanage/run /etc/service/openmanage/run +sudo sv start openmanage + +if [ -e $OPENMANAGE_ROOT/netkes/account_mgr/user_source/ldap_source.py ]; then + echo "Now we're going to start the initial LDAP->SpiderOak account sync. +This may take a while. +" + sudo $OPENMANAGE_ROOT/bin/run_openmanage.sh +fi + diff --git a/bin/first_setup.sh b/bin/first_setup.sh new file mode 100755 index 0000000..391e58c --- /dev/null +++ b/bin/first_setup.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +if [ -e ~/.ran_firstlogin ]; then + exit 0 +fi + +. /etc/default/openmanage + +sudo dpkg-reconfigure tzdata + +touch ~/.ran_firstlogin + +echo "PATH=$OPENMANAGE_ROOT/bin:\$PATH" >> ~/.bashrc + +echo "Great, all done! + +To setup the directory agent, please configure your settings, and then run +'finish_setup.sh' to start services. + +Please see the documentation for more detail. + +" diff --git a/bin/make_keys.sh b/bin/make_keys.sh new file mode 100755 index 0000000..2e79da3 --- /dev/null +++ b/bin/make_keys.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +. 
/etc/default/openmanage
+
+python $OPENMANAGE_ROOT/netkes/key_escrow/admin.py create_base
+python $OPENMANAGE_ROOT/netkes/key_escrow/admin.py setup_brand $1
diff --git a/bin/manage_users.py b/bin/manage_users.py
new file mode 100755
index 0000000..589641a
--- /dev/null
+++ b/bin/manage_users.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+"""
+manage_users
+
+Command-line interface for managing user assignments for OpenManage.
+
+(c) 2011 SpiderOak, Inc.
+"""
+
+import getpass
+import logging
+from optparse import OptionParser, OptionGroup
+import os
+import psycopg2
+import sys
+
+from account_mgr.cmdline_utils import run_command
+from common import read_config_file, merge_config, set_config
+
+class BadConfigOpts(Exception):
+    pass
+
+def _initialize_logging():
+    handler = logging.StreamHandler()
+    formatter = logging.Formatter(
+        '%(asctime)s %(levelname)-8s %(name)-20s: %(message)s')
+    handler.setFormatter(formatter)
+    logging.root.addHandler(handler)
+
+    if 'SPIDEROAK_AGENT_LOG_DEBUG' in os.environ:
+        logging.root.setLevel(logging.DEBUG)
+    else:
+        logging.root.setLevel(logging.INFO)
+
+def parse_cmdline():
+    parser = OptionParser()
+    parser.add_option("--config", dest="config_file", default=None,
+                      help="The location of the JSON configuration file.",
+                      metavar="FILE")
+    parser.add_option("--force", dest="force", action="store_true", default=False,
+                      help="Force setting user configuration, even with the presence of the LDAP agent.")
+
+    reading = OptionGroup(parser, "User Listing",
+                          "Selecting either of the two following options will return a list of your users and ignore all other options.")
+    reading.add_option("--users-json", dest="users_json", default=False,
+                       action="store_true",
+                       help="Returns the list of users for the enterprise as a machine-readable JSON string.")
+    reading.add_option("--users-csv", dest="users_csv", default=None, metavar="CSV FILE",
+                       help="Writes the user directory into a CSV file suitable for use elsewhere.")
+    parser.add_option_group(reading)
+
+    users = OptionGroup(parser, "User(s) Selection",
+                        "The following options select the user(s) to manipulate. Specify either a CSV file OR a single user; specifying both (or neither!) will result in an error.")
+    users.add_option("--email-addr", dest="email_addr", default=None, metavar="EMAIL ADDRESS",
+                     help="Email address for the user.")
+    users.add_option("--csv-file", dest="csv_file", default=None, metavar="CSV FILE",
+                     help="CSV file with the list of users to manipulate and their options. Please see the documentation!")
Please see the documentation!") + parser.add_option_group(users) + + operations = OptionGroup(parser, "User Manipulations", + "These configure what we're going to do on the user(s).") + operations.add_option("--setpw", dest="setpw", action="store_true", + default=False, + help="Set the password for the selected user.") + operations.add_option("--create", dest="create", action="store_true", + default=False, + help="Create user(s).") + operations.add_option("--enable", dest="enable", action="store_true", + default=False, + help="Enable user(s).") + operations.add_option("--disable", dest="disable", action="store_true", + default=False, + help="Disable user(s).") + operations.add_option("--set-group", dest="set_group", action="store_true", + default=False, + help="Assign group ID(s) to user(s).") + operations.add_option("--set-email", dest="set_email", action="store_true", + default=False, + help="Change email address(es) for users(s).") + parser.add_option_group(operations) + + new_config = OptionGroup(parser, "Changes", + "These provided the information required for the above actions.") + new_config.add_option("--given-name", dest="given_name", default=None, + metavar="GIVEN NAME", + help="Given name for the user. Required for --create") + new_config.add_option("--surname", dest="surname", default=None, + metavar="SURNAME", + help="Surname for the user. Required for --create") + new_config.add_option("--new-email", dest="new_email", default=None, + metavar="EMAIL", + help="New email address for the user. Required for --set-email (already set via '--email-addr' in --create)") + new_config.add_option("--group-id", dest="group_id", default=None, + metavar="GROUP ID", + help="Group ID to set for the user. Required for --create, --set-group") + parser.add_option_group(new_config) + options, _ = parser.parse_args() + + optdict = vars(options) + for key in optdict.keys(): + if optdict[key] is None: + del optdict[key] + + return optdict + + +def validate_options(optdict): + """Determines the legality of the options set by the user.""" + log = logging.getLogger("validate_options") + + if ('users_csv' in optdict) or ('users_json' in optdict): + # We don't care about anything else if we're given these options. + return + + # Check to see if they've configured no or both email AND CSV file. + if ('email_addr' in optdict) != ('csv_file' in optdict): + raise BadConfigOpts("Needs exactly ONE of either '--email-addr' OR '--csv-file'") + # We need exactly ONE action. + ops = ['set_email', 'set_group', 'disable', 'enable', 'create', 'setpw'] + ops_counter = 0 + for op in ops: + if op in optdict: + ops_counter += 1 + + if ops_counter != 1: + raise BadConfigOpts("Needs exactly ONE action option!") + + # If we're using this here command-line, we need to verify the required options are set per action. + if 'email_addr' in optdict: + if 'set_email' in optdict: + if 'new_email' not in optdict: + raise BadConfigOpts("Need to specify the new email address!") + elif 'create' in optdict: + if ('given_name' not in optdict) or \ + ('surname' not in optdict) or \ + ('group_id' not in optdict): + raise BadConfigOpts("Need to specify the required options for the create function") + elif 'set_group' in optdict: + if 'group_id' not in optdict: + raise BadConfigOpts("Need to specify the group id to set") + + # Prune out extra options from optdict if we're using the CSV method. 
+
+def process_config():
+    cmdline_opts = parse_cmdline()
+    validate_options(cmdline_opts)
+
+    config = read_config_file(cmdline_opts.get('config_file', None))
+    config = merge_config(config, cmdline_opts)
+
+    return config
+
+def _try_new_password():
+    """Tries up to 3 times to get a new password.
+
+    :returns string or None: New password, or None if it can't be typed in reliably."""
+    log = logging.getLogger('try_new_password')
+
+    for i in range(1, 4):
+        password1 = getpass.getpass("New SpiderOak password: ")
+        password2 = getpass.getpass("New SpiderOak password (again): ")
+
+        if password1 == password2:
+            return password1
+        else:
+            log.warn("Passwords do not match! Attempt %d of 3." % i)
+
+    return None
+
+def main():
+    _initialize_logging()
+    log = logging.getLogger("main")
+    config = process_config()
+    set_config(config)
+
+    db_conn = psycopg2.connect(database=config['db_db'],
+                               user=config['db_user'],
+                               password=config['db_pass'],
+                               host=config['db_host'])
+
+    # Try and catch using this tool alongside the LDAP user_source.
+    try:
+        import account_mgr.user_source.ldap_source
+    except ImportError:
+        # This is fine; if we can't import LDAP, that's the expected behavior.
+        pass
+    else:
+        log.warn("LDAP module available, this may produce inconsistent state.")
+        if not config.get('force'):
+            log.error("--force option not provided, aborting.")
+            return 1
+
+    # Make sure we grab the password if that's required!
+    if config['setpw'] and 'csv_file' not in config:
+        log.debug("Prompting for new password")
+        config['password'] = _try_new_password()
+        if config['password'] is None:
+            log.error("Failed setting password, aborting.")
+            return 1
+
+    results = run_command(db_conn, config)
+    if results is not None:
+        print results
+
+    return 0
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/bin/restore_omva.sh b/bin/restore_omva.sh
new file mode 100644
index 0000000..783ceea
--- /dev/null
+++ b/bin/restore_omva.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Restore script for the OpenManage Virtual Appliance.
+# Today: restores OMVA configuration and database from a backup tarball.
+# Tomorrow: fetches the OMVA restoration tarball from secure offsite backup.
+
+. /etc/default/openmanage
+
+# Stage Zero: Sanity-check the tarball.
+file "$1" | grep 'gzip compressed data' > /dev/null 2>&1
+if [ $? != 0 ]; then
+    echo "Backup argument $1 not showing as a tarball properly. Aborting." >&2
+    exit 1
+fi
+
+# Stage One: Unzip the tarball.
+tar xzf "$1"
+pushd omva-backup
+
+# Stage Two: move the configuration and keys back into place.
+mkdir -p $SPIDEROAK_ESCROW_KEYS_PATH
+mkdir -p $SPIDEROAK_ESCROW_LAYERS_PATH
+
+cp -r keys/* $SPIDEROAK_ESCROW_KEYS_PATH
+cp -r layers/* $SPIDEROAK_ESCROW_LAYERS_PATH
+
+cp agent_config.json $OPENMANAGE_CONFIGDIR
+
+# Stage Three: Re-load the DB SQL.
+su postgres -c "psql -f db_dump.sql openmanage"
+
+# Clean up.
+popd
+rm -r omva-backup
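+
+# Example run (hypothetical file name, produced earlier by backup_omva.sh):
+#   ./restore_omva.sh omva-backup-20111015_1200.tar.gz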
diff --git a/bin/run_openmanage.sh b/bin/run_openmanage.sh
new file mode 100755
index 0000000..338d89b
--- /dev/null
+++ b/bin/run_openmanage.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Openmanage run script for cron.
+# openmanage cron hourly
+
+. /etc/default/openmanage
+
+python $OPENMANAGE_ROOT/bin/directory_agent_main.py
diff --git a/bin/set_db_pw.sh b/bin/set_db_pw.sh
new file mode 100755
index 0000000..f3c802c
--- /dev/null
+++ b/bin/set_db_pw.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+tries=0
+while [ $tries -le 2 ]; do
+    stty -echo
+    read -p "New SQL password: " firstpw; echo
+    read -p "New SQL password (again): " secondpw; echo
+    stty echo
+
+    # Quote both sides so empty input or embedded spaces can't break the test.
+    if [ "$firstpw" = "$secondpw" ]; then
+        break
+    fi
+    tries=$(( $tries + 1 ))
+    echo "Passwords did not match, try again."
+done
+
+if [ $tries -eq 3 ]; then
+    echo "Too many tries, giving up!"
+    exit 1
+fi
+
+psql -h localhost -U directory_agent openmanage -c "alter role directory_agent with password '$firstpw';"
diff --git a/bin/upgrade_om.sh b/bin/upgrade_om.sh
new file mode 100755
index 0000000..8e9557b
--- /dev/null
+++ b/bin/upgrade_om.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Upgrade the OpenManage virtual appliance, given a tarball.
+# 2011 SpiderOak, Inc.
+
+. /etc/default/openmanage
+
+die() {
+    echo "$@" >&2
+    exit 1
+}
+
+[ $# -eq 1 ] || die "Please specify the path to the upgrade tarball."
+ziptype=`file -bi "$1"`
+[ "$ziptype" == "application/x-bzip2; charset=binary" ] || die "$1 not a tarball!"
+
+upgrade=`realpath "$1"`
+sudo sv stop openmanage
+pushd /opt
+# Preserve the live configuration across the unpack, then put it back.
+cp openmanage/etc/agent_config.json $HOME
+sudo tar xjf $upgrade
+sudo cp $HOME/agent_config.json /opt/openmanage/etc
+
+popd
+
+sudo sv start openmanage
+
+echo "OpenManage upgrade complete."
+
diff --git a/deploy/copyfiles.dist b/deploy/copyfiles.dist
new file mode 100644
index 0000000..8b707d7
--- /dev/null
+++ b/deploy/copyfiles.dist
@@ -0,0 +1,7 @@
+openmanage /opt
+server.crt /var/lib/openmanage/server.crt
+server.csr /var/lib/openmanage/server.csr
+server.key /var/lib/openmanage/server.key
+server.key.secure /var/lib/openmanage/server.key.secure
+motd.tail /etc/motd.tail
+nginx-default /etc/nginx/sites-available/default
\ No newline at end of file
diff --git a/deploy/firstboot.sh b/deploy/firstboot.sh
new file mode 100755
index 0000000..c77555b
--- /dev/null
+++ b/deploy/firstboot.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# OpenManage initial boot setup script.
+
+# Regenerate SSH keys.
+rm /etc/ssh/ssh_host*key*
+dpkg-reconfigure -fnoninteractive -pcritical openssh-server
+
+. /etc/default/openmanage
+
+# Setup the NetKES escrow keys.
+$OPENMANAGE_ROOT/bin/make_keys.sh $OPENMANAGE_BRAND
+
+# Install the DB.
+su postgres -c "createdb openmanage"
+su postgres -c "psql -f /opt/openmanage/sql/base_schema.sql openmanage"
+
+# Setup the django project.
+pushd $OPENMANAGE_DJANGO_ROOT/omva
+python manage.py syncdb --noinput
+python manage.py createsuperuser --noinput --username="console_admin" --email="invalid@email.act"
+popd
+
+echo "/opt/openmanage/bin/first_setup.sh" >> /home/openmanage/.bashrc
+echo "PATH=$OPENMANAGE_ROOT/bin:\$PATH" >> /home/openmanage/.bashrc
+
+
diff --git a/deploy/make_tarball.sh b/deploy/make_tarball.sh
new file mode 100755
index 0000000..59a6846
--- /dev/null
+++ b/deploy/make_tarball.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+# Usage: make_tarball.sh <source_dir> <version> <brand> [ldap]
+
+pushd $1 > /dev/null
+source_dir=`pwd`
+popd > /dev/null
+
+version=$2
+
+brand_identifier=$3
+ldap=$4
+echo "Building OpenManage software suite from $source_dir for $brand_identifier"
+if [ "$ldap" == "ldap" ]; then
+    echo "Enabling LDAP integration in this build."
+fi
+
+deploy_dir=$source_dir/deploy
+buildit_dir=$deploy_dir/openmanage
+# -f/-rf so a fresh checkout (no previous build) doesn't error out here.
+rm -f $deploy_dir/openmanage.tar.bz2
+rm -rf $buildit_dir
+mkdir $buildit_dir
+
+# Setup the python packages in the tarball.
+# included_packages="bin etc lib sql"
+
+# for package in $included_packages; do
+#     mkdir $buildit_dir/$package
+#     cp $source_dir/$package/*.py $buildit_dir/$package/ 2> /dev/null
+#     cp $source_dir/$package/*.sh $buildit_dir/$package/ 2> /dev/null
+# done
+
+# Setup the base.
+mkdir $buildit_dir/bin
+cp $source_dir/bin/*.{sh,py} $buildit_dir/bin 2> /dev/null
+
+# Copy libraries
+mkdir $buildit_dir/netkes
+for package in `ls $source_dir/netkes`; do
+    mkdir -p $buildit_dir/netkes/$package
+    cp $source_dir/netkes/$package/*.py $buildit_dir/netkes/$package 2> /dev/null
+done
+
+# Copy LDAP over if necessary.
+if [ "$ldap" == "ldap" ]; then
+    mkdir $buildit_dir/netkes/account_mgr/user_source
+    cp $source_dir/netkes/account_mgr/user_source/*.py $buildit_dir/netkes/account_mgr/user_source
+fi
+
+# Copy over the django project
+cp -r $source_dir/django $buildit_dir
+
+# Setup the SQL package
+mkdir $buildit_dir/sql
+cp $source_dir/sql/*.sql $buildit_dir/sql
+
+# Package the configuration files.
+included_management="openmanage_defaults apt_list py_list agent_config.json.sample"
+mkdir $buildit_dir/etc
+for file in $included_management; do
+    cp $source_dir/etc/$file $buildit_dir/etc
+done
+
+# Set the brand in the configuration
+echo "OPENMANAGE_BRAND=$brand_identifier" > $buildit_dir/etc/brand
+
+# Configure the runsv service.
+mkdir -p $buildit_dir/etc/service/openmanage
+mkdir -p $buildit_dir/etc/service/admin_console
+cp $source_dir/etc/service/openmanage/run $buildit_dir/etc/service/openmanage
+cp $source_dir/etc/service/admin_console/run $buildit_dir/etc/service/admin_console
+
+# Tag it
+echo "SpiderOak OpenManage $version" > $buildit_dir/etc/OpenManage_version.txt
+echo "Built `date`" >> $buildit_dir/etc/OpenManage_version.txt
+
+# Zip it
+pushd $deploy_dir > /dev/null
+tar cjf openmanage.tar.bz2 openmanage
+popd > /dev/null
+
+cat $buildit_dir/etc/OpenManage_version.txt
diff --git a/deploy/make_vm.sh b/deploy/make_vm.sh
new file mode 100755
index 0000000..970cabd
--- /dev/null
+++ b/deploy/make_vm.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Builds a SO OMVA.
+# Usage: make_vm.sh <version> <brand> <hypervisor> [ldap]
+
+if [ $# -eq 0 ]; then
+    echo "I need a hypervisor!"
+    exit 1
+fi
+
+netfile=`pwd`/network-$2
+if [ -e $netfile ]; then
+    echo 'network settings:'
+    netsettings=`cat $netfile`
+    echo $netsettings
+    echo ''
+else
+    netsettings=''
+fi
+
+firstboot=`pwd`/firstboot.sh
+execscript=`pwd`/postinstall.sh
+
+./make_tarball.sh .. $1 $2 $4
+
+sudo vmbuilder $3 ubuntu --verbose --debug -c omva_vmbuilder.conf -o --firstboot $firstboot --execscript $execscript $netsettings -d $2_$3 --part vmbuilder.partition
+
+# echo "sleeping between steps..."
+# sleep 10
+# mv $DESTDIR openmanage-$2-$3
+# echo "sleeping between steps..."
+# sleep 10
+
+# which pigz > /dev/null
+# if [ $? -eq 0 ]; then
+#     our_gzip=pigz
+# else
+#     our_gzip=gzip
+# fi
+
+# tar cf - openmanage-$2-$3/ | $our_gzip > openmanage-$2-$3.tar.gz
diff --git a/deploy/motd.tail b/deploy/motd.tail
new file mode 100644
index 0000000..67a9d2d
--- /dev/null
+++ b/deploy/motd.tail
@@ -0,0 +1,4 @@
+
+SpiderOak OpenManage Virtual Appliance
+
+For documentation, please visit https://spideroak.com
diff --git a/deploy/nginx-default b/deploy/nginx-default
new file mode 100644
index 0000000..2459ecf
--- /dev/null
+++ b/deploy/nginx-default
@@ -0,0 +1,51 @@
+# You may add here your
+# server {
+# ...
+# } +# statements for each of your virtual hosts + +upstream c_netkes_agent { + server unix:/var/lib/openmanage/net_kes.sock; +} + +upstream c_admin_console { + server unix:/var/lib/openmanage/admin_console.sock; +} + +# HTTPS server +# +server { + listen 443; + + ssl on; + ssl_certificate /var/lib/openmanage/server.crt; + ssl_certificate_key /var/lib/openmanage/server.key; + + ssl_session_timeout 5m; + + ssl_protocols TLSv1; + ssl_ciphers RC4-SHA:HIGH:!ADH:!MD5; + ssl_prefer_server_ciphers on; + + location / { + try_files $uri $uri/ @console; + } + + # Configuration to take us to the netkes_agent. + location ~ ^(\/auth|\/layers|\/data) { + proxy_pass http://c_netkes_agent; + proxy_read_timeout 900; + proxy_intercept_errors off; + } + + # Configuration to hit the management console. + location /static { + root /opt/openmanage/django; + } + + location @console { + proxy_pass http://c_admin_console; + proxy_read_timeout 900; + proxy_intercept_errors off; + } +} diff --git a/deploy/omva_vmbuilder.conf b/deploy/omva_vmbuilder.conf new file mode 100644 index 0000000..87e8002 --- /dev/null +++ b/deploy/omva_vmbuilder.conf @@ -0,0 +1,18 @@ +[DEFAULT] +arch = amd64 +user = openmanage +pass = openmanage +name = OpenManage +mem = 384 +tmpfs = - + +[ubuntu] +part = vmbuilder.partition +mirror = http://127.0.0.1:9999/ubuntu +security-mirror = http://127.0.0.1:9999/ubuntu +suite = lucid +flavour = virtual +components = main,universe +addpkg = build-essential, postgresql, python-psycopg2, python-crypto, python-ldap, python-cerealizer, python-pip, libevent-dev, python-dev, realpath, runit, acpid, openssh-server, nano, wget, python-bcrypt, nginx, python-django, git-core, python-mako, python-pyrad +hostname = openmanage +copy = copyfiles.dist diff --git a/deploy/postinstall.sh b/deploy/postinstall.sh new file mode 100755 index 0000000..5a30bc5 --- /dev/null +++ b/deploy/postinstall.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Setup web services +chroot $1 pip install gunicorn +chroot $1 pip install django-pgsql-interval-field +chroot $1 pip install django-pagination +chroot $1 pip install IPy +chroot $1 chmod a+x /opt/openmanage/django/setup_git.sh +chroot $1 /opt/openmanage/django/setup_git.sh + +chroot $1 mkdir -p /etc/service/admin_console/supervise +chroot $1 ln -s /opt/openmanage/etc/service/admin_console/run /etc/service/admin_console/run + +# Setup openmanage services +chroot $1 ln -s /opt/openmanage/etc/openmanage_defaults /etc/default/openmanage +chroot $1 mkdir -p /var/lib/openmanage/log +chroot $1 mkdir -p /var/lib/openmanage/layers +chroot $1 mkdir -p /var/lib/openmanage/keys + +# Tweak the DB setup to run on the machine +chroot $1 sed -i "s/max_connections = /max_connections = 20 #/" /etc/postgresql/8.4/main/postgresql.conf + +# Update fstab +chroot $1 sed -i "s/\/dev\/sda1/LABEL='root_disk'/" /etc/fstab +chroot $1 sed -i "s/\/dev\/sda2/LABEL='swap'/" /etc/fstab diff --git a/deploy/vmbuilder.partition b/deploy/vmbuilder.partition new file mode 100644 index 0000000..98b72e3 --- /dev/null +++ b/deploy/vmbuilder.partition @@ -0,0 +1,2 @@ +root 2000 + diff --git a/django/omva/__init__.py b/django/omva/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/django/omva/agent_config.json b/django/omva/agent_config.json new file mode 100644 index 0000000..a4e658b --- /dev/null +++ b/django/omva/agent_config.json @@ -0,0 +1 @@ +{"dir_base_dn": "dc=test, dc=domain, dc=com", "listen_port": 443, "api_user": "enterprise_admin", "dir_password": "SEEKRET", "db_pass": "initial", "api_password": 
"ALSO_SEEKRET", "dir_guid_source": "objectGUID", "dir_email_source": "mail", "dir_uri": "ldaps://dc1.test.domain.com", "dir_username_source": "userPrincipalName", "dir_lname_source": "sn", "listen_addr": "127.0.0.1", "groups": [{"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 1, "type": "dn"}, {"ldap_id": "cn=MoreSpiderUsers, dc=test, dc=domain, dc=com", "group_id": 2, "type": "dn"}, {"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 31, "type": "dn"}, {"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 29, "type": "dn"}, {"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 32, "type": "dn"}, {"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 33, "type": "dn"}, {"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 34, "type": "dn"}, {"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 11, "type": "dn"}, {"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 5, "type": "dn"}, {"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 10, "type": "dn"}, {"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 27, "type": "dn"}, {"ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com", "group_id": 30, "type": "dn"}], "dir_type": "ad", "dir_user": "superadmin@test.domain.com", "dir_fname_source": "givenName", "dir_member_source": "memberUid"} \ No newline at end of file diff --git a/django/omva/build/pip-delete-this-directory.txt b/django/omva/build/pip-delete-this-directory.txt new file mode 100644 index 0000000..c8883ea --- /dev/null +++ b/django/omva/build/pip-delete-this-directory.txt @@ -0,0 +1,5 @@ +This file is placed here by pip to indicate the source was put +here by pip. + +Once this package is successfully installed this source code will be +deleted (unless you remove this file). diff --git a/django/omva/enterprise/__init__.py b/django/omva/enterprise/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/django/omva/enterprise/urls.py b/django/omva/enterprise/urls.py new file mode 100644 index 0000000..56178b4 --- /dev/null +++ b/django/omva/enterprise/urls.py @@ -0,0 +1,9 @@ +from django.conf.urls.defaults import * + +from django.contrib import admin +admin.autodiscover() + + +urlpatterns = patterns('', + (r'', include('blue_mgnt.urls', namespace='blue_mgnt')), +) diff --git a/django/omva/manage.py b/django/omva/manage.py new file mode 100644 index 0000000..5e78ea9 --- /dev/null +++ b/django/omva/manage.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python +from django.core.management import execute_manager +try: + import settings # Assumed to be in the same directory. +except ImportError: + import sys + sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. 
It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__) + sys.exit(1) + +if __name__ == "__main__": + execute_manager(settings) diff --git a/django/omva/settings.py b/django/omva/settings.py new file mode 100644 index 0000000..72a5c3f --- /dev/null +++ b/django/omva/settings.py @@ -0,0 +1,109 @@ +import os +import sys + +PROJECT_DIR = os.path.abspath(os.path.dirname(__file__)) + +sys.path += [os.path.join(PROJECT_DIR, '../apps')] +sys.path += ['/opt/openmanage/django/apps'] + +DEBUG = True +LOCAL_DEV = os.name == 'nt' +TEMPLATE_DEBUG = DEBUG + +ADMINS = () + # ('Your Name', 'your_email@domain.com'), + +MANAGERS = ADMINS + +DATABASE_ENGINE = 'postgresql_psycopg2' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. +DATABASE_NAME = 'openmanage' # Or path to database file if using sqlite3. +DATABASE_USER = 'admin_console' # Not used with sqlite3. +DATABASE_PASSWORD = 'iexyjtso' # Not used with sqlite3. +DATABASE_HOST = 'localhost' +DATABASE_PORT = '' + +ACCOUNT_API_URL = "https://spideroak.com/apis/accounts/v1/" + +EMAIL_HOST = 'localhost' +EMAIL_PORT = 25 + +MANAGEMENT_VM = True + +# Local time zone for this installation. Choices can be found here: +# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name +# although not all choices may be available on all operating systems. +# If running in a Windows environment this must be set to the same as your +# system time zone. +TIME_ZONE = 'America/Chicago' + +# Language code for this installation. All choices can be found here: +# http://www.i18nguy.com/unicode/language-identifiers.html +LANGUAGE_CODE = 'en-us' + +SITE_ID = 1 + +# If you set this to False, Django will make some optimizations so as not +# to load the internationalization machinery. +USE_I18N = True + +# Absolute path to the directory that holds media. +# Example: "/home/media/media.lawrence.com/" +MEDIA_ROOT = '' + +# URL that handles the media served from MEDIA_ROOT. Make sure to use a +# trailing slash if there is a path component (optional in other cases). +# Examples: "http://media.lawrence.com", "http://example.com/media/" +MEDIA_URL = '' + +# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a +# trailing slash. +# Examples: "http://foo.com/media/", "/media/". +ADMIN_MEDIA_PREFIX = '/static/affiliate/admin/' + +# Make this unique, and don't share it with anybody. +SECRET_KEY = os.environ['DJANGO_SECRET_KEY'] + +# List of callables that know how to import templates from various sources. +TEMPLATE_LOADERS = ( + 'django.template.loaders.filesystem.load_template_source', + 'django.template.loaders.app_directories.load_template_source', +# 'django.template.loaders.eggs.load_template_source', +) + +MIDDLEWARE_CLASSES = ( + 'django.middleware.common.CommonMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.middleware.transaction.TransactionMiddleware', + 'pagination.middleware.PaginationMiddleware', +) + +ROOT_URLCONF = 'omva.urls' + +TEMPLATE_DIRS = ( + # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". + # Always use forward slashes, even on Windows. + # Don't forget to use absolute paths, not relative paths. 
+) + +TEMPLATE_CONTEXT_PROCESSORS = ( + 'django.core.context_processors.auth', + 'django.core.context_processors.debug', + 'django.core.context_processors.i18n', + 'django.core.context_processors.media', + 'django.core.context_processors.request', +) + +INSTALLED_APPS = ( + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.sites', + 'django.contrib.admin', + 'blue_mgnt', + 'pagination', +) + +AUTHENTICATION_BACKENDS = ( + 'spideroak.auth.backend.PartnerTokenBackend', +) diff --git a/django/omva/sql.db b/django/omva/sql.db new file mode 100644 index 0000000..cc784ce Binary files /dev/null and b/django/omva/sql.db differ diff --git a/django/omva/start_shell b/django/omva/start_shell new file mode 100644 index 0000000..83e9e94 --- /dev/null +++ b/django/omva/start_shell @@ -0,0 +1,24 @@ +#!/bin/bash + +export PANDORA_DATABASE_HOST=localhost +export SPIDEROAK_NODE_NAME=localhost +export SPIDEROAK_DATA_CENTER_NAME=local +export SPIDEROAK_CLUSTER_NAME=localcluster +export PANDORA_REPOSITORY_PATH=/var/pandora/storage +export PANDORA_SERVER_CACHE_PATH=/var/pandora/cache +export NUS_SINGLE_SERVER=1 +export SPIDEROAK_SHARE_OBJECT_SERVER_HOST=127.0.0.1 +export SPIDEROAK_SHARE_OBJECT_SERVER_PORT=4888 +export CC_BILLING_BIN=`pwd`/billing_system +#export DISABLE_FILEDUMP_ACCEL=1 + +. /opt/so/bin/env.sh +~/Work/bin/start_postgresql + +function finish { + ../bin/stop_postgresql +} + +trap "finish" INT + +PYTHONPATH=.:..:../.. python manage.py runserver 8081 diff --git a/django/omva/urls.py b/django/omva/urls.py new file mode 100644 index 0000000..89d09c4 --- /dev/null +++ b/django/omva/urls.py @@ -0,0 +1,5 @@ +from django.conf.urls.defaults import * + +urlpatterns = patterns('', + (r'^', include('omva.enterprise.urls')), +) diff --git a/django/omva/views.py b/django/omva/views.py new file mode 100644 index 0000000..0b3d91a --- /dev/null +++ b/django/omva/views.py @@ -0,0 +1,4 @@ +from django.http import HttpResponse + +def login(request): + return HttpResponse('hi') diff --git a/django/setup_git.sh b/django/setup_git.sh new file mode 100755 index 0000000..543d852 --- /dev/null +++ b/django/setup_git.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +pushd /opt/openmanage/django + +git clone https://github.com/adamcik/django-postgresql-netfields.git + +mkdir apps +pushd apps + +git clone https://spideroak.com/dist/blue_management.git +git clone https://spideroak.com/dist/so_common.git +pushd blue_management + +git submodule init +git submodule update + +ln -s templates/base ../so_common/templates/base + +popd # blue_management +popd #apps + +# Setup the static content +mkdir static + +ln -s /opt/openmanage/django/apps/so_common/static /opt/openmanage/django/static/blue_common +ln -s /opt/openmanage/django/apps/so_common/templates/base /opt/openmanage/django/apps/blue_management/blue_mgnt/templates/base + +popd # /opt/openmanage/django + diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..147bacb --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,130 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
+ +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/OpenManageVirtualAppliance.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/OpenManageVirtualAppliance.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/OpenManageVirtualAppliance" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/OpenManageVirtualAppliance" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + make -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 
+ +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/docs/accountsetup.rst b/docs/accountsetup.rst new file mode 100644 index 0000000..07a8d8c --- /dev/null +++ b/docs/accountsetup.rst @@ -0,0 +1,27 @@ +Subscription Configuration and Management +========================================= + +The SpiderOak-facing components of your OpenManage system are managed through the `SpiderOak Partners Console `_ . You can login through the login and password provided to you during your SpiderOak Blue subscription setup. + +.. note:: + + The login used for the Partners Console is the same as the password required for the API access mentioned at :ref:`common_configuration` + +General Setup +************* + +There are three general configuration options, enterprise-wide: + + * ``Default share ttl``: Sets the time-to-live (TTL) for share links + * ``Default autopurge``: Sets the autopurge time from the clients within your enterprise. + * ``NetKES server URL``: URL for your OMVA that can be reached from SpiderOak, used for authentication. + +Groups +****** + +Groups have the following properties: + * ``Group ID``: Unique identifier for your group, used in configuring directory access (if used) at :ref:`group_configuration`. + * ``Plan``: Storage plan made available to users in the group. + * ``Allow WebAPI``: Allows mobile and web access for users in the group. + * ``Require Domain for Windows Install``: If enabled, forces all SpiderOak installations for members of the group to belong to your Windows domain. + * ``Delete Group``: When checked during editing, this will delete the group. diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..5c58c1d --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- +# +# OpenManage Virtual Appliance documentation build configuration file, created by +# sphinx-quickstart on Fri Sep 23 13:07:27 2011. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. 
They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'OpenManage Virtual Appliance' +copyright = u'2011, SpiderOak, Inc.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '1.0' +# The full version, including alpha/beta/rc tags. +release = '1.0pre' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. 
+#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'OpenManageVirtualAppliancedoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'OpenManageVirtualAppliance.tex', u'OpenManage Virtual Appliance Documentation', + u'Matthew Erickson', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'openmanagevirtualappliance', u'OpenManage Virtual Appliance Documentation', + [u'Matthew Erickson'], 1) +] diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..ff329bc --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,26 @@ +.. OpenManage Virtual Appliance documentation master file, created by + sphinx-quickstart on Fri Sep 23 13:07:27 2011. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to OpenManage Virtual Appliance's documentation! +======================================================== + +Contents: + +.. 
toctree::
+   :maxdepth: 2
+
+   Introduction <intro>
+   vasetup
+   ldap
+   radius
+   remote_prefs
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/docs/intro.rst b/docs/intro.rst
new file mode 100644
index 0000000..f362891
--- /dev/null
+++ b/docs/intro.rst
@@ -0,0 +1,13 @@
+The OpenManage Virtual Appliance
+================================
+
+The OpenManage Virtual Appliance (OMVA) provides automatically managed configuration for SpiderOak Blue™ services. It provides two major services: authentication and user management, and local encryption key escrow.
+
+The OMVA is a virtual appliance running Linux to provide services from within your organization to SpiderOak. By using the OMVA, SpiderOak retains our innovative zero-knowledge (Ø-K) security model concerning your organization's data while allowing you full control over both data and user account management.
+
+Services on the OMVA:
+ * Communicate out to the SpiderOak Accounts API to configure and manage user accounts,
+ * Listen to SpiderOak for queries for key escrow use,
+ * Listen to SpiderOak for queries concerning user authentication.
+
+You will be using the OMVA in one of two configurations: Blue Private Cloud, or Blue Hosted Storage. The OMVA operates largely the same in both configurations; however, the connection information differs depending on whether you are connecting to SpiderOak hosted storage or your own Private Cloud install.
diff --git a/docs/ldap.rst b/docs/ldap.rst
new file mode 100644
index 0000000..d1dacb2
--- /dev/null
+++ b/docs/ldap.rst
@@ -0,0 +1,24 @@
+Directory Integration
+=====================
+
+OMVA supports connecting to your organization's user directory and automatically configuring SpiderOak user accounts for members of your organization, as well as using your directory to authenticate users.
+
+OpenManage supports the Lightweight Directory Access Protocol (LDAP). This is an open industry-wide standard, supported by Microsoft, Apple, Novell, and the open source community.
+
+LDAP integration is accomplished by reading the member list of groups defined in the configuration file, matching them to the SpiderOak user database, and resolving differences. The implementation internally uses a SQL database to assist in resolving the user DB differences and tracking the matching between LDAP user objects and user entries in the SpiderOak database.
+
+Active Directory
+****************
+
+As Microsoft Active Directory is a superset of LDAP, OpenManage communicates with it via standard LDAP methods. Your AD must be configured properly, and to configure OpenManage, you must have some understanding of LDAP. You can browse your AD via Microsoft's ``ldp.exe`` tool (included with Windows Server), or via the extended attributes on AD objects when they are turned on through the MMC settings for *Active Directory Users and Computers*.
+
+Setup
+*****
+
+OpenManage works by examining the members of user groups in LDAP. Each LDAP group correlates to a group set up through the SpiderOak Blue :doc:`managementconsole`, using internally-held data to connect a SpiderOak Blue user group to your LDAP group.
+
+.. note::
+   See :ref:`ldap_configuration` for more information on how to configure the OMVA to connect to your LDAP server.
+
+The OpenManage LDAP user will need to be able to read the member list of the group, as well as read the attributes of the user objects. No write permissions are necessary, and write access should be avoided for security reasons.
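+
+For illustration only (this sketch is not part of the appliance), a minimal
+read-only membership query with the ``python-ldap`` module, reusing the URI,
+bind user, and group DN from the sample ``agent_config.json``, looks like::
+
+    import ldap
+
+    conn = ldap.initialize("ldaps://dc1.test.domain.com")
+    conn.simple_bind_s("superadmin@test.domain.com", "password")
+
+    # Read just the member list of one configured group.
+    results = conn.search_s("cn=SpiderUsers, dc=test, dc=domain, dc=com",
+                            ldap.SCOPE_BASE, attrlist=["member"])
+    for dn, attrs in results:
+        for member in attrs.get("member", []):
+            print member
+
+    conn.unbind_s()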
+
diff --git a/docs/managementconsole.rst b/docs/managementconsole.rst
new file mode 100644
index 0000000..adc6269
--- /dev/null
+++ b/docs/managementconsole.rst
@@ -0,0 +1,4 @@
+Web Management Console
+======================
+
+Your OMVA includes a web-based management console to control your account.
diff --git a/docs/radius.rst b/docs/radius.rst
new file mode 100644
index 0000000..2f1c314
--- /dev/null
+++ b/docs/radius.rst
@@ -0,0 +1,4 @@
+RADIUS Authentication
+=====================
+
+RADIUS authentication provides a way for networked services to offload authentication to another service for remote authentication and accounting. It is popularly used by industry-standard two-factor authentication systems such as RSA SecurID and CryptoCard.
diff --git a/docs/remote_prefs.rst b/docs/remote_prefs.rst
new file mode 100644
index 0000000..3441a45
--- /dev/null
+++ b/docs/remote_prefs.rst
@@ -0,0 +1,132 @@
+Remote Preferences
+==================
+
+The SpiderOak: Blue™ client supports management of client preferences through central configuration per client machine. Currently only Windows registry settings are available; however, Mac and Linux settings are coming soon.
+
+When settings are configured centrally, the SpiderOak: Blue™ client will use them over locally-configured settings, as a method of enforcing policy.
+
+Windows Configuration
+*********************
+
+Windows central management is accomplished via the registry. This enables administrators to push registry settings via Group Policy Objects administered at the domain level.
+
+Both ``HKEY_CURRENT_USER`` and ``HKEY_LOCAL_MACHINE`` trees are supported, with ``HKEY_LOCAL_MACHINE`` taking priority. The location in the registry for the preferences is ``\SOFTWARE\SpiderOak\SpiderOak\Preferences``.
+
+.. warning::
+
+   Note that if you are manually editing preferences in ``HKEY_LOCAL_MACHINE`` with ``regedit`` on 64-bit Windows, you need to place these in a 32-bit compatibility key, as: ``HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\SpiderOak\SpiderOak\Preferences``
+
+Supported Registry Preference Value Types
++++++++++++++++++++++++++++++++++++++++++
+
+The following types are used in SpiderOak registry settings:
+
+* Boolean:
+
+  * String (``REG_SZ``) of ``True`` or ``False``
+  * ``REG_DWORD`` or ``REG_QWORD`` with values 0 for ``False``, 1 for ``True``.
+
+* String:
+
+  * String (``REG_SZ``)
+
+* Integer:
+
+  * String (``REG_SZ``) representation of the integer (example: ``"12345"``)
+  * ``REG_DWORD`` or ``REG_QWORD`` of the value.
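+
+For example, a hypothetical ``.reg`` file enforcing two of the preferences
+listed below (the size value is an arbitrary illustration) could look like::
+
+    Windows Registry Editor Version 5.00
+
+    [HKEY_LOCAL_MACHINE\SOFTWARE\SpiderOak\SpiderOak\Preferences]
+    "MinimizeToTrayOnClose"="True"
+    "DontArchiveFilesLargerThanSize"=dword:00000200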
+
+Supported Preferences
++++++++++++++++++++++
+
+Interface
+---------
+
+* ShowCloseOrMinimizeDialogOnClose : Boolean
+* MinimizeToTrayOnClose : Boolean
+* LaunchMinimizedAtStartup : Boolean
+* ShowSplashScreenAtStartup : Boolean
+* UseAlternativeTrayIconStyle : Boolean
+* DisableSpaceCalculations : Boolean
+* ShowHotkeyEnabled : Boolean
+* ShowHotkeyModifier : String
+
+  * Options: ``Alt``, ``Ctrl``, ``Alt + Ctrl``, ``Alt + Shift``, ``Ctrl + Shift``
+
+* ShowHotkeySymbol : String
+
+  * A single capitalized letter string value, e.g. "S".
+  * Spacebar represented as ``SPACE``.
+
+Backup
+------
+
+* DontArchiveFilesLargerThanEnabled : Boolean
+* DontArchiveFilesLargerThanSize : Integer
+* DontArchiveFilesOlderThanEnabled : Boolean
+* DontArchiveFilesOlderThanSeconds : Integer
+
+  * Value is in seconds.
+
+* !Wildcards : String
+* FolderWildcards : String
+* EnablePreviews : Boolean
+
+Schedule
+--------
+
+For the values ending in "ScanInterval", the following options are available: ``Automatic``, ``5 Minutes``, ``15 Minutes``, ``30 Minutes``, ``1 Hour``, ``2 Hours``, ``4 Hours``, ``8 Hours``, ``12 Hours``, ``24 Hours``, and ``48 Hours``.
+
+For the values ending in "ScheduleDay", the following options are available: ``Everyday``, ``Monday``, ``Tuesday``, ``Wednesday``, ``Thursday``, ``Friday``, ``Saturday``, ``Sunday``, ``Weekdays``, and ``Weekends``.
+
+For the values ending in "ScheduleHour", values are strings of the integers "1" through "12".
+
+For the AMPM values, the value is either ``AM`` or ``PM``.
+
+* FullScheduleEnable : Boolean
+* FullScanInterval : String
+* FullScheduleDay : String
+* FullScheduleHour : String
+* !FullScheduleAMPM : String
+* SyncScheduleEnable : Boolean
+* SyncScanInterval : String
+* SyncScheduleDay : String
+* SyncScheduleHour : String
+* !SyncScheduleAMPM : String
+* ShareScheduleEnable : Boolean
+* ShareScanInterval : String
+* ShareScheduleDay : String
+* ShareScheduleHour : String
+* !ShareScheduleAMPM : String
+* EnableAutomaticScan : Boolean
+
+Copy
+----
+
+* SecondaryCopyEnabled : Boolean
+* SecondaryCopyLocationType : String
+
+  * Options: ``Local Folder``, ``FTP Server``, ``SFTP Server``
+
+* SecondaryCopyLocation : String
+* SecondaryCopyHostname : String
+* SecondaryCopyPort : String
+* SecondaryCopyUsername : String
+* SecondaryCopyPassword : String
+
+Proxy
+-----
+
+* HttpProxyEnabled : Boolean
+* HttpProxyHost : String
+* HttpProxyPort : String
+* HttpProxyUsername : String
+* LimitBandwidthEnabled : Boolean
+* LimitUploadBucket : String
+
+General
+-------
+
+* DownloadsLocation : String
diff --git a/docs/vasetup.rst b/docs/vasetup.rst
new file mode 100644
index 0000000..f8b5cdb
--- /dev/null
+++ b/docs/vasetup.rst
@@ -0,0 +1,129 @@
+OMVA Setup
+==========
+
+The OMVA is a virtual machine with the following base requirements:
+
+ * 512 MB RAM
+ * 5 GB HDD
+ * 1 Network address
+
+Network Access
+**************
+
+The OMVA needs to be able to connect to and accept connections from your SpiderOak Blue storage to function. Your firewall and proxy configuration around the OMVA will need to be configured to permit connections to and from your storage. We have provided a table below: reference it directly as a Blue Hosted Storage customer, or use it as a worksheet to fill in and then reference as a Private Cloud customer.
+
++-----------------+----------+-----------------------------------+-------------------------+
+| Protocol        | In / Out | Hosted Storage                    | Private Cloud           |
++=================+==========+===================================+=========================+
+| HTTPS (443)     | out      | ``spideroak.com`` (208.28.15.128) |                         |
++-----------------+----------+-----------------------------------+-------------------------+
+| HTTPS (443)     | in       | | 208.28.15.128                   |                         |
+|                 |          | | 208.28.15.131                   | (only 1 IP necessary)   |
+|                 |          | | 38.121.104.4                    |                         |
+|                 |          | | 38.121.104.5                    |                         |
++-----------------+----------+-----------------------------------+-------------------------+
+
+In addition, the following ports need to be kept open locally for administrative purposes:
+
+* HTTPS (port 443) IN (Web management console)
+* SSH (port 22) IN (Command-line management)
+
+Finally, you will need to connect to your directory server:
+
+* LDAP/LDAPS to your directory server.
+
+The OMVA is configured to get its network address through DHCP.
diff --git a/docs/vasetup.rst b/docs/vasetup.rst
new file mode 100644
index 0000000..f8b5cdb
--- /dev/null
+++ b/docs/vasetup.rst
@@ -0,0 +1,129 @@
+OMVA Setup
+==========
+
+The OMVA is a virtual machine with the following base requirements:
+
+ * 512 MB RAM
+ * 5 GB HDD
+ * 1 Network address
+
+Network Access
+**************
+
+To function, the OMVA needs to be able to connect to and accept connections from your SpiderOak Blue storage. Your firewall and proxy configuration around the OMVA will need to permit connections to and from your storage. The following table serves as a reference for Blue Hosted Storage customers; Private Cloud customers can use it as a worksheet, filling in their own values.
+
++-----------------+----------+-----------------------------------+-------------------------+
+| Protocol        | In / Out | Hosted Storage                    | Private Cloud           |
++=================+==========+===================================+=========================+
+| HTTPS (443)     | out      | ``spideroak.com`` (208.28.15.128) |                         |
++-----------------+----------+-----------------------------------+-------------------------+
+| HTTPS (443)     | in       | | 208.28.15.128                   |                         |
+|                 |          | | 208.28.15.131                   | (only 1 IP necessary)   |
+|                 |          | | 38.121.104.4                    |                         |
+|                 |          | | 38.121.104.5                    |                         |
++-----------------+----------+-----------------------------------+-------------------------+
+
+In addition, the following ports must be kept open locally for administrative purposes:
+
+* HTTPS (port 443) IN (Web management console)
+* SSH (port 22) IN (Command-line management)
+
+Finally, you will need to connect to your directory server:
+
+* LDAP/LDAPS to your directory server.
+
+The OMVA is configured to get its network address through DHCP. Please contact SpiderOak if you require a virtual machine with a pre-configured IP address.
+
+There are two SSL configurations possible behind a proxy with the OMVA. The virtual appliance ships with its own self-signed certificate, which SpiderOak will optionally validate in the absence of a conventional SSL certificate signed by a CA. This provides the flexibility to position the OMVA within your infrastructure behind proxies (or not) as desired.
+
+Lastly, if using directory integration, you will need a user account in the directory with access rights to read the groups you are defining to hold SpiderOak-enabled users.
+
+.. note::
+
+   For more information concerning directory integration, please see :doc:`ldap`
+
+The current version of the machine is running Ubuntu 10.04 LTS in a
+stripped-down configuration. To retrieve updates to the OS, you will either have to enable outbound HTTP access to ``archive.ubuntu.com`` and ``security.ubuntu.com`` or configure the OMVA's apt to use a local mirror or proxy.
+
+
+Initial Installation
+********************
+
+Upon initial boot, the system will configure itself, creating encryption escrow keys, generating fresh OpenSSH keys, and presenting you with a login prompt. The default login credentials are:
+
+* username: ``openmanage``
+* password: ``openmanage``
+
+With the initial login, a script will guide you through changing your admin password and the DB user password.
+
+Once logged in, please configure the services on the machine for your use. A sample configuration file is included at ``/opt/openmanage/etc/agent_config.json.sample``. This is a JSON-format file with sample (and, it should be noted, incorrect!) values for access to your local directory, your SpiderOak administrative account, and the password you set for the local database. In addition, the sample file includes definitions for sample directory groups.
+
+The JSON configuration file should be named ``/opt/openmanage/etc/agent_config.json``. Once that is configured, run ``finish_setup.sh`` from the command prompt. The configuration will finish, and the system services will start, along with an initial directory-to-SpiderOak account sync (for accounts with that feature).
+
+
+Configuration File Options
+**************************
+
+The configuration file is a simple JSON-format file at ``/opt/openmanage/etc/agent_config.json``. There is a sample configuration file included with the OMVA at ``/opt/openmanage/etc/agent_config.json.sample`` that can be copied to the actual name and then edited to set up the initial configuration.
+
+.. _common_configuration:
+
+Common Configuration Options
+++++++++++++++++++++++++++++
+
+* ``api_user``: The administrative user for your SpiderOak Blue subscription. This is the same as the user you use to log in to the web admin console on the SpiderOak website.
+* ``api_password``: The password for the administrative user's account.
+* ``api_root``: The URL to connect to the Billing API for your storage backend. **NOTE:** This option is internally configured properly by default for Blue Hosted Storage. You only need to introduce this variable for Blue Private Cloud.
+* ``db_pass``: The password you've chosen for the database access.
+* ``listen_addr``: The IP address for the NetKES to listen on. This should be the IP address configured for the OMVA.
+* ``listen_port``: The port for the OMVA to listen on. The default of 443 (HTTPS) is sane for most deployments.
+
+.. _ldap_configuration:
+
+LDAP Configuration Options
+++++++++++++++++++++++++++
+
+.. note::
+   For more information concerning directory integration, please see :doc:`ldap`
+
+OpenManage's directory integration features are based around the Lightweight Directory Access Protocol (LDAP). As Microsoft Active Directory is a form of LDAP, we use LDAP conventions when referring to AD. Mapping AD concepts to LDAP terms is simple but generally beyond the scope of this documentation; the defaults in the sample configuration file are geared towards the default AD LDAP schema, but can be changed to suit your requirements.
+
+* ``dir_uri``: The LDAP URI to connect to, in the form ``<scheme>://<hostname>[:<port>]``. ``<scheme>`` is either ``ldap`` or ``ldaps``, depending on your use of SSL for the LDAP connection.
+* ``dir_base_dn``: The base DN in the LDAP tree to run searches against. In the case of the sample, this simply searches against the entire domain for test.domain.com. To restrict it to the top-level Users OU, for example, it would then be ``cn=Users, dc=test, dc=domain, dc=com``. Leaving this set too high in the tree (say, ``dc=test, dc=domain, dc=com``) may hurt performance, as searches will walk through many irrelevant objects.
+* ``dir_type``: The type of LDAP installation you have: either ``posix`` for OpenLDAP and RedHat Directory Server, or ``ad`` for Microsoft Active Directory.
+* ``dir_user``: The user account created to give the directory agent access to read the group membership.
+* ``dir_password``: The password for the above user account.
+* ``dir_guid_source``: Field name for user objects defining a UID that will not change for the life of the object. This is used to track user objects through name changes and group reassignments. The sample provides the MS AD UID field; if using AD, this should not need to be changed. For other LDAP implementations, please use whatever field name your implementation uses.
+* ``dir_fname_source``: Source for the personal name in the LDAP schema. The default given is for AD.
+* ``dir_lname_source``: Source for the surname in the LDAP schema. The default given is for AD.
+* ``dir_username_source``: Source for the unique username in the LDAP schema. The default given is for AD, which in simple cases will suffice. If the user's email address is represented in the directory, that is also a suitable field.
+* ``auth_method``: Source for authentication. Either ``ldap`` for LDAP-bind authentication, or ``radius`` for RADIUS authentication.
+
+.. note::
+   See :doc:`ldap` or :doc:`radius` for details on LDAP and RADIUS authentication.
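Before running the agent, the ``dir_uri``, ``dir_user``, and ``dir_password`` values can be sanity-checked by attempting the same simple bind by hand. Here is a sketch using the ``python-ldap`` package, with the placeholder values from the sample configuration::

    # Sketch: verify the directory agent's bind credentials.
    # URI, user, and password are the sample config's placeholders.
    import ldap

    conn = ldap.initialize("ldaps://dc1.test.domain.com")
    try:
        conn.simple_bind_s("superadmin@test.domain.com", "SEEKRET")
        print "bind OK"
    except ldap.INVALID_CREDENTIALS:
        print "bad username or password"
    finally:
        conn.unbind_s()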
+RADIUS Configuration Options
+++++++++++++++++++++++++++++
+
+.. note::
+   For more information concerning RADIUS authentication, please see :doc:`radius`.
+
+
+* ``rad_server``: The RADIUS server to connect to.
+* ``rad_secret``: The shared RADIUS secret.
+* ``rad_dictionary``: The RADIUS dictionary.
+
+Group Configuration
+-------------------
+
+The ``groups`` member in the configuration is special. Please leave the blank group configuration from the sample in place, as it will be populated from the :doc:`managementconsole`. Entries in this section are considered internal to the software.
+
+Post Setup
+**********
+
+After running ``finish_setup.sh``, the OMVA should 'just work' with little to no further administrative interaction. However, we *highly* recommend making backups of your key escrow (KES) keys: if the KES keys are lost, **all user accounts will have to be reset from scratch!** The keys can be found at ``/var/lib/openmanage/keys`` and ``/var/lib/openmanage/layers``; back up those directories and store the copies somewhere safe and secure.
+
+.. warning::
+   Back up your escrow keys as described above; without them, in the event of a failure, **YOU WILL NOT BE ABLE TO RECOVER YOUR DATA**.
+
+In a near-future release, backup tools will be made available as part of the OMVA toolset.
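Until those tools are available, here is a minimal sketch of such a backup using Python's ``tarfile`` module, archiving the two key directories named above (the output path is an arbitrary choice for this example)::

    # Sketch: archive the escrow key directories for offline storage.
    import tarfile
    import time

    archive = "/root/omva-keys-%s.tar.gz" % time.strftime("%Y%m%d")
    tar = tarfile.open(archive, "w:gz")
    try:
        tar.add("/var/lib/openmanage/keys")
        tar.add("/var/lib/openmanage/layers")
    finally:
        tar.close()

Move the resulting archive off the appliance and store it with the same care you would give any other key material.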
diff --git a/etc/agent_config.json.sample b/etc/agent_config.json.sample
new file mode 100644
index 0000000..382eaf6
--- /dev/null
+++ b/etc/agent_config.json.sample
@@ -0,0 +1,30 @@
+{
+    "dir_uri": "ldaps://dc1.test.domain.com",
+    "dir_base_dn": "dc=test, dc=domain, dc=com",
+    "dir_user": "superadmin@test.domain.com",
+    "dir_password": "SEEKRET",
+    "dir_guid_source": "objectGUID",
+    "dir_member_source": "memberUid",
+    "dir_username_source": "userPrincipalName",
+    "dir_fname_source": "givenName",
+    "dir_lname_source": "sn",
+    "dir_email_source": "mail",
+    "dir_type": "ad",
+    "api_user": "enterprise_admin",
+    "api_password": "ALSO_SEEKRET",
+    "db_pass": "initial",
+
+    "listen_addr": "127.0.0.1",
+    "listen_port": 443,
+
+    "groups": [
+        {"group_id": 1,
+         "type": "dn",
+         "ldap_id": "cn=SpiderUsers, dc=test, dc=domain, dc=com"
+        },
+        {"group_id": 2,
+         "type": "dn",
+         "ldap_id": "cn=MoreSpiderUsers, dc=test, dc=domain, dc=com"
+        }
+    ]
+}
diff --git a/etc/apt_list b/etc/apt_list
new file mode 100644
index 0000000..e69de29
diff --git a/etc/openmanage_defaults b/etc/openmanage_defaults
new file mode 100644
index 0000000..bbc3667
--- /dev/null
+++ b/etc/openmanage_defaults
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+export OPENMANAGE_ROOT=/opt/openmanage
+export OPENMANAGE_DATA=/var/lib/openmanage
+export OPENMANAGE_CONFIGDIR=$OPENMANAGE_ROOT/etc
+export OPENMANAGE_LOGS=$OPENMANAGE_DATA/log
+export OPENMANAGE_DJANGO_ROOT=$OPENMANAGE_ROOT/django
+
+export PYTHONPATH=$OPENMANAGE_ROOT/netkes:$OPENMANAGE_DJANGO_ROOT/apps/blue_management:$OPENMANAGE_DJANGO_ROOT/apps/so_common:$OPENMANAGE_ROOT:$OPENMANAGE_DJANGO_ROOT/django-postgresql-netfields:$PYTHONPATH
+
+export SPIDEROAK_ESCROW_LAYERS_PATH=$OPENMANAGE_DATA/layers
+export SPIDEROAK_ESCROW_KEYS_PATH=$OPENMANAGE_DATA/keys
+
+. $OPENMANAGE_ROOT/etc/brand
diff --git a/etc/openmanage_init b/etc/openmanage_init
new file mode 100755
index 0000000..fb85a20
--- /dev/null
+++ b/etc/openmanage_init
@@ -0,0 +1,151 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides:          openmanage
+# Required-Start:    $remote_fs $syslog
+# Required-Stop:     $remote_fs $syslog
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: OpenManage NetKES Server
+# Description:       SpiderOak OpenManage NetKES server providing remote authentication
+#                    for SpiderOak clients.
+### END INIT INFO
+
+# Author: Matthew Erickson
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/opt/openmanage/bin:/sbin:/usr/sbin:/bin:/usr/bin
+DESC="OpenManage NetKES Server"
+NAME=openmanage
+DAEMON='/opt/openmanage/agent/netkes_main.py &'
+#DAEMON_ARGS="--options args"
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.0-6) to ensure that this file is present. +. /lib/lsb/init-functions + +# +# Function that starts the daemon/service +# +do_start() +{ + # Return + # 0 if daemon has been started + # 1 if daemon was already running + # 2 if daemon could not be started + start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \ + || return 1 + start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \ + $DAEMON_ARGS \ + || return 2 + # Add code here, if necessary, that waits for the process to be ready + # to handle requests from services started subsequently which depend + # on this one. As a last resort, sleep for some time. +} + +# +# Function that stops the daemon/service +# +do_stop() +{ + # Return + # 0 if daemon has been stopped + # 1 if daemon was already stopped + # 2 if daemon could not be stopped + # other if a failure occurred + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME + RETVAL="$?" + [ "$RETVAL" = 2 ] && return 2 + # Wait for children to finish too if this is a daemon that forks + # and if the daemon is only ever run from this initscript. + # If the above conditions are not satisfied then add some other code + # that waits for the process to drop all resources that could be + # needed by services started subsequently. A last resort is to + # sleep for some time. + start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON + [ "$?" = 2 ] && return 2 + # Many daemons don't delete their pidfiles when they exit. + rm -f $PIDFILE + return "$RETVAL" +} + +# +# Function that sends a SIGHUP to the daemon/service +# +do_reload() { + # + # If the daemon can reload its configuration without + # restarting (for example, when it is sent a SIGHUP), + # then implement that here. + # + start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME + return 0 +} + +case "$1" in + start) + [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME" + do_start + case "$?" in + 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; + 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; + esac + ;; + stop) + [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME" + do_stop + case "$?" in + 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; + 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; + esac + ;; + status) + status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $? + ;; + #reload|force-reload) + # + # If do_reload() is not implemented then leave this commented out + # and leave 'force-reload' as an alias for 'restart'. + # + #log_daemon_msg "Reloading $DESC" "$NAME" + #do_reload + #log_end_msg $? + #;; + restart|force-reload) + # + # If the "reload" option is implemented then remove the + # 'force-reload' alias + # + log_daemon_msg "Restarting $DESC" "$NAME" + do_stop + case "$?" in + 0|1) + do_start + case "$?" in + 0) log_end_msg 0 ;; + 1) log_end_msg 1 ;; # Old process is still running + *) log_end_msg 1 ;; # Failed to start + esac + ;; + *) + # Failed to stop + log_end_msg 1 + ;; + esac + ;; + *) + #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2 + echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2 + exit 3 + ;; +esac + +: diff --git a/etc/py_list b/etc/py_list new file mode 100644 index 0000000..e69de29 diff --git a/etc/service/admin_console/run b/etc/service/admin_console/run new file mode 100755 index 0000000..fac1cae --- /dev/null +++ b/etc/service/admin_console/run @@ -0,0 +1,12 @@ +#!/bin/bash + +. 
/etc/default/openmanage + +PID=/var/run/gunicorn_console.pid + +SOCK=unix:$OPENMANAGE_DATA/admin_console.sock + +if [ -f $PID ]; then rm $PID; fi + +cd $OPENMANAGE_DJANGO_ROOT/omva +exec gunicorn_django -b $SOCK --pid=$PID diff --git a/etc/service/openmanage/log/run b/etc/service/openmanage/log/run new file mode 100755 index 0000000..12380c3 --- /dev/null +++ b/etc/service/openmanage/log/run @@ -0,0 +1,9 @@ +#!/bin/bash + +. /etc/default/openmanage + +LOGDIR=$OPENMANAGE_LOGS/netkes + +[ -d $LOGDIR ] || mkdir -p $LOGDIR + +exec svlogd -ttt $LOGDIR \ No newline at end of file diff --git a/etc/service/openmanage/run b/etc/service/openmanage/run new file mode 100755 index 0000000..5edf91e --- /dev/null +++ b/etc/service/openmanage/run @@ -0,0 +1,14 @@ +#!/bin/bash + +. /etc/default/openmanage + +NETKES_APP=netkes_agent.app_factory:app_factory +PID=/var/run/gunicorn.pid + +CONF=$OPENMANAGE_ROOT/netkes/netkes_agent/gunicorn.conf.py +SOCK=unix:$OPENMANAGE_DATA/net_kes.sock + +if [ -f $PID ]; then rm $PID; fi + +cd $OPENMANAGE_ROOT +exec gunicorn -c $CONF -b $SOCK --pid=$PID $NETKES_APP 2>&1 diff --git a/lib/Pandora/__init__.py b/lib/Pandora/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/Pandora/https.py b/lib/Pandora/https.py new file mode 100644 index 0000000..5434de0 --- /dev/null +++ b/lib/Pandora/https.py @@ -0,0 +1,175 @@ +"""A certificate-validating HTTPS handler for urllib2. + +http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python +""" + +import httplib +import re +import socket +import urllib2 +import ssl +import sys + +_KES_CERT = ''' +-----BEGIN CERTIFICATE----- +MIIFkDCCA3gCCQCdG/jvT/y4VjANBgkqhkiG9w0BAQUFADCBiTELMAkGA1UEBhMC +VVMxETAPBgNVBAgTCElsbGlub2lzMRAwDgYDVQQHEwdDaGljYWdvMRcwFQYDVQQK +Ew5TcGlkZXJPYWssIEluYzEZMBcGA1UEAxMQTWF0dGhldyBFcmlja3NvbjEhMB8G +CSqGSIb3DQEJARYSbWF0dEBzcGlkZXJvYWsuY29tMB4XDTExMDkxMjE1MTMxOFoX +DTIxMDkxMTE1MTMxOFowgYkxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhJbGxpbm9p +czEQMA4GA1UEBxMHQ2hpY2FnbzEXMBUGA1UEChMOU3BpZGVyT2FrLCBJbmMxGTAX +BgNVBAMTEE1hdHRoZXcgRXJpY2tzb24xITAfBgkqhkiG9w0BCQEWEm1hdHRAc3Bp +ZGVyb2FrLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKqhrhuH +NnCGYvgtnPEF1dhi2JGtwhDi0ep/EuQbhbGjrfLk12QB4NI2yK4Bxf1Aogl2yPiD +3BSgz70rGsBy0nNbHnfRJDaRj8OqxpcsWjyGns9Yw79GeJUB/3Zq/DBHCbJeHOux +nJt4dW1ZyvEYMQjA8SlmbobzMpSvCwAHHkuGIf5RDr6M8ZaaN+pQ1zZm5dBJgq9D +SuK8fpKO4DULTdFeaT225kWFJXx+8jgbhJNvv8PE5pacspwZ4oWO38ThhMz3oCG6 +kAa6w8mazmxTeNeG95UUHLUbl+2Gj7cI+JKR8IQKPiDr7ryqvVoiPbvwkOfAssor +VsNNjozaEOgJ+64Cj3ZGTl1cfeFwdQfsqy5JjH2ATKF/VZUjBq8ZYy3Z6GGMffnF +PfCE/I/cpgT/GsKKT7jJYeFGr1QPAb0iy0LG6BtLI2SQ+sndF842JoIKuFZAU72m +8Mlh4Nud3wxhBtw3pP8dDOBOjB+VkvElOE7hdaIUd8RL8+2EQiZZmRRVRzxC+vld +WatjnB0QzCxXaPTHALLQlB2xHA4K5lXbj/mWhwZUY1sLPYOzBbLclZVIBzUZrryI +C5+qI3Ce1OMQHz+l9WTfGmHC+um8IWRi8N7UKu19UMji3qdsz6sYW2t67y1gWkpX +VX1NHdOlpHFvDEvJiT1MmMl7kcw/OmH24fa/AgMBAAEwDQYJKoZIhvcNAQEFBQAD +ggIBAEsnoP4lb7CB+kt4pRM2VBUO4Uhxuc/V3cDbCei8XAMFco24/MwCFlyy3WVL +Mo6V+Sx2s2s02dkfDVhFIORyOIV00Yq6CTDhsmfy6XStY10KcPNo+3MajIznCgMp +TgUNoFMfs5Z5kmNzJtz47DoZs3GP5V3V6tajUfjlAbAmjJv90xnJe856TkzAXq8A +EKI2TzmamgsarNyTCCBVNRtaUFC7w3DN0Oi9AyjVEGzuJPPOGiKvzv5gUnJ3DLoe +G2/E+3IQVbuPO6LyFlNxraQM6UHLEylkXmxemFFiV2vIsHqPxMM9MSL+rnt0335F +s7st7OsFbjRBw77jiPgWY3MA5O5C6Vhcw7N7NtgvuaHWn2GLZnjdpnKzGxSABfqD +rI5kNzUVusy9+XkbC65hEZWF5eUdP2u0+81KSHFRF5wnnCHZuXwNr68QKjZ5tE6X +3cXF4MazbEZq2ywu/u2B0gKeqTZX/6vMWK3lLyiWHftI/3UU3UqTfsx8nET9deB7 +vXHy6Uv5njAmG9EY3QF1XJTiFOKtjY63wbfBQf9FTQ4wd/JV8XnTZJ2i8q1A9ZWh 
+2+aZjKVxajYu3ezq3LVCXXRz0xPh4/6oBGcf2KHHmXiMsC5yBadld4zzaqdAlPi8 +v6Yf7goycsxixzuwR/o3UK0z2bGssb4gkYJxEksACgOd+yRM +-----END CERTIFICATE----- +''' + + +def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, bind_address=None): + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. + """ + + msg = "getaddrinfo returns an empty list" + host, port = address + for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket.socket(af, socktype, proto) + if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if bind_address is not None: + sock.bind(bind_address) + sock.connect(sa) + return sock + + except socket.error, msg: + if sock is not None: + sock.close() + + raise socket.error, msg + + +class InvalidCertificateException(httplib.HTTPException, urllib2.URLError): + def __init__(self, host, cert, reason): + httplib.HTTPException.__init__(self) + self.host = host + self.cert = cert + self.reason = reason + + def __str__(self): + return ('Host %s returned an invalid certificate (%s) %s\n' % + (self.host, self.reason, self.cert)) + + +class CertValidatingHTTPSConnection(httplib.HTTPConnection): + default_port = httplib.HTTPS_PORT + + def __init__(self, host, port=None, key_file=None, cert_file=None, + ca_certs=None, strict=None, bind_address=None, **kwargs): + httplib.HTTPConnection.__init__(self, host, port, strict, **kwargs) + self.key_file = key_file + self.cert_file = cert_file + self.ca_certs = ca_certs + if self.ca_certs: + self.cert_reqs = ssl.CERT_REQUIRED + else: + self.cert_reqs = ssl.CERT_NONE + self.bind_address = bind_address + + def _GetValidHostsForCert(self, cert): + if 'subjectAltName' in cert: + return [x[1] for x in cert['subjectAltName'] + if x[0].lower() == 'dns'] + else: + return [x[0][1] for x in cert['subject'] + if x[0][0].lower() == 'commonname'] + + def _ValidateCertificateHostname(self, cert, bincert, hostname): + hosts = self._GetValidHostsForCert(cert) + for host in hosts: + host_re = host.replace('.', '\.').replace('*', '[^.]*') + if re.search('^%s$' % (host_re,), hostname, re.I): + return True + + # If we cannot validate against the hostname, try against the + # KES certificate. 
+        binary_kes_cert = ssl.PEM_cert_to_DER_cert(_KES_CERT)
+        if binary_kes_cert == bincert:
+            return True
+
+        return False
+
+    def connect(self):
+        sock = create_connection((self.host, self.port), self.timeout, self.bind_address)
+        self.sock = ssl.wrap_socket(sock, keyfile=self.key_file,
+                                    certfile=self.cert_file,
+                                    cert_reqs=self.cert_reqs,
+                                    ca_certs=self.ca_certs)
+        if self.cert_reqs & ssl.CERT_REQUIRED:
+            cert = self.sock.getpeercert()
+            bincert = self.sock.getpeercert(binary_form=True)
+            # Strip any ":port" suffix; a maxsplit of 1 (not 0) is needed
+            # for the split to actually happen.
+            hostname = self.host.split(':', 1)[0]
+            if not self._ValidateCertificateHostname(cert, bincert, hostname):
+                raise InvalidCertificateException(hostname, cert,
+                                                  'hostname mismatch')
+
+
+class VerifiedHTTPSHandler(urllib2.HTTPSHandler):
+    def __init__(self, **kwargs):
+        urllib2.AbstractHTTPHandler.__init__(self)
+        self._connection_args = kwargs
+
+    def https_open(self, req):
+        def http_class_wrapper(host, **kwargs):
+            full_kwargs = dict(self._connection_args)
+            full_kwargs.update(kwargs)
+            return CertValidatingHTTPSConnection(host, **full_kwargs)
+
+        try:
+            return self.do_open(http_class_wrapper, req)
+        except urllib2.URLError, e:
+            if type(e.reason) == ssl.SSLError and e.reason.args[0] == 1:
+                raise InvalidCertificateException(req.host, '',
+                                                  e.reason.args[1])
+            raise
+
+    https_request = urllib2.HTTPSHandler.do_request_
+
+
+#if __name__ == "__main__":
+#    if len(sys.argv) != 3:
+#        print "usage: python %s CA_CERT URL" % sys.argv[0]
+#        exit(2)
+
+#    handler = VerifiedHTTPSHandler(ca_certs = sys.argv[1])
+#    opener = urllib2.build_opener(handler)
+#    print opener.open(sys.argv[2]).read()
diff --git a/lib/Pandora/serial.py b/lib/Pandora/serial.py
new file mode 100644
index 0000000..b65c039
--- /dev/null
+++ b/lib/Pandora/serial.py
@@ -0,0 +1,102 @@
+'''! @package Pandora.serial
+Abstract away from the "latest hotness" in serialization
+
+Since we aren't sure that we will forever stick with cerealizer,
+having already once switched from pickle, we've created this module to
+centralize our implementation.
+
+Use register to note classes which are safe to serialize.
+
+@complete OK 20080102 bryon
+'''
+
+import sys
+from itertools import chain
+import Crypto.PublicKey.RSA
+
+# Cerealizer has mostly the same interface as pickle, so we just
+# import its functions here.
+
+from cerealizer import dump, dumps, load, loads, register, register_alias
+from cerealizer import NotCerealizerFileError, NonCerealizableObjectError, \
+     EndOfFile
+
+NotSerializerFileError = NotCerealizerFileError
+# Map the "serializable object" alias to the matching cerealizer exception
+# (the original bound this to NotCerealizerFileError, a copy-paste slip).
+NotSerializableObjectError = NonCerealizableObjectError
+
+cryptoclass = ('Crypto.PublicKey.RSA', 'RSAobj')
+
+if hasattr(Crypto.PublicKey.RSA, 'RSAImplementation'):
+    cryptoclass = ('Crypto.PublicKey.RSA', '_RSAobj')
+
+## Our serializable classes/modules
+
+## 20110725 MattE- Shortened from the original source, no other use of
+## Pandora libs means no need to register other Pandora types here.
+known = [
+    cryptoclass,
+    ]
+
+aliases = None
+
+if not hasattr(Crypto.PublicKey.RSA, 'RSAImplementation'):
+    aliases = {
+        'Crypto.PublicKey.RSA': (
+            ('RSAobj', 'RSAobj_c'),
+        )
+    }
+else:
+    aliases = {
+        'Crypto.PublicKey.RSA': (
+            ('_RSAobj', 'RSAobj'),
+            ('_RSAobj', 'RSAobj_c'),
+        )
+    }
+
+_already_called = False
+def register_all(extras = []):
+    """! Register our known serializables.
+ + @param extras extra class/mod names to allow serialization + @good OK 20080102 bryon + """ + global _already_called + if _already_called: + return + _already_called = True + for module_name, class_names in chain(known, extras): + + before = sys.modules + + if type(class_names) == str: + class_names = [ class_names ] + + + module = __import__(module_name, globals(), locals(), + class_names) + for class_name in class_names: + clas = getattr(module, class_name) + # this has to be ugly to maintain backwards crypto compatibility + # internal classnames that cerealizer picks up/picked up on changed from PyCrypto 2.0.1 -> 2.1.0 + # as such we need to alias both the generated and read it classnames. + # if any more changes need to happen here, or this gets called more than once per process + # or we start multiprocessing for whatever reason, register_all will need a refactoring. + if class_name == '_RSAobj': + register(clas, classname = 'Crypto.PublicKey.RSA.RSAobj') + else: + register(clas) + + if module_name in aliases: + for alias in aliases[module_name]: + clas = getattr(module,alias[0]) + register_alias(clas, '.'.join((module_name, alias[1]))) + + sys.modules = before + +if not hasattr(Crypto.PublicKey.RSA,'RSAImplementation'): + Crypto.PublicKey.RSA.construct = Crypto.PublicKey.RSA.construct_py + Crypto.PublicKey.RSA.generate = Crypto.PublicKey.RSA.generate_py + + + __all__ = [dump, dumps, load, loads, register, register_all, + NotCerealizerFileError] diff --git a/lib/__init__.py b/lib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/account_mgr/__init__.py b/lib/account_mgr/__init__.py new file mode 100644 index 0000000..693c8d1 --- /dev/null +++ b/lib/account_mgr/__init__.py @@ -0,0 +1,113 @@ +''' +__init__.py + +Init and common functions for the OpenManage user management system. + +(c) 2011 SpiderOak, Inc. 
+'''
+
+import logging
+import psycopg2
+from accounts_api import Api
+from contextlib import contextmanager
+
+SELECT_ADMIN_TOKEN = '''
+select no_devices_only, single_use_only,
+       case when exists(select 1 from admin_token_avatar_use where token=%(token)s) then true
+            else false end as token_used
+from admin_setup_tokens
+where token=%(token)s and expiry > now() and
+      exists(select 1 from users where email=%(email)s and enabled)
+'''
+
+INSERT_ADMIN_AUTH_TOKEN_AVATAR_USE = '''
+insert into admin_token_avatar_use (token, avatar_id)
+values (%(token)s, (select avatar_id from users where email=%(email)s))
+'''
+
+@contextmanager
+def get_cursor(config):
+    # Connect outside the try block: if the connect itself fails there is
+    # no connection to roll back (the original rolled back an unbound name).
+    conn = psycopg2.connect(database=config['db_db'],
+                            user=config['db_user'],
+                            password=config['db_pass'],
+                            host=config['db_host'])
+    try:
+        yield conn.cursor()
+    except:
+        conn.rollback()
+        raise
+    else:
+        conn.commit()
+
+def get_api(config):
+    return Api.create('https://spideroak.com/apis/accounts/v1/',
+                      config['api_user'],
+                      config['api_password'],)
+
+def admin_token_auth(config, username, password):
+    log = logging.getLogger("admin_token_auth")
+    log.debug('checking admin auth code for username: %s' % username)
+    api = get_api(config)
+    user_token = dict(email=username, token=password)
+    with get_cursor(config) as cur:
+        cur.execute(SELECT_ADMIN_TOKEN, user_token)
+        if cur.rowcount != 1:
+            return False
+
+        no_devices_only, single_use_only, token_used = cur.fetchone()
+
+        if no_devices_only and api.list_devices(username):
+            return False
+
+        if single_use_only and token_used:
+            return False
+
+    with get_cursor(config) as cur:
+        cur.execute(INSERT_ADMIN_AUTH_TOKEN_AVATAR_USE, user_token)
+
+    return True
+
+def authenticator(config, username, password):
+    """Authenticates users against OpenManage.
+
+    This calls the correct authentication source to auth users.
+
+    We expect a user_source module to offer a "can_auth" function taking three arguments:
+    * The config dictionary
+    * The username trying to authenticate
+    * Their password
+
+    The net_kes webserver should not at any time have to know or care how we
+    are actually authenticating users, only that we do.
+    """
+
+    log = logging.getLogger("authenticator")
+
+    auth_method = config.get('auth_method', None)
+    auth_source = None
+
+    if admin_token_auth(config, username, password):
+        return True
+
+    if auth_method == 'ldap':
+        log.debug("Attempting to use LDAP simple bind for authenticating %s" % (username,))
+        from account_mgr.user_source import ldap_source
+        auth_source = ldap_source
+
+    elif auth_method == 'radius':
+        log.debug("Attempting to use RADIUS authentication for %s" % (username,))
+        from account_mgr.user_source import radius_source
+        auth_source = radius_source
+
+    elif auth_method == 'local':
+        log.debug("Attempting to use local authentication for %s" % (username,))
+        from account_mgr.user_source import local_source
+        auth_source = local_source
+
+    else:
+        log.error("No user authentication source provided, please check agent_config.")
+        log.warn("Returning failed authentication for %s" % (username,))
+        return False
+
+    return auth_source.can_auth(config, username, password)
diff --git a/lib/account_mgr/account_runner.py b/lib/account_mgr/account_runner.py
new file mode 100644
index 0000000..ec29edf
--- /dev/null
+++ b/lib/account_mgr/account_runner.py
@@ -0,0 +1,172 @@
+"""
+account_runner.py
+(c) 2011 SpiderOak, Inc
+
+Runs account manipulation options against the local DB and the Billing API.
+ +The functions here are meant to be quasi-transactional; if there's an error raised through the +billing API handling functions, we will write out what we can to the DB to keep state consistent. +""" +import inspect + +import logging + +import api_interface + + +class BailApiCall(Exception): + pass + +class AccountRunner(object): + """ + Manages running account manipulation operations between our local DB and + the SpiderOak BillingAPI. + """ + _ADD_USERS_STATEMENT = ''' + INSERT INTO users + (uniqueid, email, avatar_id, givenname, surname, group_id) VALUES + (%(uniqueid)s,%(email)s,%(avatar_id)s,%(firstname)s,%(lastname)s,%(group_id)s); + ''' + def __init__(self, config, db_conn): + self._log = logging.getLogger("AccountRunner") + self._promo_code = config.get("promo_code", None) + self._db_conn = db_conn + + def runall(self, changes_dict): + """ + Commits all changes presented in the changes_dict. + + Keys in changes_dict must conform to the rest of the public APIs for this class. + + :param changes_dict: Dictionary of user changes. + """ + for action in changes_dict.keys(): + fun = getattr(self, action) + ok_users, fail_users = fun(changes_dict[action]) + if len(fail_users): + self._log.error("Got error during runall, aborted on action: %s" % (action,)) + break + + def create(self, users): + """ + Creates users SpiderOak users and updates the local DB with the user list. + + :param users: List of users to create + :returns tuple(list, list): (created users, failed users). + """ + + try: + created_users = self._api_create_users(users) + except BailApiCall as e: + (created_users, ) = e.args + + cur = self._db_conn.cursor() + cur.executemany(self._ADD_USERS_STATEMENT, created_users) + + return ( + created_users, + [user for user in users if user not in created_users], + ) + + + def enable(self, users): + """ + Toggles the enabled status of users in the SpiderOak DB. + + :param users: List of users to enable. + :returns tuple(list, list): (created users, failed users). + """ + return self._run_generic(api_interface.activate_user, users, + "UPDATE users SET enabled=true WHERE avatar_id=%(avatar_id)s") + + def disable(self, users): + """Disables users in SpiderOak's user DB. + + :param users: list of users to disable + :returns tuple(list, list): (success users, failed users) + """ + + return self._run_generic(api_interface.deactivate_user, users, + "UPDATE users SET enabled=false WHERE avatar_id=%(avatar_id)s") + + def group(self, users): + """Assigns users to plans in the SO user DB. + + :param users: list of users to set the plan for. + :returns tuple(list, list): (success users, failed users) + """ + + return self._run_generic(api_interface.set_user_group, users, + "UPDATE users SET group_id=%(group_id)s WHERE avatar_id=%(avatar_id)s") + + def email(self, users): + """Changes user email addresses. + + :param users: list of users to set email addresses for. 
+ :returns tuple(list, list): (success users, failed users) + """ + + return self._run_generic(api_interface.change_email, users, + "UPDATE users SET email=%(email)s WHERE avatar_id=%(avatar_id)s") + + def _run_generic(self, fun, users, sql_statement): + """Internal function to run generic actions with both the API and DB.""" + try: + complete_users = self._api_run_generic(fun, users) + except BailApiCall as e: + (complete_users, ) = e.args + + cur = self._db_conn.cursor() + cur.executemany(sql_statement, complete_users) + + return ( + complete_users, + [user for user in users if user not in complete_users], + ) + + def _api_create_users(self, users): + """Internal function to create users via the billing API.""" + results = list() + for user in users: + try: + result = api_interface.create_user(user, self._promo_code) + except api_interface.ApiActionFailedError as e: + import traceback + traceback.print_exc() + self._log.error('Got ApiActionFailedError: %s' % e) + raise BailApiCall(results) + else: + user['avatar_id'] = result['avatar_id'] + results.append(user) + + self._log.info("created user %s" % (user['email'],)) + + return results + + def _api_run_generic(self, fun, users): + """Internal function to run API calls given the specific API function.""" + + results = [] + # Start building the arguments dictionary. + argdict = {} + args = inspect.getargspec(fun) + if 'promo_code' in args.args: + argdict['promo_code'] = self._promo_code + + # In the event of getting an API exception, we still need to + # update the DB with what we've done to keep things consistent, so + # we catch the error and bail with the current state of the + # results array. + for user in users: + argdict['user'] = user + try: + result = fun(**argdict) + except api_interface.ApiActionFailedError as e: + import traceback + traceback.print_exc() + self._log.error('Function %s got ApiActionFailedError: %s' % (fun, e,)) + raise BailApiCall(results) + else: + results.append(user) + + return results diff --git a/lib/account_mgr/accounts_api.py b/lib/account_mgr/accounts_api.py new file mode 100644 index 0000000..1b121ab --- /dev/null +++ b/lib/account_mgr/accounts_api.py @@ -0,0 +1,273 @@ +import json +import urllib +import urllib2 + +from api_client import ApiClient + + +class Api(object): + class Error(Exception): + pass + class BadParams(Error): + pass + class NotFound(Error): + pass + class DuplicateGroupName(Error): + pass + class DuplicateUsername(Error): + pass + class DuplicateEmail(Error): + pass + class BadPlan(Error): + pass + class BadGroup(Error): + pass + class QuotaExceeded(Error): + pass + class EmailNotSent(Error): + pass + + @classmethod + def create(cls, base, username, password): + """Factory method using default ApiClient class.""" + client = ApiClient(base, username, password) + return cls(client) + + def __init__(self, client): + self.client = client + + def ping(self): + return self.client.get_json('ping') + + ### Plans + + def list_plans(self): + return self.client.get_json('plans') + + ### Quota + + def quota(self): + return self.client.get_json('partner/quota') + + ### Features + + def enterprise_features(self): + return self.client.get_json('partner/features') + + ### Settings + + def enterprise_settings(self): + return self.client.get_json('partner/settings') + + def update_enterprise_settings(self, settings): + try: + return self.client.post_json('partner/settings', settings) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() + raise + + def 
update_enterprise_password(self, new_password): + try: + return self.client.post_json('partner/password', new_password) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() + raise + + ### Groups + + def list_groups(self): + return self.client.get_json('groups/') + + def search_groups(self, name): + return self.client.get_json('groups/?search=%s' % urllib.quote(name)) + + def create_group(self, group_info): + try: + resp = self.client.post_json_raw_response( + 'groups/', group_info) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() + elif err.code == 409: + data = json.loads(err.read()) + if 'name' in data['conflicts']: + raise self.DuplicateGroupName() + elif 'plan_id' in data['conflicts']: + raise self.BadPlan() + raise + return int(resp.info()['location'].rsplit('/', 1)[-1]) + + def get_group(self, group_id): + try: + return self.client.get_json('groups/%d' % (group_id,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def edit_group(self, group_id, group_info): + try: + self.client.post_json('groups/%d' % (group_id,), group_info) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + elif err.code == 400: + raise self.BadParams() + elif err.code == 409: + data = json.loads(err.read()) + if 'name' in data['conflicts']: + raise self.DuplicateGroupName() + elif 'plan_id' in data['conflicts']: + raise self.BadPlan() + elif 'avatars_over_quota' in data['conflicts']: + raise self.QuotaExceeded() + raise + + def delete_group(self, group_id, new_group_id=None): + try: + if new_group_id: + self.client.delete('groups/%d?move_to=%d' % (group_id, new_group_id)) + else: + self.client.delete('groups/%d' % (group_id,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + ### Shares + + def _create_query_string(self, limit, offset): + get_params = dict() + if limit: + get_params['limit'] = limit + if offset: + get_params['offset'] = offset + query_string = '' + if get_params: + query_string = '?%s' % urllib.urlencode(get_params) + return query_string + + def list_shares_for_brand(self, limit=None, offset=None): + query_string = self._create_query_string(limit, offset) + return self.client.get_json('shares/%s' % query_string) + + ### Users + + def list_users(self, limit=None, offset=None): + query_string = self._create_query_string(limit, offset) + return self.client.get_json('users/%s' % query_string) + + def search_users(self, name_or_email, limit=None, offset=None): + query_string = self._create_query_string(limit, offset) + if query_string: + query_string = '&' + query_string + return self.client.get_json('users/?search=%s%s' % (urllib.quote(name_or_email), query_string)) + def get_user_count(self): + return self.client.get_json('users/?action=user_count')['user_count'] + + def create_user(self, user_info): + try: + return self.client.post_json('users/', user_info) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() + elif err.code == 409: + data = json.loads(err.read()) + if 'username' in data['conflicts']: + raise self.DuplicateUsername() + if 'email' in data['conflicts']: + raise self.DuplicateEmail() + elif 'plan_id' in data['conflicts']: + raise self.BadPlan() + elif 'group_id' in data['conflicts']: + raise self.BadGroup() + raise + + def get_user(self, username_or_email): + try: + return self.client.get_json( + 'users/%s' % (username_or_email,)) + except urllib2.HTTPError, err: + if err.code == 404: + 
raise self.NotFound() + raise + + def list_devices(self, username_or_email): + try: + return self.client.get_json( + 'users/%s/devices' % (username_or_email,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def list_shares(self, username_or_email): + try: + return self.client.get_json( + 'users/%s/shares/' % (username_or_email,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def get_share(self, username_or_email, room_key): + try: + return self.client.get_json( + 'users/%s/shares/%s' % (username_or_email, room_key)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def edit_share(self, username_or_email, room_key, enable): + action = 'enable' if enable else 'disable' + try: + return self.client.post_json( + 'users/%s/shares/%s?action=%s' % (username_or_email, room_key, action), {}) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def edit_user(self, username_or_email, user_info): + try: + self.client.post_json( + 'users/%s' % (username_or_email,), user_info) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + elif err.code == 400: + raise self.BadParams() + elif err.code == 402: + raise self.QuotaExceeded() + elif err.code == 409: + data = json.loads(err.read()) + if 'email' in data['conflicts']: + raise self.DuplicateEmail() + elif 'group_id' in data['conflicts']: + raise self.BadGroup() + elif 'plan_id' in data['conflicts']: + raise self.BadPlan() + raise + + def delete_user(self, username_or_email): + try: + self.client.delete('users/%s' % (username_or_email,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def send_activation_email(self, username_or_email): + try: + self.client.post('users/%s?action=sendactivationemail' % ( + username_or_email,), '') + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + elif err.code == 409: + raise self.EmailNotSent() + raise + diff --git a/lib/account_mgr/api_client.py b/lib/account_mgr/api_client.py new file mode 100644 index 0000000..c59a183 --- /dev/null +++ b/lib/account_mgr/api_client.py @@ -0,0 +1,77 @@ +import json +import urllib2 +from base64 import b64encode +from urlparse import urljoin + +from netkes.Pandora.https import VerifiedHTTPSHandler + + +_DEFAULT_HANDLERS = [ + urllib2.ProxyHandler, + urllib2.HTTPDefaultErrorHandler, + urllib2.HTTPRedirectHandler, + urllib2.HTTPErrorProcessor, +] +def _make_opener(url): + opener = urllib2.OpenerDirector() + for handler_class in _DEFAULT_HANDLERS: + opener.add_handler(handler_class()) + opener.add_handler(VerifiedHTTPSHandler()) + return opener + + +class RequestWithMethod(urllib2.Request): + _method = None + + def set_method(self, method): + self._method = method + + def get_method(self): + return self._method or urllib2.Request.get_method(self) + + +class ApiClient(object): + def __init__(self, base, username, password): + self.base = base + self.username = username + self.password = password + self.opener = _make_opener(base) + + def open(self, path, data=None, headers=None, method=None): + if headers is None: + headers = {} + if ( + self.username and + 'authorization' not in set(k.lower() for k in headers) + ): + headers['authorization'] = 'Basic %s' % ( + b64encode('%s:%s' % ( + self.username, self.password + )).strip(), + ) + req = RequestWithMethod(urljoin(self.base, path), data, headers) + req.set_method(method) + return 
self.opener.open(req)
+
+    def get(self, path):
+        return self.open(path)
+
+    def get_json(self, path):
+        return json.loads(self.get(path).read())
+
+    def post(self, path, data, headers=None):
+        if not isinstance(data, basestring):
+            # urlencode is not imported at module level; pull it in here
+            # so form-encoded posts do not raise a NameError.
+            from urllib import urlencode
+            data = urlencode(data)
+        return self.open(path, data, headers)
+
+    def post_json_raw_response(self, path, data, headers=None):
+        return self.post(path, json.dumps(data), headers)
+
+    def post_json(self, path, data, headers=None):
+        body = self.post_json_raw_response(path, data, headers).read()
+        if body:
+            return json.loads(body)
+        return None
+
+    def delete(self, path, headers=None):
+        return self.open(path, headers=headers, method='DELETE')
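Taken together, ``accounts_api.Api`` rides on top of ``api_client.ApiClient``; usage looks roughly like the sketch below. The base URL matches the default used by ``account_mgr.get_api``, while the credentials are the sample configuration's placeholders, not working values::

    # Sketch: exercising the accounts API wrapper with placeholder credentials.
    from account_mgr.accounts_api import Api

    api = Api.create('https://spideroak.com/apis/accounts/v1/',
                     'enterprise_admin', 'ALSO_SEEKRET')

    print api.ping()
    for user in api.list_users(limit=10):
        print user

    try:
        api.get_user('nosuchuser@example.com')
    except Api.NotFound:
        print 'no such user'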
diff --git a/lib/account_mgr/api_interface.py b/lib/account_mgr/api_interface.py
new file mode 100644
index 0000000..3cf65ba
--- /dev/null
+++ b/lib/account_mgr/api_interface.py
@@ -0,0 +1,250 @@
+'''
+api_interface.py
+
+(c) 2011 SpiderOak, Inc.
+
+Provides an interface to the billing and new user APIs.
+
+'''
+
+import json
+import logging
+import re
+from urllib import quote
+import urllib2
+
+from common import get_config
+from Pandora import https
+
+API_URL_BASE = "%s/%s"
+NO_PLANS = "No storage plans provided."
+
+class ApiActionFailedError(Exception):
+    pass
+
+class ManipulateUserFailed(ApiActionFailedError):
+    pass
+
+class FetchInformationFailed(ApiActionFailedError):
+    pass
+
+
+_AVATAR_ID_URL = "users/%d"
+_EMAIL_URL = "users/byemail/%s"
+def _make_api_url_for_user(user):
+    """
+    Creates a string to reference the user in the SpiderOak Billing API.
+    Currently supports using either an avatar_id or email address.
+    """
+    if 'avatar_id' in user:
+        return _AVATAR_ID_URL % (user['avatar_id'],)
+    else:
+        return _EMAIL_URL % (quote(user['email']),)
+
+def _run_api_call(action, data=None):
+    '''
+    Runs a call against the SpiderOak API.
+    Returns a python object containing the response.
+    '''
+    log = logging.getLogger('run_api_call')
+    uri = API_URL_BASE % (get_config()['api_root'], action, )
+
+    https_handler = https.VerifiedHTTPSHandler()
+    https_opener = urllib2.build_opener(https_handler)
+    urllib2.install_opener(https_opener)
+
+    auth_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
+    auth_mgr.add_password(realm=None, uri=uri,
+                          user=get_config()['api_user'],
+                          passwd=get_config()['api_password'])
+
+    log.debug("Trying with user %s and pass %s" % (get_config()['api_user'],
+                                                   get_config()['api_password'],))
+
+    auth_handler = urllib2.HTTPBasicAuthHandler(auth_mgr)
+    auth_opener = urllib2.build_opener(auth_handler)
+    urllib2.install_opener(auth_opener)
+
+    if data is None:
+        fh = urllib2.urlopen(uri)
+    else:
+        datastr = json.dumps(data)
+        fh = urllib2.urlopen(uri, datastr)
+
+    json_string = fh.read()
+    retr_data = json.loads(json_string)
+
+    return retr_data
+
+
+def create_user(user, promo_code=None):
+    '''
+    Uses the SpiderOak new user API to create a new user.
+    Returns: newly created user data information.
+    Raises ManipulateUserFailed on failure.
+    '''
+
+    new_user_data = {"action": "create_user",
+                     "auto_username_seq": True,
+                     "firstname": user['firstname'],
+                     "lastname": user['lastname'],
+                     "email": user['email'],
+                     "group_id": user['group_id'],
+                     }
+    if promo_code is not None:
+        new_user_data["promo"] = promo_code
+
+    try:
+        result = _run_api_call("users/", new_user_data)
+    except Exception as e:
+        import traceback
+        traceback.print_exc()
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        # Include the API's failure reason (the original interpolated the
+        # literal text "result['reason']" rather than its value).
+        raise ManipulateUserFailed("%s: %s" % (user['email'], result['reason']))
+
+
+def set_user_group(user, promo_code=None):
+    '''
+    Sets the group_id of a specified avatar.
+    Raises ManipulateUserFailed on error.
+    '''
+    user_group_data = {"action" : "set_group",
+                       "group_id": user['group_id'],
+                       }
+    if promo_code is not None:
+        user_group_data['promo_code'] = promo_code
+
+    try:
+        result = _run_api_call(_make_api_url_for_user(user),
+                               user_group_data)
+    except Exception as e:
+        import traceback
+        traceback.print_exc()
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        raise ManipulateUserFailed(result['reason'])
+
+
+def activate_user(user):
+    '''
+    Activates the given avatar.
+    Raises ManipulateUserFailed on error.
+    '''
+    activate_data = {"action" : "set_enabled",
+                     "enabled": True,
+                     }
+    try:
+        result = _run_api_call(_make_api_url_for_user(user),
+                               activate_data)
+    except Exception as e:
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        raise ManipulateUserFailed(result['reason'])
+
+
+def deactivate_user(user):
+    '''
+    Deactivates the given avatar.
+    Raises ManipulateUserFailed on error.
+    '''
+    deactivate_data = {"action" : "set_enabled",
+                       "enabled": False,
+                       }
+    try:
+        result = _run_api_call(_make_api_url_for_user(user),
+                               deactivate_data)
+    except Exception as e:
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        raise ManipulateUserFailed(result['reason'])
+
+
+def purge_user(user):
+    '''
+    Purges the given avatar.
+    Raises ManipulateUserFailed on error.
+    '''
+    purge_data = {"action" : "purge_account", }
+    try:
+        result = _run_api_call(_make_api_url_for_user(user),
+                               purge_data)
+    except Exception as e:
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        raise ManipulateUserFailed(result['reason'])
+
+
+def change_email(user):
+    '''
+    Sets the email on the given avatar to the given address.
+    Raises ManipulateUserFailed on error.
+    '''
+    change_data = {"action" : "set_email",
+                   "email" : user['email'],
+                   }
+    try:
+        # This accounts for the fact that we might pass in two email
+        # addresses when looking up the user by email.
+        if 'old_email' in user:
+            api_url = _make_api_url_for_user({'email': user['old_email']})
+        else:
+            api_url = _make_api_url_for_user(user)
+        result = _run_api_call(api_url, change_data)
+    except Exception as e:
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        raise ManipulateUserFailed(result['reason'])
+
+
+def fetch_users():
+    '''
+    Returns a list of the users currently registered with SpiderOak.
+    Raises FetchInformationFailed on problem.
+    '''
+    try:
+        result = _run_api_call("users/")
+    except Exception as e:
+        raise FetchInformationFailed(str(e))
+
+    return result
+
+
+def fetch_plans(promo_code=None):
+    '''
+    Returns a list of the plans available to us.
+    Raises FetchInformationFailed on problem.
+    '''
+    if promo_code is None:
+        action = "plans/"
+    else:
+        action = "plans/?promo=%s" % promo_code
+
+    try:
+        result = _run_api_call(action)
+    except Exception as e:
+        raise FetchInformationFailed(str(e))
+
+    if len(result) < 1:
+        raise FetchInformationFailed(NO_PLANS)
+
+    return result
+
diff --git a/lib/account_mgr/cmdline_utils.py b/lib/account_mgr/cmdline_utils.py
new file mode 100644
index 0000000..0725ef3
--- /dev/null
+++ b/lib/account_mgr/cmdline_utils.py
@@ -0,0 +1,182 @@
+"""
+cmdline_utils.py
+
+Helper functions and classes for OpenManage command-line utilities.
+"""
+import csv
+import json
+import logging
+
+import api_interface
+from account_mgr.local_source import set_user_password, set_multi_passwords
+
+
+SETPW_REQUIRED_KEYS = frozenset(['email_addr', 'password'])
+CREATE_REQUIRED_KEYS = frozenset(['email_addr', 'given_name', 'surname', 'group_id'])
+SET_EMAIL_REQUIRED_KEYS = frozenset(['email_addr', 'new_email'])
+SET_GROUP_REQUIRED_KEYS = frozenset(['email_addr', 'group_id'])
+
+class UsersActionError(Exception):
+    pass
+
+class CSVMissingKeys(Exception):
+    pass
+
+def assure_keys(dict_reader, required_keys):
+    """
+    Reads from a csv.DictReader object and creates a list. Ensures
+    that required_keys are in every row from the DictReader.
+
+    :param dict_reader: The opened csv.DictReader object.
+    :param required_keys: Set of keys required in every row in the CSV file.
+    :returns list: list of change dicts.
+    """
+    rows = list()
+    for row in dict_reader:
+        keys = set(row.keys())
+        if required_keys <= keys:
+            rows.append(row)
+        else:
+            raise CSVMissingKeys("Missing one or more of required keys: %s" %
+                                 (required_keys, ))
+    return rows
+
+def run_csv_file(db_conn, filename, optdict):
+    """Runs the appropriate actions from a CSV file.
+
+    :param db_conn: DB connection object
+    :param filename: CSV filename
+    :param optdict: Options dictionary.
+    :returns int: number of successful user actions.
+    """
+
+    log = logging.getLogger("run_csv_file")
+    # DictReader needs an iterable of lines, not the filename itself.
+    dict_reader = csv.DictReader(open(filename, 'rb'))
+
+    if 'setpw' in optdict:
+        user_dicts = assure_keys(dict_reader, SETPW_REQUIRED_KEYS)
+        # user_dicts is a list of row dicts; pull the columns out of each
+        # row (the original indexed the list itself, a TypeError).
+        emails = (row['email_addr'] for row in user_dicts)
+        pws = (row['password'] for row in user_dicts)
+        set_multi_passwords(db_conn, emails, pws)
+
+        # All done, so leave the function here.
+        return len(user_dicts)
+
+    success_count = 0
+    if 'create' in optdict:
+        # Runs the creation routine for each user.
+        user_dicts = assure_keys(dict_reader, CREATE_REQUIRED_KEYS)
+        for user in user_dicts:
+            result = api_interface.create_user(
+                {'firstname': user['given_name'],
+                 'lastname': user['surname'],
+                 'email': user['email_addr'],})
+
+            result['group_id'] = user['group_id']
+            try:
+                result2 = api_interface.set_user_group(result)
+            except api_interface.ApiActionFailedError as e:
+                log.error("User created with no group assignment: %s" %
+                          (user['email_addr'],))
+                raise e
+            success_count += 1
+
+    elif 'set_email' in optdict:
+        # Sets emails for each user.
+        user_dicts = assure_keys(dict_reader, SET_EMAIL_REQUIRED_KEYS)
+        for user in user_dicts:
+            result = api_interface.change_email(
+                {'email': user['new_email'],
+                 'old_email': user['email_addr'],})
+            success_count += 1
+    elif 'set_group' in optdict:
+        # Sets groups for each user.
+ user_dicts = assure_keys(dict_reader, SET_GROUP_REQUIRED_KEYS) + for user in user_dicts: + result = api_interface.set_user_group( + {'email': user['email_addr'], + 'group_id': user['group_id'],}) + success_count += 1 + elif 'disable' in optdict: + user_dicts = assure_keys(dict_reader, frozenset(['email_addr'])) + for user in user_dicts: + result = api_interface.deactivate_user( + {'email': user['email_addr'],}) + success_count += 1 + elif 'enable' in optdict: + user_dicts = assure_keys(dict_reader, frozenset(['email_addr'])) + for user in user_dicts: + result = api_interface.activate_user( + {'email': user['email_addr'],}) + success_count += 1 + else: + raise UsersActionError("Got an action that's not accounted for!") + + return success_count + +def run_single_command(db_conn, email_address, optdict): + log = logging.getLogger("run_single_command") + + if optdict['setpw']: + set_user_password(db_conn, email_address, optdict['password']) + + elif optdict['create']: + result = api_interface.create_user( + {'firstname': optdict['given_name'], + 'lastname': optdict['surname'], + 'email': email_address, + 'group_id': optdict['group_id']}) + + elif optdict['set_email']: + result = api_interface.change_email( + {'email': optdict['new-email'], + 'old_email': email_address,}) + elif optdict['set_group']: + result = api_interface.set_user_group( + {'email': email_address, + 'group_id': optdict['group_id'],}) + elif optdict['disable']: + result = api_interface.deactivate_user( + {'email': email_address,}) + elif optdict['enable']: + result = api_interface.activate_user( + {'email': email_address,}) + else: + raise UsersActionError("Got an action that's not accounted for!") + +def get_user_list(): + """Fetches the list of users from SpiderOak, returns it as JSON.""" + return api_interface.fetch_users() + +def csvify_userlist(csvfile, users): + """Takes a JSON-ified list of users, and returns it as a CSV file.""" + user_list = json.loads(users) + dict_writer = csv.DictWriter(csvfile, + ['email', 'firstname', 'lastname', + 'group_id', 'share_id', 'bytes_stored', + 'enabled',], + extrasaction='ignore') + dict_writer.writeheader() + dict_writer.writerows(user_list) + + return None + + +def run_command(db_conn, optdict): + """Matches the options in optdict to a specific action we need to do. 
+ + :param optdict: options dictionary + + """ + + if 'csv_file' in optdict: + run_csv_file(db_conn, optdict.pop('csv_file'), optdict) + elif 'email_addr' in optdict: + run_single_command(db_conn, optdict.pop('email_addr'), optdict) + elif 'users_csv' in optdict or 'users_json' in optdict: + users = get_user_list() + if 'users_csv' in optdict: + return csvify_userlist(optdict['users_csv'], users) + + return users + diff --git a/lib/account_mgr/fix_url_truncation_bug.patch b/lib/account_mgr/fix_url_truncation_bug.patch new file mode 100644 index 0000000..bd27e9c --- /dev/null +++ b/lib/account_mgr/fix_url_truncation_bug.patch @@ -0,0 +1,148 @@ +From 25bcaa4f2cc6cbdae4a02b581e65589938377397 Mon Sep 17 00:00:00 2001 +From: Ben Zimmerman +Date: Wed, 4 Apr 2012 13:44:48 -0500 +Subject: [PATCH] Fixed url truncation bug + +--- + lib/account_mgr/accounts_api.py | 29 +++++++++++++++-------------- + lib/account_mgr/api_client.py | 2 +- + 2 files changed, 16 insertions(+), 15 deletions(-) + create mode 100644 lib/__init__.py + +diff --git a/lib/__init__.py b/lib/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/lib/account_mgr/accounts_api.py b/lib/account_mgr/accounts_api.py +index c1d9bca..b3880d2 100644 +--- a/lib/account_mgr/accounts_api.py ++++ b/lib/account_mgr/accounts_api.py +@@ -1,7 +1,7 @@ + import json + import urllib2 + +-from .api_client import ApiClient ++from api_client import ApiClient + + + class Api(object): +@@ -41,16 +41,16 @@ class Api(object): + ### Features + + def enterprise_features(self): +- return self.client.get_json('/partner/features') ++ return self.client.get_json('partner/features') + + ### Settings + + def enterprise_settings(self): +- return self.client.get_json('/partner/settings') ++ return self.client.get_json('partner/settings') + + def update_enterprise_settings(self, settings): + try: +- return self.client.post_json('/partner/settings', settings) ++ return self.client.post_json('partner/settings', settings) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() +@@ -59,12 +59,12 @@ class Api(object): + ### Groups + + def list_groups(self): +- return self.client.get_json('/groups/') ++ return self.client.get_json('groups/') + + def create_group(self, group_info): + try: + resp = self.client.post_json_raw_response( +- '/groups/', group_info) ++ 'groups/', group_info) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() +@@ -79,7 +79,7 @@ class Api(object): + + def get_group(self, group_id): + try: +- return self.client.get_json('/groups/%d' % (group_id,)) ++ return self.client.get_json('groups/%d' % (group_id,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() +@@ -87,7 +87,7 @@ class Api(object): + + def edit_group(self, group_id, group_info): + try: +- self.client.post_json('/groups/%d' % (group_id,), group_info) ++ self.client.post_json('groups/%d' % (group_id,), group_info) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() +@@ -105,7 +105,7 @@ class Api(object): + + def delete_group(self, group_id): + try: +- self.client.delete('/groups/%d' % (group_id,)) ++ self.client.delete('groups/%d' % (group_id,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() +@@ -114,11 +114,11 @@ class Api(object): + ### Users + + def list_users(self): +- return self.client.get_json('/users/') ++ return self.client.get_json('users/') + + def create_user(self, user_info): + try: +- return self.client.post_json('/users/', 
user_info) ++ return self.client.post_json('users/', user_info) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() +@@ -137,7 +137,7 @@ class Api(object): + def get_user(self, username_or_email): + try: + return self.client.get_json( +- '/users/%s' % (username_or_email,)) ++ 'users/%s' % (username_or_email,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() +@@ -146,7 +146,7 @@ class Api(object): + def edit_user(self, username_or_email, user_info): + try: + self.client.post_json( +- '/users/%s' % (username_or_email,), user_info) ++ 'users/%s' % (username_or_email,), user_info) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() +@@ -166,8 +166,9 @@ class Api(object): + + def delete_user(self, username_or_email): + try: +- self.client.delete('/users/%s' % (username_or_email,)) ++ self.client.delete('users/%s' % (username_or_email,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise ++ +diff --git a/lib/account_mgr/api_client.py b/lib/account_mgr/api_client.py +index 84b1551..4807dfb 100644 +--- a/lib/account_mgr/api_client.py ++++ b/lib/account_mgr/api_client.py +@@ -2,7 +2,7 @@ import json + import urllib2 + from urlparse import urljoin + +-from Pandora.https import VerifiedHTTPSHandler ++from lib.Pandora.https import VerifiedHTTPSHandler + + + _DEFAULT_HANDLERS = [ +-- +1.7.4.2 + diff --git a/lib/account_mgr/setup_token.py b/lib/account_mgr/setup_token.py new file mode 100644 index 0000000..57cc435 --- /dev/null +++ b/lib/account_mgr/setup_token.py @@ -0,0 +1,55 @@ +''' +setup_token.py + +(c) 2012 SpiderOak, Inc. + +Manages setup authentication tokens. +''' + +import base64 +import datetime +import os +import sys + +TOKEN_LENGTH = 30 # Tokens are 30-characters long. Note that we apply + # base64 encoding, so increasing this value may + # introduce padding characters. + +def new_token(): + return base64.urlsafe_b64encode(os.urandom(TOKEN_LENGTH)) + +def create_token(db_conn, expiry=None, no_devices_only=True, single_use_only=True): + """ + Creates an administrative setup token with the given options. + + :param db_conn: Open database connection. + :param expiry: Datetime object of token's expiry, or None for now. + :param no_devices_only: Restricts the token to use with accounts with no devices created. + :param single_use_only: Restricts the token for single uses with a given user. + + :return: The 30-character string token. 
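+        (Strictly: TOKEN_LENGTH is the number of random bytes fed to
+        base64, so the default of 30 bytes yields a 40-character string
+        with no padding.)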
+ """ + + token = new_token() + + create_token_query_base = "INSERT INTO admin_setup_tokens (token, no_devices_only, single_use_only" + + if expiry is None: + create_token_query = create_token_query_base + ") VALUES (%s, %s, %s)" + query_args = (token, no_devices_only, single_use_only, ) + else: + create_token_query = create_token_query_base + ", expiry) VALUES (%s, %s, %s, %s)" + query_args = (token, no_devices_only, single_use_only, expiry, ) + + cur = db_conn.cursor() + + try: + cur.execute(create_token_query, query_args) + except Exception, e: + db_conn.rollback() + raise e + else: + db_conn.commit() + + return token + diff --git a/lib/account_mgr/test.py b/lib/account_mgr/test.py new file mode 100644 index 0000000..624d0e3 --- /dev/null +++ b/lib/account_mgr/test.py @@ -0,0 +1,18 @@ +from accounts_api import Api +api = Api.create( + 'https://dhain.dev.spideroak.com:888/apis/accounts/v1/', + 'ben12', + 'bbb', +) +from accounts_api import Api +api = Api.create( + 'https://bdzim.dev.spideroak.com:888/apis/accounts/v1/', + 'ben12', + 'bbb', +) +from accounts_api import Api +api = Api.create( + 'https://bdzim.dev.spideroak.com/apis/accounts/v1/', + 'remote_test_admin', + 'w0mbat', +) diff --git a/lib/account_mgr/test/__init__.py b/lib/account_mgr/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/account_mgr/test/test_account_mgr.py b/lib/account_mgr/test/test_account_mgr.py new file mode 100644 index 0000000..bf4c781 --- /dev/null +++ b/lib/account_mgr/test/test_account_mgr.py @@ -0,0 +1,68 @@ +import unittest +from mock import Mock, MagicMock, sentinel, patch + +import account_mgr + +class TestAdminTokenAuth(unittest.TestCase): + def setUp(self): + account_mgr.get_cursor = MagicMock() + cur = MagicMock() + account_mgr.get_cursor.return_value = cur + self.cur = cur.__enter__() + account_mgr.get_api = MagicMock() + self.api = MagicMock() + account_mgr.get_api.return_value = self.api + + def test_no_restrictions(self): + self.cur.rowcount = 1 + self.cur.fetchone.return_value = (False, False, False) + self.api.list_devices.return_value = [] + self.assertTrue( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) + + def test_bad_credentials(self): + self.cur.rowcount = 0 + self.assertFalse( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) + + def test_no_devices_only(self): + self.cur.rowcount = 1 + self.cur.fetchone.return_value = (True, False, False) + self.api.list_devices.return_value = [1] + self.assertFalse( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) + self.cur.fetchone.return_value = (True, False, False) + self.api.list_devices.return_value = [] + self.assertTrue( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) + + def test_single_use_only(self): + self.cur.rowcount = 1 + self.cur.fetchone.return_value = (False, True, True) + self.api.list_devices.return_value = [] + self.assertFalse( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) + self.cur.fetchone.return_value = (False, True, False) + self.api.list_devices.return_value = [] + self.assertTrue( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) +if __name__ == '__main__': + unittest.main() diff --git a/lib/account_mgr/test/test_accounts_api.py b/lib/account_mgr/test/test_accounts_api.py new file mode 100644 index 0000000..542858c --- /dev/null +++ b/lib/account_mgr/test/test_accounts_api.py @@ -0,0 +1,345 @@ +import 
json +import unittest +from mock import patch, sentinel, Mock + +from account_mgr import accounts_api + + +class FakeHttpError(accounts_api.urllib2.HTTPError): + def __init__(self, code, body=''): + self.code = code + self.body = body + + def read(self): + return self.body + + +class TestAccountsApi(unittest.TestCase): + def setUp(self): + self.client = Mock() + self.api = accounts_api.Api(self.client) + + @patch.object(accounts_api, 'ApiClient') + def test_create(self, ApiClient): + api = accounts_api.Api.create( + sentinel.base, sentinel.username, sentinel.password) + self.assertIs(api.client, ApiClient.return_value) + ApiClient.assert_called_once_with( + sentinel.base, sentinel.username, sentinel.password) + + def test_ping(self): + self.assertIs( + self.api.ping(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('ping') + + ### Plans + + def test_list_plans(self): + self.assertIs( + self.api.list_plans(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('plans') + + ### Quota + + def test_quota(self): + self.assertIs( + self.api.quota(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('partner/quota') + + ### Features + + def test_enterprise_features(self): + self.assertIs( + self.api.enterprise_features(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('partner/features') + + ### Settings + + def test_enterprise_settings(self): + self.assertIs( + self.api.enterprise_settings(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('partner/settings') + + def test_update_enterprise_settings(self): + self.assertIs( + self.api.update_enterprise_settings(sentinel.settings), + self.client.post_json.return_value + ) + self.client.post_json.assert_called_once_with( + 'partner/settings', sentinel.settings) + + def test_update_enterprise_settings_bad_params(self): + self.client.post_json.side_effect = FakeHttpError(400) + with self.assertRaises(self.api.BadParams): + self.api.update_enterprise_settings(sentinel.settings) + + ### Groups + + def test_list_groups(self): + self.assertIs( + self.api.list_groups(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('groups/') + + def test_create_group(self): + response = self.client.post_json_raw_response.return_value + response.info.return_value = {'location': 'groups/42'} + self.assertEqual(self.api.create_group(sentinel.info), 42) + self.client.post_json_raw_response.assert_called_once_with( + 'groups/', sentinel.info) + + def test_create_group_bad_params(self): + self.client.post_json_raw_response.side_effect = \ + FakeHttpError(400) + with self.assertRaises(self.api.BadParams): + self.api.create_group(sentinel.info) + + def test_create_group_duplicate_name(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'The following fields conflict ' + 'with an existing record', + 'conflicts': ['name'] + })) + self.client.post_json_raw_response.side_effect = response + with self.assertRaises(self.api.DuplicateGroupName): + self.api.create_group(sentinel.info) + + def test_create_group_invalid_plan(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['plan_id'] + })) + self.client.post_json_raw_response.side_effect = response + with self.assertRaises(self.api.BadPlan): + self.api.create_group(sentinel.info) + + def test_get_group(self): + self.assertIs( + 
self.api.get_group(42), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('groups/42') + + def test_get_group_not_found(self): + self.client.get_json.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.get_group(42) + + def test_edit_group(self): + self.api.edit_group(42, sentinel.info) + self.client.post_json.assert_called_once_with( + 'groups/42', sentinel.info) + + def test_edit_group_bad_params(self): + self.client.post_json.side_effect = FakeHttpError(400) + with self.assertRaises(self.api.BadParams): + self.api.edit_group(42, sentinel.info) + + def test_edit_group_not_found(self): + self.client.post_json.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.edit_group(42, sentinel.info) + + def test_edit_group_duplicate_name(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'The following fields conflict ' + 'with an existing record', + 'conflicts': ['name'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.DuplicateGroupName): + self.api.edit_group(42, sentinel.info) + + def test_edit_group_invalid_plan(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['plan_id'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.BadPlan): + self.api.edit_group(42, sentinel.info) + + def test_edit_group_quota_exceeded(self): + self.client.post_json.side_effect = FakeHttpError(402) + with self.assertRaises(self.api.QuotaExceeded): + self.api.edit_group(42, sentinel.info) + + def test_delete_group(self): + self.api.delete_group(42) + self.client.delete.assert_called_once_with('groups/42') + + def test_delete_group_not_found(self): + self.client.delete.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.delete_group(42) + + ### Users + + def test_list_users(self): + self.assertIs( + self.api.list_users(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('users/') + + def test_create_user(self): + self.api.create_user(sentinel.info) + self.client.post_json.assert_called_once_with( + 'users/', sentinel.info) + + def test_create_user_bad_params(self): + self.client.post_json.side_effect = FakeHttpError(400) + with self.assertRaises(self.api.BadParams): + self.api.create_user(sentinel.info) + + def test_create_user_duplicate_username(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'The following fields conflict ' + 'with an existing record', + 'conflicts': ['username'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.DuplicateUsername): + self.api.create_user(sentinel.info) + + def test_create_user_duplicate_email(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'The following fields conflict ' + 'with an existing record', + 'conflicts': ['email'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.DuplicateEmail): + self.api.create_user(sentinel.info) + + def test_create_user_invalid_group(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['group_id'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.BadGroup): + self.api.create_user(sentinel.info) + + def test_create_user_invalid_plan(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid 
values for the following fields', + 'conflicts': ['plan_id'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.BadPlan): + self.api.create_user(sentinel.info) + + def test_get_user(self): + self.assertIs( + self.api.get_user('username'), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('users/username') + + def test_get_user_not_found(self): + self.client.get_json.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.get_user('username') + + def test_list_devices(self): + self.assertIs( + self.api.list_devices('username'), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with( + 'users/username/devices') + + def test_list_devices_user_not_found(self): + self.client.get_json.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.list_devices('username') + + def test_edit_user(self): + self.api.edit_user('username', sentinel.info) + self.client.post_json.assert_called_once_with( + 'users/username', sentinel.info) + + def test_edit_user_bad_params(self): + self.client.post_json.side_effect = FakeHttpError(400) + with self.assertRaises(self.api.BadParams): + self.api.edit_user('username', sentinel.info) + + def test_edit_user_not_found(self): + self.client.post_json.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.edit_user('username', sentinel.info) + + def test_edit_user_duplicate_email(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'The following fields conflict ' + 'with an existing record', + 'conflicts': ['email'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.DuplicateEmail): + self.api.edit_user('username', sentinel.info) + + def test_edit_user_invalid_group(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['group_id'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.BadGroup): + self.api.edit_user('username', sentinel.info) + + def test_edit_user_invalid_plan(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['plan_id'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.BadPlan): + self.api.edit_user('username', sentinel.info) + + def test_edit_user_quota_exceeded(self): + self.client.post_json.side_effect = FakeHttpError(402) + with self.assertRaises(self.api.QuotaExceeded): + self.api.edit_user('username', sentinel.info) + + def test_delete_user(self): + self.api.delete_user('username') + self.client.delete.assert_called_once_with('users/username') + + def test_delete_user_not_found(self): + self.client.delete.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.delete_user('username') + + def test_send_activation_email(self): + self.api.send_activation_email('username') + self.client.post.assert_called_once_with( + 'users/username?action=sendactivationemail', + '' + ) + + def test_send_activation_email_not_found(self): + self.client.post.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.send_activation_email('username') + + def test_send_activation_email_not_sent(self): + self.client.post.side_effect = FakeHttpError(409) + with self.assertRaises(self.api.EmailNotSent): + self.api.send_activation_email('username') + + +if 
__name__ == '__main__': + unittest.main() diff --git a/lib/account_mgr/test/test_api_client.py b/lib/account_mgr/test/test_api_client.py new file mode 100644 index 0000000..f5d25c8 --- /dev/null +++ b/lib/account_mgr/test/test_api_client.py @@ -0,0 +1,60 @@ +import json +import unittest +from mock import sentinel, patch + +from account_mgr import api_client + + +class TestApiClient(unittest.TestCase): + @patch.object(api_client, 'VerifiedHTTPSHandler') + def setUp(self, httpshandler): + self.httpshandler = httpshandler.return_value + self.response = self.httpshandler.https_open.return_value + self.client = api_client.ApiClient( + 'https://example.com', + sentinel.api_username, + sentinel.api_password + ) + + def test_verifies_ssl_certificate(self): + self.client.open('/') + self.assertEqual(self.httpshandler.https_open.call_count, 1) + + @patch.object(api_client, 'RequestWithMethod') + def test_logs_in_using_provided_credentials(self, req): + self.client.open('/') + req.assert_called_once_with( + self.client.base + '/', + None, + {'authorization': ( + 'Basic PFNlbnRpbmVsT2JqZWN0ICJhcGlfdXNlcm5hbWUiPjo8' + 'U2VudGluZWxPYmplY3QgImFwaV9wYXNzd29yZCI+' + )} + ) + + def test_get_json(self): + data = {'foo': 'bar'} + self.response.read.return_value = json.dumps(data) + self.assertEqual(self.client.get_json('/'), data) + + def test_post_json(self): + postdata = {'foo': 'bar'} + respdata = {'baz': 'qux'} + self.response.read.return_value = json.dumps(respdata) + self.assertEqual(self.client.post_json('/', postdata), respdata) + ((req,), _) = self.httpshandler.https_open.call_args + self.assertEqual(json.loads(req.data), postdata) + + def test_delete(self): + self.client.delete('/') + ((req,), _) = self.httpshandler.https_open.call_args + self.assertEqual(req.get_method(), 'DELETE') + + def test_raises_HTTPError_on_error_responses(self): + self.response.code = 409 + with self.assertRaises(api_client.urllib2.HTTPError): + self.client.open('/') + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/account_mgr/test/test_api_interface.py b/lib/account_mgr/test/test_api_interface.py new file mode 100644 index 0000000..39cb85c --- /dev/null +++ b/lib/account_mgr/test/test_api_interface.py @@ -0,0 +1,365 @@ +import unittest +from mock import Mock, MagicMock, sentinel, patch + +import json + +from directory_agent import api_interface + +class MockException(Exception): + pass + +class TestRunApiCall(unittest.TestCase): + def setUp(self): + self.url_patcher = patch("urllib.urlopen") + self.urlopen = self.url_patcher.start() + + urlfile = MagicMock(spec=file) + self.test_return_data = { "testkey1": "testvalue1", + "testkey2": 2 + } + urlfile.read.return_value = json.dumps(self.test_return_data) + self.urlopen.return_value = urlfile + + def tearDown(self): + self.url_patcher.stop() + + def test_successful_read_nodata(self): + self.assertEqual(api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action)), + self.test_return_data) + + args, _ = self.urlopen.call_args + assert len(args) == 1 + + def test_successful_read_withdata(self): + test_send_data = {"testsend1": "testvalues1", + "testsend2": "testvalues2", + } + self.assertEqual(api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action), + test_send_data), + self.test_return_data) + args, _ = self.urlopen.call_args + assert len(args) == 2 + self.assertEqual(args[1], json.dumps(test_send_data)) + + def test_blows_up_with_bad_json_returned(self): + urlfile = MagicMock(spec=file) + urlfile.read.return_value = 
"DEADBEEF" + self.urlopen.return_value = urlfile + + with self.assertRaises(ValueError): + api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action)) + + def test_blows_up_with_bad_data_given(self): + with self.assertRaises(TypeError): + api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action), + sentinel.bad_obj) + + def test_gets_url_read_exceptions(self): + def side_effect(): + raise Exception("DEADBEEF") + + urlfile = MagicMock(spec=file) + urlfile.read.side_effect = side_effect + self.urlopen.return_value = urlfile + + with self.assertRaises(Exception) as cm: + api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action)) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + def test_gets_url_open_exceptions(self): + self.urlopen.side_effect = Exception("DEADBEEF") + + with self.assertRaises(Exception) as cm: + api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action)) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + +class TestDeactivateUsers(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + self.test_user = {'avatar_id': 1001} + + def tearDown(self): + self.run_api_patcher.stop() + + def test_deactivate_succeeds(self): + ret_val = {"success" : True} + self.run_api_call.return_value = ret_val + + self.assertEqual(api_interface.deactivate_user(sentinel.api_root, + self.test_user), + ret_val) + + + def test_deactivate_fails(self): + ret_val = {"success" : False, + "reason" : "DEADBEEF", + } + self.run_api_call.return_value = ret_val + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + retval = api_interface.deactivate_user(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + + def test_deactivate_connection_problem(self): + self.run_api_call.side_effect = MockException("DEADBEEF") + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + retval = api_interface.deactivate_user(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + +class TestPurgeUser(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + self.test_user = {'avatar_id': 1001} + + def tearDown(self): + self.run_api_patcher.stop() + + def test_purge_succeeds(self): + ret_val = {"success" : True} + self.run_api_call.return_value = ret_val + + self.assertEqual(api_interface.purge_user(sentinel.api_root, + self.test_user), + ret_val) + + + def test_purge_fails(self): + ret_val = {"success" : False, + "reason" : "DEADBEEF", + } + self.run_api_call.return_value = ret_val + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + retval = api_interface.purge_user(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + + def test_purge_connection_problem(self): + self.run_api_call.side_effect = MockException("DEADBEEF") + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + retval = api_interface.purge_user(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + +class TestFetchUsers(unittest.TestCase): + def setUp(self): + 
self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + + def tearDown(self): + self.run_api_patcher.stop() + + def test_fetch_list_empty_succeeds(self): + ret_val = [] + + self.run_api_call.return_value = ret_val + + result = api_interface.fetch_users(Mock()) + self.assertEqual(result, ret_val) + + def test_fetch_list_succeeds(self): + ret_val = [{'avatar_id': sentinel.avatar_id1, + 'username' : sentinel.username1, + }, + {'avatar_id': sentinel.avatar_id2, + 'username' : sentinel.username2 + }] + + self.run_api_call.return_value = ret_val + + result = api_interface.fetch_users(Mock()) + self.assertEqual(result, ret_val) + + def test_fetch_list_fails(self): + self.run_api_call.side_effect = Exception("DEADBEEF") + + with self.assertRaises(api_interface.FetchInformationFailed) as cm: + api_interface.fetch_users(Mock()) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + +class TestFetchPlans(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + + def tearDown(self): + self.run_api_patcher.stop() + + def test_fetch_plans_empty_fails(self): + ret_val = [] + + self.run_api_call.return_value = ret_val + + with self.assertRaises(api_interface.FetchInformationFailed) as cm: + result = api_interface.fetch_plans(Mock()) + + the_exception = cm.exception + self.assertEqual(str(the_exception), api_interface.NO_PLANS) + + def test_fetch_list_succeeds(self): + ret_val = [{'group_id': sentinel.group_id1, + 'storage_gigs' : sentinel.storage_gigs1, + }, + {'group_id': sentinel.group_id2, + 'storage_gigs' : sentinel.storage_gigs2 + }] + + self.run_api_call.return_value = ret_val + + result = api_interface.fetch_plans(Mock()) + self.assertEqual(result, ret_val) + + def test_fetch_list_fails(self): + self.run_api_call.side_effect = Exception("DEADBEEF") + + with self.assertRaises(api_interface.FetchInformationFailed) as cm: + api_interface.fetch_plans(Mock()) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + +class TestSetUserPlan(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + self.test_user = {'avatar_id': 1001, + 'group_id' : sentinel.group_id} + + def tearDown(self): + self.run_api_patcher.stop() + + def test_planset_success_nopromo(self): + ret_val = {'success': True} + self.run_api_call.return_value = ret_val + + self.assertEqual(api_interface.set_user_plan(sentinel.api_root, + self.test_user), + ret_val) + + args, _ = self.run_api_call.call_args + assert len(args) == 3 + self.assertIs(args[0], sentinel.api_root) + self.assertIs(args[2]['group_id'], sentinel.group_id) + + def test_planset_success_promo(self): + ret_val = {'success': True} + self.run_api_call.return_value = ret_val + + self.assertEqual(api_interface.set_user_plan(sentinel.api_root, + self.test_user, + sentinel.promo_code), + ret_val) + + args, _ = self.run_api_call.call_args + assert len(args) == 3 + self.assertIs(args[2]['promo_code'], sentinel.promo_code) + + def test_planset_failure(self): + ret_val = {'success' : False, + 'reason' : "DEADBEEF", + } + self.run_api_call.return_value = ret_val + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + res = api_interface.set_user_plan(sentinel.api_root, + self.test_user) + + the_exception = 
cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + def test_planset_exception(self): + self.run_api_call.side_effect = MockException("DEADBEEF") + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + res = api_interface.set_user_plan(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + +class TestCreateUser(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + + def tearDown(self): + self.run_api_patcher.stop() + + def test_create_succeeds(self): + self.run_api_call.return_value = {'success': True, + 'server_generated_username': sentinel.testuser} + + testuser = {'email': sentinel.email, + 'firstname': sentinel.givenName, + 'lastname': sentinel.surname, + } + result = api_interface.create_user(sentinel.api_root,testuser) + + self.assertEqual(result['server_generated_username'], sentinel.testuser) + + args, _ = self.run_api_call.call_args + + self.assertIs(sentinel.api_root, args[0]) + assert sentinel.email in args[2].values() + assert sentinel.givenName in args[2].values() + assert sentinel.surname in args[2].values() + + def test_create_run_api_call_exception(self): + self.run_api_call.side_effect = Exception("DEADBEEF") + + testuser = {'email': sentinel.email, + 'firstname': sentinel.givenName, + 'lastname': sentinel.surname, + } + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + api_interface.create_user(sentinel.api_root,testuser) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + def test_create_user_add_failed(self): + self.run_api_call.return_value = {'success' : False, + 'reason' : "Mocked it up to fail, duh!", + } + + testuser = {'email': sentinel.email, + 'firstname': sentinel.givenName, + 'lastname': sentinel.surname, + } + with self.assertRaises(api_interface.ManipulateUserFailed): + api_interface.create_user(sentinel.api_root, + testuser) + +if __name__ == "__main__": + unittest.main() diff --git a/lib/account_mgr/test/test_group_manager.py b/lib/account_mgr/test/test_group_manager.py new file mode 100644 index 0000000..abfc35c --- /dev/null +++ b/lib/account_mgr/test/test_group_manager.py @@ -0,0 +1,336 @@ +import unittest +from mock import Mock, mocksignature, sentinel, patch + +import copy + +from directory_agent import group_manager, api_interface + +class TestApiCreateUsers(unittest.TestCase): + def setUp(self): + self.api_iface_create_patcher = patch("directory_agent.api_interface.create_user") + self.api_iface_create = self.api_iface_create_patcher.start() + + self.api_iface_plan_patcher = patch("directory_agent.api_interface.set_user_plan") + self.api_iface_setplan = self.api_iface_plan_patcher.start() + + def tearDown(self): + self.api_iface_create_patcher.stop() + self.api_iface_plan_patcher.stop() + + def test_create_no_users(self): + config = Mock() + users = [] + + self.assertEqual(group_manager._api_create_users(config, users), + []) + + def test_create_one_user(self): + config = Mock() + users = [{'uniqueid' : sentinel.uniqueid, + 'email' : sentinel.email, + 'firstname': sentinel.firstname, + 'lastname' : sentinel.lastname, + 'plan_id' : sentinel.plan_id, + }] + + server_ok = {'success' : True, + 'avatar_id' : sentinel.avatar_id, + 'account_id': sentinel.account_id, + 'server_assigned_password': sentinel.server_assigned_password, + } + self.api_iface_create.return_value = server_ok + + 
results = group_manager._api_create_users(config, users) + + self.assertEqual(len(results), 1) + self.assertIs(results[0]['uniqueid'], sentinel.uniqueid) + self.assertIs(results[0]['avatar_id'], sentinel.avatar_id) + + def test_create_many_users(self): + config = Mock() + + users = [{'uniqueid' : sentinel.uniqueid1, + 'email' : sentinel.email1, + 'firstname': sentinel.firstname1, + 'lastname' : sentinel.lastname1, + 'plan_id' : sentinel.plan_id1, + }, + {'uniqueid' : sentinel.uniqueid2, + 'email' : sentinel.email2, + 'firstname': sentinel.firstname2, + 'lastname' : sentinel.lastname2, + 'plan_id' : sentinel.plan_id2, + }, + {'uniqueid' : sentinel.uniqueid3, + 'email' : sentinel.email3, + 'firstname': sentinel.firstname3, + 'lastname' : sentinel.lastname3, + 'plan_id' : sentinel.plan_id3, + },] + + returns = [{'success' : True, + 'avatar_id' : sentinel.avatar_id1, + 'account_id': sentinel.account_id1, + 'server_assigned_password' : sentinel.s_a_p1, + }, + {'success' : True, + 'avatar_id' : sentinel.avatar_id2, + 'account_id': sentinel.account_id2, + 'server_assigned_password' : sentinel.s_a_p2, + }, + {'success' : True, + 'avatar_id' : sentinel.avatar_id3, + 'account_id': sentinel.account_id3, + 'server_assigned_password' : sentinel.s_a_p3, + },] + + def side_effect(*args): + return returns.pop(0) + self.api_iface_create.side_effect = side_effect + + results = group_manager._api_create_users(config, users) + + self.assertEqual(len(results), 3) + + self.assertIs(results[0]['uniqueid'], sentinel.uniqueid1) + self.assertIs(results[0]['avatar_id'], sentinel.avatar_id1) + + self.assertIs(results[1]['uniqueid'], sentinel.uniqueid2) + self.assertIs(results[1]['avatar_id'], sentinel.avatar_id2) + + self.assertIs(results[2]['uniqueid'], sentinel.uniqueid3) + self.assertIs(results[2]['avatar_id'], sentinel.avatar_id3) + +class TestRunGeneric(unittest.TestCase): + def setUp(self): + def config_get_args(str1, str2): + if str2 == 'api_root': + return sentinel.api_root + else: + return sentinel.promo_code + + self.config = Mock() + self.config.get = config_get_args + + + def testfun_nopromo(api_root, user): + pass + + def testfun_promo(api_root, user, promo_code): + pass + + self.testfun_nopromo_mock = Mock() + self.testfun_nopromo = mocksignature(testfun_nopromo, + self.testfun_nopromo_mock) + self.testfun_nopromo.return_value = sentinel.testfun_nopromo + + self.testfun_promo_mock = Mock() + self.testfun_promo = mocksignature(testfun_promo, + self.testfun_promo_mock) + self.testfun_promo.return_value = sentinel.testfun_promo + + def test_works_nousers_nopromo(self): + results = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + []) + self.assertEqual(len(results), 0) + + def test_exception_nousers_nopromo(self): + self.testfun_nopromo.side_effect = api_interface.ApiActionFailedError + results = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + []) + self.assertEqual(len(results), 0) + + + def test_works_nousers_promo(self): + results = group_manager._api_run_generic(self.config, + self.testfun_promo, + []) + self.assertEqual(len(results), 0) + + def test_works_oneuser_nopromo(self): + user = {'field1': sentinel.field1, 'field2': sentinel.field2} + + results = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + [user]) + + self.assertEqual(len(results), 1) + self.assertIs(results[0], user) + args, _ = self.testfun_nopromo_mock.call_args + assert user in args + assert sentinel.promo_code not in args + + + def test_exception_oneuser_nopromo(self): + 
user = {'field1': sentinel.field1, 'field2': sentinel.field2} + self.testfun_nopromo_mock.side_effect = api_interface.ApiActionFailedError + with self.assertRaises(group_manager.BailApiCall) as cm: + results = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + [user]) + e = cm.exception + result_list, = e.args + self.assertEqual(len(result_list), 0) + + + def test_works_oneuser_promo(self): + user = {'field1': sentinel.field1, 'field2': sentinel.field2} + + results = group_manager._api_run_generic(self.config, + self.testfun_promo, + [user]) + + self.assertEqual(len(results), 1) + self.assertIs(results[0], user) + args, _ = self.testfun_promo_mock.call_args + assert user in args + assert sentinel.promo_code in args + + def test_works_multiuser_nopromo(self): + users = [{'field1': sentinel.field1_1, 'field2': sentinel.field2_1,}, + {'field1': sentinel.field1_2, 'field2': sentinel.field2_2,}, + {'field1': sentinel.field1_3, 'field2': sentinel.field2_3,},] + + results = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + users) + + self.assertEqual(len(results), len(users)) + for i, d in enumerate(results): + self.assertIs(d['field1'], users[i]['field1']) + self.assertIs(d['field2'], users[i]['field2']) + + def test_exception_multiuser_nopromo(self): + users = [{'field1': sentinel.field1_1, 'field2': sentinel.field2_1,}, + {'field1': sentinel.field1_2, 'field2': sentinel.field2_2,}, + {'field1': sentinel.field1_3, 'field2': sentinel.field2_3,},] + + poplist = copy.copy(users) + + def side_effect(*args, **kwargs): + if len(poplist) > 1: + return poplist.pop(0) + else: + raise api_interface.ApiActionFailedError() + + self.testfun_nopromo_mock.side_effect = side_effect + + with self.assertRaises(group_manager.BailApiCall) as cm: + _ = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + users) + + e = cm.exception + results, = e.args + self.assertEqual(len(results), len(users) - 1) + for i, d in enumerate(results): + self.assertIs(d['field1'], users[i]['field1']) + self.assertIs(d['field2'], users[i]['field2']) + + def test_works_multiuser_promo(self): + users = [{'field1': sentinel.field1_1, 'field2': sentinel.field2_1,}, + {'field1': sentinel.field1_2, 'field2': sentinel.field2_2,}, + {'field1': sentinel.field1_3, 'field2': sentinel.field2_3,},] + + results = group_manager._api_run_generic(self.config, + self.testfun_promo, + users) + + self.assertEqual(len(results), len(users)) + for i, d in enumerate(results): + self.assertIs(d['field1'], users[i]['field1']) + self.assertIs(d['field2'], users[i]['field2']) + + +class TestProcessQuery(unittest.TestCase): + def setUp(self): + self.db_conn = Mock() + self.query = Mock() + self.cur = Mock() + + self.db_conn.cursor.return_value = self.cur + + self.extras = ['field1', 'field2'] + + + def test_works_norows_noextras(self): + self.cur.fetchall.return_value = list() + + results = group_manager._process_query(self.db_conn, self.query) + + self.assertEqual(len(results), 0) + + def test_works_onerow_noextras(self): + self.cur.fetchall.return_value = [[sentinel.uniqueid]] + + results = group_manager._process_query(self.db_conn, self.query) + + self.assertEqual(len(results), 1) + self.assertEqual(len(results[0].keys()), 1) + self.assertEqual(results[0]['uniqueid'], sentinel.uniqueid) + + def test_works_multirows_noextras(self): + id_array = [[sentinel.uniqueid1], + [sentinel.uniqueid2], + [sentinel.uniqueid3]] + self.cur.fetchall.return_value = id_array + + results = 
group_manager._process_query(self.db_conn, self.query) + + self.assertEqual(len(results), 3) + self.assertEqual(len(results[0].keys()), 1) + + for i in xrange(0,3): + self.assertIs(results[i]['uniqueid'], id_array[i][0]) + + def test_works_norows_extras(self): + self.cur.fetchall.return_value = list() + + results = group_manager._process_query(self.db_conn, self.query, self.extras) + + self.assertEqual(len(results), 0) + + def test_works_onerow_extras(self): + indiv_array = [[sentinel.uniqueid, sentinel.field1, sentinel.field2]] + self.cur.fetchall.return_value = indiv_array + + results = group_manager._process_query(self.db_conn, self.query, self.extras) + self.assertEqual(len(results), 1) + self.assertEqual(len(results[0]), 3) + + self.assertIs(results[0]['field1'], sentinel.field1) + self.assertIs(results[0]['field2'], sentinel.field2) + + def test_works_multirows_extras(self): + test_array = [[sentinel.uniqueid1, sentinel.field1_1, sentinel.field2_1], + [sentinel.uniqueid2, sentinel.field1_2, sentinel.field2_2], + [sentinel.uniqueid3, sentinel.field1_3, sentinel.field2_3]] + + self.cur.fetchall.return_value = test_array + + results = group_manager._process_query(self.db_conn, self.query, self.extras) + + self.assertEqual(len(results),3) + self.assertEqual(len(results[0]), 3) + + for i in xrange(0,3): + self.assertIs(results[i]['uniqueid'], test_array[i][0]) + self.assertIs(results[i]['field1'], test_array[i][1]) + self.assertIs(results[i]['field2'], test_array[i][2]) + + def test_blows_up_with_bad_extras(self): + indiv_array = [[sentinel.uniqueid, sentinel.field1, sentinel.field2]] + self.cur.fetchall.return_value = indiv_array + + self.extras.append('field3') + + with self.assertRaises(IndexError): + results = group_manager._process_query(self.db_conn, + self.query, + self.extras) + + +if __name__ == "__main__": + unittest.main() diff --git a/lib/account_mgr/test/test_ldap_reader.py b/lib/account_mgr/test/test_ldap_reader.py new file mode 100644 index 0000000..fee7b6c --- /dev/null +++ b/lib/account_mgr/test/test_ldap_reader.py @@ -0,0 +1,145 @@ +import unittest +from mock import Mock, MagicMock, sentinel, patch +import copy + +import ldap + +from directory_agent import ldap_reader + +class TestCollectGroups(unittest.TestCase): + def setUp(self): + self.test_groups = [{'type':"dn", + 'ldap_id': "cn=test1,dn=testdomain,dn=com", + 'group_id': sentinel.group_id1}, + {'type':"dn", + 'ldap_id': "cn=test2,dn=testdomain,dn=com", + 'group_id': sentinel.group_id2}, + {'type':"dn", + 'ldap_id': "cn=test3,dn=testdomain,dn=com", + 'group_id': sentinel.group_id3}, + ] + + def test_returns_empty_groups(self): + conn = Mock() + conn.search_s.return_value = [] + config = {'groups': self.test_groups, + 'dir_guid_source': 'user_guid_source', + 'dir_username_source': 'user_source', + 'dir_fname_source': 'fname_source', + 'dir_lname_source': 'lname_source',} + self.assertEqual(len(ldap_reader.collect_groups((conn, Mock()), config)), + 0) + + @patch('directory_agent.ldap_reader.get_group') + def test_returns_populated_groups(self, get_group): + conn = Mock() + + # The following tom-foolery with returns and side_effect is to make sure + # we don't have to bother with setting up get_group correctly, and so + # we assume it works correctly to mock it here. 
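+        # Each call to the patched get_group pops the next canned list, so
+        # collect_groups should hand back all six users flattened in order.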
+ returns = [[sentinel.testuser1, sentinel.testuser2], + [sentinel.testuser3, sentinel.testuser4], + [sentinel.testuser5, sentinel.testuser6]] + + expected = [sentinel.testuser1, sentinel.testuser2, + sentinel.testuser3, sentinel.testuser4, + sentinel.testuser5, sentinel.testuser6, + ] + + def side_effect(*args): + result = returns.pop(0) + return result + + config = {'groups': self.test_groups, + 'dir_guid_source': 'user_guid_source', + 'dir_username_source': 'user_source', + 'dir_fname_source': 'fname_source', + 'dir_lname_source': 'lname_source',} + + groups = [("dn", Mock(),), ("dn", Mock(),), ("dn", Mock(),)] + + get_group.side_effect = side_effect + + self.assertEqual(ldap_reader.collect_groups(conn, config), + expected) + +class TestCheckGetGroup(unittest.TestCase): + def test_rejects_bad_group(self): + conn = Mock() + config = MagicMock() + test_group = {'type':"DEADBEEF", + 'ldap_id': "cn=test,dn=testdomain,dn=com", + 'group_id': sentinel.group_id} + with self.assertRaises(ldap_reader.InvalidGroupConfiguration): + ldap_reader.get_group(conn, config, test_group) + + def test_uses_base_dn(self): + conn = Mock() + conn.search_s.return_value = [] + config = MagicMock() + test_group = {'type':"dn", + 'ldap_id': "cn=test,dn=testdomain,dn=com", + 'group_id': sentinel.group_id} + ldap_reader.get_group((conn, sentinel.base_dn,), config, test_group) + args, _ = conn.search_s.call_args + self.assertIs(args[0], sentinel.base_dn) + + def test_returns_empty_group(self): + conn = Mock() + conn.search_s.return_value = [] + test_group = {'type':"dn", + 'ldap_id': "cn=test,dn=testdomain,dn=com", + 'group_id': sentinel.group_id} + self.assertEqual( + len(ldap_reader.get_group((conn, Mock(),), MagicMock(), test_group)), + 0) + + def test_returns_group_users(self): + conn = Mock() + config = { + 'dir_guid_source': 'user_guid_source', + 'dir_username_source': 'user_source', + 'dir_fname_source': 'fname_source', + 'dir_lname_source': 'lname_source', + } + ldap_results = [(Mock(), {config['dir_guid_source'] : [sentinel.guid1], + config['dir_username_source']: [sentinel.testuser1], + config['dir_fname_source'] : [sentinel.testfname1], + config['dir_lname_source'] : [sentinel.testlname1], + } + ), + (Mock(), {config['dir_guid_source'] : [sentinel.guid2], + config['dir_username_source']: [sentinel.testuser2], + config['dir_fname_source'] : [sentinel.testfname2], + config['dir_lname_source'] : [sentinel.testlname2], + } + ), + (None, [Mock()]), + (None, [Mock()]), + ] + + test_group = {'type':"dn", + 'ldap_id': "cn=test,dn=testdomain,dn=com", + 'group_id': sentinel.group_id} + + conn.search_s.return_value = ldap_results + self.assertEqual(ldap_reader.get_group((conn, Mock(),), + config, + test_group), + [{'email' : sentinel.testuser1, + 'firstname' : sentinel.testfname1, + 'lastname' : sentinel.testlname1, + 'uniqueid' : sentinel.guid1, + 'group_id' : sentinel.group_id, + }, + {'email' : sentinel.testuser2, + 'firstname' : sentinel.testfname2, + 'lastname' : sentinel.testlname2, + 'uniqueid' : sentinel.guid2, + 'group_id' : sentinel.group_id, + } + ]) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/account_mgr/user_source/__init__.py b/lib/account_mgr/user_source/__init__.py new file mode 100644 index 0000000..2cf9d3a --- /dev/null +++ b/lib/account_mgr/user_source/__init__.py @@ -0,0 +1,7 @@ +""" +__init__.py + +(c) 2011 SpiderOak, Inc. + +Provides the bits for working with LDAP. 
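+
+By convention each user_source module (ldap_source, local_source,
+radius_source) exposes can_auth(config, username, password); ldap_source
+additionally provides the group collection that group_manager consumes.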
+""" diff --git a/lib/account_mgr/user_source/group_manager.py b/lib/account_mgr/user_source/group_manager.py new file mode 100644 index 0000000..679a21d --- /dev/null +++ b/lib/account_mgr/user_source/group_manager.py @@ -0,0 +1,178 @@ +""" +group_manager.py + +(c) 2011 SpiderOak, Inc. + +Provides the group management decision making; given sets of users +from both LDAP and SpiderOak, determines the changes required to make +SpiderOak fit the LDAP groups. +""" + +import logging +import psycopg2 + +from account_mgr import api_interface +from account_mgr.user_source import ldap_source +from account_mgr import account_runner + +_USERS_TO_CREATE_QUERY = ''' +SELECT +l.uniqueid, l.email, l.givenname, l.surname, l.group_id +FROM ldap_users l +LEFT OUTER JOIN users u ON l.uniqueid = u.uniqueid +WHERE u.uniqueid IS NULL; +''' + +_USERS_TO_ENABLE_QUERY = ''' +SELECT +l.uniqueid, u.avatar_id +FROM ldap_users l +LEFT OUTER JOIN users u ON l.uniqueid = u.uniqueid +WHERE u.enabled IS FALSE; +''' + +_USERS_TO_DISABLE_QUERY = ''' +SELECT +u.uniqueid, u.avatar_id +FROM users u +LEFT OUTER JOIN ldap_users l ON u.uniqueid = l.uniqueid +WHERE l.uniqueid IS NULL; +''' + +_USERS_TO_PLANCHANGE_QUERY = ''' +SELECT +l.uniqueid, u.avatar_id, l.group_id +FROM ldap_users l +LEFT OUTER JOIN users u ON l.uniqueid = u.uniqueid +WHERE l.group_id != u.group_id; +''' + +_USERS_TO_EMAILCHANGE_QUERY = ''' +SELECT +l.uniqueid, u.avatar_id, l.email +FROM ldap_users l +LEFT OUTER JOIN users u ON l.uniqueid = u.uniqueid +WHERE l.email != u.email; +''' + + +def _process_query(db_conn, query, extras=None): + log = logging.getLogger('_process_query') + + if extras is None: + extras = [] + + cur = db_conn.cursor() + cur.execute(query) + results = list() + for row in cur.fetchall(): + userinfo = {'uniqueid' : row[0]} + for index, extra in enumerate(extras): + userinfo[extra] = row[index+1] + + if 'avatar_id' in extras: + log.debug('Query processing avatar %d' % (userinfo['avatar_id'],)) + else: + log.debug('Query processing avatar %s' % (userinfo['email'],)) + + results.append(userinfo) + + return results + +def _calculate_changes_against_db(db_conn, users): + """ + Calculates the changes necessary by comparing our groups from LDAP to the DB. + """ + log = logging.getLogger('calculate_changes') + api_actions = dict() + + cur = db_conn.cursor() + cur.execute("CREATE TEMPORARY TABLE ldap_users (LIKE users) ON COMMIT DROP;") + cur.execute("ALTER TABLE ldap_users DROP COLUMN avatar_id;") + cur.execute("ALTER TABLE ldap_users DROP COLUMN enabled;") + cur.executemany("INSERT INTO ldap_users (uniqueid, email, givenname, surname, group_id) VALUES (%(uniqueid)s, %(email)s, %(firstname)s, %(lastname)s, %(group_id)s);", + users) + cur.close() + + # Users to create. 
+ log.debug('Creating users:') + api_actions['create'] = _process_query(db_conn, _USERS_TO_CREATE_QUERY, + ['email', 'firstname', + 'lastname', 'group_id']) + log.debug('Enabling users:') + api_actions['enable'] = _process_query(db_conn, _USERS_TO_ENABLE_QUERY, + ['avatar_id']) + log.debug('Disabling users:') + api_actions['disable'] = _process_query(db_conn, _USERS_TO_DISABLE_QUERY, + ['avatar_id']) + log.debug('Group change:') + api_actions['group'] = _process_query(db_conn, _USERS_TO_PLANCHANGE_QUERY, + ['avatar_id', 'group_id']) + log.debug('Email change:') + api_actions['email'] = _process_query(db_conn, _USERS_TO_EMAILCHANGE_QUERY, + ['avatar_id', 'email']) + + return api_actions + + +def run_group_management(config, db_conn): + """ + Resolves differences between the LDAP and our idea of the SpiderOak user DB. + + :param config: configuration dict. Should be the standard OpenManage setup. + :param user_source: UserSource object to pull users from. + :param db_conn: DB connection object + """ + log = logging.getLogger('run_group_management') + + # First step, collect the users from the LDAP groups. + ldap_conn = ldap_source.OMLDAPConnection(config["dir_uri"], config["dir_base_dn"], config["dir_user"], config["dir_password"]) + + ldap_users = ldap_source.collect_groups(ldap_conn, config) + change_groups = _calculate_changes_against_db(db_conn, ldap_users) + + runner = account_runner.AccountRunner(config, db_conn) + runner.runall(change_groups) + db_conn.commit() + + +def run_db_repair(config, db_conn): + """Repairs the current user DB and billing API versus LDAP.""" + # TODO: figure out what to do when email addresses *don't* match. + + # Collect the users from LDAP, and insert into a temporary table. + ldap_conn = ldap_source.OMLDAPConnection(config["dir_uri"], + config["dir_base_dn"], + config["dir_user"], + config["dir_password"]) + + ldap_users = ldap_source.collect_groups(ldap_conn, config) + cur = db_conn.cursor() + cur.execute("CREATE TEMPORARY TABLE ldap_users (LIKE users) ON COMMIT DROP;") + cur.execute("ALTER TABLE ldap_users DROP COLUMN avatar_id;") + cur.execute("ALTER TABLE ldap_users DROP COLUMN enabled;") + cur.executemany("INSERT INTO ldap_users (uniqueid, email, givenname, surname, group_id) VALUES (%(uniqueid)s, %(email)s, %(firstname)s, %(lastname)s, %(group_id)s);", + ldap_users) + + # Collect the users from the SpiderOak BillingAPI, and insert into + # a temporary table. + spider_users = api_interface.fetch_users() + cur = db_conn.cursor() + cur.execute("CREATE TEMPORARY TABLE spider_users (LIKE users) ON COMMIT DROP;") + cur.execute("ALTER TABLE spider_users DROP COLUMN uniqueid;") + cur.executemany("INSERT INTO spider_users " + "(avatar_id, email, givenname, surname, group_id, enabled) VALUES " + "(%(avatar_id)s, %(email)s, %(firstname)s, %(lastname)s, " + "%(group_id)s, %(enabled)s);", + spider_users) + + # Clear out the current database. + cur.execute("DELETE FROM users;") + + # Insert rows into users where email addresses match. + cur.execute("INSERT INTO users " + "SELECT l.uniqueid, s.email, s.avatar_id, s.givenname, " + "s.surname, s.group_id, s.enabled " + "FROM ldap_users l JOIN spider_users AS s ON l.email = s.email ") + + db_conn.commit() diff --git a/lib/account_mgr/user_source/ldap_source.py b/lib/account_mgr/user_source/ldap_source.py new file mode 100644 index 0000000..cbdc38a --- /dev/null +++ b/lib/account_mgr/user_source/ldap_source.py @@ -0,0 +1,191 @@ +''' +ldap_reader.py + +Pulls the enterprise user groups from the LDAP server. 
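+
+A minimal sketch of the configuration this module consumes (the key names
+match the code; every value is illustrative):
+
+    {
+        "dir_uri": "ldap://dc1.example.com",
+        "dir_base_dn": "dc=example,dc=com",
+        "dir_user": "cn=query,dc=example,dc=com",
+        "dir_password": "secret",
+        "dir_type": "ad",
+        "dir_guid_source": "objectGUID",
+        "dir_username_source": "mail",
+        "dir_fname_source": "givenName",
+        "dir_lname_source": "sn",
+        "groups": [{"type": "dn",
+                    "ldap_id": "cn=backup,dc=example,dc=com",
+                    "group_id": 1}]
+    }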
+ +(c) 2011, SpiderOak, Inc. +''' + +import ldap +import logging +import re + +# MS ActiveDirectory does not properly give redirections; it passes +# redirects to the LDAP library, which dutifully follows them, but +# MSAD does not pass credentials along with the redirect process. This +# results in a case where we are using the same established, bound +# connection with our actual bound credentials having been +# stripped. The only recourse is to ignore referrals from LDAP +# servers. +ldap.set_option(ldap.OPT_REFERRALS, 0) + +class InvalidGroupConfiguration(Exception): + ''' + Thrown when invalid group configuration is used. + ''' + pass + +class OMLDAPConnection(object): + def __init__(self, uri, base_dn, username, password): + log = logging.getLogger('OMLDAPConnection __init__') + self.conn = ldap.initialize(uri) + self.conn.simple_bind_s(username, password) + log.debug("Bound to %s as %s" % (uri, username,)) + + self.base_dn = base_dn + + +def can_auth(config, username, password): + ''' + Checks the ability of the given username and password to connect to the AD. + Returns true if valid, false if not. + ''' + log = logging.getLogger("can_bind") + # Throw out empty passwords. + if password == "": + return False + + conn = ldap.initialize(config['dir_uri']) + try: + conn.simple_bind_s(username, password) + # ANY failure here results in a failure to auth. No exceptions! + except Exception: + log.exception("Failed on LDAP bind") + return False + + return True + + +def ldap_connect(uri, base_dn, username, password): + ''' + Returns a tuple of (bound LDAP connection object, base DN). + Accepts a directory containing our connection settings. + ''' + log = logging.getLogger('ldap_connect') + conn = ldap.initialize(uri) + conn.simple_bind_s(username, password) + log.debug("Bound to %s as %s" % (uri, username,)) + return (conn, base_dn, ) + +def collect_groups(conn, config): + ''' + Returns a list of lists of users per user group. + The user groups are a list of LDAP DNs. + ''' + + result_groups = [] + + for group in config['groups']: + result_groups.extend(get_group(conn, config, group)) + + return result_groups + + +def group_by_guid(conn, guid): + ''' + Returns the DN of a group given the GUID. + Active Directory-only. 
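+
+    Illustrative call (the GUID is a placeholder; AD really wants the
+    escaped binary form):
+
+        group_by_guid(conn, guid)  # -> [('cn=test,dc=example,dc=com', {})]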
+ ''' + results = conn.conn.search_s(conn.base_dn, + ldap.SCOPE_SUBTREE, + "(objectGUID=%s)" % (guid,), + ["dn"], + ) + return results + + +def _get_group_ad(ldap_conn, config, group, dn): + log = logging.getLogger('_get_group_ad %s' % (dn,)) + user_list = [] + for dn, result_dict in ldap_conn.conn.search_s( + ldap_conn.base_dn, ldap.SCOPE_SUBTREE, "(memberOf=%s)" % (dn,), + [config['dir_guid_source'].encode('utf-8'), + config['dir_username_source'].encode('utf-8'), + config['dir_fname_source'].encode('utf-8'), + config['dir_lname_source'].encode('utf-8')] + ): + if dn is None: + continue + log.debug("Appending user %s" % result_dict[config['dir_username_source']][0]) + user_list.append({ + 'uniqueid' : result_dict[config['dir_guid_source']][0], + 'email' : result_dict[config['dir_username_source']][0], + 'firstname' : result_dict[config['dir_fname_source']][0], + 'lastname' : result_dict[config['dir_lname_source']][0], + 'group_id' : group['group_id'], + }) + return user_list + + +def _get_group_posix(ldap_conn, config, group, dn): + log = logging.getLogger('_get_group_posix %s' % (dn,)) + user_list = [] + for dn, result_dict in ldap_conn.conn.search_s( + group['ldap_id'], + ldap.SCOPE_SUBTREE, + attrlist=[config['dir_guid_source'], config['dir_member_source']] + ): + print dn, result_dict + if dn is None: + continue + # Search LDAP to get User entries that match group + for user in result_dict[config['dir_member_source']]: + log.debug("Found user %s", user) + + # Split apart the uid from the rest of the member_source + regex_result = re.search(r'^(uid=\w+),', user) + uid = regex_result.group(1) + + # Add each user that matches + for dn, user_dict in ldap_conn.conn.search_s( + ldap_conn.base_dn, + ldap.SCOPE_SUBTREE, uid, + [config['dir_guid_source'], + config['dir_fname_source'], + config['dir_lname_source'], + config['dir_username_source']] + ): + if dn is None: + continue + log.debug("Appending user %s", user) + user_list.append({ + 'uniqueid' : user_dict[config['dir_guid_source']][0], + 'email' : user_dict[config['dir_username_source']][0], + 'firstname' : user_dict[config['dir_fname_source']][0], + 'lastname' : user_dict[config['dir_lname_source']][0], + 'group_id' : group['group_id'], + }) + + return user_list + +_GROUP_GETTERS = { + 'ad': _get_group_ad, + 'posix': _get_group_posix, +} + + +def get_group(ldap_conn, config, group): + ''' + Returns a list of user dicts for the specified group. + + user dict keys: uniqueid, email, firstname, lastname, group_id + ''' + # TODO: figure out how to smoothly handle using GUIDs in configuration. + # AD stores GUIDs as a very unfriendly 16-byte value. + log = logging.getLogger("get_group %d" % (group['group_id'],)) + if group['type'].lower() != "dn": + raise InvalidGroupConfiguration("passed a group value != 'dn'") + dn = group['ldap_id'] + + try: + group_getter = _GROUP_GETTERS[config.get('dir_type', 'ad').lower()] + except KeyError: + raise InvalidGroupConfiguration( + "unknown dir_type %r" % (config['dir_type'],)) + + log.debug("Group DN: %s", dn) + user_list = group_getter(ldap_conn, config, group, dn) + log.info("Found %d users", len(user_list)) + + return user_list + diff --git a/lib/account_mgr/user_source/local_source.py b/lib/account_mgr/user_source/local_source.py new file mode 100644 index 0000000..8dcea0b --- /dev/null +++ b/lib/account_mgr/user_source/local_source.py @@ -0,0 +1,86 @@ +''' +local_source.py + +Provides self-contained user management functionality on the virtual appliance. + +(c) 2012, SpiderOak, Inc. 
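+
+Passwords live bcrypt-hashed in the `passwords` table (email, pw_hash);
+writes go through the upsert_password() database function, and a login
+check re-hashes the candidate password against the stored hash.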
+'''
+
+import itertools
+import logging
+import psycopg2
+
+log = logging.getLogger('local_source')
+try:
+    import bcrypt
+except ImportError:
+    log.warn('bcrypt unavailable; only LDAP auth will work on this system')
+
+from common import get_config
+
+# This is only filled in the event of hitting authenticator and needing to connect to a DB.
+_AUTHENTICATOR_DB_CONN = None
+
+_PW_HASH_SELECT='''
+SELECT email, pw_hash
+FROM passwords WHERE email=%s;'''
+def check_local_auth(db_conn, username, password):
+    log = logging.getLogger("check_local_auth")
+    # Log only the username; the password itself must never hit the logs.
+    log.info('login: %s' % (username,))
+    cur = db_conn.cursor()
+    cur.execute(_PW_HASH_SELECT, (username,))
+    if cur.rowcount != 1:
+        return False
+
+    row = cur.fetchone()
+
+    try:
+        return bcrypt.hashpw(password, row[1]) == row[1]
+    except ValueError:
+        return False
+
+def _get_db_conn(config):
+    global _AUTHENTICATOR_DB_CONN
+    if _AUTHENTICATOR_DB_CONN is None:
+        _AUTHENTICATOR_DB_CONN = psycopg2.connect(database=config['db_db'],
+                                                  user=config['db_user'],
+                                                  password=config['db_pass'],
+                                                  host=config['db_host'])
+
+    return _AUTHENTICATOR_DB_CONN
+
+def can_auth(config, username, password):
+    return check_local_auth(_get_db_conn(config), username, password)
+
+def set_user_password(db_conn, email, password):
+    """
+    Sets the password for the user.
+
+    This is secretly a wrapper for :func:`set_multi_passwords`.
+
+    :param db_conn: DB connection object
+    :param email: User's email
+    :param password: User's password.
+
+    """
+    log = logging.getLogger("set_user_password")
+    set_multi_passwords(db_conn, [email], [password])
+
+def set_multi_passwords(db_conn, emails, passwords):
+    """
+    Sets passwords for the given emails.
+
+    :param emails: List of email addresses.
+    :param passwords: List of passwords to set for the given emails.
+    :raises: TypeError
+
+    """
+    if len(emails) != len(passwords):
+        raise TypeError("Argument lengths do not match!")
+    hashed_pws = (bcrypt.hashpw(pw, bcrypt.gensalt()) for pw in passwords)
+    cur = db_conn.cursor()
+
+    cur.executemany(
+        "SELECT upsert_password(%s, %s)", itertools.izip(emails, hashed_pws)
+    )
+
+    db_conn.commit()
diff --git a/lib/account_mgr/user_source/radius_source.py b/lib/account_mgr/user_source/radius_source.py
new file mode 100644
index 0000000..77a1df4
--- /dev/null
+++ b/lib/account_mgr/user_source/radius_source.py
@@ -0,0 +1,73 @@
+"""
+radius_source.py
+
+Provides RADIUS authentication for the OpenManage stack.
+
+This module *DOES NOT* provide user account management; that will have to be
+provided via another plugin.
+
+The following agent_config options are expected by this module:
+rad_server: the RADIUS server we will be authenticating to
+rad_secret: the RADIUS secret we will be using
+rad_dictionary: the RADIUS dictionary to use.
+
+(c) 2012 SpiderOak, Inc.
+RADIUS auth code also contributed by RedHat, Inc.
+"""
+
+import logging
+from socket import gethostname
+
+import pyrad.packet
+from pyrad.client import Client
+from pyrad.dictionary import Dictionary
+
+
+def process_username(username, chop_at_symbol=True):
+    """
+    Selectively splits a username of the form "foo@bar.com" into just "foo"
+    for auth purposes.
+    """
+
+    if chop_at_symbol:
+        username, _ = username.split('@', 1)
+
+    return username
+
+
+def can_auth(config, username, password):
+    """
+    Performs authentication against a RADIUS server.
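+
+    A sketch of the expected config keys (values here are illustrative):
+
+        {'rad_server': 'radius.example.com',
+         'rad_secret': 's3cret',
+         'rad_dictionary': '/path/to/pyrad/dictionary'}
+
+    Returns True only on an Access-Accept reply; any other reply code or
+    a transport failure yields False.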
+ """ + + log = logging.getLogger('radius_source.can_auth') + + log.debug("Attempting RADIUS auth to %s for user %s" % (config['rad_server'], username,)) + + processed_user = process_username(username) + + # Create a RADIUS client to communicate with the server. + srv = Client( + server = config['rad_server'], + secret = config['rad_secret'], + dict = Dictionary(config['rad_dictionary']),) + + req = srv.CreateAuthPacket( + code = pyrad.packet.AccessRequest, + User_Name = processed_user, + NAS_Identifier = gethostname(),) + + req['User-Password'] = req.PwCrypt(password) + + try: + reply = srv.SendPacket(req) + except Exception: + log.exception("Problem contacting RADIUS server") + return False + + if reply.code == pyrad.packet.AccessAccept: + log.info("User %s accepted by RADIUS" % (username,)) + return True + + log.info("User %s rejected by RADIUS" % (username,)) + return False diff --git a/lib/common/__init__.py b/lib/common/__init__.py new file mode 100644 index 0000000..26c5a23 --- /dev/null +++ b/lib/common/__init__.py @@ -0,0 +1,88 @@ +''' +__init__.py + +Common settings for all of the OMVA applications. + +(c) 2011 SpiderOak, Inc. +''' + +import json +import logging +import os +import os.path + +CONFIG_DIR = os.environ.get("OPENMANAGE_CONFIGDIR", + "/opt/openmanage/etc") +DATA_DIR = os.environ.get("OPENMANAGE_DATADIR", + "/var/lib/openmanage") +DEFAULT_RC = "agent_config.json" + +_CONFIG = None + +def set_config(config): + global _CONFIG + _CONFIG = config + +def get_config(): + global _CONFIG + return _CONFIG + +def get_ssl_keys(): + log = logging.getLogger('get_ssl_keys') + key_home = DATA_DIR + key_fname = os.path.join(key_home, 'server.key') + cert_fname = os.path.join(key_home, 'server.crt') + + print key_fname+" "+ cert_fname + if os.path.exists(key_fname) and os.path.exists(cert_fname): + log.info("Using SSL key/cert: %s %s"% (key_fname, cert_fname,)) + return key_fname, cert_fname + + log.warn("No SSL certs found at %s" % (DATA_DIR,)) + return None, None + + +def make_defaults(): + '''Provides default and sane configuration options + + Most users shouldn't need to change this. + ''' + default_config = {'db_user': 'directory_agent', + 'db_host': 'localhost', + 'db_db': 'openmanage', + 'api_root': 'https://spideroak.com/apis/partners/billing', + } + + return default_config + +def read_config_file(cmdline_option=None): + '''Reads the configuration file, optionally using an envar and/or command-line argument for the location.''' + + if cmdline_option is not None: + config_file = cmdline_option + else: + config_file = os.path.join(CONFIG_DIR, DEFAULT_RC) + + # TODO: cleanup the configuration file path. 
+ if not os.path.exists(config_file): + log = logging.getLogger("read_config_file") + log.warn("Missing config file at %s" % (config_file,)) + return dict() + + with open(config_file) as json_fobj: + fileconfig = json.load(json_fobj) + + for key in fileconfig.keys(): + if isinstance(fileconfig[key], unicode): + fileconfig[key] = fileconfig[key].encode('utf_8') + + config = merge_config(make_defaults(), fileconfig) + + return config + +def merge_config(config, cmdline_opts): + '''Merges the command-line options with the configuration file.''' + for key, value in cmdline_opts.iteritems(): + config[key] = value + + return config diff --git a/lib/key_escrow/__init__.py b/lib/key_escrow/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/key_escrow/admin.py b/lib/key_escrow/admin.py new file mode 100644 index 0000000..03bc5cf --- /dev/null +++ b/lib/key_escrow/admin.py @@ -0,0 +1,204 @@ +""" +base layer of escrow is always ourselves - so that we are not trusting escrow +agents with data they can read. + +""" + +import os +import time +import shutil + +from key_escrow.gen import make_keypair +from key_escrow.write import escrow_binary + +from Pandora.serial import load, dump, register_all + +_ESCROW_LAYERS_PATH = os.environ["SPIDEROAK_ESCROW_LAYERS_PATH"] +_ESCROW_KEYS_PATH = os.environ["SPIDEROAK_ESCROW_KEYS_PATH"] + +class EscrowError(Exception): pass + +def save_key(key_id, keypair): + """ + save (key id, keypair, ) for key id + """ + key_fn = os.path.join(_ESCROW_KEYS_PATH, "%s.key" % (key_id, )) + with open(key_fn, "ab") as fobj: + dump((key_id, keypair, ), fobj) + print "Saved %s to %s" % ( key_id, key_fn, ) + + return True + +def load_keypair(key_id): + "load and return keypair for key id" + key_fn = os.path.join(_ESCROW_KEYS_PATH, "%s.key" % (key_id, )) + with open(key_fn, "rb") as fobj: + stored_key_id, keypair = load(fobj) + assert key_id == stored_key_id + + return keypair + +def read_config(name): + """ + return value from named config file + """ + cfg_fn = os.path.join(_ESCROW_KEYS_PATH, "%s.cfg" % name) + with open(cfg_fn) as fobj: + return fobj.readline().strip() + +def write_config(name, value): + "write value to named config file" + cfg_fn = os.path.join(_ESCROW_KEYS_PATH, "%s.cfg" % name) + if os.path.exists(cfg_fn): + raise EscrowError("config %s already exists" % name) + with open(cfg_fn, "wb") as fobj: + fobj.write("%s\n" % (value, )) + + return True + +def get_base(): + "return (base key ID, keypair,) for base layer" + + base_id = read_config("base") + + keypair = load_keypair(base_id) + + return base_id, keypair + +def create_base(): + """ + Run only once to create base layer of key escrow (which is kept internal.) + + create a new (key_id, keypair, ) and save it. 
+ create a new file base.cfg with key_id in first line + """ + + base_id, base_keypair = make_keypair() + + save_key(base_id, base_keypair) + + write_config("base", base_id) + + print "base key ID %s and cfg saved" % ( base_id, ) + + return True + +def setup_brand(brand_identifier): + """ + create keys, brand config file, and brand layers file for NUS + """ + base_id, base_keypair = get_base() + + brand_id, brand_keypair = make_keypair() + + save_key(brand_id, brand_keypair) + + layers = ( (brand_id, brand_keypair.publickey(), ), + (base_id, base_keypair.publickey(), ), ) + + layer_fn = os.path.join(_ESCROW_LAYERS_PATH, + "brand.%s.layers.serial" % ( brand_identifier, )) + + if os.path.exists(layer_fn): + raise EscrowError("Brand id %s layers exist" % (brand_identifier, )) + with open(layer_fn, "ab") as fobj: + dump(layers, fobj) + + write_config("brand.%s" % (brand_identifier, ), brand_id) + + print "new keys and config saved for brand %s" % brand_identifier + + return brand_id, brand_identifier, layers + +def test_base(): + "test creating and reading base cfg and key" + create_base() + base_id, keypair = get_base() + assert keypair.__class__.__name__ == "RSAobj" + print "test base ok" + return True + +def test_setup_brand(): + from key_escrow.server import read_escrow_data + brand_identifier = 'my_test_brand' + brand_id, _brand_keypair, layers = setup_brand(brand_identifier) + + assert brand_id == layers[0][0] + + _user_key_id, user_keypair = make_keypair() + test_data = "0123456789" + escrowed_data = escrow_binary(layers, test_data, user_keypair) + plain_escrowed_data = read_escrow_data(brand_identifier, escrowed_data, + sign_key=user_keypair.publickey()) + assert plain_escrowed_data == test_data + + print "setup brand test ok" + return True + +def test_all(): + "run all tests" + global _ESCROW_KEYS_PATH + global _ESCROW_LAYERS_PATH + test_dir = "/tmp/key_escrow_admin_test.%s" % ( time.time(), ) + try: + _ESCROW_KEYS_PATH = os.path.join(test_dir, "keys") + _ESCROW_LAYERS_PATH = os.path.join(test_dir, "layers") + os.environ["SPIDEROAK_ESCROW_LAYERS_PATH"] = _ESCROW_LAYERS_PATH + os.environ["SPIDEROAK_ESCROW_KEYS_PATH"] = _ESCROW_KEYS_PATH + os.makedirs(_ESCROW_KEYS_PATH) + os.makedirs(_ESCROW_LAYERS_PATH) + test_results = [] + test_results.append(test_base()) + test_results.append(test_setup_brand()) + assert all(test_results) + finally: + shutil.rmtree(test_dir) + +def run_as_utility(): + register_all() + import sys + if "testall" in sys.argv: + test_all() + return + + if "create_base" in sys.argv: + create_base() + elif "setup_brand" in sys.argv: + brand_identifier = sys.argv[sys.argv.index('setup_brand') + 1] + setup_brand(brand_identifier) + else: + print >>sys.stderr, "I don't know what you want me to do!" + + print >>sys.stderr, "IF YOU HAVE CREATED NEW KEYS, BACK THEM UP NOW!" 
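+
+# Example invocations (a sketch; run_as_utility scans sys.argv directly, so
+# the exact script path may vary by install):
+#
+#   python admin.py create_base
+#   python admin.py setup_brand my_test_brand
+#   python admin.py testall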
+ +if __name__ == "__main__": + run_as_utility() + +#def setup_escrow_agent(agent_name): +# """ +# """ +# pass +# +# +#def setup_brand(brand_identifier, agent_name=None): +# """ +# """ +# pass +# +# +#def get_agent(agent_name): +# "return (ID, keypair,) for specified agent layer escrow" +# +# agent_cfg = "agent.%s.cfg" % (agent_name, ) +# agent_key_id = read_config(agent_cfg) +# keypair = load_key(agent_key_id) +# +# return agent_key_id, keypair +# +# +# +# +# +# +# +# diff --git a/lib/key_escrow/gen.py b/lib/key_escrow/gen.py new file mode 100644 index 0000000..e8b5608 --- /dev/null +++ b/lib/key_escrow/gen.py @@ -0,0 +1,27 @@ +import time +import random +import hmac +from hashlib import sha256 + +from Crypto.PublicKey import RSA + +from key_escrow.write import random_string + +GEN_COUNTER = 1 + +_hmac_key = '\x91\xc3\x94\xb2\xc7\xa4\xf6\xf8;n\x8a\xb1r{&\xf0.m\x97L\xab\x174\r\r\x92\x9c\xf4}\x9dp\xc7' + +def new_key_id(): + "create new unique key ID" + global GEN_COUNTER + timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime()) + key_id = "%s-%d-%d" % ( timestamp, GEN_COUNTER, random.randint(1, 99999), ) + key_hmac_digest = hmac.new(_hmac_key, key_id, sha256).hexdigest() + GEN_COUNTER += 1 + return key_id + key_hmac_digest + + +def make_keypair(size=2048): + "return Key ID string, keypair obj" + rsakey = RSA.generate(size, random_string) + return new_key_id(), rsakey diff --git a/lib/key_escrow/read.py b/lib/key_escrow/read.py new file mode 100644 index 0000000..3d2df6d --- /dev/null +++ b/lib/key_escrow/read.py @@ -0,0 +1,76 @@ +import os +import time +import json +import zlib +import struct +from binascii import b2a_base64, a2b_base64 +from hashlib import sha256 + +from Crypto.PublicKey import RSA +from Crypto.Cipher import AES +from Crypto.Util.number import bytes_to_long, long_to_bytes + +AES_KEY_SIZE = 32 +AES_NONCE_SIZE = 16 + +def read_escrow_layer(escrow_keys, layer_data, sign_key=None): + """ + inverse of make_escrow_layer + + escrow_keys = dictionary of available private escrow keys in the format + key_id = key object + layer_data = binary output from make_escrow_layer + sign_key = public key of the user who has signed this layer + """ + + header_format = "!HHHL" + header_size = struct.calcsize(header_format) + + if not len(layer_data) > header_size: + raise ValueError( + "Layer too small: expected >%d bytes" % ( header_size, )) + + ( key_id_len, sig_hmac_len, sig_len, payload_len, ) = struct.unpack( + header_format, layer_data[0:header_size]) + + expected_size = header_size + sum( + (key_id_len, sig_hmac_len, sig_len, payload_len, )) + + if not len(layer_data) == expected_size: + raise ValueError( + "Layer wrong sized: expected %d but %d" % ( + expected_size, len(layer_data), )) + + body_format = "!%ds%ds%ds%ds" % ( + key_id_len, sig_hmac_len, sig_len, payload_len, ) + + ( key_id, sig_hmac, sig, payload, ) = struct.unpack( + body_format, layer_data[header_size:]) + + if not key_id in escrow_keys: + raise KeyError("Key not available for ID %r" % (key_id, )) + + if sign_key is not None: + valid = sign_key.verify(sig_hmac, (bytes_to_long(sig), )) + if not valid: + raise ValueError("Signature error") + + payload_data = json.loads(zlib.decompress(payload)) + for k, v in payload_data.iteritems(): + payload_data[k] = a2b_base64(v) + + priv_key = escrow_keys[key_id] + aes_key = priv_key.decrypt(payload_data['aes_key']) + + + if not len(aes_key) == AES_KEY_SIZE: + raise ValueError("aes_key wrongsized %d" % + (len(aes_key), )) + if not len(payload_data['aes_iv']) == AES_NONCE_SIZE: + 
raise ValueError("aes_iv wrongsized") + + aes = AES.new(aes_key, AES.MODE_CFB, + payload_data['aes_iv']) + data = aes.decrypt(payload_data['data']) + + return data diff --git a/lib/key_escrow/server.py b/lib/key_escrow/server.py new file mode 100644 index 0000000..3572baf --- /dev/null +++ b/lib/key_escrow/server.py @@ -0,0 +1,90 @@ +import os +from key_escrow.read import read_escrow_layer + +from Pandora.serial import load + +_ESCROW_LAYERS_PATH = os.environ["SPIDEROAK_ESCROW_LAYERS_PATH"] +_ESCROW_KEYS_PATH = os.environ["SPIDEROAK_ESCROW_KEYS_PATH"] + +_ESCROW_LAYERS_CACHE = dict() +_ESCROW_KEYS_CACHE = dict() + + +def get_escrow_layers(brand_identifier): + """ + Return a binary string containing a serilization of escrow key layers + + The de-serialized structure will be a list of tulpes of the form: + (key id, public key, ) + + The first item in the list is the innermost escrow layer (to which + plaintext data is first enciphered.) + """ + + if brand_identifier in _ESCROW_LAYERS_CACHE: + return _ESCROW_LAYERS_CACHE[brand_identifier] + + filepath = os.path.join(_ESCROW_LAYERS_PATH, + "brand.%s.layers.serial" % (brand_identifier, )) + + with open(filepath, "rb") as fobj: + data = fobj.read() + + _ESCROW_LAYERS_CACHE[brand_identifier] = data + + return data + +def load_escrow_key_cache(): + """ + populate escrow key cache with everything in SPIDEROAK_ESCROW_KEYS_PATH + """ + #print "loading keys in %s" % _ESCROW_KEYS_PATH + + # TODO perhaps memcache this w/ short (30m?) expire. + + for name in os.listdir(_ESCROW_KEYS_PATH): + if not name.endswith(".key"): + continue + + filename_key_id = name[0:-4] + if filename_key_id in _ESCROW_KEYS_CACHE: + continue + + keypath = os.path.join(_ESCROW_KEYS_PATH, name) + with open(keypath, "rb") as fobj: + key_id, key = load(fobj) + assert filename_key_id == key_id + _ESCROW_KEYS_CACHE[key_id] = key + #print "Loaded %s" % key_id + + return True + +def read_escrow_data(brand_identifier, escrowed_data, layer_count=2, + sign_key=None, _recur=0): + """ + escrowed_data = binary data encoded to escrow keys + sign_key = user's public key used to check signatures (optional) + layer_count = number of layers to go through (2 by default) + + returns: plaintext escrowed data + """ + # TODO: make this talk to a remote key escrow service hardened/isolated by + # policy + + layer_data = escrowed_data + + try: + for layer_idx in range(layer_count): + layer_data = read_escrow_layer( + _ESCROW_KEYS_CACHE, layer_data, sign_key) + except KeyError, err: + if not "Key not available for ID" in str(err): + raise + if _recur: + raise + load_escrow_key_cache() + return read_escrow_data(brand_identifier, escrowed_data, + layer_count=layer_count, sign_key=sign_key, _recur=_recur+1) + + return layer_data + diff --git a/lib/key_escrow/test.py b/lib/key_escrow/test.py new file mode 100644 index 0000000..641bc9a --- /dev/null +++ b/lib/key_escrow/test.py @@ -0,0 +1,56 @@ +import os + +from key_escrow.write import make_escrow_layer +from key_escrow.read import read_escrow_layer +from key_escrow.gen import make_keypair + +_TEST_LAYERS = 500 +_TEST_DATA_SIZE = 4097 + +def test_write_and_read_layers(): + """ + test encapsulating data in many escrow layers and reading it back out + """ + + userkey = make_keypair() + + layers = list() + for _ in range(_TEST_LAYERS): + layers.append(make_keypair()) + + # this is the data that goes in the innermost layer + data = os.urandom(_TEST_DATA_SIZE) + + layer_data = data + + # we encapsulate this data in layers of key escrow + for idx, layer in 
enumerate(layers): + cipher_layer_data = make_escrow_layer( + layer[0], layer[1].publickey(), layer_data, userkey[1]) + + # at every layer we test that we can read back the data + plain_layer_data = read_escrow_layer( + { layer[0]: layer[1] }, cipher_layer_data, userkey[1].publickey()) + assert plain_layer_data == layer_data, \ + "readback fail at layer %d" % (idx + 1) + + layer_data = cipher_layer_data + + + # read back the layers in reverse + for idx, layer in enumerate(layers[::-1]): + plain_layer_data = read_escrow_layer( + { layer[0]: layer[1] }, layer_data, userkey[1].publickey()) + layer_data = plain_layer_data + + # we should get our original data back out + assert layer_data == data + + return True + +def test_all(): + assert test_write_and_read_layers() + print "All tests complete" + +if __name__ == "__main__": + test_all() diff --git a/lib/key_escrow/write.py b/lib/key_escrow/write.py new file mode 100644 index 0000000..f47471e --- /dev/null +++ b/lib/key_escrow/write.py @@ -0,0 +1,80 @@ +import os +import time +import json +import zlib +import struct +from binascii import b2a_base64, a2b_base64 +from hashlib import sha256 +import hmac + +from Crypto.PublicKey import RSA +from Crypto.Cipher import AES +from Crypto.Util.number import bytes_to_long, long_to_bytes + +AES_KEY_SIZE = 32 +AES_NONCE_SIZE = 16 + +def random_string(size): + "return cryptographically secure string of specified size" + return os.urandom(size) + +def new_session_key(size): + """ + make session key suitable for use for encrypting data via rsa + """ + # XXX: there's a bug in Crypto.PublicKey.RSA. + # It loses the first byte if it is NUL, so until this is fixed, we + # don't use keys with a first byte of \x00 + key = random_string(size) + while key[0] == "\x00": + key = random_string(size) + + return key + +def make_escrow_layer(pub_key_id, pub_key, data, sign_key): + """ + make an escrow layer (string) that includes the binary data + + pub_key_id = string to identify the private key the layer can be read with + pub_key = public key object for the escrow party at this layer + data = binary data to store + sign_key = private key object of the user signing the layer + + returns binary string + """ + + aes_key = new_session_key(AES_KEY_SIZE) + aes_iv = sha256(str(time.time())).digest()[:AES_NONCE_SIZE] + aes = AES.new(aes_key, AES.MODE_CFB, aes_iv) + aes_encoded_data = aes.encrypt(data) + + payload = zlib.compress(json.dumps(dict( + aes_key = b2a_base64( + pub_key.encrypt(aes_key, random_string(len(aes_key)))[0]), + aes_iv = b2a_base64(aes_iv), + data = b2a_base64(aes_encoded_data)))) + + sig_hmac = hmac.new(key='', msg=payload, digestmod=sha256).digest() + sig = long_to_bytes(sign_key.sign(sig_hmac, random_string(len(sig_hmac)))[0]) + + struct_format = "!HHHL%ds%ds%ds%ds" % ( + len(pub_key_id), len(sig_hmac), len(sig), len(payload), ) + + return struct.pack(struct_format, + len(pub_key_id), len(sig_hmac), len(sig), len(payload), + pub_key_id, sig_hmac, sig, payload) + + +def escrow_binary(escrow_key_layers, data, sign_key): + """ + write binary escrowed data, signed by private key, to the given escrow + layers + """ + + layer_data = data + for idx, layer in enumerate(escrow_key_layers): + layer_data = make_escrow_layer( + layer[0], layer[1], layer_data, sign_key) + + return layer_data + diff --git a/lib/netkes_agent/__init__.py b/lib/netkes_agent/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/netkes_agent/app_factory.py b/lib/netkes_agent/app_factory.py new file mode 100644 index 
0000000..c442104 --- /dev/null +++ b/lib/netkes_agent/app_factory.py @@ -0,0 +1,155 @@ +import logging +import os +import re + +from urllib import unquote +from wsgi_util.router import router +from wsgi_util.http import BadRequest, SuperSimple, NotFound, Forbidden, ServerError +from wsgi_util.post_util import read_postdata, read_querydata + +from common import get_config, read_config_file, set_config +from account_mgr import authenticator +from key_escrow import server +from Pandora import serial + + +def setup_logging(): + handler = logging.StreamHandler() + formatter = logging.Formatter( + '%(asctime)s %(levelname)-7s %(name)-15s: %(message)s') + handler.setFormatter(formatter) + logging.root.addHandler(handler) + + if 'SPIDEROAK_NETKES_LOG_DEBUG' in os.environ: + logging.root.setLevel(logging.DEBUG) + logging.info("Debug logging enabled. Warning, lots of output!") + else: + logging.root.setLevel(logging.INFO) + +def setup_application(): + config = get_config() + if config is not None: + return + config = read_config_file() + set_config(config) + +setup_logging() +setup_application() +serial.register_all() + +@read_querydata +def get_layers(environ, start_response): + log = logging.getLogger("get_layers") + + log.debug("start") + try: + brand_identifier = environ['query_data']['brand_id'][0] + except KeyError: + log.error("Got bad request.") + return BadRequest()(environ, start_response) + + try: + layer_data = server.get_escrow_layers(brand_identifier) + except (KeyError, IOError,): + log.warn("Got missing brand_identifier: %s" % (brand_identifier,)) + return NotFound()(environ, start_response) + + log.info("Returning escrow keys for %s" % (brand_identifier,)) + + return SuperSimple(layer_data, ctype="application/octet-stream")(environ, start_response) + + +@read_querydata +def authenticate_user(environ, start_response): + log = logging.getLogger('authenticate_user') + log.debug("start") + + try: + brand_identifier = environ['query_data']['brand_id'][0] + username = environ['query_data']['username'][0] + password = environ['query_data']['password'][0] + crypt_pw = environ['query_data'].get('crypt_pw', ["True"])[0] + except KeyError: + log.error("Got bad request.") + return BadRequest()(environ, start_response) + + decoded_user = unquote(username) + + # If we get anything OTHER than explicitly "False" in the request, we will assume it's an encrypted password. 
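+    # Illustrative request (parameter names per the parsing above; the values
+    # are invented):
+    #   GET /auth?brand_id=acme&username=user%40example.com&password=...&crypt_pw=False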
+    if crypt_pw == "False":
+        plaintext_password = password
+    else:
+        try:
+            plaintext_password = server.read_escrow_data(
+                brand_identifier, password)
+        except KeyError:
+            log.warn("missing identifier %s" % (brand_identifier,))
+            return NotFound()(environ, start_response)
+        except ValueError:
+            log.warn("bad values for authenticating user %s" % (decoded_user,))
+            return BadRequest()(environ, start_response)
+        except Exception:
+            log.exception("server.read_escrow_data failed for user %s brand %s"
+                          % (decoded_user, brand_identifier,))
+            return ServerError()(environ, start_response)
+
+    if not authenticator(get_config(), decoded_user, plaintext_password):
+        log.info("Auth failed for %s" % (decoded_user,))
+        return Forbidden()(environ, start_response)
+
+    log.info("Auth OK for brand %s with user %s" % (brand_identifier, decoded_user,))
+    return SuperSimple("OK")(environ, start_response)
+
+
+@read_querydata
+@read_postdata
+def read_data(environ, start_response):
+    log = logging.getLogger("read_data")
+
+    log.debug("start")
+    try:
+        brand_identifier = environ['query_data']['brand_id'][0]
+        escrowed_data = environ['post_data']['escrow_data'][0]
+        serial_sign_key = environ['post_data']['sign_key'][0]
+    except KeyError:
+        log.warn("KeyError at start")
+        return BadRequest()(environ, start_response)
+
+    try:
+        layer_count = int(environ['post_data'].get('layer_count', [])[0])
+    except IndexError:
+        layer_count = None
+
+    sign_key = serial.loads(serial_sign_key)
+    log.debug("Being sent:")
+    log.debug("brand_identifier: %r" % brand_identifier)
+    log.debug("layer_count: %r" % layer_count)
+
+    try:
+        if layer_count is None:
+            plaintext_data = server.read_escrow_data(brand_identifier, escrowed_data, sign_key=sign_key)
+        else:
+            plaintext_data = server.read_escrow_data(brand_identifier, escrowed_data,
+                                                     layer_count=layer_count,
+                                                     sign_key=sign_key)
+    except ValueError:
+        log.warn("ValueError at reading escrow data")
+        return BadRequest()(environ, start_response)
+    except KeyError:
+        log.warn("KeyError at reading escrow data")
+        return NotFound()(environ, start_response)
+    except Exception:
+        log.exception('500 error in reading escrow data')
+        return ServerError()(environ, start_response)
+
+    log.info("Read data for brand %s" % (brand_identifier,))
+    return SuperSimple(plaintext_data, ctype="application/octet-stream")(environ, start_response)
+
+def app_factory(environ, start_response):
+    # rx, methods, app
+    urls = [
+        (re.compile(r'/layers$'), ('GET', 'HEAD',), get_layers),
+        (re.compile(r'/auth$'), ('GET', 'HEAD',), authenticate_user),
+        # methods must be a tuple; a bare string would match on substrings.
+        (re.compile(r'/data$'), ('POST',), read_data),
+    ]
+    return router(urls)(environ, start_response)
diff --git a/lib/netkes_agent/config_mgr.py b/lib/netkes_agent/config_mgr.py
new file mode 100644
index 0000000..e831b69
--- /dev/null
+++ b/lib/netkes_agent/config_mgr.py
@@ -0,0 +1,67 @@
+"""
+config_mgr.py
+
+Provides an API to control the virtual machine's NetKES and directory agent configuration.
+"""
+
+import json
+import os
+import os.path
+import subprocess
+
+_SERVICE_NAME = 'openmanage'
+
+class ConfigManager(object):
+    """
+    Provides an easy interface to get and set openmanage configuration.
+
+    Assumes you have r/w access to the configuration file, and the ability to
+    restart the openmanage service.
+    """
+    def __init__(self, filename):
+        """
+        Constructor. Give it a filename, and it will pull configuration from that file.
+
+        @see default_config for a great place to start looking for the configuration file.
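+
+        A minimal round trip (illustrative; the value shown is invented, and
+        apply_config() restarts the openmanage service):
+
+            mgr = ConfigManager(default_config())
+            mgr.config['api_root'] = 'https://example.invalid/billing'
+            mgr.apply_config()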
+ """ + self._config_file = filename + + with open(self._config_file) as cf: + self.config = json.load(cf) + + def new_cfg(self, new_filename, want_file_read=False): + """ + Changes the config file we point at. + + If it exists, we can optionally read it + """ + self._config_file = new_filename + + if want_file_read: + with open(self._config_file) as cf: + self.config = json.load(cf) + + def apply_config(self): + """ + Saves the current configuration to the configuration file, and restarts services + to apply the new configuration. + """ + with open(self._config_file, 'w') as cf: + json.dump(self.config, cf) + + self._kick_services() + + def _kick_services(self): + command = "sv restart " + _SERVICE_NAME + subprocess.call(command, shell=True) + + +def default_config(): + """ + Provides a sane place where the configuration file is normally kept. + """ + conf_dir = os.environ.get('OPENMANAGE_CONFIGDIR', None) + if conf_dir is None: + return None + + return os.path.join(conf_dir, 'agent_config.json') diff --git a/lib/netkes_agent/gunicorn.conf.py b/lib/netkes_agent/gunicorn.conf.py new file mode 100644 index 0000000..e069934 --- /dev/null +++ b/lib/netkes_agent/gunicorn.conf.py @@ -0,0 +1,3 @@ +import multiprocessing + +workers = multiprocessing.cpu_count() * 2 + 1 diff --git a/lib/netkes_agent/test/__init__.py b/lib/netkes_agent/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/netkes_agent/test/test.json b/lib/netkes_agent/test/test.json new file mode 100644 index 0000000..2c734d3 --- /dev/null +++ b/lib/netkes_agent/test/test.json @@ -0,0 +1,3 @@ +{ + "testkey_1": "testvalue_1_deadbeef" +} diff --git a/lib/netkes_agent/test/test_config_mgr.py b/lib/netkes_agent/test/test_config_mgr.py new file mode 100644 index 0000000..b932503 --- /dev/null +++ b/lib/netkes_agent/test/test_config_mgr.py @@ -0,0 +1,39 @@ +import unittest + +from netkes_agent import config_mgr + +_config_test_file = "test.json" +_tmp_config_test = "tmptest.json" + +class TestReadConfiguration(unittest.TestCase): + def test_successful_read_withkey(self): + mgr = config_mgr.ConfigManager(_config_test_file) + + self.assertEqual(mgr.config['testkey_1'], + 'testvalue_1_deadbeef') + + def test_successful_read_nokey(self): + mgr = config_mgr.ConfigManager(_config_test_file) + + with self.assertRaises(KeyError): + throwaway = mgr.config['testkey_deadbeef'] + + def test_failed_read(self): + with self.assertRaises(IOError): + mgr = config_mgr.ConfigManager('DEADBEEF') + + +class TestSetConfiguration(unittest.TestCase): + def setUp(self): + self.mgr = config_mgr.ConfigManager(_config_test_file) + + def test_set_new_data(self): + self.mgr.config['test_newvalue'] = 'DEADBEEFERY' + + self.assertEqual(self.mgr.config['test_newvalue'], 'DEADBEEFERY') + + def test_set_apply_new_data(self): + self.mgr.config['test_newvalue'] = 'DEADBEEF_2' + +if __name__ == "__main__": + unittest.main() diff --git a/lib/wsgi_util/__init__.py b/lib/wsgi_util/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/lib/wsgi_util/auth.py b/lib/wsgi_util/auth.py new file mode 100644 index 0000000..89f1c9b --- /dev/null +++ b/lib/wsgi_util/auth.py @@ -0,0 +1,8 @@ +import base64 + + +def parse_auth_header(header): + scheme, data = header.split(' ', 1) + if scheme != 'Basic': + raise ValueError('%s authentication scheme not supported.' 
                         % (scheme,))
+    return base64.b64decode(data).split(':', 1)
diff --git a/lib/wsgi_util/cookie_util.py b/lib/wsgi_util/cookie_util.py
new file mode 100644
index 0000000..d3211f4
--- /dev/null
+++ b/lib/wsgi_util/cookie_util.py
@@ -0,0 +1,14 @@
+import Cookie
+from urllib import unquote
+
+
+def read_cookie(app):
+    def read_cookie(environ, start_response):
+        try:
+            cookie = Cookie.SimpleCookie(environ.get('HTTP_COOKIE', ''))
+        except Cookie.CookieError:
+            pass
+        else:
+            environ['cookie_data'] = dict((k, v.value and unquote(v.value)) for k, v in cookie.items())
+        return app(environ, start_response)
+    return read_cookie
diff --git a/lib/wsgi_util/http.py b/lib/wsgi_util/http.py
new file mode 100644
index 0000000..6b3e374
--- /dev/null
+++ b/lib/wsgi_util/http.py
@@ -0,0 +1,108 @@
+import sys
+
+from wsgi_util import http_status
+
+
+ee = ('<html><head><title>%(status)s</title></head><body>\n'
+      '<h1>%(status)s</h1>\n'
+      '<p>%(message)s</p>\n'
+      '</body></html>
\r\n') + + +def SuperSimple(message, status=http_status.OK, + headers=(), ctype='text/plain', exc_info=()): + def app(environ, start_response): + start_response(status, + [('Content-type', ctype), + ('Content-length', str(len(message)))] + list(headers), + exc_info) + return [message] + return app + + +def Simple(message, status=http_status.OK, + headers=(), ctype='text/html', exc_info=()): + body = ee % dict(status=status, message=message) + return SuperSimple(body, status, headers, ctype, exc_info) + + +def BadRequest(extra_headers=()): + return Simple('Bad request.', http_status.BAD_REQUEST, list(extra_headers)) + + +def NotImplemented(extra_headers=()): + return Simple('Not implemented.', http_status.NOT_IMPLEMENTED, list(extra_headers)) + + +def ServerError(extra_headers=()): + return Simple('An internal server error has occurred. ' + 'Please try again later.', + http_status.SERVER_ERROR, + list(extra_headers), + exc_info=sys.exc_info()) + + +def NotFound(extra_headers=()): + return Simple('Not found.', http_status.NOT_FOUND, list(extra_headers)) + + +def Created(extra_headers=()): + return Simple('Created.', http_status.CREATED, list(extra_headers)) + + +def NotModified(extra_headers=()): + def app(environ, start_response): + start_response(http_status.NOT_MODIFIED, list(extra_headers)) + return [] + return app + + +def MovedPermanently(location, extra_headers=()): + return Simple('The requested resource has moved to ' + '%(location)s.' % locals(), + http_status.MOVED_PERMANENTLY, + [('Location', location)] + list(extra_headers)) + + +def SeeOther(location, extra_headers=()): + return Simple('The requested resource was found at ' + '%(location)s.' % locals(), + http_status.SEE_OTHER, + [('Location', location)] + list(extra_headers)) + + +def RangeNotSatisfiable(size, extra_headers=()): + return Simple('Requested range not satisfiable.', + http_status.RANGE_NOT_SATISFIABLE, + [('Content-range', '*/%d' % (size,))] + list(extra_headers)) + + +def HelloWorld(extra_headers=()): + return Simple('Hello World!', ctype='text/plain', headers=list(extra_headers)) + + +def Options(methods, extra_headers=()): + methods = ', '.join(methods) + return Simple('The requested resource supports the following methods: ' + + methods, headers=[('Allow', methods)] + list(extra_headers)) + + +def MethodNotAllowed(methods, extra_headers=()): + return Simple('Method not allowed.', + http_status.METHOD_NOT_ALLOWED, + [('Allow', ', '.join(methods))] + list(extra_headers)) + + +def Forbidden(extra_headers=()): + return Simple('Forbidden.', + http_status.FORBIDDEN, + list(extra_headers)) + + +def Unauthorized(challenge, extra_headers=()): + return Simple('Unauthorized.', + http_status.UNAUTHORIZED, + [('WWW-Authenticate', challenge)] + list(extra_headers)) + +def Teapot(extra_headers=()): + return Simple("I'm a teapot.", + http_status.IM_A_TEAPOT, + list(extra_headers)) diff --git a/lib/wsgi_util/http_status.py b/lib/wsgi_util/http_status.py new file mode 100644 index 0000000..5064dad --- /dev/null +++ b/lib/wsgi_util/http_status.py @@ -0,0 +1,57 @@ +def messages(): + status = dict( + CONTINUE = (100, "Continue"), + SWITCHING_PROTOCOLS = (101, "Switching Protocols"), + + OK = (200, "OK"), + CREATED = (201, "Created"), + ACCEPTED = (202, "Accepted"), + NON_AUTHORITATIVE = (203, "Non-Authoritative Information"), + NO_CONTENT = (204, "No Content"), + RESET_CONTENT = (205, "Reset Content"), + PARTIAL_CONTENT = (206, "Partial Content"), + + MULTIPLE_CHOICES = (300, "Multiple Choices"), + MOVED_PERMANENTLY = (301, 
"Moved Permanently"), + FOUND = (302, "Found"), + SEE_OTHER = (303, "See Other"), + NOT_MODIFIED = (304, "Not Modified"), + USE_PROXY = (305, "Use Proxy"), + TEMP_REDIRECT = (307, "Temporary Redirect"), + + BAD_REQUEST = (400, "Bad Request"), + UNAUTHORIZED = (401, "Unauthorized"), + PAYMENT_REQUIRED = (402, "Payment Required"), + FORBIDDEN = (403, "Forbidden"), + NOT_FOUND = (404, "Not Found"), + METHOD_NOT_ALLOWED = (405, "Method Not Allowed"), + NOT_ACCEPTABLE = (406, "Not Acceptable"), + PROXY_AUTH_REQUIRED = (407, "Proxy Authentication Required"), + REQUEST_TIME_OUT = (408, "Request Time-out"), + CONFLICT = (409, "Conflict"), + GONE = (410, "Gone"), + LENGTH_REQUIRED = (411, "Length Required"), + PRECONDITION_FAILED = (412, "Precondition Failed"), + ENTITY_TOO_LARGE = (413, "Request Entity Too Large"), + URI_TOO_LARGE = (414, "Request-URI Too Large"), + UNSUPPORTED_MEDIA_TYPE = (415, "Unsupported Media Type"), + RANGE_NOT_SATISFIABLE = (416, "Requested Range Not Satisfiable"), + EXPECTATION_FAILED = (417, "Expectation Failed"), + IM_A_TEAPOT = (418, "I am a teapot"), + + SERVER_ERROR = (500, "Internal Server Error"), + NOT_IMPLEMENTED = (501, "Not Implemented"), + BAD_GATEWAY = (502, "Bad Gateway"), + SERVICE_UNAVAILABLE = (503, "Service Unavailable"), + GATEWAY_TIME_OUT = (504, "Gateway Time-out"), + VERSION_NOT_SUPPORTED = (505, "HTTP Version Not Supported"), + ) + + messages = dict(status.itervalues()) + status = dict((k, '%d %s' % v) for k, v in status.iteritems()) + status['messages'] = messages + return status + +messages = messages() +__all__ = list(messages.keys()) +locals().update(messages) diff --git a/lib/wsgi_util/json_util.py b/lib/wsgi_util/json_util.py new file mode 100644 index 0000000..c9fe700 --- /dev/null +++ b/lib/wsgi_util/json_util.py @@ -0,0 +1,30 @@ +import json +from urlparse import parse_qs + +from wsgi_util import http_status + + +def dump_json(data, environ, start_response): + try: + callback = parse_qs(environ['QUERY_STRING'])['callback'][0] + except (TypeError, ValueError, IndexError, KeyError): + data = json.dumps(data) + else: + data = '%s(%s)' % (callback, json.dumps(data)) + start_response(http_status.OK, [('Content-type', 'application/javascript'), + ('Content-length', str(len(data)))]) + return [data] + + +dump_jsonp = dump_json + + +def read_json(app): + def read_json(environ, start_response): + data = ''.join(environ['wsgi.input']) + try: + environ['json_data'] = json.loads(data) + except ValueError: + pass + return app(environ, start_response) + return read_json diff --git a/lib/wsgi_util/post_util.py b/lib/wsgi_util/post_util.py new file mode 100644 index 0000000..d35c66b --- /dev/null +++ b/lib/wsgi_util/post_util.py @@ -0,0 +1,16 @@ +from urlparse import parse_qs + + +def read_postdata(app): + def read_postdata(environ, start_response): + data = ''.join(environ['wsgi.input']) + environ['post_data'] = parse_qs(data) + return app(environ, start_response) + return read_postdata + + +def read_querydata(app): + def read_querydata(environ, start_response): + environ['query_data'] = parse_qs(environ['QUERY_STRING']) + return app(environ, start_response) + return read_querydata diff --git a/lib/wsgi_util/router.py b/lib/wsgi_util/router.py new file mode 100644 index 0000000..573286b --- /dev/null +++ b/lib/wsgi_util/router.py @@ -0,0 +1,27 @@ +'''A wsgi middleware that dispatches requests.''' + +from wsgi_util import http + + +class router(object): + def __init__(self, routes=()): + self.routes = routes + + def __call__(self, environ, start_response): 
+ if environ['REQUEST_METHOD'] == 'OPTIONS' and environ['PATH_INFO'] == '*': + return http.HelloWorld()(environ, start_response) + for rx, methods, application in self.routes: + m = rx.match(environ['PATH_INFO']) + if m is None: + continue + if methods and environ['REQUEST_METHOD'] not in methods: + if environ['REQUEST_METHOD'] == 'OPTIONS': + return http.Options(methods)(environ, start_response) + return http.MethodNotAllowed(methods)(environ, start_response) + environ['SCRIPT_NAME'] += m.group(0) + environ['PATH_INFO'] = environ['PATH_INFO'][m.end():] + environ['router.args'] = m.groups() + environ['router.kwargs'] = m.groupdict() + return application(environ, start_response) + else: + return http.NotFound()(environ, start_response) diff --git a/netkes/Pandora/__init__.py b/netkes/Pandora/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/netkes/Pandora/https.py b/netkes/Pandora/https.py new file mode 100644 index 0000000..5434de0 --- /dev/null +++ b/netkes/Pandora/https.py @@ -0,0 +1,175 @@ +"""A certificate-validating HTTPS handler for urllib2. + +http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python +""" + +import httplib +import re +import socket +import urllib2 +import ssl +import sys + +_KES_CERT = ''' +-----BEGIN CERTIFICATE----- +MIIFkDCCA3gCCQCdG/jvT/y4VjANBgkqhkiG9w0BAQUFADCBiTELMAkGA1UEBhMC +VVMxETAPBgNVBAgTCElsbGlub2lzMRAwDgYDVQQHEwdDaGljYWdvMRcwFQYDVQQK +Ew5TcGlkZXJPYWssIEluYzEZMBcGA1UEAxMQTWF0dGhldyBFcmlja3NvbjEhMB8G +CSqGSIb3DQEJARYSbWF0dEBzcGlkZXJvYWsuY29tMB4XDTExMDkxMjE1MTMxOFoX +DTIxMDkxMTE1MTMxOFowgYkxCzAJBgNVBAYTAlVTMREwDwYDVQQIEwhJbGxpbm9p +czEQMA4GA1UEBxMHQ2hpY2FnbzEXMBUGA1UEChMOU3BpZGVyT2FrLCBJbmMxGTAX +BgNVBAMTEE1hdHRoZXcgRXJpY2tzb24xITAfBgkqhkiG9w0BCQEWEm1hdHRAc3Bp +ZGVyb2FrLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKqhrhuH +NnCGYvgtnPEF1dhi2JGtwhDi0ep/EuQbhbGjrfLk12QB4NI2yK4Bxf1Aogl2yPiD +3BSgz70rGsBy0nNbHnfRJDaRj8OqxpcsWjyGns9Yw79GeJUB/3Zq/DBHCbJeHOux +nJt4dW1ZyvEYMQjA8SlmbobzMpSvCwAHHkuGIf5RDr6M8ZaaN+pQ1zZm5dBJgq9D +SuK8fpKO4DULTdFeaT225kWFJXx+8jgbhJNvv8PE5pacspwZ4oWO38ThhMz3oCG6 +kAa6w8mazmxTeNeG95UUHLUbl+2Gj7cI+JKR8IQKPiDr7ryqvVoiPbvwkOfAssor +VsNNjozaEOgJ+64Cj3ZGTl1cfeFwdQfsqy5JjH2ATKF/VZUjBq8ZYy3Z6GGMffnF +PfCE/I/cpgT/GsKKT7jJYeFGr1QPAb0iy0LG6BtLI2SQ+sndF842JoIKuFZAU72m +8Mlh4Nud3wxhBtw3pP8dDOBOjB+VkvElOE7hdaIUd8RL8+2EQiZZmRRVRzxC+vld +WatjnB0QzCxXaPTHALLQlB2xHA4K5lXbj/mWhwZUY1sLPYOzBbLclZVIBzUZrryI +C5+qI3Ce1OMQHz+l9WTfGmHC+um8IWRi8N7UKu19UMji3qdsz6sYW2t67y1gWkpX +VX1NHdOlpHFvDEvJiT1MmMl7kcw/OmH24fa/AgMBAAEwDQYJKoZIhvcNAQEFBQAD +ggIBAEsnoP4lb7CB+kt4pRM2VBUO4Uhxuc/V3cDbCei8XAMFco24/MwCFlyy3WVL +Mo6V+Sx2s2s02dkfDVhFIORyOIV00Yq6CTDhsmfy6XStY10KcPNo+3MajIznCgMp +TgUNoFMfs5Z5kmNzJtz47DoZs3GP5V3V6tajUfjlAbAmjJv90xnJe856TkzAXq8A +EKI2TzmamgsarNyTCCBVNRtaUFC7w3DN0Oi9AyjVEGzuJPPOGiKvzv5gUnJ3DLoe +G2/E+3IQVbuPO6LyFlNxraQM6UHLEylkXmxemFFiV2vIsHqPxMM9MSL+rnt0335F +s7st7OsFbjRBw77jiPgWY3MA5O5C6Vhcw7N7NtgvuaHWn2GLZnjdpnKzGxSABfqD +rI5kNzUVusy9+XkbC65hEZWF5eUdP2u0+81KSHFRF5wnnCHZuXwNr68QKjZ5tE6X +3cXF4MazbEZq2ywu/u2B0gKeqTZX/6vMWK3lLyiWHftI/3UU3UqTfsx8nET9deB7 +vXHy6Uv5njAmG9EY3QF1XJTiFOKtjY63wbfBQf9FTQ4wd/JV8XnTZJ2i8q1A9ZWh +2+aZjKVxajYu3ezq3LVCXXRz0xPh4/6oBGcf2KHHmXiMsC5yBadld4zzaqdAlPi8 +v6Yf7goycsxixzuwR/o3UK0z2bGssb4gkYJxEksACgOd+yRM +-----END CERTIFICATE----- +''' + + +def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, bind_address=None): + """Connect to *address* and return the socket object. + + Convenience function. 
Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. + """ + + msg = "getaddrinfo returns an empty list" + host, port = address + for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket.socket(af, socktype, proto) + if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if bind_address is not None: + sock.bind(bind_address) + sock.connect(sa) + return sock + + except socket.error, msg: + if sock is not None: + sock.close() + + raise socket.error, msg + + +class InvalidCertificateException(httplib.HTTPException, urllib2.URLError): + def __init__(self, host, cert, reason): + httplib.HTTPException.__init__(self) + self.host = host + self.cert = cert + self.reason = reason + + def __str__(self): + return ('Host %s returned an invalid certificate (%s) %s\n' % + (self.host, self.reason, self.cert)) + + +class CertValidatingHTTPSConnection(httplib.HTTPConnection): + default_port = httplib.HTTPS_PORT + + def __init__(self, host, port=None, key_file=None, cert_file=None, + ca_certs=None, strict=None, bind_address=None, **kwargs): + httplib.HTTPConnection.__init__(self, host, port, strict, **kwargs) + self.key_file = key_file + self.cert_file = cert_file + self.ca_certs = ca_certs + if self.ca_certs: + self.cert_reqs = ssl.CERT_REQUIRED + else: + self.cert_reqs = ssl.CERT_NONE + self.bind_address = bind_address + + def _GetValidHostsForCert(self, cert): + if 'subjectAltName' in cert: + return [x[1] for x in cert['subjectAltName'] + if x[0].lower() == 'dns'] + else: + return [x[0][1] for x in cert['subject'] + if x[0][0].lower() == 'commonname'] + + def _ValidateCertificateHostname(self, cert, bincert, hostname): + hosts = self._GetValidHostsForCert(cert) + for host in hosts: + host_re = host.replace('.', '\.').replace('*', '[^.]*') + if re.search('^%s$' % (host_re,), hostname, re.I): + return True + + # If we cannot validate against the hostname, try against the + # KES certificate. 
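+        # (The fallback below pins the peer to the exact DER bytes of
+        # _KES_CERT above; an identity pin, not a CA validation.)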
+        binary_kes_cert = ssl.PEM_cert_to_DER_cert(_KES_CERT)
+        if binary_kes_cert == bincert:
+            return True
+
+        return False
+
+    def connect(self):
+        sock = create_connection((self.host, self.port), self.timeout, self.bind_address)
+        self.sock = ssl.wrap_socket(sock, keyfile=self.key_file,
+                                    certfile=self.cert_file,
+                                    cert_reqs=self.cert_reqs,
+                                    ca_certs=self.ca_certs)
+        if self.cert_reqs & ssl.CERT_REQUIRED:
+            cert = self.sock.getpeercert()
+            bincert = self.sock.getpeercert(binary_form=True)
+            # maxsplit=1: strip an optional ':port' suffix from the host.
+            hostname = self.host.split(':', 1)[0]
+            if not self._ValidateCertificateHostname(cert, bincert, hostname):
+                raise InvalidCertificateException(hostname, cert,
+                                                  'hostname mismatch')
+
+
+class VerifiedHTTPSHandler(urllib2.HTTPSHandler):
+    def __init__(self, **kwargs):
+        urllib2.AbstractHTTPHandler.__init__(self)
+        self._connection_args = kwargs
+
+    def https_open(self, req):
+        def http_class_wrapper(host, **kwargs):
+            full_kwargs = dict(self._connection_args)
+            full_kwargs.update(kwargs)
+            return CertValidatingHTTPSConnection(host, **full_kwargs)
+
+        try:
+            return self.do_open(http_class_wrapper, req)
+        except urllib2.URLError, e:
+            if type(e.reason) == ssl.SSLError and e.reason.args[0] == 1:
+                raise InvalidCertificateException(req.host, '',
+                                                  e.reason.args[1])
+            raise
+
+    https_request = urllib2.HTTPSHandler.do_request_
+
+
+#if __name__ == "__main__":
+#    if len(sys.argv) != 3:
+#        print "usage: python %s CA_CERT URL" % sys.argv[0]
+#        exit(2)
+
+#    handler = VerifiedHTTPSHandler(ca_certs = sys.argv[1])
+#    opener = urllib2.build_opener(handler)
+#    print opener.open(sys.argv[2]).read()
diff --git a/netkes/Pandora/serial.py b/netkes/Pandora/serial.py
new file mode 100644
index 0000000..b65c039
--- /dev/null
+++ b/netkes/Pandora/serial.py
@@ -0,0 +1,102 @@
+'''! @package Pandora.serial
+Abstract away from the "latest hotness" in serialization
+
+Since we aren't sure that we will forever stick with cerealizer,
+having already once switched from pickle, we've created this module to
+centralize our implementation.
+
+Use register to note classes which are safe to serialize.
+
+@complete OK 20080102 bryon
+'''
+
+import sys
+from itertools import chain
+import Crypto.PublicKey.RSA
+
+# Cerealizer has mostly the same interface as pickle, so we just
+# import its functions here.
+
+from cerealizer import dump, dumps, load, loads, register, register_alias
+from cerealizer import NotCerealizerFileError, NonCerealizableObjectError, \
+    EndOfFile
+
+NotSerializerFileError = NotCerealizerFileError
+NotSerializableObjectError = NonCerealizableObjectError
+
+cryptoclass = ('Crypto.PublicKey.RSA', 'RSAobj')
+
+if hasattr(Crypto.PublicKey.RSA, 'RSAImplementation'):
+    cryptoclass = ('Crypto.PublicKey.RSA', '_RSAobj')
+
+## Our serializable classes/modules
+
+## 20110725 MattE- Shortened from the original source, no other use of
+## Pandora libs means no need to register other Pandora types here.
+known = [
+    cryptoclass,
+    ]
+
+aliases = None
+
+if not hasattr(Crypto.PublicKey.RSA, 'RSAImplementation'):
+    aliases = {
+        'Crypto.PublicKey.RSA': (
+            ('RSAobj', 'RSAobj_c'),
+        )
+    }
+else:
+    aliases = {
+        'Crypto.PublicKey.RSA': (
+            ('_RSAobj', 'RSAobj'),
+            ('_RSAobj', 'RSAobj_c'),
+        )
+    }
+
+_already_called = False
+def register_all(extras = []):
+    """! Register our known serializables.
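+
+    A sketch of the intended call pattern (register_all() must run before any
+    dump/load of RSA key objects; netkes_agent's app_factory does this at
+    import time):
+
+        from Pandora import serial
+        serial.register_all()
+        blob = serial.dumps(keypair)   # keypair: a hypothetical RSA key object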
+ + @param extras extra class/mod names to allow serialization + @good OK 20080102 bryon + """ + global _already_called + if _already_called: + return + _already_called = True + for module_name, class_names in chain(known, extras): + + before = sys.modules + + if type(class_names) == str: + class_names = [ class_names ] + + + module = __import__(module_name, globals(), locals(), + class_names) + for class_name in class_names: + clas = getattr(module, class_name) + # this has to be ugly to maintain backwards crypto compatibility + # internal classnames that cerealizer picks up/picked up on changed from PyCrypto 2.0.1 -> 2.1.0 + # as such we need to alias both the generated and read it classnames. + # if any more changes need to happen here, or this gets called more than once per process + # or we start multiprocessing for whatever reason, register_all will need a refactoring. + if class_name == '_RSAobj': + register(clas, classname = 'Crypto.PublicKey.RSA.RSAobj') + else: + register(clas) + + if module_name in aliases: + for alias in aliases[module_name]: + clas = getattr(module,alias[0]) + register_alias(clas, '.'.join((module_name, alias[1]))) + + sys.modules = before + +if not hasattr(Crypto.PublicKey.RSA,'RSAImplementation'): + Crypto.PublicKey.RSA.construct = Crypto.PublicKey.RSA.construct_py + Crypto.PublicKey.RSA.generate = Crypto.PublicKey.RSA.generate_py + + + __all__ = [dump, dumps, load, loads, register, register_all, + NotCerealizerFileError] diff --git a/netkes/__init__.py b/netkes/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/netkes/account_mgr/__init__.py b/netkes/account_mgr/__init__.py new file mode 100644 index 0000000..693c8d1 --- /dev/null +++ b/netkes/account_mgr/__init__.py @@ -0,0 +1,113 @@ +''' +__init__.py + +Init and common functions for the OpenManage user management system. + +(c) 2011 SpiderOak, Inc. 
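+
+Authentication dispatch is keyed off the 'auth_method' configuration value;
+an illustrative agent_config fragment (value invented):
+
+    "auth_method": "ldap"    # or "radius" / "local"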
+''' + +import logging +import psycopg2 +from accounts_api import Api +from contextlib import contextmanager + +SELECT_ADMIN_TOKEN = ''' +select no_devices_only, single_use_only, + case when exists(select 1 from admin_token_avatar_use where token=%(token)s) then true + else false end as token_used +from admin_setup_tokens +where token=%(token)s and expiry > now() and + exists(select 1 from users where email=%(email)s and enabled) +''' + +INSERT_ADMIN_AUTH_TOKEN_AVATAR_USE = ''' +insert into admin_token_avatar_use (token, avatar_id) +values (%(token)s, (select avatar_id from users where email=%(email)s)) +''' + +@contextmanager +def get_cursor(config): + try: + conn = psycopg2.connect(database=config['db_db'], + user=config['db_user'], + password=config['db_pass'], + host=config['db_host']) + yield conn.cursor() + except: + conn.rollback() + raise + else: + conn.commit() + +def get_api(config): + return Api.create('https://spideroak.com/apis/accounts/v1/', + config['api_user'], + config['api_password'],) + +def admin_token_auth(config, username, password): + log = logging.getLogger("admin_token_auth") + log.debug('checking admin auth code for username: %s' % username) + api = get_api(config) + user_token = dict(email=username, token=password) + with get_cursor(config) as cur: + cur.execute(SELECT_ADMIN_TOKEN, user_token) + if cur.rowcount != 1: + return False + + no_devices_only, single_use_only, token_used = cur.fetchone() + + if no_devices_only and api.list_devices(username): + return False + + if single_use_only and token_used: + return False + + with get_cursor(config) as cur: + cur.execute(INSERT_ADMIN_AUTH_TOKEN_AVATAR_USE, user_token) + + return True + +def authenticator(config, username, password): + """Authenticates users against OpenManage. + + This calls the correct authentication source to auth users. + + We expect a user_source module to offer a "can_auth" function taking three arguments: + * The config dictionary + * The username trying to authenticate + * Their password + + The net_kes webserver should not at any time have to know or care how we + are actually authenticating users, only that we do. + """ + + log = logging.getLogger("authenticator") + + auth_method = config.get('auth_method', None) + auth_source = None + + print 'checking' + if admin_token_auth(config, username, password): + return True + + if auth_method == 'ldap': + log.debug("Attempting to use LDAP simple bind for authenticating %s" % (username,)) + from account_mgr.user_source import ldap_source + auth_source = ldap_source + + elif auth_method == 'radius': + log.debug("Attempting to use RADIUS authentication for %s" % (username,)) + from account_mgr.user_source import radius_source + auth_source = radius_source + + elif auth_method == 'local': + log.debug("Attempting to use local authentication for %s" % (username,)) + from account_mgr.user_source import local_source + auth_source = local_source + + else: + log.error("No user authentication source provided, please check agent_config.") + log.warn("Returning failed authentication for %s" % (username,)) + return False + + return auth_source.can_auth(config, username, password) diff --git a/netkes/account_mgr/account_runner.py b/netkes/account_mgr/account_runner.py new file mode 100644 index 0000000..ec29edf --- /dev/null +++ b/netkes/account_mgr/account_runner.py @@ -0,0 +1,172 @@ +""" +account_runner.py +(c) 2011 SpiderOak, Inc + +Runs account manipulation options against the local DB and the Billing API. 
+ +The functions here are meant to be quasi-transactional; if there's an error raised through the +billing API handling functions, we will write out what we can to the DB to keep state consistent. +""" +import inspect + +import logging + +import api_interface + + +class BailApiCall(Exception): + pass + +class AccountRunner(object): + """ + Manages running account manipulation operations between our local DB and + the SpiderOak BillingAPI. + """ + _ADD_USERS_STATEMENT = ''' + INSERT INTO users + (uniqueid, email, avatar_id, givenname, surname, group_id) VALUES + (%(uniqueid)s,%(email)s,%(avatar_id)s,%(firstname)s,%(lastname)s,%(group_id)s); + ''' + def __init__(self, config, db_conn): + self._log = logging.getLogger("AccountRunner") + self._promo_code = config.get("promo_code", None) + self._db_conn = db_conn + + def runall(self, changes_dict): + """ + Commits all changes presented in the changes_dict. + + Keys in changes_dict must conform to the rest of the public APIs for this class. + + :param changes_dict: Dictionary of user changes. + """ + for action in changes_dict.keys(): + fun = getattr(self, action) + ok_users, fail_users = fun(changes_dict[action]) + if len(fail_users): + self._log.error("Got error during runall, aborted on action: %s" % (action,)) + break + + def create(self, users): + """ + Creates users SpiderOak users and updates the local DB with the user list. + + :param users: List of users to create + :returns tuple(list, list): (created users, failed users). + """ + + try: + created_users = self._api_create_users(users) + except BailApiCall as e: + (created_users, ) = e.args + + cur = self._db_conn.cursor() + cur.executemany(self._ADD_USERS_STATEMENT, created_users) + + return ( + created_users, + [user for user in users if user not in created_users], + ) + + + def enable(self, users): + """ + Toggles the enabled status of users in the SpiderOak DB. + + :param users: List of users to enable. + :returns tuple(list, list): (created users, failed users). + """ + return self._run_generic(api_interface.activate_user, users, + "UPDATE users SET enabled=true WHERE avatar_id=%(avatar_id)s") + + def disable(self, users): + """Disables users in SpiderOak's user DB. + + :param users: list of users to disable + :returns tuple(list, list): (success users, failed users) + """ + + return self._run_generic(api_interface.deactivate_user, users, + "UPDATE users SET enabled=false WHERE avatar_id=%(avatar_id)s") + + def group(self, users): + """Assigns users to plans in the SO user DB. + + :param users: list of users to set the plan for. + :returns tuple(list, list): (success users, failed users) + """ + + return self._run_generic(api_interface.set_user_group, users, + "UPDATE users SET group_id=%(group_id)s WHERE avatar_id=%(avatar_id)s") + + def email(self, users): + """Changes user email addresses. + + :param users: list of users to set email addresses for. 
+ :returns tuple(list, list): (success users, failed users) + """ + + return self._run_generic(api_interface.change_email, users, + "UPDATE users SET email=%(email)s WHERE avatar_id=%(avatar_id)s") + + def _run_generic(self, fun, users, sql_statement): + """Internal function to run generic actions with both the API and DB.""" + try: + complete_users = self._api_run_generic(fun, users) + except BailApiCall as e: + (complete_users, ) = e.args + + cur = self._db_conn.cursor() + cur.executemany(sql_statement, complete_users) + + return ( + complete_users, + [user for user in users if user not in complete_users], + ) + + def _api_create_users(self, users): + """Internal function to create users via the billing API.""" + results = list() + for user in users: + try: + result = api_interface.create_user(user, self._promo_code) + except api_interface.ApiActionFailedError as e: + import traceback + traceback.print_exc() + self._log.error('Got ApiActionFailedError: %s' % e) + raise BailApiCall(results) + else: + user['avatar_id'] = result['avatar_id'] + results.append(user) + + self._log.info("created user %s" % (user['email'],)) + + return results + + def _api_run_generic(self, fun, users): + """Internal function to run API calls given the specific API function.""" + + results = [] + # Start building the arguments dictionary. + argdict = {} + args = inspect.getargspec(fun) + if 'promo_code' in args.args: + argdict['promo_code'] = self._promo_code + + # In the event of getting an API exception, we still need to + # update the DB with what we've done to keep things consistent, so + # we catch the error and bail with the current state of the + # results array. + for user in users: + argdict['user'] = user + try: + result = fun(**argdict) + except api_interface.ApiActionFailedError as e: + import traceback + traceback.print_exc() + self._log.error('Function %s got ApiActionFailedError: %s' % (fun, e,)) + raise BailApiCall(results) + else: + results.append(user) + + return results diff --git a/netkes/account_mgr/accounts_api.py b/netkes/account_mgr/accounts_api.py new file mode 100644 index 0000000..1b121ab --- /dev/null +++ b/netkes/account_mgr/accounts_api.py @@ -0,0 +1,273 @@ +import json +import urllib +import urllib2 + +from api_client import ApiClient + + +class Api(object): + class Error(Exception): + pass + class BadParams(Error): + pass + class NotFound(Error): + pass + class DuplicateGroupName(Error): + pass + class DuplicateUsername(Error): + pass + class DuplicateEmail(Error): + pass + class BadPlan(Error): + pass + class BadGroup(Error): + pass + class QuotaExceeded(Error): + pass + class EmailNotSent(Error): + pass + + @classmethod + def create(cls, base, username, password): + """Factory method using default ApiClient class.""" + client = ApiClient(base, username, password) + return cls(client) + + def __init__(self, client): + self.client = client + + def ping(self): + return self.client.get_json('ping') + + ### Plans + + def list_plans(self): + return self.client.get_json('plans') + + ### Quota + + def quota(self): + return self.client.get_json('partner/quota') + + ### Features + + def enterprise_features(self): + return self.client.get_json('partner/features') + + ### Settings + + def enterprise_settings(self): + return self.client.get_json('partner/settings') + + def update_enterprise_settings(self, settings): + try: + return self.client.post_json('partner/settings', settings) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() + raise + + def 
update_enterprise_password(self, new_password): + try: + return self.client.post_json('partner/password', new_password) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() + raise + + ### Groups + + def list_groups(self): + return self.client.get_json('groups/') + + def search_groups(self, name): + return self.client.get_json('groups/?search=%s' % urllib.quote(name)) + + def create_group(self, group_info): + try: + resp = self.client.post_json_raw_response( + 'groups/', group_info) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() + elif err.code == 409: + data = json.loads(err.read()) + if 'name' in data['conflicts']: + raise self.DuplicateGroupName() + elif 'plan_id' in data['conflicts']: + raise self.BadPlan() + raise + return int(resp.info()['location'].rsplit('/', 1)[-1]) + + def get_group(self, group_id): + try: + return self.client.get_json('groups/%d' % (group_id,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def edit_group(self, group_id, group_info): + try: + self.client.post_json('groups/%d' % (group_id,), group_info) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + elif err.code == 400: + raise self.BadParams() + elif err.code == 409: + data = json.loads(err.read()) + if 'name' in data['conflicts']: + raise self.DuplicateGroupName() + elif 'plan_id' in data['conflicts']: + raise self.BadPlan() + elif 'avatars_over_quota' in data['conflicts']: + raise self.QuotaExceeded() + raise + + def delete_group(self, group_id, new_group_id=None): + try: + if new_group_id: + self.client.delete('groups/%d?move_to=%d' % (group_id, new_group_id)) + else: + self.client.delete('groups/%d' % (group_id,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + ### Shares + + def _create_query_string(self, limit, offset): + get_params = dict() + if limit: + get_params['limit'] = limit + if offset: + get_params['offset'] = offset + query_string = '' + if get_params: + query_string = '?%s' % urllib.urlencode(get_params) + return query_string + + def list_shares_for_brand(self, limit=None, offset=None): + query_string = self._create_query_string(limit, offset) + return self.client.get_json('shares/%s' % query_string) + + ### Users + + def list_users(self, limit=None, offset=None): + query_string = self._create_query_string(limit, offset) + return self.client.get_json('users/%s' % query_string) + + def search_users(self, name_or_email, limit=None, offset=None): + query_string = self._create_query_string(limit, offset) + if query_string: + query_string = '&' + query_string + return self.client.get_json('users/?search=%s%s' % (urllib.quote(name_or_email), query_string)) + def get_user_count(self): + return self.client.get_json('users/?action=user_count')['user_count'] + + def create_user(self, user_info): + try: + return self.client.post_json('users/', user_info) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() + elif err.code == 409: + data = json.loads(err.read()) + if 'username' in data['conflicts']: + raise self.DuplicateUsername() + if 'email' in data['conflicts']: + raise self.DuplicateEmail() + elif 'plan_id' in data['conflicts']: + raise self.BadPlan() + elif 'group_id' in data['conflicts']: + raise self.BadGroup() + raise + + def get_user(self, username_or_email): + try: + return self.client.get_json( + 'users/%s' % (username_or_email,)) + except urllib2.HTTPError, err: + if err.code == 404: + 
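+                # A 404 from 'users/<name>' means the backend has no such
+                # user; surface it as NotFound.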
raise self.NotFound() + raise + + def list_devices(self, username_or_email): + try: + return self.client.get_json( + 'users/%s/devices' % (username_or_email,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def list_shares(self, username_or_email): + try: + return self.client.get_json( + 'users/%s/shares/' % (username_or_email,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def get_share(self, username_or_email, room_key): + try: + return self.client.get_json( + 'users/%s/shares/%s' % (username_or_email, room_key)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def edit_share(self, username_or_email, room_key, enable): + action = 'enable' if enable else 'disable' + try: + return self.client.post_json( + 'users/%s/shares/%s?action=%s' % (username_or_email, room_key, action), {}) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def edit_user(self, username_or_email, user_info): + try: + self.client.post_json( + 'users/%s' % (username_or_email,), user_info) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + elif err.code == 400: + raise self.BadParams() + elif err.code == 402: + raise self.QuotaExceeded() + elif err.code == 409: + data = json.loads(err.read()) + if 'email' in data['conflicts']: + raise self.DuplicateEmail() + elif 'group_id' in data['conflicts']: + raise self.BadGroup() + elif 'plan_id' in data['conflicts']: + raise self.BadPlan() + raise + + def delete_user(self, username_or_email): + try: + self.client.delete('users/%s' % (username_or_email,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + raise + + def send_activation_email(self, username_or_email): + try: + self.client.post('users/%s?action=sendactivationemail' % ( + username_or_email,), '') + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() + elif err.code == 409: + raise self.EmailNotSent() + raise + diff --git a/netkes/account_mgr/api_client.py b/netkes/account_mgr/api_client.py new file mode 100644 index 0000000..c59a183 --- /dev/null +++ b/netkes/account_mgr/api_client.py @@ -0,0 +1,77 @@ +import json +import urllib2 +from base64 import b64encode +from urlparse import urljoin + +from netkes.Pandora.https import VerifiedHTTPSHandler + + +_DEFAULT_HANDLERS = [ + urllib2.ProxyHandler, + urllib2.HTTPDefaultErrorHandler, + urllib2.HTTPRedirectHandler, + urllib2.HTTPErrorProcessor, +] +def _make_opener(url): + opener = urllib2.OpenerDirector() + for handler_class in _DEFAULT_HANDLERS: + opener.add_handler(handler_class()) + opener.add_handler(VerifiedHTTPSHandler()) + return opener + + +class RequestWithMethod(urllib2.Request): + _method = None + + def set_method(self, method): + self._method = method + + def get_method(self): + return self._method or urllib2.Request.get_method(self) + + +class ApiClient(object): + def __init__(self, base, username, password): + self.base = base + self.username = username + self.password = password + self.opener = _make_opener(base) + + def open(self, path, data=None, headers=None, method=None): + if headers is None: + headers = {} + if ( + self.username and + 'authorization' not in set(k.lower() for k in headers) + ): + headers['authorization'] = 'Basic %s' % ( + b64encode('%s:%s' % ( + self.username, self.password + )).strip(), + ) + req = RequestWithMethod(urljoin(self.base, path), data, headers) + req.set_method(method) + 
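+        # Dispatch through the opener built by _make_opener(), which swaps
+        # in the certificate-verifying VerifiedHTTPSHandler.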
return self.opener.open(req)
+
+    def get(self, path):
+        return self.open(path)
+
+    def get_json(self, path):
+        return json.loads(self.get(path).read())
+
+    def post(self, path, data, headers=None):
+        if not isinstance(data, basestring):
+            # urlencode is not imported at module scope; import it here so
+            # dict payloads are form-encoded instead of raising NameError.
+            from urllib import urlencode
+            data = urlencode(data)
+        return self.open(path, data, headers)
+
+    def post_json_raw_response(self, path, data, headers=None):
+        return self.post(path, json.dumps(data), headers)
+
+    def post_json(self, path, data, headers=None):
+        body = self.post_json_raw_response(path, data, headers).read()
+        if body:
+            return json.loads(body)
+        return None
+
+    def delete(self, path, headers=None):
+        return self.open(path, headers=headers, method='DELETE')
diff --git a/netkes/account_mgr/api_interface.py b/netkes/account_mgr/api_interface.py
new file mode 100644
index 0000000..3cf65ba
--- /dev/null
+++ b/netkes/account_mgr/api_interface.py
@@ -0,0 +1,250 @@
+'''
+api_interface.py
+
+(c) 2011 SpiderOak, Inc.
+
+Provides an interface to the billing and new user APIs.
+
+'''
+
+import json
+import logging
+import re
+from urllib import quote
+import urllib2
+
+from common import get_config
+from Pandora import https
+
+API_URL_BASE = "%s/%s"
+NO_PLANS = "No storage plans provided."
+
+class ApiActionFailedError(Exception):
+    pass
+
+class ManipulateUserFailed(ApiActionFailedError):
+    pass
+
+class FetchInformationFailed(ApiActionFailedError):
+    pass
+
+
+_AVATAR_ID_URL = "users/%d"
+_EMAIL_URL = "users/byemail/%s"
+def _make_api_url_for_user(user):
+    """
+    Creates a string to reference the user in the SpiderOak Billing API.
+    Currently supports using either an avatar_id or email address.
+    """
+    if 'avatar_id' in user:
+        return _AVATAR_ID_URL % (user['avatar_id'],)
+    else:
+        return _EMAIL_URL % (quote(user['email']),)
+
+def _run_api_call(action, data=None):
+    '''
+    Runs a call against the SpiderOak API.
+    Returns a python object containing the response.
+    '''
+    log = logging.getLogger('run_api_call')
+    uri = API_URL_BASE % (get_config()['api_root'], action, )
+
+    auth_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
+    auth_mgr.add_password(realm=None, uri=uri,
+                          user=get_config()['api_user'],
+                          passwd=get_config()['api_password'])
+    auth_handler = urllib2.HTTPBasicAuthHandler(auth_mgr)
+
+    # Build a single opener carrying both the certificate-verifying HTTPS
+    # handler and the basic-auth handler; installing two openers in turn
+    # would leave only the most recently installed one active.
+    https_handler = https.VerifiedHTTPSHandler()
+    opener = urllib2.build_opener(https_handler, auth_handler)
+    urllib2.install_opener(opener)
+
+    log.debug("Trying with user %s" % (get_config()['api_user'],))
+
+    if data is None:
+        fh = urllib2.urlopen(uri)
+    else:
+        datastr = json.dumps(data)
+        fh = urllib2.urlopen(uri, datastr)
+
+    json_string = fh.read()
+    retr_data = json.loads(json_string)
+
+    return retr_data
+
+
+def create_user(user, promo_code=None):
+    '''
+    Uses the SpiderOak new user API to create a new user.
+    Returns the newly created user's data on success.
+    Raises ManipulateUserFailed on failure.
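+
+    A minimal illustrative call (values made up; the field names match
+    what this function sends to the API):
+
+        user = {'firstname': 'Jane', 'lastname': 'Doe',
+                'email': 'jane@example.com', 'group_id': 1}
+        created = create_user(user)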
+    '''
+
+    new_user_data = {"action": "create_user",
+                     "auto_username_seq": True,
+                     "firstname": user['firstname'],
+                     "lastname": user['lastname'],
+                     "email": user['email'],
+                     "group_id": user['group_id'],
+                     }
+    if promo_code is not None:
+        new_user_data["promo"] = promo_code
+
+    try:
+        result = _run_api_call("users/", new_user_data)
+    except Exception as e:
+        import traceback
+        traceback.print_exc()
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        # Interpolate the failure reason into the message; previously the
+        # result['reason'] lookup sat inside the string literal.
+        raise ManipulateUserFailed("%s: %s" % (user['email'], result['reason']))
+
+
+def set_user_group(user, promo_code=None):
+    '''
+    Sets the group_id of a specified avatar.
+    Raises ManipulateUserFailed on error.
+    '''
+    user_group_data = {"action": "set_group",
+                       "group_id": user['group_id'],
+                       }
+    if promo_code is not None:
+        user_group_data['promo_code'] = promo_code
+
+    try:
+        result = _run_api_call(_make_api_url_for_user(user),
+                               user_group_data)
+    except Exception as e:
+        import traceback
+        traceback.print_exc()
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        raise ManipulateUserFailed(result['reason'])
+
+
+def activate_user(user):
+    '''
+    Activates the given avatar.
+    Raises ManipulateUserFailed on error.
+    '''
+    activate_data = {"action": "set_enabled",
+                     "enabled": True,
+                     }
+    try:
+        result = _run_api_call(_make_api_url_for_user(user),
+                               activate_data)
+    except Exception as e:
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        raise ManipulateUserFailed(result['reason'])
+
+
+def deactivate_user(user):
+    '''
+    Deactivates the given avatar.
+    Raises ManipulateUserFailed on error.
+    '''
+    deactivate_data = {"action": "set_enabled",
+                       "enabled": False,
+                       }
+    try:
+        result = _run_api_call(_make_api_url_for_user(user),
+                               deactivate_data)
+    except Exception as e:
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        raise ManipulateUserFailed(result['reason'])
+
+
+def purge_user(user):
+    '''
+    Purges the given avatar's account.
+    Raises ManipulateUserFailed on error.
+    '''
+    purge_data = {"action": "purge_account", }
+    try:
+        result = _run_api_call(_make_api_url_for_user(user),
+                               purge_data)
+    except Exception as e:
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        raise ManipulateUserFailed(result['reason'])
+
+
+def change_email(user):
+    '''
+    Sets the email address of the given avatar.
+    Raises ManipulateUserFailed on error.
+    '''
+    change_data = {"action": "set_email",
+                   "email": user['email'],
+                   }
+    try:
+        # This accounts for the fact that we might pass in two email
+        # addresses when looking up the user by email.
+        if 'old_email' in user:
+            api_url = _make_api_url_for_user({'email': user['old_email']})
+        else:
+            api_url = _make_api_url_for_user(user)
+        result = _run_api_call(api_url, change_data)
+    except Exception as e:
+        raise ManipulateUserFailed(str(e))
+
+    if result['success']:
+        return result
+    else:
+        raise ManipulateUserFailed(result['reason'])
+
+
+def fetch_users():
+    '''
+    Returns a list of the users currently registered with SpiderOak.
+    Raises FetchInformationFailed on problem.
+    '''
+    try:
+        result = _run_api_call("users/")
+    except Exception as e:
+        raise FetchInformationFailed(str(e))
+
+    return result
+
+
+def fetch_plans(promo_code=None):
+    '''
+    Returns a list of the plans available to us.
+    Raises FetchInformationFailed on problem.
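+
+    (Illustrative note: each plan entry is a dict; the test suite expects
+    keys like 'group_id' and 'storage_gigs'.)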
+    '''
+    if promo_code is None:
+        action = "plans/"
+    else:
+        action = "plans/?promo=%s" % promo_code
+
+    try:
+        result = _run_api_call(action)
+    except Exception as e:
+        raise FetchInformationFailed(str(e))
+
+    if len(result) < 1:
+        raise FetchInformationFailed(NO_PLANS)
+
+    return result
+
diff --git a/netkes/account_mgr/cmdline_utils.py b/netkes/account_mgr/cmdline_utils.py
new file mode 100644
index 0000000..0725ef3
--- /dev/null
+++ b/netkes/account_mgr/cmdline_utils.py
@@ -0,0 +1,182 @@
+"""
+cmdline_utils.py
+
+Helper functions and classes for OpenManage command-line utilities.
+"""
+import csv
+import json
+import logging
+
+import api_interface
+from account_mgr.local_source import set_user_password, set_multi_passwords
+
+
+SETPW_REQUIRED_KEYS = frozenset(['email_addr', 'password'])
+CREATE_REQUIRED_KEYS = frozenset(['email_addr', 'given_name', 'surname', 'group_id'])
+SET_EMAIL_REQUIRED_KEYS = frozenset(['email_addr', 'new_email'])
+SET_GROUP_REQUIRED_KEYS = frozenset(['email_addr', 'group_id'])
+
+class UsersActionError(Exception):
+    pass
+
+class CSVMissingKeys(Exception):
+    pass
+
+def assure_keys(dict_reader, required_keys):
+    """
+    Reads from a csv.DictReader object and creates a list. Ensures
+    that required_keys are in every row from the DictReader.
+
+    :param dict_reader: The opened csv.DictReader object.
+    :param required_keys: Set of keys required in every row in the CSV file.
+    :returns list: list of change dicts.
+    """
+    rows = list()
+    for row in dict_reader:
+        keys = set(row.keys())
+        if required_keys <= keys:
+            rows.append(row)
+        else:
+            raise CSVMissingKeys("Missing one or more of required keys: %s" %
+                                 (required_keys, ))
+    return rows
+
+def run_csv_file(db_conn, filename, optdict):
+    """Runs the appropriate actions from a CSV file.
+
+    :param db_conn: DB connection object
+    :param filename: open CSV file object (csv.DictReader wants an iterable
+        of lines, not a filename string)
+    :param optdict: Options dictionary.
+    :returns int: number of successful user actions.
+    """
+
+    log = logging.getLogger("run_csv_file")
+    dict_reader = csv.DictReader(filename)
+
+    if 'setpw' in optdict:
+        user_dicts = assure_keys(dict_reader, SETPW_REQUIRED_KEYS)
+        # user_dicts is a list of row dicts, so pull the columns out of
+        # each row (indexing the list itself raised a TypeError).
+        emails = (row['email_addr'] for row in user_dicts)
+        pws = (row['password'] for row in user_dicts)
+        set_multi_passwords(db_conn, emails, pws)
+
+        # All done, so leave the function here.
+        return len(user_dicts)
+
+    success_count = 0
+    if 'create' in optdict:
+        # Runs the creation routine for each user.
+        user_dicts = assure_keys(dict_reader, CREATE_REQUIRED_KEYS)
+        for user in user_dicts:
+            result = api_interface.create_user(
+                {'firstname': user['given_name'],
+                 'lastname': user['surname'],
+                 'email': user['email_addr'],})
+
+            result['group_id'] = user['group_id']
+            try:
+                result2 = api_interface.set_user_group(result)
+            except api_interface.ApiActionFailedError as e:
+                log.error("User created with no group assignment: %s" %
+                          (user['email_addr'],))
+                raise e
+            success_count += 1
+
+    elif 'set_email' in optdict:
+        # Sets emails for each user.
+        user_dicts = assure_keys(dict_reader, SET_EMAIL_REQUIRED_KEYS)
+        for user in user_dicts:
+            result = api_interface.change_email(
+                {'email': user['new_email'],
+                 'old_email': user['email_addr'],})
+            success_count += 1
+    elif 'set_group' in optdict:
+        # Sets groups for each user.
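+        # (Rows must provide 'email_addr' and 'group_id' columns; see
+        # SET_GROUP_REQUIRED_KEYS above.)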
+        user_dicts = assure_keys(dict_reader, SET_GROUP_REQUIRED_KEYS)
+        for user in user_dicts:
+            result = api_interface.set_user_group(
+                {'email': user['email_addr'],
+                 'group_id': user['group_id'],})
+            success_count += 1
+    elif 'disable' in optdict:
+        user_dicts = assure_keys(dict_reader, frozenset(['email_addr']))
+        for user in user_dicts:
+            result = api_interface.deactivate_user(
+                {'email': user['email_addr'],})
+            success_count += 1
+    elif 'enable' in optdict:
+        user_dicts = assure_keys(dict_reader, frozenset(['email_addr']))
+        for user in user_dicts:
+            result = api_interface.activate_user(
+                {'email': user['email_addr'],})
+            success_count += 1
+    else:
+        raise UsersActionError("Got an action that's not accounted for!")
+
+    return success_count
+
+def run_single_command(db_conn, email_address, optdict):
+    log = logging.getLogger("run_single_command")
+
+    if optdict['setpw']:
+        set_user_password(db_conn, email_address, optdict['password'])
+
+    elif optdict['create']:
+        result = api_interface.create_user(
+            {'firstname': optdict['given_name'],
+             'lastname': optdict['surname'],
+             'email': email_address,
+             'group_id': optdict['group_id']})
+
+    elif optdict['set_email']:
+        # optparse cannot produce a 'new-email' key (dest names may not
+        # contain hyphens), so look up the underscored form.
+        result = api_interface.change_email(
+            {'email': optdict['new_email'],
+             'old_email': email_address,})
+    elif optdict['set_group']:
+        result = api_interface.set_user_group(
+            {'email': email_address,
+             'group_id': optdict['group_id'],})
+    elif optdict['disable']:
+        result = api_interface.deactivate_user(
+            {'email': email_address,})
+    elif optdict['enable']:
+        result = api_interface.activate_user(
+            {'email': email_address,})
+    else:
+        raise UsersActionError("Got an action that's not accounted for!")
+
+def get_user_list():
+    """Fetches the list of users from SpiderOak as a parsed structure."""
+    return api_interface.fetch_users()
+
+def csvify_userlist(csvfile, users):
+    """Takes a list of users (or its JSON encoding) and writes it as CSV."""
+    # fetch_users() already returns parsed data; only decode when we are
+    # actually handed a JSON string.
+    if isinstance(users, basestring):
+        user_list = json.loads(users)
+    else:
+        user_list = users
+    dict_writer = csv.DictWriter(csvfile,
+                                 ['email', 'firstname', 'lastname',
+                                  'group_id', 'share_id', 'bytes_stored',
+                                  'enabled',],
+                                 extrasaction='ignore')
+    dict_writer.writeheader()
+    dict_writer.writerows(user_list)
+
+    return None
+
+
+def run_command(db_conn, optdict):
+    """Matches the options in optdict to a specific action we need to do.
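+
+    Dispatch order (first match wins): 'csv_file' batch actions, then a
+    single 'email_addr' action, then 'users_csv'/'users_json' user listings.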
+ + :param optdict: options dictionary + + """ + + if 'csv_file' in optdict: + run_csv_file(db_conn, optdict.pop('csv_file'), optdict) + elif 'email_addr' in optdict: + run_single_command(db_conn, optdict.pop('email_addr'), optdict) + elif 'users_csv' in optdict or 'users_json' in optdict: + users = get_user_list() + if 'users_csv' in optdict: + return csvify_userlist(optdict['users_csv'], users) + + return users + diff --git a/netkes/account_mgr/fix_url_truncation_bug.patch b/netkes/account_mgr/fix_url_truncation_bug.patch new file mode 100644 index 0000000..bd27e9c --- /dev/null +++ b/netkes/account_mgr/fix_url_truncation_bug.patch @@ -0,0 +1,148 @@ +From 25bcaa4f2cc6cbdae4a02b581e65589938377397 Mon Sep 17 00:00:00 2001 +From: Ben Zimmerman +Date: Wed, 4 Apr 2012 13:44:48 -0500 +Subject: [PATCH] Fixed url truncation bug + +--- + lib/account_mgr/accounts_api.py | 29 +++++++++++++++-------------- + lib/account_mgr/api_client.py | 2 +- + 2 files changed, 16 insertions(+), 15 deletions(-) + create mode 100644 lib/__init__.py + +diff --git a/lib/__init__.py b/lib/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/lib/account_mgr/accounts_api.py b/lib/account_mgr/accounts_api.py +index c1d9bca..b3880d2 100644 +--- a/lib/account_mgr/accounts_api.py ++++ b/lib/account_mgr/accounts_api.py +@@ -1,7 +1,7 @@ + import json + import urllib2 + +-from .api_client import ApiClient ++from api_client import ApiClient + + + class Api(object): +@@ -41,16 +41,16 @@ class Api(object): + ### Features + + def enterprise_features(self): +- return self.client.get_json('/partner/features') ++ return self.client.get_json('partner/features') + + ### Settings + + def enterprise_settings(self): +- return self.client.get_json('/partner/settings') ++ return self.client.get_json('partner/settings') + + def update_enterprise_settings(self, settings): + try: +- return self.client.post_json('/partner/settings', settings) ++ return self.client.post_json('partner/settings', settings) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() +@@ -59,12 +59,12 @@ class Api(object): + ### Groups + + def list_groups(self): +- return self.client.get_json('/groups/') ++ return self.client.get_json('groups/') + + def create_group(self, group_info): + try: + resp = self.client.post_json_raw_response( +- '/groups/', group_info) ++ 'groups/', group_info) + except urllib2.HTTPError, err: + if err.code == 400: + raise self.BadParams() +@@ -79,7 +79,7 @@ class Api(object): + + def get_group(self, group_id): + try: +- return self.client.get_json('/groups/%d' % (group_id,)) ++ return self.client.get_json('groups/%d' % (group_id,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() +@@ -87,7 +87,7 @@ class Api(object): + + def edit_group(self, group_id, group_info): + try: +- self.client.post_json('/groups/%d' % (group_id,), group_info) ++ self.client.post_json('groups/%d' % (group_id,), group_info) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() +@@ -105,7 +105,7 @@ class Api(object): + + def delete_group(self, group_id): + try: +- self.client.delete('/groups/%d' % (group_id,)) ++ self.client.delete('groups/%d' % (group_id,)) + except urllib2.HTTPError, err: + if err.code == 404: + raise self.NotFound() +@@ -114,11 +114,11 @@ class Api(object): + ### Users + + def list_users(self): +- return self.client.get_json('/users/') ++ return self.client.get_json('users/') + + def create_user(self, user_info): + try: +- return 
self.client.post_json('/users/', user_info)
++            return self.client.post_json('users/', user_info)
+         except urllib2.HTTPError, err:
+             if err.code == 400:
+                 raise self.BadParams()
+@@ -137,7 +137,7 @@ class Api(object):
+     def get_user(self, username_or_email):
+         try:
+             return self.client.get_json(
+-                '/users/%s' % (username_or_email,))
++                'users/%s' % (username_or_email,))
+         except urllib2.HTTPError, err:
+             if err.code == 404:
+                 raise self.NotFound()
+@@ -146,7 +146,7 @@ class Api(object):
+     def edit_user(self, username_or_email, user_info):
+         try:
+             self.client.post_json(
+-                '/users/%s' % (username_or_email,), user_info)
++                'users/%s' % (username_or_email,), user_info)
+         except urllib2.HTTPError, err:
+             if err.code == 404:
+                 raise self.NotFound()
+@@ -166,8 +166,9 @@ class Api(object):
+ 
+     def delete_user(self, username_or_email):
+         try:
+-            self.client.delete('/users/%s' % (username_or_email,))
++            self.client.delete('users/%s' % (username_or_email,))
+         except urllib2.HTTPError, err:
+             if err.code == 404:
+                 raise self.NotFound()
+             raise
++
+diff --git a/lib/account_mgr/api_client.py b/lib/account_mgr/api_client.py
+index 84b1551..4807dfb 100644
+--- a/lib/account_mgr/api_client.py
++++ b/lib/account_mgr/api_client.py
+@@ -2,7 +2,7 @@ import json
+ import urllib2
+ from urlparse import urljoin
+ 
+-from Pandora.https import VerifiedHTTPSHandler
++from lib.Pandora.https import VerifiedHTTPSHandler
+ 
+ 
+ _DEFAULT_HANDLERS = [
+-- 
+1.7.4.2
+ 
diff --git a/netkes/account_mgr/setup_token.py b/netkes/account_mgr/setup_token.py
new file mode 100644
index 0000000..57cc435
--- /dev/null
+++ b/netkes/account_mgr/setup_token.py
@@ -0,0 +1,55 @@
+'''
+setup_token.py
+
+(c) 2012 SpiderOak, Inc.
+
+Manages setup authentication tokens.
+'''
+
+import base64
+import datetime
+import os
+import sys
+
+TOKEN_LENGTH = 30  # Number of random bytes in a token.  Note that we apply
+                   # base64 encoding, so the encoded string is longer (40
+                   # characters for 30 bytes), and byte counts that are not
+                   # a multiple of three introduce padding characters.
+
+def new_token():
+    return base64.urlsafe_b64encode(os.urandom(TOKEN_LENGTH))
+
+def create_token(db_conn, expiry=None, no_devices_only=True, single_use_only=True):
+    """
+    Creates an administrative setup token with the given options.
+
+    :param db_conn: Open database connection.
+    :param expiry: Datetime object of token's expiry, or None for now.
+    :param no_devices_only: Restricts the token to use with accounts with no devices created.
+    :param single_use_only: Restricts the token for single uses with a given user.
+
+    :return: The urlsafe-base64 token string (40 characters with the default TOKEN_LENGTH).
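+
+    Illustrative use (one-hour expiry; `conn` is assumed to be an open
+    psycopg2 connection):
+
+        token = create_token(conn,
+                             expiry=datetime.datetime.utcnow()
+                                    + datetime.timedelta(hours=1))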
+ """ + + token = new_token() + + create_token_query_base = "INSERT INTO admin_setup_tokens (token, no_devices_only, single_use_only" + + if expiry is None: + create_token_query = create_token_query_base + ") VALUES (%s, %s, %s)" + query_args = (token, no_devices_only, single_use_only, ) + else: + create_token_query = create_token_query_base + ", expiry) VALUES (%s, %s, %s, %s)" + query_args = (token, no_devices_only, single_use_only, expiry, ) + + cur = db_conn.cursor() + + try: + cur.execute(create_token_query, query_args) + except Exception, e: + db_conn.rollback() + raise e + else: + db_conn.commit() + + return token + diff --git a/netkes/account_mgr/test.py b/netkes/account_mgr/test.py new file mode 100644 index 0000000..624d0e3 --- /dev/null +++ b/netkes/account_mgr/test.py @@ -0,0 +1,18 @@ +from accounts_api import Api +api = Api.create( + 'https://dhain.dev.spideroak.com:888/apis/accounts/v1/', + 'ben12', + 'bbb', +) +from accounts_api import Api +api = Api.create( + 'https://bdzim.dev.spideroak.com:888/apis/accounts/v1/', + 'ben12', + 'bbb', +) +from accounts_api import Api +api = Api.create( + 'https://bdzim.dev.spideroak.com/apis/accounts/v1/', + 'remote_test_admin', + 'w0mbat', +) diff --git a/netkes/account_mgr/test/__init__.py b/netkes/account_mgr/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/netkes/account_mgr/test/test_account_mgr.py b/netkes/account_mgr/test/test_account_mgr.py new file mode 100644 index 0000000..bf4c781 --- /dev/null +++ b/netkes/account_mgr/test/test_account_mgr.py @@ -0,0 +1,68 @@ +import unittest +from mock import Mock, MagicMock, sentinel, patch + +import account_mgr + +class TestAdminTokenAuth(unittest.TestCase): + def setUp(self): + account_mgr.get_cursor = MagicMock() + cur = MagicMock() + account_mgr.get_cursor.return_value = cur + self.cur = cur.__enter__() + account_mgr.get_api = MagicMock() + self.api = MagicMock() + account_mgr.get_api.return_value = self.api + + def test_no_restrictions(self): + self.cur.rowcount = 1 + self.cur.fetchone.return_value = (False, False, False) + self.api.list_devices.return_value = [] + self.assertTrue( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) + + def test_bad_credentials(self): + self.cur.rowcount = 0 + self.assertFalse( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) + + def test_no_devices_only(self): + self.cur.rowcount = 1 + self.cur.fetchone.return_value = (True, False, False) + self.api.list_devices.return_value = [1] + self.assertFalse( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) + self.cur.fetchone.return_value = (True, False, False) + self.api.list_devices.return_value = [] + self.assertTrue( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) + + def test_single_use_only(self): + self.cur.rowcount = 1 + self.cur.fetchone.return_value = (False, True, True) + self.api.list_devices.return_value = [] + self.assertFalse( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) + self.cur.fetchone.return_value = (False, True, False) + self.api.list_devices.return_value = [] + self.assertTrue( + account_mgr.admin_token_auth({}, + sentinel.username, + sentinel.password) + ) +if __name__ == '__main__': + unittest.main() diff --git a/netkes/account_mgr/test/test_accounts_api.py b/netkes/account_mgr/test/test_accounts_api.py new file mode 100644 index 0000000..542858c --- /dev/null +++ 
b/netkes/account_mgr/test/test_accounts_api.py @@ -0,0 +1,345 @@ +import json +import unittest +from mock import patch, sentinel, Mock + +from account_mgr import accounts_api + + +class FakeHttpError(accounts_api.urllib2.HTTPError): + def __init__(self, code, body=''): + self.code = code + self.body = body + + def read(self): + return self.body + + +class TestAccountsApi(unittest.TestCase): + def setUp(self): + self.client = Mock() + self.api = accounts_api.Api(self.client) + + @patch.object(accounts_api, 'ApiClient') + def test_create(self, ApiClient): + api = accounts_api.Api.create( + sentinel.base, sentinel.username, sentinel.password) + self.assertIs(api.client, ApiClient.return_value) + ApiClient.assert_called_once_with( + sentinel.base, sentinel.username, sentinel.password) + + def test_ping(self): + self.assertIs( + self.api.ping(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('ping') + + ### Plans + + def test_list_plans(self): + self.assertIs( + self.api.list_plans(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('plans') + + ### Quota + + def test_quota(self): + self.assertIs( + self.api.quota(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('partner/quota') + + ### Features + + def test_enterprise_features(self): + self.assertIs( + self.api.enterprise_features(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('partner/features') + + ### Settings + + def test_enterprise_settings(self): + self.assertIs( + self.api.enterprise_settings(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('partner/settings') + + def test_update_enterprise_settings(self): + self.assertIs( + self.api.update_enterprise_settings(sentinel.settings), + self.client.post_json.return_value + ) + self.client.post_json.assert_called_once_with( + 'partner/settings', sentinel.settings) + + def test_update_enterprise_settings_bad_params(self): + self.client.post_json.side_effect = FakeHttpError(400) + with self.assertRaises(self.api.BadParams): + self.api.update_enterprise_settings(sentinel.settings) + + ### Groups + + def test_list_groups(self): + self.assertIs( + self.api.list_groups(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('groups/') + + def test_create_group(self): + response = self.client.post_json_raw_response.return_value + response.info.return_value = {'location': 'groups/42'} + self.assertEqual(self.api.create_group(sentinel.info), 42) + self.client.post_json_raw_response.assert_called_once_with( + 'groups/', sentinel.info) + + def test_create_group_bad_params(self): + self.client.post_json_raw_response.side_effect = \ + FakeHttpError(400) + with self.assertRaises(self.api.BadParams): + self.api.create_group(sentinel.info) + + def test_create_group_duplicate_name(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'The following fields conflict ' + 'with an existing record', + 'conflicts': ['name'] + })) + self.client.post_json_raw_response.side_effect = response + with self.assertRaises(self.api.DuplicateGroupName): + self.api.create_group(sentinel.info) + + def test_create_group_invalid_plan(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['plan_id'] + })) + self.client.post_json_raw_response.side_effect = response + with self.assertRaises(self.api.BadPlan): + 
self.api.create_group(sentinel.info) + + def test_get_group(self): + self.assertIs( + self.api.get_group(42), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('groups/42') + + def test_get_group_not_found(self): + self.client.get_json.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.get_group(42) + + def test_edit_group(self): + self.api.edit_group(42, sentinel.info) + self.client.post_json.assert_called_once_with( + 'groups/42', sentinel.info) + + def test_edit_group_bad_params(self): + self.client.post_json.side_effect = FakeHttpError(400) + with self.assertRaises(self.api.BadParams): + self.api.edit_group(42, sentinel.info) + + def test_edit_group_not_found(self): + self.client.post_json.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.edit_group(42, sentinel.info) + + def test_edit_group_duplicate_name(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'The following fields conflict ' + 'with an existing record', + 'conflicts': ['name'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.DuplicateGroupName): + self.api.edit_group(42, sentinel.info) + + def test_edit_group_invalid_plan(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['plan_id'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.BadPlan): + self.api.edit_group(42, sentinel.info) + + def test_edit_group_quota_exceeded(self): + self.client.post_json.side_effect = FakeHttpError(402) + with self.assertRaises(self.api.QuotaExceeded): + self.api.edit_group(42, sentinel.info) + + def test_delete_group(self): + self.api.delete_group(42) + self.client.delete.assert_called_once_with('groups/42') + + def test_delete_group_not_found(self): + self.client.delete.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.delete_group(42) + + ### Users + + def test_list_users(self): + self.assertIs( + self.api.list_users(), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('users/') + + def test_create_user(self): + self.api.create_user(sentinel.info) + self.client.post_json.assert_called_once_with( + 'users/', sentinel.info) + + def test_create_user_bad_params(self): + self.client.post_json.side_effect = FakeHttpError(400) + with self.assertRaises(self.api.BadParams): + self.api.create_user(sentinel.info) + + def test_create_user_duplicate_username(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'The following fields conflict ' + 'with an existing record', + 'conflicts': ['username'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.DuplicateUsername): + self.api.create_user(sentinel.info) + + def test_create_user_duplicate_email(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'The following fields conflict ' + 'with an existing record', + 'conflicts': ['email'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.DuplicateEmail): + self.api.create_user(sentinel.info) + + def test_create_user_invalid_group(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['group_id'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.BadGroup): + self.api.create_user(sentinel.info) + + def 
test_create_user_invalid_plan(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['plan_id'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.BadPlan): + self.api.create_user(sentinel.info) + + def test_get_user(self): + self.assertIs( + self.api.get_user('username'), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with('users/username') + + def test_get_user_not_found(self): + self.client.get_json.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.get_user('username') + + def test_list_devices(self): + self.assertIs( + self.api.list_devices('username'), + self.client.get_json.return_value + ) + self.client.get_json.assert_called_once_with( + 'users/username/devices') + + def test_list_devices_user_not_found(self): + self.client.get_json.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.list_devices('username') + + def test_edit_user(self): + self.api.edit_user('username', sentinel.info) + self.client.post_json.assert_called_once_with( + 'users/username', sentinel.info) + + def test_edit_user_bad_params(self): + self.client.post_json.side_effect = FakeHttpError(400) + with self.assertRaises(self.api.BadParams): + self.api.edit_user('username', sentinel.info) + + def test_edit_user_not_found(self): + self.client.post_json.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.edit_user('username', sentinel.info) + + def test_edit_user_duplicate_email(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'The following fields conflict ' + 'with an existing record', + 'conflicts': ['email'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.DuplicateEmail): + self.api.edit_user('username', sentinel.info) + + def test_edit_user_invalid_group(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['group_id'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.BadGroup): + self.api.edit_user('username', sentinel.info) + + def test_edit_user_invalid_plan(self): + response = FakeHttpError(409, json.dumps({ + 'reason': 'Invalid values for the following fields', + 'conflicts': ['plan_id'] + })) + self.client.post_json.side_effect = response + with self.assertRaises(self.api.BadPlan): + self.api.edit_user('username', sentinel.info) + + def test_edit_user_quota_exceeded(self): + self.client.post_json.side_effect = FakeHttpError(402) + with self.assertRaises(self.api.QuotaExceeded): + self.api.edit_user('username', sentinel.info) + + def test_delete_user(self): + self.api.delete_user('username') + self.client.delete.assert_called_once_with('users/username') + + def test_delete_user_not_found(self): + self.client.delete.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.delete_user('username') + + def test_send_activation_email(self): + self.api.send_activation_email('username') + self.client.post.assert_called_once_with( + 'users/username?action=sendactivationemail', + '' + ) + + def test_send_activation_email_not_found(self): + self.client.post.side_effect = FakeHttpError(404) + with self.assertRaises(self.api.NotFound): + self.api.send_activation_email('username') + + def test_send_activation_email_not_sent(self): + self.client.post.side_effect = FakeHttpError(409) + 
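+        # A bare 409 from this endpoint (no conflict body) maps to
+        # EmailNotSent; see send_activation_email in accounts_api.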
with self.assertRaises(self.api.EmailNotSent): + self.api.send_activation_email('username') + + +if __name__ == '__main__': + unittest.main() diff --git a/netkes/account_mgr/test/test_api_client.py b/netkes/account_mgr/test/test_api_client.py new file mode 100644 index 0000000..f5d25c8 --- /dev/null +++ b/netkes/account_mgr/test/test_api_client.py @@ -0,0 +1,60 @@ +import json +import unittest +from mock import sentinel, patch + +from account_mgr import api_client + + +class TestApiClient(unittest.TestCase): + @patch.object(api_client, 'VerifiedHTTPSHandler') + def setUp(self, httpshandler): + self.httpshandler = httpshandler.return_value + self.response = self.httpshandler.https_open.return_value + self.client = api_client.ApiClient( + 'https://example.com', + sentinel.api_username, + sentinel.api_password + ) + + def test_verifies_ssl_certificate(self): + self.client.open('/') + self.assertEqual(self.httpshandler.https_open.call_count, 1) + + @patch.object(api_client, 'RequestWithMethod') + def test_logs_in_using_provided_credentials(self, req): + self.client.open('/') + req.assert_called_once_with( + self.client.base + '/', + None, + {'authorization': ( + 'Basic PFNlbnRpbmVsT2JqZWN0ICJhcGlfdXNlcm5hbWUiPjo8' + 'U2VudGluZWxPYmplY3QgImFwaV9wYXNzd29yZCI+' + )} + ) + + def test_get_json(self): + data = {'foo': 'bar'} + self.response.read.return_value = json.dumps(data) + self.assertEqual(self.client.get_json('/'), data) + + def test_post_json(self): + postdata = {'foo': 'bar'} + respdata = {'baz': 'qux'} + self.response.read.return_value = json.dumps(respdata) + self.assertEqual(self.client.post_json('/', postdata), respdata) + ((req,), _) = self.httpshandler.https_open.call_args + self.assertEqual(json.loads(req.data), postdata) + + def test_delete(self): + self.client.delete('/') + ((req,), _) = self.httpshandler.https_open.call_args + self.assertEqual(req.get_method(), 'DELETE') + + def test_raises_HTTPError_on_error_responses(self): + self.response.code = 409 + with self.assertRaises(api_client.urllib2.HTTPError): + self.client.open('/') + + +if __name__ == '__main__': + unittest.main() diff --git a/netkes/account_mgr/test/test_api_interface.py b/netkes/account_mgr/test/test_api_interface.py new file mode 100644 index 0000000..39cb85c --- /dev/null +++ b/netkes/account_mgr/test/test_api_interface.py @@ -0,0 +1,365 @@ +import unittest +from mock import Mock, MagicMock, sentinel, patch + +import json + +from directory_agent import api_interface + +class MockException(Exception): + pass + +class TestRunApiCall(unittest.TestCase): + def setUp(self): + self.url_patcher = patch("urllib.urlopen") + self.urlopen = self.url_patcher.start() + + urlfile = MagicMock(spec=file) + self.test_return_data = { "testkey1": "testvalue1", + "testkey2": 2 + } + urlfile.read.return_value = json.dumps(self.test_return_data) + self.urlopen.return_value = urlfile + + def tearDown(self): + self.url_patcher.stop() + + def test_successful_read_nodata(self): + self.assertEqual(api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action)), + self.test_return_data) + + args, _ = self.urlopen.call_args + assert len(args) == 1 + + def test_successful_read_withdata(self): + test_send_data = {"testsend1": "testvalues1", + "testsend2": "testvalues2", + } + self.assertEqual(api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action), + test_send_data), + self.test_return_data) + args, _ = self.urlopen.call_args + assert len(args) == 2 + self.assertEqual(args[1], json.dumps(test_send_data)) + + 
def test_blows_up_with_bad_json_returned(self): + urlfile = MagicMock(spec=file) + urlfile.read.return_value = "DEADBEEF" + self.urlopen.return_value = urlfile + + with self.assertRaises(ValueError): + api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action)) + + def test_blows_up_with_bad_data_given(self): + with self.assertRaises(TypeError): + api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action), + sentinel.bad_obj) + + def test_gets_url_read_exceptions(self): + def side_effect(): + raise Exception("DEADBEEF") + + urlfile = MagicMock(spec=file) + urlfile.read.side_effect = side_effect + self.urlopen.return_value = urlfile + + with self.assertRaises(Exception) as cm: + api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action)) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + def test_gets_url_open_exceptions(self): + self.urlopen.side_effect = Exception("DEADBEEF") + + with self.assertRaises(Exception) as cm: + api_interface._run_api_call(str(sentinel.api_root), + str(sentinel.action)) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + +class TestDeactivateUsers(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + self.test_user = {'avatar_id': 1001} + + def tearDown(self): + self.run_api_patcher.stop() + + def test_deactivate_succeeds(self): + ret_val = {"success" : True} + self.run_api_call.return_value = ret_val + + self.assertEqual(api_interface.deactivate_user(sentinel.api_root, + self.test_user), + ret_val) + + + def test_deactivate_fails(self): + ret_val = {"success" : False, + "reason" : "DEADBEEF", + } + self.run_api_call.return_value = ret_val + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + retval = api_interface.deactivate_user(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + + def test_deactivate_connection_problem(self): + self.run_api_call.side_effect = MockException("DEADBEEF") + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + retval = api_interface.deactivate_user(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + +class TestPurgeUser(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + self.test_user = {'avatar_id': 1001} + + def tearDown(self): + self.run_api_patcher.stop() + + def test_purge_succeeds(self): + ret_val = {"success" : True} + self.run_api_call.return_value = ret_val + + self.assertEqual(api_interface.purge_user(sentinel.api_root, + self.test_user), + ret_val) + + + def test_purge_fails(self): + ret_val = {"success" : False, + "reason" : "DEADBEEF", + } + self.run_api_call.return_value = ret_val + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + retval = api_interface.purge_user(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + + def test_purge_connection_problem(self): + self.run_api_call.side_effect = MockException("DEADBEEF") + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + retval = api_interface.purge_user(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + 
cm.exception
self.assertEqual(str(the_exception), "DEADBEEF") + +class TestFetchUsers(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + + def tearDown(self): + self.run_api_patcher.stop() + + def test_fetch_list_empty_succeeds(self): + ret_val = [] + + self.run_api_call.return_value = ret_val + + result = api_interface.fetch_users(Mock()) + self.assertEqual(result, ret_val) + + def test_fetch_list_succeeds(self): + ret_val = [{'avatar_id': sentinel.avatar_id1, + 'username' : sentinel.username1, + }, + {'avatar_id': sentinel.avatar_id2, + 'username' : sentinel.username2 + }] + + self.run_api_call.return_value = ret_val + + result = api_interface.fetch_users(Mock()) + self.assertEqual(result, ret_val) + + def test_fetch_list_fails(self): + self.run_api_call.side_effect = Exception("DEADBEEF") + + with self.assertRaises(api_interface.FetchInformationFailed) as cm: + api_interface.fetch_users(Mock()) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + +class TestFetchPlans(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + + def tearDown(self): + self.run_api_patcher.stop() + + def test_fetch_plans_empty_fails(self): + ret_val = [] + + self.run_api_call.return_value = ret_val + + with self.assertRaises(api_interface.FetchInformationFailed) as cm: + result = api_interface.fetch_plans(Mock()) + + the_exception = cm.exception + self.assertEqual(str(the_exception), api_interface.NO_PLANS) + + def test_fetch_list_succeeds(self): + ret_val = [{'group_id': sentinel.group_id1, + 'storage_gigs' : sentinel.storage_gigs1, + }, + {'group_id': sentinel.group_id2, + 'storage_gigs' : sentinel.storage_gigs2 + }] + + self.run_api_call.return_value = ret_val + + result = api_interface.fetch_plans(Mock()) + self.assertEqual(result, ret_val) + + def test_fetch_list_fails(self): + self.run_api_call.side_effect = Exception("DEADBEEF") + + with self.assertRaises(api_interface.FetchInformationFailed) as cm: + api_interface.fetch_plans(Mock()) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + +class TestSetUserPlan(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + self.test_user = {'avatar_id': 1001, + 'group_id' : sentinel.group_id} + + def tearDown(self): + self.run_api_patcher.stop() + + def test_planset_success_nopromo(self): + ret_val = {'success': True} + self.run_api_call.return_value = ret_val + + self.assertEqual(api_interface.set_user_plan(sentinel.api_root, + self.test_user), + ret_val) + + args, _ = self.run_api_call.call_args + assert len(args) == 3 + self.assertIs(args[0], sentinel.api_root) + self.assertIs(args[2]['group_id'], sentinel.group_id) + + def test_planset_success_promo(self): + ret_val = {'success': True} + self.run_api_call.return_value = ret_val + + self.assertEqual(api_interface.set_user_plan(sentinel.api_root, + self.test_user, + sentinel.promo_code), + ret_val) + + args, _ = self.run_api_call.call_args + assert len(args) == 3 + self.assertIs(args[2]['promo_code'], sentinel.promo_code) + + def test_planset_failure(self): + ret_val = {'success' : False, + 'reason' : "DEADBEEF", + } + self.run_api_call.return_value = ret_val + + with 
self.assertRaises(api_interface.ManipulateUserFailed) as cm: + res = api_interface.set_user_plan(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + def test_planset_exception(self): + self.run_api_call.side_effect = MockException("DEADBEEF") + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + res = api_interface.set_user_plan(sentinel.api_root, + self.test_user) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + +class TestCreateUser(unittest.TestCase): + def setUp(self): + self.run_api_patcher = patch("directory_agent.api_interface._run_api_call") + self.run_api_call = self.run_api_patcher.start() + + def tearDown(self): + self.run_api_patcher.stop() + + def test_create_succeeds(self): + self.run_api_call.return_value = {'success': True, + 'server_generated_username': sentinel.testuser} + + testuser = {'email': sentinel.email, + 'firstname': sentinel.givenName, + 'lastname': sentinel.surname, + } + result = api_interface.create_user(sentinel.api_root,testuser) + + self.assertEqual(result['server_generated_username'], sentinel.testuser) + + args, _ = self.run_api_call.call_args + + self.assertIs(sentinel.api_root, args[0]) + assert sentinel.email in args[2].values() + assert sentinel.givenName in args[2].values() + assert sentinel.surname in args[2].values() + + def test_create_run_api_call_exception(self): + self.run_api_call.side_effect = Exception("DEADBEEF") + + testuser = {'email': sentinel.email, + 'firstname': sentinel.givenName, + 'lastname': sentinel.surname, + } + + with self.assertRaises(api_interface.ManipulateUserFailed) as cm: + api_interface.create_user(sentinel.api_root,testuser) + + the_exception = cm.exception + self.assertEqual(str(the_exception), "DEADBEEF") + + def test_create_user_add_failed(self): + self.run_api_call.return_value = {'success' : False, + 'reason' : "Mocked it up to fail, duh!", + } + + testuser = {'email': sentinel.email, + 'firstname': sentinel.givenName, + 'lastname': sentinel.surname, + } + with self.assertRaises(api_interface.ManipulateUserFailed): + api_interface.create_user(sentinel.api_root, + testuser) + +if __name__ == "__main__": + unittest.main() diff --git a/netkes/account_mgr/test/test_group_manager.py b/netkes/account_mgr/test/test_group_manager.py new file mode 100644 index 0000000..abfc35c --- /dev/null +++ b/netkes/account_mgr/test/test_group_manager.py @@ -0,0 +1,336 @@ +import unittest +from mock import Mock, mocksignature, sentinel, patch + +import copy + +from directory_agent import group_manager, api_interface + +class TestApiCreateUsers(unittest.TestCase): + def setUp(self): + self.api_iface_create_patcher = patch("directory_agent.api_interface.create_user") + self.api_iface_create = self.api_iface_create_patcher.start() + + self.api_iface_plan_patcher = patch("directory_agent.api_interface.set_user_plan") + self.api_iface_setplan = self.api_iface_plan_patcher.start() + + def tearDown(self): + self.api_iface_create_patcher.stop() + self.api_iface_plan_patcher.stop() + + def test_create_no_users(self): + config = Mock() + users = [] + + self.assertEqual(group_manager._api_create_users(config, users), + []) + + def test_create_one_user(self): + config = Mock() + users = [{'uniqueid' : sentinel.uniqueid, + 'email' : sentinel.email, + 'firstname': sentinel.firstname, + 'lastname' : sentinel.lastname, + 'plan_id' : sentinel.plan_id, + }] + + server_ok = {'success' : True, + 'avatar_id' : sentinel.avatar_id, 
+ 'account_id': sentinel.account_id, + 'server_assigned_password': sentinel.server_assigned_password, + } + self.api_iface_create.return_value = server_ok + + results = group_manager._api_create_users(config, users) + + self.assertEqual(len(results), 1) + self.assertIs(results[0]['uniqueid'], sentinel.uniqueid) + self.assertIs(results[0]['avatar_id'], sentinel.avatar_id) + + def test_create_many_users(self): + config = Mock() + + users = [{'uniqueid' : sentinel.uniqueid1, + 'email' : sentinel.email1, + 'firstname': sentinel.firstname1, + 'lastname' : sentinel.lastname1, + 'plan_id' : sentinel.plan_id1, + }, + {'uniqueid' : sentinel.uniqueid2, + 'email' : sentinel.email2, + 'firstname': sentinel.firstname2, + 'lastname' : sentinel.lastname2, + 'plan_id' : sentinel.plan_id2, + }, + {'uniqueid' : sentinel.uniqueid3, + 'email' : sentinel.email3, + 'firstname': sentinel.firstname3, + 'lastname' : sentinel.lastname3, + 'plan_id' : sentinel.plan_id3, + },] + + returns = [{'success' : True, + 'avatar_id' : sentinel.avatar_id1, + 'account_id': sentinel.account_id1, + 'server_assigned_password' : sentinel.s_a_p1, + }, + {'success' : True, + 'avatar_id' : sentinel.avatar_id2, + 'account_id': sentinel.account_id2, + 'server_assigned_password' : sentinel.s_a_p2, + }, + {'success' : True, + 'avatar_id' : sentinel.avatar_id3, + 'account_id': sentinel.account_id3, + 'server_assigned_password' : sentinel.s_a_p3, + },] + + def side_effect(*args): + return returns.pop(0) + self.api_iface_create.side_effect = side_effect + + results = group_manager._api_create_users(config, users) + + self.assertEqual(len(results), 3) + + self.assertIs(results[0]['uniqueid'], sentinel.uniqueid1) + self.assertIs(results[0]['avatar_id'], sentinel.avatar_id1) + + self.assertIs(results[1]['uniqueid'], sentinel.uniqueid2) + self.assertIs(results[1]['avatar_id'], sentinel.avatar_id2) + + self.assertIs(results[2]['uniqueid'], sentinel.uniqueid3) + self.assertIs(results[2]['avatar_id'], sentinel.avatar_id3) + +class TestRunGeneric(unittest.TestCase): + def setUp(self): + def config_get_args(str1, str2): + if str2 == 'api_root': + return sentinel.api_root + else: + return sentinel.promo_code + + self.config = Mock() + self.config.get = config_get_args + + + def testfun_nopromo(api_root, user): + pass + + def testfun_promo(api_root, user, promo_code): + pass + + self.testfun_nopromo_mock = Mock() + self.testfun_nopromo = mocksignature(testfun_nopromo, + self.testfun_nopromo_mock) + self.testfun_nopromo.return_value = sentinel.testfun_nopromo + + self.testfun_promo_mock = Mock() + self.testfun_promo = mocksignature(testfun_promo, + self.testfun_promo_mock) + self.testfun_promo.return_value = sentinel.testfun_promo + + def test_works_nousers_nopromo(self): + results = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + []) + self.assertEqual(len(results), 0) + + def test_exception_nousers_nopromo(self): + self.testfun_nopromo.side_effect = api_interface.ApiActionFailedError + results = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + []) + self.assertEqual(len(results), 0) + + + def test_works_nousers_promo(self): + results = group_manager._api_run_generic(self.config, + self.testfun_promo, + []) + self.assertEqual(len(results), 0) + + def test_works_oneuser_nopromo(self): + user = {'field1': sentinel.field1, 'field2': sentinel.field2} + + results = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + [user]) + + self.assertEqual(len(results), 1) + self.assertIs(results[0], user) + 
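+        # Confirm the mocked API function saw the user but, in the
+        # no-promo variant, never received the promo code.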
args, _ = self.testfun_nopromo_mock.call_args + assert user in args + assert sentinel.promo_code not in args + + + def test_exception_oneuser_nopromo(self): + user = {'field1': sentinel.field1, 'field2': sentinel.field2} + self.testfun_nopromo_mock.side_effect = api_interface.ApiActionFailedError + with self.assertRaises(group_manager.BailApiCall) as cm: + results = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + [user]) + e = cm.exception + result_list, = e.args + self.assertEqual(len(result_list), 0) + + + def test_works_oneuser_promo(self): + user = {'field1': sentinel.field1, 'field2': sentinel.field2} + + results = group_manager._api_run_generic(self.config, + self.testfun_promo, + [user]) + + self.assertEqual(len(results), 1) + self.assertIs(results[0], user) + args, _ = self.testfun_promo_mock.call_args + assert user in args + assert sentinel.promo_code in args + + def test_works_multiuser_nopromo(self): + users = [{'field1': sentinel.field1_1, 'field2': sentinel.field2_1,}, + {'field1': sentinel.field1_2, 'field2': sentinel.field2_2,}, + {'field1': sentinel.field1_3, 'field2': sentinel.field2_3,},] + + results = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + users) + + self.assertEqual(len(results), len(users)) + for i, d in enumerate(results): + self.assertIs(d['field1'], users[i]['field1']) + self.assertIs(d['field2'], users[i]['field2']) + + def test_exception_multiuser_nopromo(self): + users = [{'field1': sentinel.field1_1, 'field2': sentinel.field2_1,}, + {'field1': sentinel.field1_2, 'field2': sentinel.field2_2,}, + {'field1': sentinel.field1_3, 'field2': sentinel.field2_3,},] + + poplist = copy.copy(users) + + def side_effect(*args, **kwargs): + if len(poplist) > 1: + return poplist.pop(0) + else: + raise api_interface.ApiActionFailedError() + + self.testfun_nopromo_mock.side_effect = side_effect + + with self.assertRaises(group_manager.BailApiCall) as cm: + _ = group_manager._api_run_generic(self.config, + self.testfun_nopromo, + users) + + e = cm.exception + results, = e.args + self.assertEqual(len(results), len(users) - 1) + for i, d in enumerate(results): + self.assertIs(d['field1'], users[i]['field1']) + self.assertIs(d['field2'], users[i]['field2']) + + def test_works_multiuser_promo(self): + users = [{'field1': sentinel.field1_1, 'field2': sentinel.field2_1,}, + {'field1': sentinel.field1_2, 'field2': sentinel.field2_2,}, + {'field1': sentinel.field1_3, 'field2': sentinel.field2_3,},] + + results = group_manager._api_run_generic(self.config, + self.testfun_promo, + users) + + self.assertEqual(len(results), len(users)) + for i, d in enumerate(results): + self.assertIs(d['field1'], users[i]['field1']) + self.assertIs(d['field2'], users[i]['field2']) + + +class TestProcessQuery(unittest.TestCase): + def setUp(self): + self.db_conn = Mock() + self.query = Mock() + self.cur = Mock() + + self.db_conn.cursor.return_value = self.cur + + self.extras = ['field1', 'field2'] + + + def test_works_norows_noextras(self): + self.cur.fetchall.return_value = list() + + results = group_manager._process_query(self.db_conn, self.query) + + self.assertEqual(len(results), 0) + + def test_works_onerow_noextras(self): + self.cur.fetchall.return_value = [[sentinel.uniqueid]] + + results = group_manager._process_query(self.db_conn, self.query) + + self.assertEqual(len(results), 1) + self.assertEqual(len(results[0].keys()), 1) + self.assertEqual(results[0]['uniqueid'], sentinel.uniqueid) + + def test_works_multirows_noextras(self): + id_array = 
[[sentinel.uniqueid1], + [sentinel.uniqueid2], + [sentinel.uniqueid3]] + self.cur.fetchall.return_value = id_array + + results = group_manager._process_query(self.db_conn, self.query) + + self.assertEqual(len(results), 3) + self.assertEqual(len(results[0].keys()), 1) + + for i in xrange(0,3): + self.assertIs(results[i]['uniqueid'], id_array[i][0]) + + def test_works_norows_extras(self): + self.cur.fetchall.return_value = list() + + results = group_manager._process_query(self.db_conn, self.query, self.extras) + + self.assertEqual(len(results), 0) + + def test_works_onerow_extras(self): + indiv_array = [[sentinel.uniqueid, sentinel.field1, sentinel.field2]] + self.cur.fetchall.return_value = indiv_array + + results = group_manager._process_query(self.db_conn, self.query, self.extras) + self.assertEqual(len(results), 1) + self.assertEqual(len(results[0]), 3) + + self.assertIs(results[0]['field1'], sentinel.field1) + self.assertIs(results[0]['field2'], sentinel.field2) + + def test_works_multirows_extras(self): + test_array = [[sentinel.uniqueid1, sentinel.field1_1, sentinel.field2_1], + [sentinel.uniqueid2, sentinel.field1_2, sentinel.field2_2], + [sentinel.uniqueid3, sentinel.field1_3, sentinel.field2_3]] + + self.cur.fetchall.return_value = test_array + + results = group_manager._process_query(self.db_conn, self.query, self.extras) + + self.assertEqual(len(results),3) + self.assertEqual(len(results[0]), 3) + + for i in xrange(0,3): + self.assertIs(results[i]['uniqueid'], test_array[i][0]) + self.assertIs(results[i]['field1'], test_array[i][1]) + self.assertIs(results[i]['field2'], test_array[i][2]) + + def test_blows_up_with_bad_extras(self): + indiv_array = [[sentinel.uniqueid, sentinel.field1, sentinel.field2]] + self.cur.fetchall.return_value = indiv_array + + self.extras.append('field3') + + with self.assertRaises(IndexError): + results = group_manager._process_query(self.db_conn, + self.query, + self.extras) + + +if __name__ == "__main__": + unittest.main() diff --git a/netkes/account_mgr/test/test_ldap_reader.py b/netkes/account_mgr/test/test_ldap_reader.py new file mode 100644 index 0000000..fee7b6c --- /dev/null +++ b/netkes/account_mgr/test/test_ldap_reader.py @@ -0,0 +1,145 @@ +import unittest +from mock import Mock, MagicMock, sentinel, patch +import copy + +import ldap + +from directory_agent import ldap_reader + +class TestCollectGroups(unittest.TestCase): + def setUp(self): + self.test_groups = [{'type':"dn", + 'ldap_id': "cn=test1,dn=testdomain,dn=com", + 'group_id': sentinel.group_id1}, + {'type':"dn", + 'ldap_id': "cn=test2,dn=testdomain,dn=com", + 'group_id': sentinel.group_id2}, + {'type':"dn", + 'ldap_id': "cn=test3,dn=testdomain,dn=com", + 'group_id': sentinel.group_id3}, + ] + + def test_returns_empty_groups(self): + conn = Mock() + conn.search_s.return_value = [] + config = {'groups': self.test_groups, + 'dir_guid_source': 'user_guid_source', + 'dir_username_source': 'user_source', + 'dir_fname_source': 'fname_source', + 'dir_lname_source': 'lname_source',} + self.assertEqual(len(ldap_reader.collect_groups((conn, Mock()), config)), + 0) + + @patch('directory_agent.ldap_reader.get_group') + def test_returns_populated_groups(self, get_group): + conn = Mock() + + # The following tom-foolery with returns and side_effect is to make sure + # we don't have to bother with setting up get_group correctly, and so + # we assume it works correctly to mock it here. 
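A minimal, self-contained sketch of the side_effect-as-queue idiom that comment describes (names invented): each call on the mock pops and returns the next canned value, so the test never exercises the real get_group.

    from mock import Mock, sentinel

    canned = [[sentinel.user1], [sentinel.user2]]
    get_group = Mock(side_effect=lambda *args: canned.pop(0))

    assert get_group('grp-a') == [sentinel.user1]  # first call yields the first canned result
    assert get_group('grp-b') == [sentinel.user2]  # later calls walk down the list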
+ returns = [[sentinel.testuser1, sentinel.testuser2], + [sentinel.testuser3, sentinel.testuser4], + [sentinel.testuser5, sentinel.testuser6]] + + expected = [sentinel.testuser1, sentinel.testuser2, + sentinel.testuser3, sentinel.testuser4, + sentinel.testuser5, sentinel.testuser6, + ] + + def side_effect(*args): + result = returns.pop(0) + return result + + config = {'groups': self.test_groups, + 'dir_guid_source': 'user_guid_source', + 'dir_username_source': 'user_source', + 'dir_fname_source': 'fname_source', + 'dir_lname_source': 'lname_source',} + + groups = [("dn", Mock(),), ("dn", Mock(),), ("dn", Mock(),)] + + get_group.side_effect = side_effect + + self.assertEqual(ldap_reader.collect_groups(conn, config), + expected) + +class TestCheckGetGroup(unittest.TestCase): + def test_rejects_bad_group(self): + conn = Mock() + config = MagicMock() + test_group = {'type':"DEADBEEF", + 'ldap_id': "cn=test,dn=testdomain,dn=com", + 'group_id': sentinel.group_id} + with self.assertRaises(ldap_reader.InvalidGroupConfiguration): + ldap_reader.get_group(conn, config, test_group) + + def test_uses_base_dn(self): + conn = Mock() + conn.search_s.return_value = [] + config = MagicMock() + test_group = {'type':"dn", + 'ldap_id': "cn=test,dn=testdomain,dn=com", + 'group_id': sentinel.group_id} + ldap_reader.get_group((conn, sentinel.base_dn,), config, test_group) + args, _ = conn.search_s.call_args + self.assertIs(args[0], sentinel.base_dn) + + def test_returns_empty_group(self): + conn = Mock() + conn.search_s.return_value = [] + test_group = {'type':"dn", + 'ldap_id': "cn=test,dn=testdomain,dn=com", + 'group_id': sentinel.group_id} + self.assertEqual( + len(ldap_reader.get_group((conn, Mock(),), MagicMock(), test_group)), + 0) + + def test_returns_group_users(self): + conn = Mock() + config = { + 'dir_guid_source': 'user_guid_source', + 'dir_username_source': 'user_source', + 'dir_fname_source': 'fname_source', + 'dir_lname_source': 'lname_source', + } + ldap_results = [(Mock(), {config['dir_guid_source'] : [sentinel.guid1], + config['dir_username_source']: [sentinel.testuser1], + config['dir_fname_source'] : [sentinel.testfname1], + config['dir_lname_source'] : [sentinel.testlname1], + } + ), + (Mock(), {config['dir_guid_source'] : [sentinel.guid2], + config['dir_username_source']: [sentinel.testuser2], + config['dir_fname_source'] : [sentinel.testfname2], + config['dir_lname_source'] : [sentinel.testlname2], + } + ), + (None, [Mock()]), + (None, [Mock()]), + ] + + test_group = {'type':"dn", + 'ldap_id': "cn=test,dn=testdomain,dn=com", + 'group_id': sentinel.group_id} + + conn.search_s.return_value = ldap_results + self.assertEqual(ldap_reader.get_group((conn, Mock(),), + config, + test_group), + [{'email' : sentinel.testuser1, + 'firstname' : sentinel.testfname1, + 'lastname' : sentinel.testlname1, + 'uniqueid' : sentinel.guid1, + 'group_id' : sentinel.group_id, + }, + {'email' : sentinel.testuser2, + 'firstname' : sentinel.testfname2, + 'lastname' : sentinel.testlname2, + 'uniqueid' : sentinel.guid2, + 'group_id' : sentinel.group_id, + } + ]) + + +if __name__ == '__main__': + unittest.main() diff --git a/netkes/account_mgr/user_source/__init__.py b/netkes/account_mgr/user_source/__init__.py new file mode 100644 index 0000000..2cf9d3a --- /dev/null +++ b/netkes/account_mgr/user_source/__init__.py @@ -0,0 +1,7 @@ +""" +__init__.py + +(c) 2011 SpiderOak, Inc. + +Provides the bits for working with LDAP. 
+""" diff --git a/netkes/account_mgr/user_source/group_manager.py b/netkes/account_mgr/user_source/group_manager.py new file mode 100644 index 0000000..679a21d --- /dev/null +++ b/netkes/account_mgr/user_source/group_manager.py @@ -0,0 +1,178 @@ +""" +group_manager.py + +(c) 2011 SpiderOak, Inc. + +Provides the group management decision making; given sets of users +from both LDAP and SpiderOak, determines the changes required to make +SpiderOak fit the LDAP groups. +""" + +import logging +import psycopg2 + +from account_mgr import api_interface +from account_mgr.user_source import ldap_source +from account_mgr import account_runner + +_USERS_TO_CREATE_QUERY = ''' +SELECT +l.uniqueid, l.email, l.givenname, l.surname, l.group_id +FROM ldap_users l +LEFT OUTER JOIN users u ON l.uniqueid = u.uniqueid +WHERE u.uniqueid IS NULL; +''' + +_USERS_TO_ENABLE_QUERY = ''' +SELECT +l.uniqueid, u.avatar_id +FROM ldap_users l +LEFT OUTER JOIN users u ON l.uniqueid = u.uniqueid +WHERE u.enabled IS FALSE; +''' + +_USERS_TO_DISABLE_QUERY = ''' +SELECT +u.uniqueid, u.avatar_id +FROM users u +LEFT OUTER JOIN ldap_users l ON u.uniqueid = l.uniqueid +WHERE l.uniqueid IS NULL; +''' + +_USERS_TO_PLANCHANGE_QUERY = ''' +SELECT +l.uniqueid, u.avatar_id, l.group_id +FROM ldap_users l +LEFT OUTER JOIN users u ON l.uniqueid = u.uniqueid +WHERE l.group_id != u.group_id; +''' + +_USERS_TO_EMAILCHANGE_QUERY = ''' +SELECT +l.uniqueid, u.avatar_id, l.email +FROM ldap_users l +LEFT OUTER JOIN users u ON l.uniqueid = u.uniqueid +WHERE l.email != u.email; +''' + + +def _process_query(db_conn, query, extras=None): + log = logging.getLogger('_process_query') + + if extras is None: + extras = [] + + cur = db_conn.cursor() + cur.execute(query) + results = list() + for row in cur.fetchall(): + userinfo = {'uniqueid' : row[0]} + for index, extra in enumerate(extras): + userinfo[extra] = row[index+1] + + if 'avatar_id' in extras: + log.debug('Query processing avatar %d' % (userinfo['avatar_id'],)) + else: + log.debug('Query processing avatar %s' % (userinfo['email'],)) + + results.append(userinfo) + + return results + +def _calculate_changes_against_db(db_conn, users): + """ + Calculates the changes necessary by comparing our groups from LDAP to the DB. + """ + log = logging.getLogger('calculate_changes') + api_actions = dict() + + cur = db_conn.cursor() + cur.execute("CREATE TEMPORARY TABLE ldap_users (LIKE users) ON COMMIT DROP;") + cur.execute("ALTER TABLE ldap_users DROP COLUMN avatar_id;") + cur.execute("ALTER TABLE ldap_users DROP COLUMN enabled;") + cur.executemany("INSERT INTO ldap_users (uniqueid, email, givenname, surname, group_id) VALUES (%(uniqueid)s, %(email)s, %(firstname)s, %(lastname)s, %(group_id)s);", + users) + cur.close() + + # Users to create. 
+ log.debug('Creating users:') + api_actions['create'] = _process_query(db_conn, _USERS_TO_CREATE_QUERY, + ['email', 'firstname', + 'lastname', 'group_id']) + log.debug('Enabling users:') + api_actions['enable'] = _process_query(db_conn, _USERS_TO_ENABLE_QUERY, + ['avatar_id']) + log.debug('Disabling users:') + api_actions['disable'] = _process_query(db_conn, _USERS_TO_DISABLE_QUERY, + ['avatar_id']) + log.debug('Group change:') + api_actions['group'] = _process_query(db_conn, _USERS_TO_PLANCHANGE_QUERY, + ['avatar_id', 'group_id']) + log.debug('Email change:') + api_actions['email'] = _process_query(db_conn, _USERS_TO_EMAILCHANGE_QUERY, + ['avatar_id', 'email']) + + return api_actions + + +def run_group_management(config, db_conn): + """ + Resolves differences between the LDAP and our idea of the SpiderOak user DB. + + :param config: configuration dict. Should be the standard OpenManage setup. + :param user_source: UserSource object to pull users from. + :param db_conn: DB connection object + """ + log = logging.getLogger('run_group_management') + + # First step, collect the users from the LDAP groups. + ldap_conn = ldap_source.OMLDAPConnection(config["dir_uri"], config["dir_base_dn"], config["dir_user"], config["dir_password"]) + + ldap_users = ldap_source.collect_groups(ldap_conn, config) + change_groups = _calculate_changes_against_db(db_conn, ldap_users) + + runner = account_runner.AccountRunner(config, db_conn) + runner.runall(change_groups) + db_conn.commit() + + +def run_db_repair(config, db_conn): + """Repairs the current user DB and billing API versus LDAP.""" + # TODO: figure out what to do when email addresses *don't* match. + + # Collect the users from LDAP, and insert into a temporary table. + ldap_conn = ldap_source.OMLDAPConnection(config["dir_uri"], + config["dir_base_dn"], + config["dir_user"], + config["dir_password"]) + + ldap_users = ldap_source.collect_groups(ldap_conn, config) + cur = db_conn.cursor() + cur.execute("CREATE TEMPORARY TABLE ldap_users (LIKE users) ON COMMIT DROP;") + cur.execute("ALTER TABLE ldap_users DROP COLUMN avatar_id;") + cur.execute("ALTER TABLE ldap_users DROP COLUMN enabled;") + cur.executemany("INSERT INTO ldap_users (uniqueid, email, givenname, surname, group_id) VALUES (%(uniqueid)s, %(email)s, %(firstname)s, %(lastname)s, %(group_id)s);", + ldap_users) + + # Collect the users from the SpiderOak BillingAPI, and insert into + # a temporary table. + spider_users = api_interface.fetch_users() + cur = db_conn.cursor() + cur.execute("CREATE TEMPORARY TABLE spider_users (LIKE users) ON COMMIT DROP;") + cur.execute("ALTER TABLE spider_users DROP COLUMN uniqueid;") + cur.executemany("INSERT INTO spider_users " + "(avatar_id, email, givenname, surname, group_id, enabled) VALUES " + "(%(avatar_id)s, %(email)s, %(firstname)s, %(lastname)s, " + "%(group_id)s, %(enabled)s);", + spider_users) + + # Clear out the current database. + cur.execute("DELETE FROM users;") + + # Insert rows into users where email addresses match. + cur.execute("INSERT INTO users " + "SELECT l.uniqueid, s.email, s.avatar_id, s.givenname, " + "s.surname, s.group_id, s.enabled " + "FROM ldap_users l JOIN spider_users AS s ON l.email = s.email ") + + db_conn.commit() diff --git a/netkes/account_mgr/user_source/ldap_source.py b/netkes/account_mgr/user_source/ldap_source.py new file mode 100644 index 0000000..cbdc38a --- /dev/null +++ b/netkes/account_mgr/user_source/ldap_source.py @@ -0,0 +1,191 @@ +''' +ldap_reader.py + +Pulls the enterprise user groups from the LDAP server. 
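Taken together, the five queries above produce the change set that run_group_management hands to AccountRunner.runall; a sketch of its shape, with invented values (any of the lists may of course be empty):

    api_actions = {
        'create':  [{'uniqueid': 'guid-1', 'email': 'a@example.com',
                     'firstname': 'Ann', 'lastname': 'Onymous', 'group_id': 1}],
        'enable':  [{'uniqueid': 'guid-2', 'avatar_id': 1002}],
        'disable': [{'uniqueid': 'guid-3', 'avatar_id': 1003}],
        'group':   [{'uniqueid': 'guid-4', 'avatar_id': 1004, 'group_id': 2}],
        'email':   [{'uniqueid': 'guid-5', 'avatar_id': 1005, 'email': 'new@example.com'}],
    }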
+ +(c) 2011, SpiderOak, Inc. +''' + +import ldap +import logging +import re + +# MS ActiveDirectory does not properly give redirections; it passes +# redirects to the LDAP library, which dutifully follows them, but +# MSAD does not pass credentials along with the redirect process. This +# results in a case where we are using the same established, bound +# connection with our actual bound credentials having been +# stripped. The only recourse is to ignore referrals from LDAP +# servers. +ldap.set_option(ldap.OPT_REFERRALS, 0) + +class InvalidGroupConfiguration(Exception): + ''' + Thrown when invalid group configuration is used. + ''' + pass + +class OMLDAPConnection(object): + def __init__(self, uri, base_dn, username, password): + log = logging.getLogger('OMLDAPConnection __init__') + self.conn = ldap.initialize(uri) + self.conn.simple_bind_s(username, password) + log.debug("Bound to %s as %s" % (uri, username,)) + + self.base_dn = base_dn + + +def can_auth(config, username, password): + ''' + Checks the ability of the given username and password to connect to the AD. + Returns true if valid, false if not. + ''' + log = logging.getLogger("can_bind") + # Throw out empty passwords. + if password == "": + return False + + conn = ldap.initialize(config['dir_uri']) + try: + conn.simple_bind_s(username, password) + # ANY failure here results in a failure to auth. No exceptions! + except Exception: + log.exception("Failed on LDAP bind") + return False + + return True + + +def ldap_connect(uri, base_dn, username, password): + ''' + Returns a tuple of (bound LDAP connection object, base DN). + Accepts a directory containing our connection settings. + ''' + log = logging.getLogger('ldap_connect') + conn = ldap.initialize(uri) + conn.simple_bind_s(username, password) + log.debug("Bound to %s as %s" % (uri, username,)) + return (conn, base_dn, ) + +def collect_groups(conn, config): + ''' + Returns a list of lists of users per user group. + The user groups are a list of LDAP DNs. + ''' + + result_groups = [] + + for group in config['groups']: + result_groups.extend(get_group(conn, config, group)) + + return result_groups + + +def group_by_guid(conn, guid): + ''' + Returns the DN of a group given the GUID. + Active Directory-only. 
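A hypothetical use of the connection helpers above (the URI, base DN, and credentials here are invented for illustration; real values come from agent_config):

    conn, base_dn = ldap_connect('ldap://dc0.example.com',
                                 'dc=example,dc=com',
                                 'cn=agent,cn=Users,dc=example,dc=com',
                                 'secret')
    ok = can_auth({'dir_uri': 'ldap://dc0.example.com'},
                  'jdoe@example.com', 'password1')

Note that can_auth deliberately collapses every exception into a False return: any bind failure, for whatever reason, must read as an authentication failure.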
+ ''' + results = conn.conn.search_s(conn.base_dn, + ldap.SCOPE_SUBTREE, + "(objectGUID=%s)" % (guid,), + ["dn"], + ) + return results + + +def _get_group_ad(ldap_conn, config, group, dn): + log = logging.getLogger('_get_group_ad %s' % (dn,)) + user_list = [] + for dn, result_dict in ldap_conn.conn.search_s( + ldap_conn.base_dn, ldap.SCOPE_SUBTREE, "(memberOf=%s)" % (dn,), + [config['dir_guid_source'].encode('utf-8'), + config['dir_username_source'].encode('utf-8'), + config['dir_fname_source'].encode('utf-8'), + config['dir_lname_source'].encode('utf-8')] + ): + if dn is None: + continue + log.debug("Appending user %s" % result_dict[config['dir_username_source']][0]) + user_list.append({ + 'uniqueid' : result_dict[config['dir_guid_source']][0], + 'email' : result_dict[config['dir_username_source']][0], + 'firstname' : result_dict[config['dir_fname_source']][0], + 'lastname' : result_dict[config['dir_lname_source']][0], + 'group_id' : group['group_id'], + }) + return user_list + + +def _get_group_posix(ldap_conn, config, group, dn): + log = logging.getLogger('_get_group_posix %s' % (dn,)) + user_list = [] + for dn, result_dict in ldap_conn.conn.search_s( + group['ldap_id'], + ldap.SCOPE_SUBTREE, + attrlist=[config['dir_guid_source'], config['dir_member_source']] + ): + print dn, result_dict + if dn is None: + continue + # Search LDAP to get User entries that match group + for user in result_dict[config['dir_member_source']]: + log.debug("Found user %s", user) + + # Split apart the uid from the rest of the member_source + regex_result = re.search(r'^(uid=\w+),', user) + uid = regex_result.group(1) + + # Add each user that matches + for dn, user_dict in ldap_conn.conn.search_s( + ldap_conn.base_dn, + ldap.SCOPE_SUBTREE, uid, + [config['dir_guid_source'], + config['dir_fname_source'], + config['dir_lname_source'], + config['dir_username_source']] + ): + if dn is None: + continue + log.debug("Appending user %s", user) + user_list.append({ + 'uniqueid' : user_dict[config['dir_guid_source']][0], + 'email' : user_dict[config['dir_username_source']][0], + 'firstname' : user_dict[config['dir_fname_source']][0], + 'lastname' : user_dict[config['dir_lname_source']][0], + 'group_id' : group['group_id'], + }) + + return user_list + +_GROUP_GETTERS = { + 'ad': _get_group_ad, + 'posix': _get_group_posix, +} + + +def get_group(ldap_conn, config, group): + ''' + Returns a list of user dicts for the specified group. + + user dict keys: uniqueid, email, firstname, lastname, group_id + ''' + # TODO: figure out how to smoothly handle using GUIDs in configuration. + # AD stores GUIDs as a very unfriendly 16-byte value. + log = logging.getLogger("get_group %d" % (group['group_id'],)) + if group['type'].lower() != "dn": + raise InvalidGroupConfiguration("passed a group value != 'dn'") + dn = group['ldap_id'] + + try: + group_getter = _GROUP_GETTERS[config.get('dir_type', 'ad').lower()] + except KeyError: + raise InvalidGroupConfiguration( + "unknown dir_type %r" % (config['dir_type'],)) + + log.debug("Group DN: %s", dn) + user_list = group_getter(ldap_conn, config, group, dn) + log.info("Found %d users", len(user_list)) + + return user_list + diff --git a/netkes/account_mgr/user_source/local_source.py b/netkes/account_mgr/user_source/local_source.py new file mode 100644 index 0000000..8dcea0b --- /dev/null +++ b/netkes/account_mgr/user_source/local_source.py @@ -0,0 +1,86 @@ +''' +local_source.py + +Provides self-contained user management functionality on the virtual appliance. + +(c) 2012, SpiderOak, Inc. 
+'''
+
+import itertools
+import logging
+
+import psycopg2
+
+log = logging.getLogger('local_source')
+try:
+    import bcrypt
+except ImportError:
+    log.warn('bcrypt unavailable; local password auth disabled, LDAP only on this system')
+
+from common import get_config
+
+# This is only filled in the event of hitting authenticator and needing to connect to a DB.
+_AUTHENTICATOR_DB_CONN = None
+
+_PW_HASH_SELECT = '''
+SELECT email, pw_hash
+FROM passwords WHERE email=%s;'''
+
+def check_local_auth(db_conn, username, password):
+    log = logging.getLogger("check_local_auth")
+    # Log the attempt, but never the plaintext password.
+    log.info('login attempt for: %s' % (username,))
+    cur = db_conn.cursor()
+    cur.execute(_PW_HASH_SELECT, (username,))
+    if cur.rowcount != 1:
+        return False
+
+    row = cur.fetchone()
+
+    try:
+        return bcrypt.hashpw(password, row[1]) == row[1]
+    except ValueError:
+        return False
+
+def _get_db_conn(config):
+    global _AUTHENTICATOR_DB_CONN
+    if _AUTHENTICATOR_DB_CONN is None:
+        _AUTHENTICATOR_DB_CONN = psycopg2.connect(database=config['db_db'],
+                                                  user=config['db_user'],
+                                                  password=config['db_pass'],
+                                                  host=config['db_host'])
+
+    return _AUTHENTICATOR_DB_CONN
+
+def can_auth(config, username, password):
+    return check_local_auth(_get_db_conn(config), username, password)
+
+def set_user_password(db_conn, email, password):
+    """
+    Sets the password for the user.
+
+    This is secretly a wrapper for :func:`set_multi_passwords`.
+
+    :param db_conn: DB connection object
+    :param email: User's email
+    :param password: User's password.
+
+    """
+    set_multi_passwords(db_conn, [email], [password])
+
+def set_multi_passwords(db_conn, emails, passwords):
+    """
+    Sets passwords for the given emails.
+
+    :param emails: List of email addresses.
+    :param passwords: List of passwords to set for the given emails.
+    :raises: TypeError
+
+    """
+    if len(emails) != len(passwords):
+        raise TypeError("Argument lengths do not match!")
+    hashed_pws = (bcrypt.hashpw(pw, bcrypt.gensalt()) for pw in passwords)
+    cur = db_conn.cursor()
+
+    cur.executemany(
+        "SELECT upsert_password(%s, %s)", itertools.izip(emails, hashed_pws)
+    )
+
+    db_conn.commit()
diff --git a/netkes/account_mgr/user_source/radius_source.py b/netkes/account_mgr/user_source/radius_source.py
new file mode 100644
index 0000000..77a1df4
--- /dev/null
+++ b/netkes/account_mgr/user_source/radius_source.py
@@ -0,0 +1,73 @@
+"""
+radius_source.py
+
+Provides RADIUS authentication for the OpenManage stack.
+
+This module *DOES NOT* provide user account management; that will have to be
+provided via another plugin.
+
+The following agent_config options are expected by this module:
+rad_server: the RADIUS server we will be authenticating to
+rad_secret: the RADIUS secret we will be using
+rad_dictionary: the RADIUS dictionary to use.
+
+(c) 2012 SpiderOak, Inc.
+RADIUS auth code also contributed by RedHat, Inc.
+"""
+
+import logging
+from socket import gethostname
+
+import pyrad.packet
+from pyrad.client import Client
+from pyrad.dictionary import Dictionary
+
+
+def process_username(username, chop_at_symbol=True):
+    """
+    Selectively splits a username of the form "foo@bar.com" into just "foo"
+    for auth purposes.
+    """
+
+    if chop_at_symbol:
+        # Taking element [0] also copes with usernames that contain no '@'.
+        username = username.split('@', 1)[0]
+
+    return username
+
+
+def can_auth(config, username, password):
+    """
+    Performs authentication against a RADIUS server.
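A minimal sketch of the bcrypt round trip that check_local_auth above relies on: hashpw embeds the salt in its output, so re-hashing a candidate password with the stored hash as the salt must reproduce the stored hash exactly.

    import bcrypt

    stored = bcrypt.hashpw('s3cret', bcrypt.gensalt())   # as done in set_multi_passwords
    assert bcrypt.hashpw('s3cret', stored) == stored     # match: password is correct
    assert bcrypt.hashpw('wrong', stored) != stored      # mismatch: reject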
+ """ + + log = logging.getLogger('radius_source.can_auth') + + log.debug("Attempting RADIUS auth to %s for user %s" % (config['rad_server'], username,)) + + processed_user = process_username(username) + + # Create a RADIUS client to communicate with the server. + srv = Client( + server = config['rad_server'], + secret = config['rad_secret'], + dict = Dictionary(config['rad_dictionary']),) + + req = srv.CreateAuthPacket( + code = pyrad.packet.AccessRequest, + User_Name = processed_user, + NAS_Identifier = gethostname(),) + + req['User-Password'] = req.PwCrypt(password) + + try: + reply = srv.SendPacket(req) + except Exception: + log.exception("Problem contacting RADIUS server") + return False + + if reply.code == pyrad.packet.AccessAccept: + log.info("User %s accepted by RADIUS" % (username,)) + return True + + log.info("User %s rejected by RADIUS" % (username,)) + return False diff --git a/netkes/common/__init__.py b/netkes/common/__init__.py new file mode 100644 index 0000000..26c5a23 --- /dev/null +++ b/netkes/common/__init__.py @@ -0,0 +1,88 @@ +''' +__init__.py + +Common settings for all of the OMVA applications. + +(c) 2011 SpiderOak, Inc. +''' + +import json +import logging +import os +import os.path + +CONFIG_DIR = os.environ.get("OPENMANAGE_CONFIGDIR", + "/opt/openmanage/etc") +DATA_DIR = os.environ.get("OPENMANAGE_DATADIR", + "/var/lib/openmanage") +DEFAULT_RC = "agent_config.json" + +_CONFIG = None + +def set_config(config): + global _CONFIG + _CONFIG = config + +def get_config(): + global _CONFIG + return _CONFIG + +def get_ssl_keys(): + log = logging.getLogger('get_ssl_keys') + key_home = DATA_DIR + key_fname = os.path.join(key_home, 'server.key') + cert_fname = os.path.join(key_home, 'server.crt') + + print key_fname+" "+ cert_fname + if os.path.exists(key_fname) and os.path.exists(cert_fname): + log.info("Using SSL key/cert: %s %s"% (key_fname, cert_fname,)) + return key_fname, cert_fname + + log.warn("No SSL certs found at %s" % (DATA_DIR,)) + return None, None + + +def make_defaults(): + '''Provides default and sane configuration options + + Most users shouldn't need to change this. + ''' + default_config = {'db_user': 'directory_agent', + 'db_host': 'localhost', + 'db_db': 'openmanage', + 'api_root': 'https://spideroak.com/apis/partners/billing', + } + + return default_config + +def read_config_file(cmdline_option=None): + '''Reads the configuration file, optionally using an envar and/or command-line argument for the location.''' + + if cmdline_option is not None: + config_file = cmdline_option + else: + config_file = os.path.join(CONFIG_DIR, DEFAULT_RC) + + # TODO: cleanup the configuration file path. 
+ if not os.path.exists(config_file): + log = logging.getLogger("read_config_file") + log.warn("Missing config file at %s" % (config_file,)) + return dict() + + with open(config_file) as json_fobj: + fileconfig = json.load(json_fobj) + + for key in fileconfig.keys(): + if isinstance(fileconfig[key], unicode): + fileconfig[key] = fileconfig[key].encode('utf_8') + + config = merge_config(make_defaults(), fileconfig) + + return config + +def merge_config(config, cmdline_opts): + '''Merges the command-line options with the configuration file.''' + for key, value in cmdline_opts.iteritems(): + config[key] = value + + return config diff --git a/netkes/key_escrow/__init__.py b/netkes/key_escrow/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/netkes/key_escrow/admin.py b/netkes/key_escrow/admin.py new file mode 100644 index 0000000..03bc5cf --- /dev/null +++ b/netkes/key_escrow/admin.py @@ -0,0 +1,204 @@ +""" +base layer of escrow is always ourselves - so that we are not trusting escrow +agents with data they can read. + +""" + +import os +import time +import shutil + +from key_escrow.gen import make_keypair +from key_escrow.write import escrow_binary + +from Pandora.serial import load, dump, register_all + +_ESCROW_LAYERS_PATH = os.environ["SPIDEROAK_ESCROW_LAYERS_PATH"] +_ESCROW_KEYS_PATH = os.environ["SPIDEROAK_ESCROW_KEYS_PATH"] + +class EscrowError(Exception): pass + +def save_key(key_id, keypair): + """ + save (key id, keypair, ) for key id + """ + key_fn = os.path.join(_ESCROW_KEYS_PATH, "%s.key" % (key_id, )) + with open(key_fn, "ab") as fobj: + dump((key_id, keypair, ), fobj) + print "Saved %s to %s" % ( key_id, key_fn, ) + + return True + +def load_keypair(key_id): + "load and return keypair for key id" + key_fn = os.path.join(_ESCROW_KEYS_PATH, "%s.key" % (key_id, )) + with open(key_fn, "rb") as fobj: + stored_key_id, keypair = load(fobj) + assert key_id == stored_key_id + + return keypair + +def read_config(name): + """ + return value from named config file + """ + cfg_fn = os.path.join(_ESCROW_KEYS_PATH, "%s.cfg" % name) + with open(cfg_fn) as fobj: + return fobj.readline().strip() + +def write_config(name, value): + "write value to named config file" + cfg_fn = os.path.join(_ESCROW_KEYS_PATH, "%s.cfg" % name) + if os.path.exists(cfg_fn): + raise EscrowError("config %s already exists" % name) + with open(cfg_fn, "wb") as fobj: + fobj.write("%s\n" % (value, )) + + return True + +def get_base(): + "return (base key ID, keypair,) for base layer" + + base_id = read_config("base") + + keypair = load_keypair(base_id) + + return base_id, keypair + +def create_base(): + """ + Run only once to create base layer of key escrow (which is kept internal.) + + create a new (key_id, keypair, ) and save it. 
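For orientation, the on-disk layout these save_key/write_config helpers produce looks roughly like this (names illustrative; key ids are the opaque generated strings):

    # $SPIDEROAK_ESCROW_KEYS_PATH/
    #     <key_id>.key                <- dump((key_id, keypair)) from save_key
    #     base.cfg                    <- single line naming the base key_id
    #     brand.<name>.cfg            <- single line naming a brand's key_id
    # $SPIDEROAK_ESCROW_LAYERS_PATH/
    #     brand.<name>.layers.serial  <- serialized public-key layer list for a brand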
+ create a new file base.cfg with key_id in first line + """ + + base_id, base_keypair = make_keypair() + + save_key(base_id, base_keypair) + + write_config("base", base_id) + + print "base key ID %s and cfg saved" % ( base_id, ) + + return True + +def setup_brand(brand_identifier): + """ + create keys, brand config file, and brand layers file for NUS + """ + base_id, base_keypair = get_base() + + brand_id, brand_keypair = make_keypair() + + save_key(brand_id, brand_keypair) + + layers = ( (brand_id, brand_keypair.publickey(), ), + (base_id, base_keypair.publickey(), ), ) + + layer_fn = os.path.join(_ESCROW_LAYERS_PATH, + "brand.%s.layers.serial" % ( brand_identifier, )) + + if os.path.exists(layer_fn): + raise EscrowError("Brand id %s layers exist" % (brand_identifier, )) + with open(layer_fn, "ab") as fobj: + dump(layers, fobj) + + write_config("brand.%s" % (brand_identifier, ), brand_id) + + print "new keys and config saved for brand %s" % brand_identifier + + return brand_id, brand_identifier, layers + +def test_base(): + "test creating and reading base cfg and key" + create_base() + base_id, keypair = get_base() + assert keypair.__class__.__name__ == "RSAobj" + print "test base ok" + return True + +def test_setup_brand(): + from key_escrow.server import read_escrow_data + brand_identifier = 'my_test_brand' + brand_id, _brand_keypair, layers = setup_brand(brand_identifier) + + assert brand_id == layers[0][0] + + _user_key_id, user_keypair = make_keypair() + test_data = "0123456789" + escrowed_data = escrow_binary(layers, test_data, user_keypair) + plain_escrowed_data = read_escrow_data(brand_identifier, escrowed_data, + sign_key=user_keypair.publickey()) + assert plain_escrowed_data == test_data + + print "setup brand test ok" + return True + +def test_all(): + "run all tests" + global _ESCROW_KEYS_PATH + global _ESCROW_LAYERS_PATH + test_dir = "/tmp/key_escrow_admin_test.%s" % ( time.time(), ) + try: + _ESCROW_KEYS_PATH = os.path.join(test_dir, "keys") + _ESCROW_LAYERS_PATH = os.path.join(test_dir, "layers") + os.environ["SPIDEROAK_ESCROW_LAYERS_PATH"] = _ESCROW_LAYERS_PATH + os.environ["SPIDEROAK_ESCROW_KEYS_PATH"] = _ESCROW_KEYS_PATH + os.makedirs(_ESCROW_KEYS_PATH) + os.makedirs(_ESCROW_LAYERS_PATH) + test_results = [] + test_results.append(test_base()) + test_results.append(test_setup_brand()) + assert all(test_results) + finally: + shutil.rmtree(test_dir) + +def run_as_utility(): + register_all() + import sys + if "testall" in sys.argv: + test_all() + return + + if "create_base" in sys.argv: + create_base() + elif "setup_brand" in sys.argv: + brand_identifier = sys.argv[sys.argv.index('setup_brand') + 1] + setup_brand(brand_identifier) + else: + print >>sys.stderr, "I don't know what you want me to do!" + + print >>sys.stderr, "IF YOU HAVE CREATED NEW KEYS, BACK THEM UP NOW!" 
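Restating the layering scheme in miniature (brand identifier invented; assumes the base key already exists): setup_brand orders the layers innermost-first, so escrow_binary enciphers to the brand key and then the base key, and reading peels them in the opposite order.

    from key_escrow.server import read_escrow_data

    brand_id, _, layers = setup_brand('acme')   # ((brand_id, brand_pub), (base_id, base_pub))
    _, user_keypair = make_keypair()
    blob = escrow_binary(layers, '0123456789', user_keypair)
    data = read_escrow_data('acme', blob, sign_key=user_keypair.publickey())
    assert data == '0123456789'                 # base layer peeled first, then brand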
+ +if __name__ == "__main__": + run_as_utility() + +#def setup_escrow_agent(agent_name): +# """ +# """ +# pass +# +# +#def setup_brand(brand_identifier, agent_name=None): +# """ +# """ +# pass +# +# +#def get_agent(agent_name): +# "return (ID, keypair,) for specified agent layer escrow" +# +# agent_cfg = "agent.%s.cfg" % (agent_name, ) +# agent_key_id = read_config(agent_cfg) +# keypair = load_key(agent_key_id) +# +# return agent_key_id, keypair +# +# +# +# +# +# +# +# diff --git a/netkes/key_escrow/gen.py b/netkes/key_escrow/gen.py new file mode 100644 index 0000000..e8b5608 --- /dev/null +++ b/netkes/key_escrow/gen.py @@ -0,0 +1,27 @@ +import time +import random +import hmac +from hashlib import sha256 + +from Crypto.PublicKey import RSA + +from key_escrow.write import random_string + +GEN_COUNTER = 1 + +_hmac_key = '\x91\xc3\x94\xb2\xc7\xa4\xf6\xf8;n\x8a\xb1r{&\xf0.m\x97L\xab\x174\r\r\x92\x9c\xf4}\x9dp\xc7' + +def new_key_id(): + "create new unique key ID" + global GEN_COUNTER + timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime()) + key_id = "%s-%d-%d" % ( timestamp, GEN_COUNTER, random.randint(1, 99999), ) + key_hmac_digest = hmac.new(_hmac_key, key_id, sha256).hexdigest() + GEN_COUNTER += 1 + return key_id + key_hmac_digest + + +def make_keypair(size=2048): + "return Key ID string, keypair obj" + rsakey = RSA.generate(size, random_string) + return new_key_id(), rsakey diff --git a/netkes/key_escrow/read.py b/netkes/key_escrow/read.py new file mode 100644 index 0000000..3d2df6d --- /dev/null +++ b/netkes/key_escrow/read.py @@ -0,0 +1,76 @@ +import os +import time +import json +import zlib +import struct +from binascii import b2a_base64, a2b_base64 +from hashlib import sha256 + +from Crypto.PublicKey import RSA +from Crypto.Cipher import AES +from Crypto.Util.number import bytes_to_long, long_to_bytes + +AES_KEY_SIZE = 32 +AES_NONCE_SIZE = 16 + +def read_escrow_layer(escrow_keys, layer_data, sign_key=None): + """ + inverse of make_escrow_layer + + escrow_keys = dictionary of available private escrow keys in the format + key_id = key object + layer_data = binary output from make_escrow_layer + sign_key = public key of the user who has signed this layer + """ + + header_format = "!HHHL" + header_size = struct.calcsize(header_format) + + if not len(layer_data) > header_size: + raise ValueError( + "Layer too small: expected >%d bytes" % ( header_size, )) + + ( key_id_len, sig_hmac_len, sig_len, payload_len, ) = struct.unpack( + header_format, layer_data[0:header_size]) + + expected_size = header_size + sum( + (key_id_len, sig_hmac_len, sig_len, payload_len, )) + + if not len(layer_data) == expected_size: + raise ValueError( + "Layer wrong sized: expected %d but %d" % ( + expected_size, len(layer_data), )) + + body_format = "!%ds%ds%ds%ds" % ( + key_id_len, sig_hmac_len, sig_len, payload_len, ) + + ( key_id, sig_hmac, sig, payload, ) = struct.unpack( + body_format, layer_data[header_size:]) + + if not key_id in escrow_keys: + raise KeyError("Key not available for ID %r" % (key_id, )) + + if sign_key is not None: + valid = sign_key.verify(sig_hmac, (bytes_to_long(sig), )) + if not valid: + raise ValueError("Signature error") + + payload_data = json.loads(zlib.decompress(payload)) + for k, v in payload_data.iteritems(): + payload_data[k] = a2b_base64(v) + + priv_key = escrow_keys[key_id] + aes_key = priv_key.decrypt(payload_data['aes_key']) + + + if not len(aes_key) == AES_KEY_SIZE: + raise ValueError("aes_key wrongsized %d" % + (len(aes_key), )) + if not len(payload_data['aes_iv']) == 
AES_NONCE_SIZE: + raise ValueError("aes_iv wrongsized") + + aes = AES.new(aes_key, AES.MODE_CFB, + payload_data['aes_iv']) + data = aes.decrypt(payload_data['data']) + + return data diff --git a/netkes/key_escrow/server.py b/netkes/key_escrow/server.py new file mode 100644 index 0000000..3572baf --- /dev/null +++ b/netkes/key_escrow/server.py @@ -0,0 +1,90 @@ +import os +from key_escrow.read import read_escrow_layer + +from Pandora.serial import load + +_ESCROW_LAYERS_PATH = os.environ["SPIDEROAK_ESCROW_LAYERS_PATH"] +_ESCROW_KEYS_PATH = os.environ["SPIDEROAK_ESCROW_KEYS_PATH"] + +_ESCROW_LAYERS_CACHE = dict() +_ESCROW_KEYS_CACHE = dict() + + +def get_escrow_layers(brand_identifier): + """ + Return a binary string containing a serilization of escrow key layers + + The de-serialized structure will be a list of tulpes of the form: + (key id, public key, ) + + The first item in the list is the innermost escrow layer (to which + plaintext data is first enciphered.) + """ + + if brand_identifier in _ESCROW_LAYERS_CACHE: + return _ESCROW_LAYERS_CACHE[brand_identifier] + + filepath = os.path.join(_ESCROW_LAYERS_PATH, + "brand.%s.layers.serial" % (brand_identifier, )) + + with open(filepath, "rb") as fobj: + data = fobj.read() + + _ESCROW_LAYERS_CACHE[brand_identifier] = data + + return data + +def load_escrow_key_cache(): + """ + populate escrow key cache with everything in SPIDEROAK_ESCROW_KEYS_PATH + """ + #print "loading keys in %s" % _ESCROW_KEYS_PATH + + # TODO perhaps memcache this w/ short (30m?) expire. + + for name in os.listdir(_ESCROW_KEYS_PATH): + if not name.endswith(".key"): + continue + + filename_key_id = name[0:-4] + if filename_key_id in _ESCROW_KEYS_CACHE: + continue + + keypath = os.path.join(_ESCROW_KEYS_PATH, name) + with open(keypath, "rb") as fobj: + key_id, key = load(fobj) + assert filename_key_id == key_id + _ESCROW_KEYS_CACHE[key_id] = key + #print "Loaded %s" % key_id + + return True + +def read_escrow_data(brand_identifier, escrowed_data, layer_count=2, + sign_key=None, _recur=0): + """ + escrowed_data = binary data encoded to escrow keys + sign_key = user's public key used to check signatures (optional) + layer_count = number of layers to go through (2 by default) + + returns: plaintext escrowed data + """ + # TODO: make this talk to a remote key escrow service hardened/isolated by + # policy + + layer_data = escrowed_data + + try: + for layer_idx in range(layer_count): + layer_data = read_escrow_layer( + _ESCROW_KEYS_CACHE, layer_data, sign_key) + except KeyError, err: + if not "Key not available for ID" in str(err): + raise + if _recur: + raise + load_escrow_key_cache() + return read_escrow_data(brand_identifier, escrowed_data, + layer_count=layer_count, sign_key=sign_key, _recur=_recur+1) + + return layer_data + diff --git a/netkes/key_escrow/test.py b/netkes/key_escrow/test.py new file mode 100644 index 0000000..641bc9a --- /dev/null +++ b/netkes/key_escrow/test.py @@ -0,0 +1,56 @@ +import os + +from key_escrow.write import make_escrow_layer +from key_escrow.read import read_escrow_layer +from key_escrow.gen import make_keypair + +_TEST_LAYERS = 500 +_TEST_DATA_SIZE = 4097 + +def test_write_and_read_layers(): + """ + test encapsulating data in many escrow layers and reading it back out + """ + + userkey = make_keypair() + + layers = list() + for _ in range(_TEST_LAYERS): + layers.append(make_keypair()) + + # this is the data that goes in the innermost layer + data = os.urandom(_TEST_DATA_SIZE) + + layer_data = data + + # we encapsulate this data in layers of key 
escrow + for idx, layer in enumerate(layers): + cipher_layer_data = make_escrow_layer( + layer[0], layer[1].publickey(), layer_data, userkey[1]) + + # at every layer we test that we can read back the data + plain_layer_data = read_escrow_layer( + { layer[0]: layer[1] }, cipher_layer_data, userkey[1].publickey()) + assert plain_layer_data == layer_data, \ + "readback fail at layer %d" % (idx + 1) + + layer_data = cipher_layer_data + + + # read back the layers in reverse + for idx, layer in enumerate(layers[::-1]): + plain_layer_data = read_escrow_layer( + { layer[0]: layer[1] }, layer_data, userkey[1].publickey()) + layer_data = plain_layer_data + + # we should get our original data back out + assert layer_data == data + + return True + +def test_all(): + assert test_write_and_read_layers() + print "All tests complete" + +if __name__ == "__main__": + test_all() diff --git a/netkes/key_escrow/write.py b/netkes/key_escrow/write.py new file mode 100644 index 0000000..f47471e --- /dev/null +++ b/netkes/key_escrow/write.py @@ -0,0 +1,80 @@ +import os +import time +import json +import zlib +import struct +from binascii import b2a_base64, a2b_base64 +from hashlib import sha256 +import hmac + +from Crypto.PublicKey import RSA +from Crypto.Cipher import AES +from Crypto.Util.number import bytes_to_long, long_to_bytes + +AES_KEY_SIZE = 32 +AES_NONCE_SIZE = 16 + +def random_string(size): + "return cryptographically secure string of specified size" + return os.urandom(size) + +def new_session_key(size): + """ + make session key suitable for use for encrypting data via rsa + """ + # XXX: there's a bug in Crypto.PublicKey.RSA. + # It loses the first byte if it is NUL, so until this is fixed, we + # don't use keys with a first byte of \x00 + key = random_string(size) + while key[0] == "\x00": + key = random_string(size) + + return key + +def make_escrow_layer(pub_key_id, pub_key, data, sign_key): + """ + make an escrow layer (string) that includes the binary data + + pub_key_id = string to identify the private key the layer can be read with + pub_key = public key object for the escrow party at this layer + data = binary data to store + sign_key = private key object of the user signing the layer + + returns binary string + """ + + aes_key = new_session_key(AES_KEY_SIZE) + aes_iv = sha256(str(time.time())).digest()[:AES_NONCE_SIZE] + aes = AES.new(aes_key, AES.MODE_CFB, aes_iv) + aes_encoded_data = aes.encrypt(data) + + payload = zlib.compress(json.dumps(dict( + aes_key = b2a_base64( + pub_key.encrypt(aes_key, random_string(len(aes_key)))[0]), + aes_iv = b2a_base64(aes_iv), + data = b2a_base64(aes_encoded_data)))) + + sig_hmac = hmac.new(key='', msg=payload, digestmod=sha256).digest() + sig = long_to_bytes(sign_key.sign(sig_hmac, random_string(len(sig_hmac)))[0]) + + struct_format = "!HHHL%ds%ds%ds%ds" % ( + len(pub_key_id), len(sig_hmac), len(sig), len(payload), ) + + return struct.pack(struct_format, + len(pub_key_id), len(sig_hmac), len(sig), len(payload), + pub_key_id, sig_hmac, sig, payload) + + +def escrow_binary(escrow_key_layers, data, sign_key): + """ + write binary escrowed data, signed by private key, to the given escrow + layers + """ + + layer_data = data + for idx, layer in enumerate(escrow_key_layers): + layer_data = make_escrow_layer( + layer[0], layer[1], layer_data, sign_key) + + return layer_data + diff --git a/netkes/netkes_agent/__init__.py b/netkes/netkes_agent/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/netkes/netkes_agent/app_factory.py 
b/netkes/netkes_agent/app_factory.py new file mode 100644 index 0000000..c442104 --- /dev/null +++ b/netkes/netkes_agent/app_factory.py @@ -0,0 +1,155 @@ +import logging +import os +import re + +from urllib import unquote +from wsgi_util.router import router +from wsgi_util.http import BadRequest, SuperSimple, NotFound, Forbidden, ServerError +from wsgi_util.post_util import read_postdata, read_querydata + +from common import get_config, read_config_file, set_config +from account_mgr import authenticator +from key_escrow import server +from Pandora import serial + + +def setup_logging(): + handler = logging.StreamHandler() + formatter = logging.Formatter( + '%(asctime)s %(levelname)-7s %(name)-15s: %(message)s') + handler.setFormatter(formatter) + logging.root.addHandler(handler) + + if 'SPIDEROAK_NETKES_LOG_DEBUG' in os.environ: + logging.root.setLevel(logging.DEBUG) + logging.info("Debug logging enabled. Warning, lots of output!") + else: + logging.root.setLevel(logging.INFO) + +def setup_application(): + config = get_config() + if config is not None: + return + config = read_config_file() + set_config(config) + +setup_logging() +setup_application() +serial.register_all() + +@read_querydata +def get_layers(environ, start_response): + log = logging.getLogger("get_layers") + + log.debug("start") + try: + brand_identifier = environ['query_data']['brand_id'][0] + except KeyError: + log.error("Got bad request.") + return BadRequest()(environ, start_response) + + try: + layer_data = server.get_escrow_layers(brand_identifier) + except (KeyError, IOError,): + log.warn("Got missing brand_identifier: %s" % (brand_identifier,)) + return NotFound()(environ, start_response) + + log.info("Returning escrow keys for %s" % (brand_identifier,)) + + return SuperSimple(layer_data, ctype="application/octet-stream")(environ, start_response) + + +@read_querydata +def authenticate_user(environ, start_response): + log = logging.getLogger('authenticate_user') + log.debug("start") + + try: + brand_identifier = environ['query_data']['brand_id'][0] + username = environ['query_data']['username'][0] + password = environ['query_data']['password'][0] + crypt_pw = environ['query_data'].get('crypt_pw', ["True"])[0] + except KeyError: + log.error("Got bad request.") + return BadRequest()(environ, start_response) + + decoded_user = unquote(username) + + # If we get anything OTHER than explicitly "False" in the request, we will assume it's an encrypted password. 
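A sketch of the two request shapes the branch below accepts (parameter names are the real ones read from query_data; values invented):

    # plaintext mode: the client explicitly opts out of escrowed passwords
    #   GET /auth?brand_id=acme&username=jdoe%40example.com&password=pw&crypt_pw=False
    # escrowed mode (the default): 'password' carries a blob for read_escrow_data
    #   GET /auth?brand_id=acme&username=jdoe%40example.com&password=<escrowed-blob>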
+    if crypt_pw == "False":
+        plaintext_password = password
+    else:
+        try:
+            plaintext_password = server.read_escrow_data(
+                brand_identifier, password)
+        except KeyError:
+            log.warn("missing identifier %s" % (brand_identifier,))
+            return NotFound()(environ, start_response)
+        except ValueError:
+            log.warn("bad values for authenticating user %s" % (decoded_user,))
+            return BadRequest()(environ, start_response)
+        except Exception:
+            log.exception("server.read_escrow_data failed for user %s brand %s"
+                          % (decoded_user, brand_identifier,))
+            return ServerError()(environ, start_response)
+
+    if not authenticator(get_config(), decoded_user, plaintext_password):
+        log.info("Auth failed for %s" % (decoded_user,))
+        return Forbidden()(environ, start_response)
+
+    log.info("Auth OK for brand %s with user %s" % (brand_identifier, decoded_user,))
+    return SuperSimple("OK")(environ, start_response)
+
+
+@read_querydata
+@read_postdata
+def read_data(environ, start_response):
+    log = logging.getLogger("read_data")
+
+    log.debug("start")
+    try:
+        brand_identifier = environ['query_data']['brand_id'][0]
+        escrowed_data = environ['post_data']['escrow_data'][0]
+        serial_sign_key = environ['post_data']['sign_key'][0]
+    except KeyError:
+        log.warn("KeyError at start")
+        return BadRequest()(environ, start_response)
+
+    try:
+        layer_count = int(environ['post_data'].get('layer_count', [])[0])
+    except IndexError:
+        layer_count = None
+
+    sign_key = serial.loads(serial_sign_key)
+    log.debug("Being sent:")
+    log.debug("brand_identifier: %r" % brand_identifier)
+    log.debug("layer_count: %r" % layer_count)
+
+    try:
+        if layer_count is None:
+            plaintext_data = server.read_escrow_data(brand_identifier, escrowed_data,
+                                                     sign_key=sign_key)
+        else:
+            plaintext_data = server.read_escrow_data(brand_identifier, escrowed_data,
+                                                     layer_count=layer_count,
+                                                     sign_key=sign_key)
+    except ValueError:
+        log.warn("ValueError at reading escrow data")
+        return BadRequest()(environ, start_response)
+    except KeyError:
+        log.warn("KeyError at reading escrow data")
+        return NotFound()(environ, start_response)
+    except Exception:
+        log.exception('500 error in reading escrow data')
+        return ServerError()(environ, start_response)
+
+    log.info("Read data for brand %s" % (brand_identifier,))
+    return SuperSimple(plaintext_data, ctype="application/octet-stream")(environ, start_response)
+
+def app_factory(environ, start_response):
+    # rx, methods, app
+    urls = [
+        (re.compile(r'/layers$'), ('GET', 'HEAD',), get_layers),
+        (re.compile(r'/auth$'), ('GET', 'HEAD',), authenticate_user),
+        (re.compile(r'/data$'), ('POST',), read_data),
+    ]
+    return router(urls)(environ, start_response)
diff --git a/netkes/netkes_agent/config_mgr.py b/netkes/netkes_agent/config_mgr.py
new file mode 100644
index 0000000..e831b69
--- /dev/null
+++ b/netkes/netkes_agent/config_mgr.py
@@ -0,0 +1,67 @@
+"""
+config_mgr.py
+
+Provides an API to control the virtual machine's NetKES and directory agent configuration.
+"""
+
+import json
+import os
+import os.path
+import subprocess
+
+_SERVICE_NAME = 'openmanage'
+
+class ConfigManager(object):
+    """
+    Provides an easy interface to get and set openmanage configuration.
+
+    Assumes you have r/w access to the configuration file, and the ability to restart
+    the openmanage service.
+    """
+    def __init__(self, filename):
+        """
+        Constructor. Give it a filename, and it will pull configuration from that file.
+
+        @see default_config for a great place to start looking for the configuration file.
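Typical use of ConfigManager as defined here, assuming OPENMANAGE_CONFIGDIR is set so that default_config can locate the file (the key being changed is just an example):

    mgr = config_mgr.ConfigManager(config_mgr.default_config())
    mgr.config['api_root'] = 'https://example.invalid/billing'
    mgr.apply_config()    # rewrites the JSON file, then 'sv restart openmanage'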
+ """ + self._config_file = filename + + with open(self._config_file) as cf: + self.config = json.load(cf) + + def new_cfg(self, new_filename, want_file_read=False): + """ + Changes the config file we point at. + + If it exists, we can optionally read it + """ + self._config_file = new_filename + + if want_file_read: + with open(self._config_file) as cf: + self.config = json.load(cf) + + def apply_config(self): + """ + Saves the current configuration to the configuration file, and restarts services + to apply the new configuration. + """ + with open(self._config_file, 'w') as cf: + json.dump(self.config, cf) + + self._kick_services() + + def _kick_services(self): + command = "sv restart " + _SERVICE_NAME + subprocess.call(command, shell=True) + + +def default_config(): + """ + Provides a sane place where the configuration file is normally kept. + """ + conf_dir = os.environ.get('OPENMANAGE_CONFIGDIR', None) + if conf_dir is None: + return None + + return os.path.join(conf_dir, 'agent_config.json') diff --git a/netkes/netkes_agent/gunicorn.conf.py b/netkes/netkes_agent/gunicorn.conf.py new file mode 100644 index 0000000..e069934 --- /dev/null +++ b/netkes/netkes_agent/gunicorn.conf.py @@ -0,0 +1,3 @@ +import multiprocessing + +workers = multiprocessing.cpu_count() * 2 + 1 diff --git a/netkes/netkes_agent/test/__init__.py b/netkes/netkes_agent/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/netkes/netkes_agent/test/test.json b/netkes/netkes_agent/test/test.json new file mode 100644 index 0000000..2c734d3 --- /dev/null +++ b/netkes/netkes_agent/test/test.json @@ -0,0 +1,3 @@ +{ + "testkey_1": "testvalue_1_deadbeef" +} diff --git a/netkes/netkes_agent/test/test_config_mgr.py b/netkes/netkes_agent/test/test_config_mgr.py new file mode 100644 index 0000000..b932503 --- /dev/null +++ b/netkes/netkes_agent/test/test_config_mgr.py @@ -0,0 +1,39 @@ +import unittest + +from netkes_agent import config_mgr + +_config_test_file = "test.json" +_tmp_config_test = "tmptest.json" + +class TestReadConfiguration(unittest.TestCase): + def test_successful_read_withkey(self): + mgr = config_mgr.ConfigManager(_config_test_file) + + self.assertEqual(mgr.config['testkey_1'], + 'testvalue_1_deadbeef') + + def test_successful_read_nokey(self): + mgr = config_mgr.ConfigManager(_config_test_file) + + with self.assertRaises(KeyError): + throwaway = mgr.config['testkey_deadbeef'] + + def test_failed_read(self): + with self.assertRaises(IOError): + mgr = config_mgr.ConfigManager('DEADBEEF') + + +class TestSetConfiguration(unittest.TestCase): + def setUp(self): + self.mgr = config_mgr.ConfigManager(_config_test_file) + + def test_set_new_data(self): + self.mgr.config['test_newvalue'] = 'DEADBEEFERY' + + self.assertEqual(self.mgr.config['test_newvalue'], 'DEADBEEFERY') + + def test_set_apply_new_data(self): + self.mgr.config['test_newvalue'] = 'DEADBEEF_2' + +if __name__ == "__main__": + unittest.main() diff --git a/netkes/wsgi_util/__init__.py b/netkes/wsgi_util/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/netkes/wsgi_util/auth.py b/netkes/wsgi_util/auth.py new file mode 100644 index 0000000..89f1c9b --- /dev/null +++ b/netkes/wsgi_util/auth.py @@ -0,0 +1,8 @@ +import base64 + + +def parse_auth_header(header): + scheme, data = header.split(' ', 1) + if scheme != 'Basic': + raise ValueError('%s authentication scheme not supported.' 
% (scheme,))
+    return base64.b64decode(data).split(':', 1)
diff --git a/netkes/wsgi_util/cookie_util.py b/netkes/wsgi_util/cookie_util.py
new file mode 100644
index 0000000..d3211f4
--- /dev/null
+++ b/netkes/wsgi_util/cookie_util.py
@@ -0,0 +1,14 @@
+import Cookie
+from urllib import unquote
+
+
+def read_cookie(app):
+    def read_cookie(environ, start_response):
+        try:
+            cookie = Cookie.SimpleCookie(environ.get('HTTP_COOKIE', ''))
+        except Cookie.CookieError:
+            pass
+        else:
+            environ['cookie_data'] = dict((k, v.value and unquote(v.value)) for k, v in cookie.items())
+        return app(environ, start_response)
+    return read_cookie
diff --git a/netkes/wsgi_util/http.py b/netkes/wsgi_util/http.py
new file mode 100644
index 0000000..6b3e374
--- /dev/null
+++ b/netkes/wsgi_util/http.py
@@ -0,0 +1,108 @@
+import sys
+
+from wsgi_util import http_status
+
+
+ee = ('<html><head><title>%(status)s</title></head><body>\r\n'
+      '<h1>%(status)s</h1>\r\n'
+      '<p>%(message)s</p>\r\n'
+      '</body></html>
\r\n') + + +def SuperSimple(message, status=http_status.OK, + headers=(), ctype='text/plain', exc_info=()): + def app(environ, start_response): + start_response(status, + [('Content-type', ctype), + ('Content-length', str(len(message)))] + list(headers), + exc_info) + return [message] + return app + + +def Simple(message, status=http_status.OK, + headers=(), ctype='text/html', exc_info=()): + body = ee % dict(status=status, message=message) + return SuperSimple(body, status, headers, ctype, exc_info) + + +def BadRequest(extra_headers=()): + return Simple('Bad request.', http_status.BAD_REQUEST, list(extra_headers)) + + +def NotImplemented(extra_headers=()): + return Simple('Not implemented.', http_status.NOT_IMPLEMENTED, list(extra_headers)) + + +def ServerError(extra_headers=()): + return Simple('An internal server error has occurred. ' + 'Please try again later.', + http_status.SERVER_ERROR, + list(extra_headers), + exc_info=sys.exc_info()) + + +def NotFound(extra_headers=()): + return Simple('Not found.', http_status.NOT_FOUND, list(extra_headers)) + + +def Created(extra_headers=()): + return Simple('Created.', http_status.CREATED, list(extra_headers)) + + +def NotModified(extra_headers=()): + def app(environ, start_response): + start_response(http_status.NOT_MODIFIED, list(extra_headers)) + return [] + return app + + +def MovedPermanently(location, extra_headers=()): + return Simple('The requested resource has moved to ' + '%(location)s.' % locals(), + http_status.MOVED_PERMANENTLY, + [('Location', location)] + list(extra_headers)) + + +def SeeOther(location, extra_headers=()): + return Simple('The requested resource was found at ' + '%(location)s.' % locals(), + http_status.SEE_OTHER, + [('Location', location)] + list(extra_headers)) + + +def RangeNotSatisfiable(size, extra_headers=()): + return Simple('Requested range not satisfiable.', + http_status.RANGE_NOT_SATISFIABLE, + [('Content-range', '*/%d' % (size,))] + list(extra_headers)) + + +def HelloWorld(extra_headers=()): + return Simple('Hello World!', ctype='text/plain', headers=list(extra_headers)) + + +def Options(methods, extra_headers=()): + methods = ', '.join(methods) + return Simple('The requested resource supports the following methods: ' + + methods, headers=[('Allow', methods)] + list(extra_headers)) + + +def MethodNotAllowed(methods, extra_headers=()): + return Simple('Method not allowed.', + http_status.METHOD_NOT_ALLOWED, + [('Allow', ', '.join(methods))] + list(extra_headers)) + + +def Forbidden(extra_headers=()): + return Simple('Forbidden.', + http_status.FORBIDDEN, + list(extra_headers)) + + +def Unauthorized(challenge, extra_headers=()): + return Simple('Unauthorized.', + http_status.UNAUTHORIZED, + [('WWW-Authenticate', challenge)] + list(extra_headers)) + +def Teapot(extra_headers=()): + return Simple("I'm a teapot.", + http_status.IM_A_TEAPOT, + list(extra_headers)) diff --git a/netkes/wsgi_util/http_status.py b/netkes/wsgi_util/http_status.py new file mode 100644 index 0000000..5064dad --- /dev/null +++ b/netkes/wsgi_util/http_status.py @@ -0,0 +1,57 @@ +def messages(): + status = dict( + CONTINUE = (100, "Continue"), + SWITCHING_PROTOCOLS = (101, "Switching Protocols"), + + OK = (200, "OK"), + CREATED = (201, "Created"), + ACCEPTED = (202, "Accepted"), + NON_AUTHORITATIVE = (203, "Non-Authoritative Information"), + NO_CONTENT = (204, "No Content"), + RESET_CONTENT = (205, "Reset Content"), + PARTIAL_CONTENT = (206, "Partial Content"), + + MULTIPLE_CHOICES = (300, "Multiple Choices"), + MOVED_PERMANENTLY = 
(301, "Moved Permanently"), + FOUND = (302, "Found"), + SEE_OTHER = (303, "See Other"), + NOT_MODIFIED = (304, "Not Modified"), + USE_PROXY = (305, "Use Proxy"), + TEMP_REDIRECT = (307, "Temporary Redirect"), + + BAD_REQUEST = (400, "Bad Request"), + UNAUTHORIZED = (401, "Unauthorized"), + PAYMENT_REQUIRED = (402, "Payment Required"), + FORBIDDEN = (403, "Forbidden"), + NOT_FOUND = (404, "Not Found"), + METHOD_NOT_ALLOWED = (405, "Method Not Allowed"), + NOT_ACCEPTABLE = (406, "Not Acceptable"), + PROXY_AUTH_REQUIRED = (407, "Proxy Authentication Required"), + REQUEST_TIME_OUT = (408, "Request Time-out"), + CONFLICT = (409, "Conflict"), + GONE = (410, "Gone"), + LENGTH_REQUIRED = (411, "Length Required"), + PRECONDITION_FAILED = (412, "Precondition Failed"), + ENTITY_TOO_LARGE = (413, "Request Entity Too Large"), + URI_TOO_LARGE = (414, "Request-URI Too Large"), + UNSUPPORTED_MEDIA_TYPE = (415, "Unsupported Media Type"), + RANGE_NOT_SATISFIABLE = (416, "Requested Range Not Satisfiable"), + EXPECTATION_FAILED = (417, "Expectation Failed"), + IM_A_TEAPOT = (418, "I am a teapot"), + + SERVER_ERROR = (500, "Internal Server Error"), + NOT_IMPLEMENTED = (501, "Not Implemented"), + BAD_GATEWAY = (502, "Bad Gateway"), + SERVICE_UNAVAILABLE = (503, "Service Unavailable"), + GATEWAY_TIME_OUT = (504, "Gateway Time-out"), + VERSION_NOT_SUPPORTED = (505, "HTTP Version Not Supported"), + ) + + messages = dict(status.itervalues()) + status = dict((k, '%d %s' % v) for k, v in status.iteritems()) + status['messages'] = messages + return status + +messages = messages() +__all__ = list(messages.keys()) +locals().update(messages) diff --git a/netkes/wsgi_util/json_util.py b/netkes/wsgi_util/json_util.py new file mode 100644 index 0000000..c9fe700 --- /dev/null +++ b/netkes/wsgi_util/json_util.py @@ -0,0 +1,30 @@ +import json +from urlparse import parse_qs + +from wsgi_util import http_status + + +def dump_json(data, environ, start_response): + try: + callback = parse_qs(environ['QUERY_STRING'])['callback'][0] + except (TypeError, ValueError, IndexError, KeyError): + data = json.dumps(data) + else: + data = '%s(%s)' % (callback, json.dumps(data)) + start_response(http_status.OK, [('Content-type', 'application/javascript'), + ('Content-length', str(len(data)))]) + return [data] + + +dump_jsonp = dump_json + + +def read_json(app): + def read_json(environ, start_response): + data = ''.join(environ['wsgi.input']) + try: + environ['json_data'] = json.loads(data) + except ValueError: + pass + return app(environ, start_response) + return read_json diff --git a/netkes/wsgi_util/post_util.py b/netkes/wsgi_util/post_util.py new file mode 100644 index 0000000..d35c66b --- /dev/null +++ b/netkes/wsgi_util/post_util.py @@ -0,0 +1,16 @@ +from urlparse import parse_qs + + +def read_postdata(app): + def read_postdata(environ, start_response): + data = ''.join(environ['wsgi.input']) + environ['post_data'] = parse_qs(data) + return app(environ, start_response) + return read_postdata + + +def read_querydata(app): + def read_querydata(environ, start_response): + environ['query_data'] = parse_qs(environ['QUERY_STRING']) + return app(environ, start_response) + return read_querydata diff --git a/netkes/wsgi_util/router.py b/netkes/wsgi_util/router.py new file mode 100644 index 0000000..573286b --- /dev/null +++ b/netkes/wsgi_util/router.py @@ -0,0 +1,27 @@ +'''A wsgi middleware that dispatches requests.''' + +from wsgi_util import http + + +class router(object): + def __init__(self, routes=()): + self.routes = routes + + def 
diff --git a/netkes/wsgi_util/post_util.py b/netkes/wsgi_util/post_util.py
new file mode 100644
index 0000000..d35c66b
--- /dev/null
+++ b/netkes/wsgi_util/post_util.py
@@ -0,0 +1,16 @@
+from urlparse import parse_qs
+
+
+def read_postdata(app):
+    def read_postdata(environ, start_response):
+        data = environ['wsgi.input'].read(int(environ.get('CONTENT_LENGTH') or 0))
+        environ['post_data'] = parse_qs(data)
+        return app(environ, start_response)
+    return read_postdata
+
+
+def read_querydata(app):
+    def read_querydata(environ, start_response):
+        environ['query_data'] = parse_qs(environ['QUERY_STRING'])
+        return app(environ, start_response)
+    return read_querydata
diff --git a/netkes/wsgi_util/router.py b/netkes/wsgi_util/router.py
new file mode 100644
index 0000000..573286b
--- /dev/null
+++ b/netkes/wsgi_util/router.py
@@ -0,0 +1,27 @@
+'''A WSGI middleware that dispatches requests.'''
+
+from wsgi_util import http
+
+
+class router(object):
+    def __init__(self, routes=()):
+        self.routes = routes
+
+    def __call__(self, environ, start_response):
+        if environ['REQUEST_METHOD'] == 'OPTIONS' and environ['PATH_INFO'] == '*':
+            return http.HelloWorld()(environ, start_response)
+        for rx, methods, application in self.routes:
+            m = rx.match(environ['PATH_INFO'])
+            if m is None:
+                continue
+            if methods and environ['REQUEST_METHOD'] not in methods:
+                if environ['REQUEST_METHOD'] == 'OPTIONS':
+                    return http.Options(methods)(environ, start_response)
+                return http.MethodNotAllowed(methods)(environ, start_response)
+            environ['SCRIPT_NAME'] += m.group(0)
+            environ['PATH_INFO'] = environ['PATH_INFO'][m.end():]
+            environ['router.args'] = m.groups()
+            environ['router.kwargs'] = m.groupdict()
+            return application(environ, start_response)
+        else:
+            return http.NotFound()(environ, start_response)
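
The router matches PATH_INFO against compiled regexes, enforces per-route method lists (answering OPTIONS requests itself), and shifts the matched prefix from PATH_INFO onto SCRIPT_NAME before delegating, so nested applications see a rebased path. A sketch of wiring it up together with the middleware above; the route table and the list_users handler are invented for illustration:

    import re

    from wsgi_util import json_util
    from wsgi_util.post_util import read_querydata
    from wsgi_util.router import router


    @read_querydata
    def list_users(environ, start_response):
        # Invented handler: echo the parsed query string back as JSON.
        return json_util.dump_json(environ['query_data'], environ, start_response)

    # The router instance is itself a WSGI app and can be served directly.
    urls = router([
        (re.compile(r'^/users$'), ('GET',), list_users),
    ])

Any capture groups in a route's regex are handed to the matched application via environ['router.args'] and environ['router.kwargs'].
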
diff --git a/sql/2012-12-11_blue_1_2.sql b/sql/2012-12-11_blue_1_2.sql
new file mode 100644
index 0000000..ce201b4
--- /dev/null
+++ b/sql/2012-12-11_blue_1_2.sql
@@ -0,0 +1,32 @@
+begin;
+
+create table admin_setup_tokens (
+    token varchar(40) primary key,
+    date_created timestamp not null default current_timestamp,
+    expiry timestamp not null default current_timestamp + '3 days'::interval,
+    no_devices_only bool not null default TRUE,
+    single_use_only bool not null default TRUE
+);
+
+create table admin_token_avatar_use (
+    token varchar(40) not null references admin_setup_tokens,
+    avatar_id int4 not null references users (avatar_id),
+    timestamp timestamp not null default current_timestamp
+);
+
+create or replace view admin_setup_tokens_use as
+    select token, date_created, expiry, no_devices_only, single_use_only,
+           exists(select * from admin_token_avatar_use au where au.token=a.token) as used,
+           case
+               when single_use_only and exists(select * from admin_token_avatar_use au where au.token=a.token) then false
+               when expiry < now() then false
+               else true
+           end as active
+    from admin_setup_tokens a;
+
+grant select on admin_setup_tokens_use to admin_console;
+grant select, insert, update on admin_setup_tokens to admin_console;
+grant select, update on admin_setup_tokens to directory_agent;
+grant select, insert on admin_token_avatar_use to directory_agent;
+
+commit;
diff --git a/sql/base_schema.sql b/sql/base_schema.sql
new file mode 100644
index 0000000..bdd5ddb
--- /dev/null
+++ b/sql/base_schema.sql
@@ -0,0 +1,50 @@
+BEGIN;
+
+CREATE USER directory_agent WITH PASSWORD 'initial';
+CREATE USER admin_console WITH PASSWORD 'iexyjtso';
+
+CREATE TABLE passwords (
+    email varchar(64) primary key,
+    pw_hash varchar(128)
+);
+GRANT SELECT, UPDATE, INSERT, DELETE ON passwords TO directory_agent;
+
+CREATE TABLE users (
+    uniqueid text primary key,
+    email varchar(64) unique not null,
+    avatar_id int4 unique not null,
+    givenname varchar(64) not null,
+    surname varchar(64) not null,
+    group_id int4 not null,
+    enabled boolean not null default true
+);
+GRANT SELECT, UPDATE, INSERT, DELETE ON users TO directory_agent;
+
+CREATE INDEX users_idx ON users (uniqueid, email, avatar_id);
+
+CREATE LANGUAGE plpgsql;
+
+-- the following is from:
+-- http://www.postgresql.org/docs/current/static/plpgsql-control-structures.html#PLPGSQL-UPSERT-EXAMPLE
+CREATE FUNCTION upsert_password(user_email VARCHAR(64), pw VARCHAR(128)) RETURNS VOID AS
+$$
+BEGIN
+    LOOP
+        UPDATE passwords SET pw_hash = pw WHERE user_email = email;
+        IF found THEN
+            RETURN;
+        END IF;
+
+        -- If we didn't update any rows, try now to insert a row.
+        BEGIN
+            INSERT INTO passwords VALUES (user_email, pw);
+            RETURN;
+        EXCEPTION WHEN unique_violation THEN
+            -- do nothing, and try to loop over again to update.
+        END;
+    END LOOP;
+END;
+$$
+LANGUAGE plpgsql;
+
+COMMIT;
diff --git a/upgrade/2012-12-11_blue1-2.sh b/upgrade/2012-12-11_blue1-2.sh
new file mode 100755
index 0000000..03488a8
--- /dev/null
+++ b/upgrade/2012-12-11_blue1-2.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+cd /opt/openmanage
+rm -rf netkes
+git clone https://spideroak.com/dist/net_kes.git
+ln -s /opt/openmanage/net_kes/netkes/ /opt/openmanage/netkes
+
+rm -rf django/omva
+ln -s /opt/openmanage/net_kes/django/omva /opt/openmanage/django/omva
+
+cd django/apps/blue_management
+sudo git pull
+
+cd ../so_common/
+sudo git pull
+
+sudo -u postgres psql -d openmanage -a -f /opt/openmanage/net_kes/sql/2012-12-11_blue_1_2.sql
+
+sudo sv restart /etc/service/admin_console/
+sudo sv restart /etc/service/openmanage/
+
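
The upsert_password() function follows the PostgreSQL manual's upsert recipe: loop an UPDATE, and if no row was touched, attempt the INSERT, retrying on unique_violation so that two concurrent writers cannot fail. A sketch of invoking it with psycopg2, which the directory agent already depends on (the host and the hash literal here are placeholders, not taken from the repo):

    import psycopg2

    # Role and initial password as created in base_schema.sql; host is assumed.
    conn = psycopg2.connect(database='openmanage', user='directory_agent',
                            password='initial', host='localhost')
    cur = conn.cursor()
    # The function retries internally, so a single call is race-safe.
    cur.execute("SELECT upsert_password(%s, %s)",
                ('user@example.com', 'pw-hash-placeholder'))
    conn.commit()
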