diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0b4e7eb --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +tests/build +tests/.vagrant diff --git a/CHANGES.md b/CHANGES.md index c1eb0b4..e274913 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,2 +1,13 @@ +Version 2.1: + - Change default Docker version to 1.11.2, was 1.5.0 + - Attempt to handle data volume loss when we update from pre-1.10 to post-1.10. + See: https://github.com/docker/docker/issues/20079 + - Attempt to handle missing volume symlinks when we upgrade pre-1.9 data volumes + to a post-1.9.x version. + - Add "docker_attempt_upgrade_fixes" configuration variable. This defaults to False; + the upgrade fixes mentioned above won't be attempted unless it is set to True. + - Added automated tests in the "tests" directory - if you have Vagrant installed, they + can be run by "cd tests ; ./run_tests.sh". + Version 2.0: - Port to github diff --git a/README.md b/README.md index 871ea1c..3535a7b 100644 --- a/README.md +++ b/README.md @@ -10,32 +10,47 @@ to manage which version of Docker we install. Because of interdepencies between version and software that we install (especially `docker-py`), we have to be careful about Docker versions. - - Our initial deploys used a fixed version of LXC Docker, pinned at `1.5.0` - - Our newer deploy will still use a pinned version Docker Engine, but will be able to specify + - Our initial deploys used a fixed version of LXC Docker, pinned at `1.5.0` + - Our newer deploy will still use a pinned version Docker Engine, but will be able to specify their own versions (e.g. `1.8.1`) +Upgrade Support +--------------- -Role Variables --------------- +Some Docker Engine upgrade paths have known issues. There's code in this role that attempts to +resolve those issues, with minimum disruption, if those upgrade paths are encountered. The +intention is to not require containers to be recreated. -The `docker_version` variable controls what version of Docker is installed. +This code isn't intended to catch everything; an attempt has been made to make it reasonable and +non-harmful, but it hasn't been tested for all possible upgrade paths, nor with features like +non-local storage drivers. With that in mind, this behavior is optional and is disabled by default. - - The default `docker_version` is `1.5.0` (for historical reasons). If select, LXC Docker will be used. - - Otherwise, the stated version of Docker Engine will be used (if available). +The issues we attempt to resolve are documented in the "repair_docker_data_volumes" module. -Testing -------- +Role Variables +-------------- -There are known incompatibilities between some Docker versions and some versions of `docker-py`, -especially between LXC Docker `1.5.0` and `docker-py>1.1.0`. Using this combination will -result in Python errors along the lines of: + - `docker_version` : this variable controls the version of Docker that is installed. Required. + If version `1.5.0` is selected, LXC Docker will be used; otherwise the stated version of + Docker Engine will be installed (if available). + - `docker_attempt_upgrade_fixes` : False by default. If True, the fixes described in "Upgrade + Support" will be attempted + - `cgroup_lite_pkg_state` : When installing on an Ubuntu 13.10 host, the role will install the + `cgroup-lite` package to provide the required cgroups support. This variable can be set to + `latest` - the default - or to `present`. In the former case, the package will be updated, if + necessary, when the role is run. 
In the latter, the package will only be added if it is not + present. + - `kernel_pkg_state` : For 13.04+, this role will install a `linux-image-extra-` + package. This parameter works the same way as `cgroup_lite_package_state`, except controlling + this package. - client and server don't have same version (client : 1.19, server: 1.17) -Fortunately, newer versions of Docker Engine with both newer and older versions of `docker-py` -appear to be compatible. A `testing.yml` playbook is provided for reference. +Testing +------- +There's a directory "tests" with some Ansible playbooks that can be used for verifying role +behavior. See tests/TESTS.md for more information. License ------- diff --git a/defaults/main.yml b/defaults/main.yml index 6bd5926..0447cdc 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -1,5 +1,9 @@ --- -docker_version: 1.5.0 + kernel_pkg_state: latest cgroup_lite_pkg_state: latest ssh_port: 22 + +docker_role_apt_cache_valid_time: 7200 + +docker_attempt_upgrade_fixes: False \ No newline at end of file diff --git a/handlers/main.yml b/handlers/main.yml index 3a6c32e..cd21505 100644 --- a/handlers/main.yml +++ b/handlers/main.yml @@ -1,2 +1,2 @@ --- -# handlers file for docker.ubuntu + diff --git a/library/collect_container_configs b/library/collect_container_configs new file mode 100644 index 0000000..d03ebc4 --- /dev/null +++ b/library/collect_container_configs @@ -0,0 +1,67 @@ +#!/usr/bin/env python2.7 + +import json +import os +import os.path + +DOCUMENTATION = ''' +--- +module: collect_container_configs +short_description: collects container configuration data +description: + - when run, this module collects the contents of container configuration files for + all current containers + - this can be run before an upgrade to store information that would be used after + an upgrade + - this is returned as a single string containing parsable JSON. +''' + + +CONTAINER_ROOT_DIR = '/var/lib/docker/containers' + + +CONFIG_FILENAME_OPTIONS = [ + ('config.v3.json', 3), + ('config.v2.json', 2), + ('config.json', 1) +] + + +def main(): + module = AnsibleModule( + argument_spec=dict(), + supports_check_mode=True) + + configs = {} + config_versions = {} + + if os.path.isdir(CONTAINER_ROOT_DIR): + for container in os.listdir(CONTAINER_ROOT_DIR): + container_path = os.path.join(CONTAINER_ROOT_DIR, container) + if not os.path.isdir(container_path): + # we only expect directories here. Ignore the unexpected + continue + + for basename, version in CONFIG_FILENAME_OPTIONS: + config_path = os.path.join(container_path, basename) + if os.path.isfile(config_path): + break + else: + # no config file was found + continue + + with file(config_path, 'r') as f: + config = json.load(f) + + configs[container] = config + config_versions[container] = version + + module.exit_json( + changed=False, + configs=configs, + config_versions=config_versions) + + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/library/repair_docker_data_volumes b/library/repair_docker_data_volumes new file mode 100644 index 0000000..8f77d1b --- /dev/null +++ b/library/repair_docker_data_volumes @@ -0,0 +1,359 @@ +#!/usr/bin/env python2.7 + +import json +import os +import os.path +import re +import subprocess + + +DOCUMENTATION = ''' +--- +module: repair_docker_data_volumes +short_description: fixes issues with docker data volumes that occur during upgrades. +description: + - This module fixes issues with data volumes that might occur when updating Docker. 
+  - It will stop the docker daemon once it decides what to do.
+  - It handles the following issues:
+  - 1) when updating to docker 1.9.x, if there is a local volume with its data in
+    '/var/lib/docker/vfs/dir/', Docker will create an entry in '/var/lib/docker/volumes'
+    for that volume, with a config file in '/var/lib/docker/volumes/<volume name>/config.json'
+    pointing to data at '/var/lib/docker/volumes/<volume name>/_data', and it will add a symlink at
+    the '_data' path pointing to the old data directory, so that the volume stays functional.
+    However, if you update from a pre-1.9 version to 1.10 or greater, the config file is still
+    created, it still points to the '_data' path, but the symlink is not created and the volume
+    is nonfunctional.
+  - The module fixes the issue by creating the symlink.
+  - 2) Before 1.10, the volumes were represented in container config in a "Volumes" section,
+    and after 1.10 they are represented by a "Mounts" section. Upgrading Docker doesn't
+    translate from one to the other, so any container that has an unmapped volume mounted
+    before the upgrade will be broken after the upgrade.
+  - Issue 2 is officially documented at https://github.com/docker/docker/issues/20079
+  - The module fixes the issue by updating the config file in
+    '/var/lib/docker/containers/<container id>/config.v2.json'.
+  - There are two cases handled. The primary case is where the container owned the
+    volumes. In that case, after the upgrade the "Volumes" and "VolumesRW" keys are still
+    present in the upgraded data file, and that information can be used directly.
+  - The secondary case is where the container did not own the volumes, but was pulling them
+    in from another container using "--volumes-from". In this case, the "Volumes" and
+    "VolumesRW" keys appear to be removed during the update process, as well as the
+    "AppliedVolumesFrom" key. In order to address this case, we rely on configuration data
+    collected before the package update (by the collect_container_configs module).
+requirements:
+  - This module should be run after the docker package upgrade.
+  - If changes happen, the docker daemon should be started again.
+  - i.e. run this with "notify: restart docker"
+options:
+  fix_vfs_links:
+    description:
+      - should we fix the VFS links (issue 1)?
+    required: true
+  fix_container_mountpoints:
+    description:
+      - should we fix the container mountpoints (issue 2)?
+ required: true + old_container_configs: + description: + - config data from containers, before the package update + required: true +''' + +CONTAINER_ROOT_DIR = '/var/lib/docker/containers' +CONFIG_V2_FILENAME = 'config.v2.json' +CONFIG_V3_FILENAME = 'config.v3.json' +BACKUP_FILENAME = 'config.v2.json.pre_volume_fix' +VFS_BASE_DIR = '/var/lib/docker/vfs/dir' +VOLUMES_ROOT_DIR = '/var/lib/docker/volumes' +LOCAL_DRIVER = 'local' + +VFS_MOUNT_REGEX = re.compile(VFS_BASE_DIR + "/([a-f0-9]+)$") +VOLUME_MOUNT_REGEX = re.compile(VOLUMES_ROOT_DIR + "/([a-f0-9]+)/_data") + + +class Volume(object): + def __init__(self, driver, name): + self.driver = driver + self.name = name + + def inspect(self, module): + try: + inspect_output = subprocess.check_output( + ['docker', 'volume', 'inspect', self.name], + stderr=subprocess.STDOUT) + return json.loads(inspect_output)[0] + except subprocess.CalledProcessError, c: + module.fail_json( + msg="failure while inspecting volume {}".format(self.name), + exception=str(c)) + + @property + def is_local(self): + return self.driver == LOCAL_DRIVER + + @property + def vfs_path(self): + return os.path.join(VFS_BASE_DIR, self.name) + + +def _volumes(module): + try: + volumes_output = subprocess.check_output( + ['docker', 'volume', 'ls'], + stderr=subprocess.STDOUT) + + return [ + Volume(driver=line.split()[0], name=line.split()[1]) + for line in volumes_output.split('\n')[1:] if line] + + except subprocess.CalledProcessError, c: + module.fail_json( + msg="failure while listing volumes", + exception=str(c)) + + +class CreateSymlinkOperation(object): + def __init__(self, source_path, link_path): + self.source_path = source_path + self.link_path = link_path + + def run(self, module): + os.symlink(self.source_path, self.link_path) + + def desc(self): + return "creating symlink from {} to {}".format(self.source_path, self.link_path) + + +def fix_volume_mount_missing_symlinks(module): + for volume in _volumes(module): + if not volume.is_local: + # don't deal with volumes that don't use the "local" driver. + continue + + inspect_data = volume.inspect(module) + if os.path.exists(inspect_data['Mountpoint']): + # mount point for this volume is non-broken + continue + + if not os.path.exists(volume.vfs_path): + # mount data isn't where we expect it to be + continue + + yield CreateSymlinkOperation(volume.vfs_path, inspect_data['Mountpoint']) + + +class RewriteConfigOperation(object): + def __init__(self, path, backup_path, updated_content): + self.path = path + self.backup_path = backup_path + self.updated_content = updated_content + + def run(self, module): + old_stat = os.stat(self.path) + os.rename(self.path, self.backup_path) + with open(self.path, 'w') as f: + f.write(self.updated_content) + os.chmod(self.path, old_stat.st_mode) + os.chown(self.path, old_stat.st_uid, old_stat.st_gid) + + def desc(self): + return "updating config file {}".format(self.path) + + +def fix_container_configs(module, old_container_configs, skipped_container_reasons): + + for container in os.listdir(CONTAINER_ROOT_DIR): + container_path = os.path.join(CONTAINER_ROOT_DIR, container) + if not os.path.isdir(container_path): + # we only expect directories here. 
Ignore the unexpected + continue + + # verify that there's a v2 config file there + config_v2_path = os.path.join(container_path, CONFIG_V2_FILENAME) + if not os.path.isfile(config_v2_path): + skipped_container_reasons.append( + 'skipped {} because v2 path does not exist'.format(container)) + continue + + # Future Docker updates may continue to change this file. If the Docker team does the + # same thing they did last time, they'll add a "config.v3.json" file and leave a + # now-non-functional 'config.v2.json' file in place. If so, skip this file. + config_v3_path = os.path.join(container_path, CONFIG_V3_FILENAME) + if os.path.isfile(config_v3_path): + skipped_container_reasons.append('skipped {} because v3 path exists'.format(container)) + continue + + # find pre-update config + old_config = old_container_configs.get(container) + + # now read config JSON + with file(config_v2_path, 'r') as f: + config = json.load(f) + + # What we're looking for is images that have a config section like this: + # + # "Volumes": { + # "/opt/some/volume": "/var/lib/docker/vfs/dir/33ff....e78b", + # "/opt/mapped/volume": "/opt/mapped/volume/outside" + # }, + # "VolumesRW": { + # "/opt/some/volume": true, + # "/opt/mapped/volume": false + # } + # + # and don't have a "MountPoints" key. + if 'MountPoints' in config and config['MountPoints']: + # there is already a "MountPoints" section, so ignore this container + skipped_container_reasons.append( + 'skipped {} because MountPoints exists'.format(container)) + continue + + if 'Volumes' in config: + # there is a "Volumes" key in the current config, migrate that data + volumes = config['Volumes'] + volumes_rw = config['VolumesRW'] + elif 'Volumes' in old_config: + # there is no volume data in the current config, but it's there in the old config. + # We see this in containers that use "--volumes-from". 
+ # Use the old data + volumes = old_config['Volumes'] + volumes_rw = old_config['VolumesRW'] + else: + # there are no volumes on this container, so there's nothing to do + skipped_container_reasons.append( + 'skipped {} because Volumes does not exist'.format(container)) + continue + + # This config should turn into: + # + # "MountPoints": { + # "/opt/some/volume": { + # "Destination": "/opt/some/volume", + # "Driver": "local", + # "Name": "33ff....e78b", + # "Named": false, + # "Propagation": "", + # "RW": true, + # "Relabel": "", + # "Source": "" + # } + # "/opt/mapped/volume": { + # "Destination": "/opt/mapped/volume", + # "Driver": "", + # "Name": "", + # "Named": false, + # "Propagation": "rprivate", + # "RW": false, + # "Relabel": "", + # "Source": "/opt/mapped/volume/outside" + # } + # }, + # + mountpoints = {} + for internal_path, external_path in volumes.iteritems(): + rw = volumes_rw[internal_path] + + match_result = (VFS_MOUNT_REGEX.match(external_path) + or VOLUME_MOUNT_REGEX.match(external_path)) + if match_result: + # unmapped volume + volume_id = match_result.group(1) + mountpoints[internal_path] = { + 'Destination': internal_path, + 'Driver': 'local', + 'Name': volume_id, + 'Named': False, + 'Propagation': '', + 'RW': rw, + 'Relabel': '', + 'Source': '' + } + else: + # mapped volume + mountpoints[internal_path] = { + 'Destination': internal_path, + 'Driver': '', + 'Name': '', + 'Named': False, + 'Propagation': 'rprivate', + 'RW': rw, + 'Relabel': '', + 'Source': external_path + } + config['MountPoints'] = mountpoints + + backup_path = os.path.join(container_path, BACKUP_FILENAME) + yield RewriteConfigOperation(config_v2_path, backup_path, json.dumps(config)) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + fix_vfs_links=dict(required=True, type='bool'), + fix_container_mountpoints=dict(required=True, type='bool'), + old_container_configs=dict(required=True)), + supports_check_mode=True) + + # We need to figure out what to do before doing it. + # That's because, when we execute our operations, we shut down the docker daemon first - + # but figuring out what to do, in some cases, requires us to query the docker daemon. 
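+    # Note: old_container_configs arrives from Ansible as a JSON string; a copy is written to
+    # /var/tmp/config_after.json before it is parsed, so that it can be inspected later.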
+ + old_container_configs = module.params['old_container_configs'] + + if isinstance(old_container_configs, basestring): + with file('/var/tmp/config_after.json', 'w') as f: + f.write(old_container_configs) + try: + old_container_configs = json.loads(old_container_configs) + except ValueError, v: + module.fail_json( + msg="failed parsing JSON config", + changed=False, + exception=str(v), + old_container_configs=old_container_configs, + tasks_performed=[]) + + operations = [] + if module.params['fix_vfs_links']: + operations.extend(fix_volume_mount_missing_symlinks(module)) + + skipped_container_reasons = [] + if module.params['fix_container_mountpoints']: + operations.extend(fix_container_configs(module, + old_container_configs, + skipped_container_reasons)) + + if not operations: + module.exit_json(changed=False, tasks_performed=[]) + + if not module.check_mode: + try: + subprocess.check_call(['service', 'docker', 'stop']) + except subprocess.CalledProcessError, c: + module.fail_json( + msg="failure while stopping docker daemon", + exception=str(c)) + + tasks_performed = [] + for operation in operations: + if not module.check_mode: + try: + operation.run(module) + except Exception, e: + module.fail_json( + changed=bool(tasks_performed), + msg=("failed during: " + operation.desc()), + exception=str(e), + tasks_performed=tasks_performed) + + tasks_performed.append(operation.desc()) + + module.exit_json( + changed=True, + tasks_performed=tasks_performed, + skipped_container_reasons=skipped_container_reasons, + old_container_configs=old_container_configs) + + +from ansible.module_utils.basic import * +if __name__ == '__main__': + main() diff --git a/tasks/docker-engine.yml b/tasks/docker-engine.yml index 9960936..33d32fc 100644 --- a/tasks/docker-engine.yml +++ b/tasks/docker-engine.yml @@ -19,19 +19,61 @@ repo: 'deb https://apt.dockerproject.org/repo ubuntu-{{ ansible_distribution_release }} main' update_cache: yes +- name: see if docker is already installed + stat: path=/usr/bin/docker get_checksum=False + register: docker_exec_stat_result + always_run: True -# While wildcarding the version is cute, in practice, there only seems to be one -# version of Docker available at a time. I'm not sure what will happen on the next -# release. Alternatively, we could install LXC Docker, which does have many more -# versions, but not the most recent versions. +- name: Determine installed version + command: docker --version + changed_when: False + always_run: True + register: docker_version_result + when: "{{ docker_exec_stat_result.stat.exists }}" +# "docker --version" output is formatted like this: +# +# Docker version 1.10.2, build c3959b1 +# +- name: store installed docker version + set_fact: + installed_docker_version: "{{ docker_version_result.stdout + | regex_replace('^Docker version ', '') + | regex_replace(', build [0-9a-f]+$', '') }}" + when: "{{ docker_exec_stat_result.stat.exists }}" + +- name: collect container data from before upgrade + collect_container_configs: + register: collect_container_configs_result + when: "{{ docker_exec_stat_result.stat.exists + and docker_attempt_upgrade_fixes }}" + - name: Install Docker Engine apt: name="docker-engine={{ docker_version }}-0~{{ ansible_distribution_release }}" state=present register: docker_engine_install -# If this was the first time Docker was installed, when we gathered facts about -# the system, we would not have seen the "ansible_docker0" interface, as it wasn't -# present at the time. Now it is, so re-read facts. 
+# Some updates will break data volumes. There are two issues: +# - if upgrading from <1.9 to 1.10+, then fix the vfs links. If we're upgrading to a 1.9.x client, +# the Docker upgrade fixes that automatically. +# - if upgrading from <1.10 to 1.10+, then fix the container configs for containers with volumes. +- name: handle repair of data volumes broken by updates + repair_docker_data_volumes: + fix_vfs_links: "{{ installed_docker_version | version_compare('1.9', '<') + and docker_version | version_compare('1.10', '>=') }}" + fix_container_mountpoints: "{{ installed_docker_version | version_compare('1.10', '<') + and docker_version | version_compare('1.10', '>=') }}" + old_container_configs: "{{ collect_container_configs_result.configs | to_json }}" + when: "{{ docker_engine_install | changed + and docker_exec_stat_result.stat.exists + and docker_attempt_upgrade_fixes }}" + register: repair_docker_data_volumes_result + +- name: start docker after repair of docker settings + service: name=docker state=started + when: "{{ repair_docker_data_volumes_result is defined + and repair_docker_data_volumes_result | changed }}" + +# If we just installed or upgraded Docker, re-read Docker-related facts. - name: reread facts about ansible_docker0 setup: filter=ansible_docker0 when: docker_engine_install|changed diff --git a/tasks/main.yml b/tasks/main.yml index b651fe7..b2f063f 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -1,16 +1,22 @@ --- # tasks file for docker.ubuntu + +# These are the version of Ubuntu for which this role has been tested. - name: Fail if not a new release of Ubuntu fail: msg="{{ ansible_distribution_version }} is not an acceptable version of Ubuntu for this role" when: "ansible_distribution_version not in ['12.04', '13.04', '13.10', '14.04', '16.04']" +- name: check that docker_version is set + fail: msg="Required variable \"docker_version\" is not defined." 
+ when: "{{ docker_version is not defined }}" + # https://docs.docker.com/installation/ubuntulinux/ - name: Install trusty kernel onto 12.04 apt: pkg: "{{ item }}" state: latest update_cache: yes - cache_valid_time: 600 + cache_valid_time: "{{ docker_role_apt_cache_valid_time }}" with_items: - linux-image-generic-lts-trusty - linux-headers-generic-lts-trusty @@ -22,7 +28,7 @@ pkg: "linux-image-extra-{{ ansible_kernel }}" state: "{{ kernel_pkg_state }}" update_cache: yes - cache_valid_time: 600 + cache_valid_time: "{{ docker_role_apt_cache_valid_time }}" when: "ansible_distribution_version != '12.04'" # Fix for https://github.com/dotcloud/docker/issues/4568 @@ -31,7 +37,7 @@ pkg: cgroup-lite state: "{{ cgroup_lite_pkg_state }}" update_cache: yes - cache_valid_time: 600 + cache_valid_time: "{{ docker_role_apt_cache_valid_time }}" register: cgroup_lite_result when: "ansible_distribution_version == '13.10'" @@ -72,6 +78,9 @@ register: ufw_default_exists - name: Change ufw default forward policy from drop to accept - lineinfile: dest=/etc/default/ufw regexp="^DEFAULT_FORWARD_POLICY=" line="DEFAULT_FORWARD_POLICY=\"ACCEPT\"" + lineinfile: + dest: /etc/default/ufw + regexp: "^DEFAULT_FORWARD_POLICY=" + line: "DEFAULT_FORWARD_POLICY=\"ACCEPT\"" when: ufw_default_exists.stat.exists diff --git a/testing.yml b/testing.yml deleted file mode 100644 index 0815676..0000000 --- a/testing.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# Test docker-py compatibility - -- name: install python - hosts: all - tasks: - - apt: pkg={{ item }} state=present - with_items: - - python - - python-dev - - python-setuptools - - easy_install: name=pip - -- name: Test old docker-py - hosts: all - tasks: - - pip: name=docker-py version=1.1.0 - - command: python -c "from docker import Client; client=Client(base_url='unix://var/run/docker.sock'); print client.info()" - -- name: Install new docker-py - hosts: all - tasks: - - pip: name=docker-py state=latest - - command: python -c "from docker import Client; client=Client(base_url='unix://var/run/docker.sock'); print client.info()" diff --git a/tests/TESTS.md b/tests/TESTS.md new file mode 100644 index 0000000..026c229 --- /dev/null +++ b/tests/TESTS.md @@ -0,0 +1,21 @@ +# Tests + +Here are some playbooks that support different tests, and some code to support running +them in Vagrant. + +## Running tests +The tests are written as Ansible playbooks that operate on `hosts: all`. The tests don't +assume much about the machine they're operating on, but it should be a clean machine if +possible. + +You could use just the playbooks, but there's also a provided Vagrantfile, ansible +inventory, and test runner shell script. 
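+
+The runner script brings the VM up with Vagrant and converts `vagrant ssh-config` output into an
+Ansible inventory at `build/inventory`. As a rough illustration (the address, port, and key path
+will vary per machine), a generated entry looks something like:
+
+    [all]
+    vagrant-docker-test ansible_host=127.0.0.1 ansible_user=vagrant ansible_port=2222 ansible_ssh_private_key_file=.vagrant/machines/vagrant-docker-test/virtualbox/private_key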
+
+To run all tests:
+
+    ./run_tests.sh
+
+To run a specific test:
+
+    TESTS=upgrade ./run_tests.sh
+
diff --git a/tests/Vagrantfile b/tests/Vagrantfile
new file mode 100644
index 0000000..03ad8c1
--- /dev/null
+++ b/tests/Vagrantfile
@@ -0,0 +1,17 @@
+VM_DEFAULT_RAM = 1024
+VM_DEFAULT_CPUS = 1
+
+Vagrant.configure(2) do |config|
+
+  config.vm.box = "bento/ubuntu-14.04"
+  config.vm.network "private_network", type: "dhcp"
+  config.vm.provider :virtualbox do |vb|
+    vb.memory = VM_DEFAULT_RAM
+    vb.cpus = VM_DEFAULT_CPUS
+    vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
+  end
+
+  config.vm.define "vagrant-docker-test" do |node_config|
+    node_config.vm.hostname = "vagrant-docker-test"
+  end
+end
\ No newline at end of file
diff --git a/tests/ansible.cfg b/tests/ansible.cfg
new file mode 100644
index 0000000..357db31
--- /dev/null
+++ b/tests/ansible.cfg
@@ -0,0 +1,6 @@
+[defaults]
+host_key_checking=False
+
+[privilege_escalation]
+become=True
+become_user=root
diff --git a/tests/busybox.tar b/tests/busybox.tar
new file mode 100644
index 0000000..0cfc115
Binary files /dev/null and b/tests/busybox.tar differ
diff --git a/tests/new_install.yml b/tests/new_install.yml
new file mode 100644
index 0000000..0dfe31c
--- /dev/null
+++ b/tests/new_install.yml
@@ -0,0 +1,25 @@
+---
+
+- hosts: all
+  vars:
+    docker_version: 1.12.1
+  roles:
+    # install recent version of Docker
+    - role: docker
+  tasks:
+    - name: verify that Docker is running and accessible
+      command: docker ps
+    - name: get docker client version
+      # The command we want to run has "{{" in it, but we don't want Ansible to try to template
+      # it. We use a shell variable to store an open brace, so there's never an Ansible
+      # variable - before or after the operation - that has '{{' in it.
+      shell: 'OB="{" && docker version -f "$OB$OB .Client.Version }}"'
+      register: docker_client_version_result
+    - name: get docker server version
+      shell: 'OB="{" && docker version -f "$OB$OB .Server.Version }}"'
+      register: docker_server_version_result
+    - name: check version results
+      assert:
+        that:
+          - "{{ docker_client_version_result.stdout == docker_version }}"
+          - "{{ docker_server_version_result.stdout == docker_version }}"
diff --git a/tests/no_upgrade_fixes_part1.yml b/tests/no_upgrade_fixes_part1.yml
new file mode 100644
index 0000000..ef25c99
--- /dev/null
+++ b/tests/no_upgrade_fixes_part1.yml
@@ -0,0 +1,20 @@
+---
+
+# playbook #1 for a test that runs an install without any upgrade fixes.
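+# Two playbooks are used so that the "docker" role is applied twice: this one installs Docker
+# 1.5.0 and creates containers with mapped and unmapped volumes, then no_upgrade_fixes_part2.yml
+# upgrades Docker with the fixes left disabled and verifies that those volumes end up broken.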
+- hosts: all + roles: + - role: docker + docker_version: 1.5.0 + tasks: + - name: copy busybox tar to destination + copy: src=busybox.tar dest=/var/tmp/busybox.tar + - name: import busybox tar + command: docker load -i /var/tmp/busybox.tar + - name: create a container with mapped and unmapped volume + command: docker create --name volume-owner --volume /opt/some/volume --volume /etc/alternatives:/externalstuff/alternatives busybox + - name: write something to unmapped volume + command: docker run --rm --volumes-from volume-owner busybox cp /proc/cpuinfo /opt/some/volume/cpuinfo + - name: create a container that uses data volume + command: docker create --name volume-user --volumes-from volume-owner busybox sleep 9999m + - name: start container that uses data volume + command: docker start volume-user diff --git a/tests/no_upgrade_fixes_part2.yml b/tests/no_upgrade_fixes_part2.yml new file mode 100644 index 0000000..625d988 --- /dev/null +++ b/tests/no_upgrade_fixes_part2.yml @@ -0,0 +1,34 @@ +--- + +# playbook #2 in the upgrade without applying fixes +- hosts: all + tasks: + - name: grab old volume ID + command: ls /var/lib/docker/vfs/dir + register: vfs_dir_result + - name: store old volume ID + set_fact: + old_volume_id: "{{ vfs_dir_result.stdout }}" + +- hosts: all + roles: + - role: docker + docker_version: 1.12.1 + tasks: + - name: look at the volume symlink + stat: path=/var/lib/docker/volumes/_data + register: volume_symlink_stat + - name: verify the link does not exist + assert: + that: + - "not volume_symlink_stat.stat.exists" + - name: start old container that uses data volume + command: docker start volume-user + - name: verify that old volume-using container cannot read unmapped volume + command: docker exec volume-user stat /opt/some/volume/cpuinfo + register: old_volume_user_read_unmapped_result + failed_when: "{{ old_volume_user_read_unmapped_result.rc == 0 }}" + - name: verify that old volume-using container cannot read mapped volume + command: docker exec volume-user stat /externalstuff/alternatives/README + register: old_volume_user_read_mapped_result + failed_when: "{{ old_volume_user_read_mapped_result.rc == 0 }}" diff --git a/tests/old_install.yml b/tests/old_install.yml new file mode 100644 index 0000000..7f8a604 --- /dev/null +++ b/tests/old_install.yml @@ -0,0 +1,15 @@ +--- + +- hosts: all + vars: + docker_version: 1.5.0 + roles: + # install very old version of Docker + - role: docker + tasks: + - name: verify that Docker is running + command: docker ps + - name: verify Docker client version + shell: "docker version | grep 'Client version: {{ docker_version }}'" + - name: verify Docker server version + shell: "docker version | grep 'Server version: {{ docker_version }}'" diff --git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 0000000..90d4055 --- /dev/null +++ b/tests/requirements.txt @@ -0,0 +1 @@ +ansible diff --git a/tests/roles/docker b/tests/roles/docker new file mode 120000 index 0000000..c25bddb --- /dev/null +++ b/tests/roles/docker @@ -0,0 +1 @@ +../.. \ No newline at end of file diff --git a/tests/run_tests.sh b/tests/run_tests.sh new file mode 100755 index 0000000..976ead0 --- /dev/null +++ b/tests/run_tests.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +BUILD_DIR=./build +VENV_DIR=$BUILD_DIR/test_venv +INVENTORY=$BUILD_DIR/inventory + +function before_run() { + if [ "$VIRTUAL_ENV" != "" ] ; then + echo "You currently have a virtual env active. Please deactivate it before proceeding." + exit 1 + fi + if [ ! 
-d $VENV_DIR ] ; then + virtualenv $VENV_DIR + $VENV_DIR/bin/pip install -r requirements.txt + fi + + . $VENV_DIR/bin/activate +} + +function before_test() { + vagrant up + + vagrant ssh-config | awk ' + BEGIN { entry = "[all]" }; + /^Host / { print entry; entry = $2 }; + /^ HostName / { entry = entry " ansible_host=" $2 }; + /^ User / { entry = entry " ansible_user=" $2 }; + /^ Port / { entry = entry " ansible_port=" $2 }; + /^ IdentityFile / { entry = entry " ansible_ssh_private_key_file=" $2 }; + END { print entry }' > $INVENTORY +} + +function run_ansible_playbook() { + echo " ------------------------------ running ansible playbook $@" + ansible-playbook -vvv -i $INVENTORY "$@" + local result=$? + echo " ------------------------------ return code: $result" + return $result +} + +function after_test() { + vagrant destroy -f +} + +ALL_TESTS="old_install new_install upgrade no_upgrade_fixes" + +function run_old_install_test() { + run_ansible_playbook old_install.yml +} + +function run_new_install_test() { + run_ansible_playbook new_install.yml +} + +function run_upgrade_test() { + run_ansible_playbook upgrade_part1.yml && \ + run_ansible_playbook upgrade_part2.yml +} + +function run_no_upgrade_fixes_test() { + run_ansible_playbook no_upgrade_fixes_part1.yml && \ + run_ansible_playbook no_upgrade_fixes_part2.yml +} + +before_run +RESULTS="" +for TEST in ${TESTS:-$ALL_TESTS} ; do + echo "Running test \"$TEST\"" + before_test + run_${TEST}_test + RESULTS="${RESULTS}Test $TEST : result $?\n" + after_test +done +echo -e "$RESULTS" diff --git a/tests/upgrade_part1.yml b/tests/upgrade_part1.yml new file mode 100644 index 0000000..6ce8757 --- /dev/null +++ b/tests/upgrade_part1.yml @@ -0,0 +1,24 @@ +--- + +# playbook #1 for a test that runs an install with upgrade fixes. Two playbooks are needed in +# order to get the "docker" role to apply twice. +- hosts: all + roles: + - role: docker + docker_version: 1.5.0 + tasks: + # if we want to create images inside Vagrant, we need to provide one - + # docker 1.5.0 can't connect to docker hub any more. + - name: copy busybox tar to destination + copy: src=busybox.tar dest=/var/tmp/busybox.tar + - name: import busybox tar + command: docker load -i /var/tmp/busybox.tar + - name: create a container with mapped and unmapped volume + command: docker create --name volume-owner --volume /opt/some/volume --volume /etc/alternatives:/externalstuff/alternatives busybox + - name: write something to unmapped volume + command: docker run --rm --volumes-from volume-owner busybox cp /proc/cpuinfo /opt/some/volume/cpuinfo + - name: create a container that uses data volume + command: docker create --name volume-user --volumes-from volume-owner busybox sleep 9999m + - name: start container that uses data volume + command: docker start volume-user + diff --git a/tests/upgrade_part2.yml b/tests/upgrade_part2.yml new file mode 100644 index 0000000..12f42a0 --- /dev/null +++ b/tests/upgrade_part2.yml @@ -0,0 +1,38 @@ +--- + +# playbook #2 in the upgrade test. 
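+# It upgrades Docker to 1.11.2 with docker_attempt_upgrade_fixes enabled, then verifies that the
+# old volume is still readable by its ID and that both pre-existing and newly created containers
+# can read the mapped and unmapped volumes.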
+- hosts: all + tasks: + - name: grab old volume ID + command: ls /var/lib/docker/vfs/dir + register: vfs_dir_result + - name: store old volume ID + set_fact: + old_volume_id: "{{ vfs_dir_result.stdout }}" + +- hosts: all + roles: + - role: docker + docker_version: 1.11.2 + docker_attempt_upgrade_fixes: True + tasks: + - name: read something from volume ID + command: "docker run --rm --volume {{ old_volume_id }}:/foo busybox cat /foo/cpuinfo" + - name: read something from unmapped volume as new container + command: docker run --rm --volumes-from volume-owner busybox stat /opt/some/volume/cpuinfo + - name: read something from mapped volume as new container + command: docker run --rm --volumes-from volume-owner busybox stat /externalstuff/alternatives/README + - name: create a new container that uses data volume + command: docker create --name volume-user2 --volumes-from volume-owner busybox sleep 9999m + - name: start new container that uses data volume + command: docker start volume-user2 + - name: read something from unmapped volume while execed as new container that uses volume + command: docker exec volume-user2 stat /opt/some/volume/cpuinfo + - name: read something from mapped volume while execed as new container that uses volume + command: docker exec volume-user2 stat /externalstuff/alternatives/README + - name: start old container that uses data volume + command: docker start volume-user + - name: read something from unmapped volume while execed as old container that uses volume + command: docker exec volume-user stat /opt/some/volume/cpuinfo + - name: read something from mapped volume while execed as old container that uses volume + command: docker exec volume-user stat /externalstuff/alternatives/README \ No newline at end of file diff --git a/tests/vagrant.inv b/tests/vagrant.inv new file mode 100644 index 0000000..dc3c4cd --- /dev/null +++ b/tests/vagrant.inv @@ -0,0 +1,2 @@ +[all] +vagrant-docker-test
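+# Static inventory naming the Vagrant test VM. run_tests.sh generates its own inventory, with the
+# SSH connection details filled in, under build/inventory.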