diff --git a/README.md b/README.md index 736c340..28c8787 100644 --- a/README.md +++ b/README.md @@ -17,34 +17,63 @@ Docker versions. Upgrade Support --------------- -Some Docker Engine upgrade paths have known issues. There's code in this role that attempts to -resolve those issues, with minimum disruption, if those upgrade paths are encountered. The -intention is to not require containers to be recreated. +This role no longer directly supports upgrading docker from a previous version. Any additional +steps required as a part of an upgrade should be taken care of in a separate role or playbook. -This code isn't intended to catch everything; an attempt has been made to make it reasonable and -non-harmful, but it hasn't been tested for all possible upgrade paths, nor with features like -non-local storage drivers. With that in mind, this behavior is optional and is disabled by default. - -The issues we attempt to resolve are documented in the "repair_docker_data_volumes" module. +Kernel Requirements +------------------- +Docker has some kernel requirements for proper usage with Ubuntu. We have a new role +ansible-role_kernel_update which will assist with the kernel requirements. Role Variables -------------- - - `docker_version` : this variable controls the version of Docker that is installed. Required. + - `docker_version`: this variable controls the version of Docker that is installed. Required. If version `1.5.0` is selected, LXC Docker will be used; otherwise the stated version of Docker Engine will be installed (if available). - - `docker_daemon_flags` : Empty by default. This variable holds flags that will be passed to + - `docker_daemon_flags`: Empty by default. This variable holds flags that will be passed to the Docker daemon on startup. (This is implemented by modifying the file `/etc/default/docker`.) 
- - `cgroup_lite_pkg_state` : When installing on an Ubuntu 13.10 host, the role will install the - `cgroup-lite` package to provide the required cgroups support. This variable can be set to - `latest` - the default - or to `present`. In the former case, the package will be updated, if - necessary, when the role is run. In the latter, the package will only be added if it is not - present. - - `kernel_pkg_state` : For 13.04+, this role will install a `linux-image-extra-` - package. This parameter works the same way as `cgroup_lite_package_state`, except controlling - this package. + - `docker_daemon_startup_retries`: this variable controls how many times we poll docker to + confirm it is running after we start or restart it before giving up. Defaults to 10. + +Documentation +------------- + +The documentation for working with Docker on Ubuntu is available online but there has been +some refactoring of the documentation since the original writing. + + * https://docs.docker.com/engine/installation/linux/ubuntu/ + * (old) https://github.com/docker/docker.github.io/blob/master/engine/installation/linux/ubuntulinux.md + * (new) https://github.com/docker/docker.github.io/blob/master/engine/installation/linux/ubuntu.md + +The *old* documentation is what was previously available online and the *new* documentation is what is +currently available online. As of this writing the documentation is at commit '45a19ec' & '9093e0a' respectively. + + * (old) https://github.com/docker/docker.github.io/blob/45a19ec/engine/installation/linux/ubuntulinux.md + * (new) https://github.com/docker/docker.github.io/blob/9093e0a/engine/installation/linux/ubuntu.md + +Links to documentation will therefore get pinned to a particular commit to maintain access +to historical information which may get removed (or moved) in later versions. Maintainers +should check the master branch when updating the role and update links when possible. 
+ +Currently the new documentation says docker is only supported on 14.04[LTS], 16.04[LTS], & 16.10 +but the old documentation has some instructions for 12.04[LTS]. + +12.04 may not have support due to issues with older kernels. Docker documentation mentions some +prerequisites when installing from a binary that could be informative and there is a known +issue with docker running on linux kernels less than 3.19 that could imply 12.04 isn't supported. + + * https://docs.docker.com/engine/installation/binaries/#/prerequisites + * https://github.com/docker/docker/issues/21704#issuecomment-235365424 + +Additional Resources +-------------------- + +Users of this role might also consider reviewing our other ansible roles. +In particular: + * https://github.com/locationlabs/ansible-role_docker-base Testing ------- diff --git a/defaults/main.yml b/defaults/main.yml index 2cd003c..96cfdbd 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -1,16 +1,10 @@ --- -kernel_pkg_state: latest -cgroup_lite_pkg_state: latest -ssh_port: 22 - -docker_role_apt_cache_valid_time: 7200 - # These flags are passed to the Docker daemon on startup. docker_daemon_flags: "" # When we start / restart Docker, this role waits until it is ready before -# proceeding. This variable controls how long we wait before giving up. +# proceeding. This variable controls how many times we retry the service before giving up. # This value should be OK for all but the slowest servers. 
-docker_daemon_startup_timeout_sec: 10 +docker_daemon_startup_retries: 10 diff --git a/meta/main.yml b/meta/main.yml index ee363e4..8e0ad48 100644 --- a/meta/main.yml +++ b/meta/main.yml @@ -1,20 +1,14 @@ --- galaxy_info: - author: Paul Durivage - description: Docker on Ubuntu greater than 12.04 + author: Location Labs + description: Docker on Ubuntu >= 12.04 license: Apache v2.0 - min_ansible_version: 1.2 + min_ansible_version: 1.9.6 platforms: - name: Ubuntu versions: - - precise - - raring - - saucy - - trusty - categories: - - development - - packaging - - system + - precise # 12.04 + - trusty # 14.04 dependencies: [] # List your role dependencies here, one per line. Only # dependencies available via galaxy should be listed here. diff --git a/tasks/docker-engine.yml b/tasks/docker-engine.yml index 4a226f5..f99d874 100644 --- a/tasks/docker-engine.yml +++ b/tasks/docker-engine.yml @@ -39,3 +39,4 @@ name: "docker-engine={{ docker_version }}-*~{{ ansible_distribution_release }}" state: present register: r_docker_package_install + when: not check_mode diff --git a/tasks/lxc-docker.yml b/tasks/lxc-docker.yml index baace66..e28da8c 100644 --- a/tasks/lxc-docker.yml +++ b/tasks/lxc-docker.yml @@ -18,5 +18,8 @@ update_cache: yes - name: Install LXC Docker - apt: pkg="lxc-docker-{{ docker_version }}" state=present + apt: + name: "lxc-docker-{{ docker_version }}" + state: present register: r_docker_package_install + when: not check_mode diff --git a/tasks/main.yml b/tasks/main.yml index 0889b73..ad1f60f 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -1,5 +1,12 @@ --- -# tasks file for docker.ubuntu +# Main tasks file + +# noop to detect check mode for older versions of ansible. +# http://docs.ansible.com/ansible/playbooks_checkmode.html#information-about-check-mode-in-variables +- command: /bin/true + register: noop_result + +- set_fact: check_mode={{ noop_result|skipped }} # These are the version of Ubuntu for which this role has been tested. 
- name: Fail if not a new release of Ubuntu @@ -10,54 +17,20 @@ fail: msg="Required variable \"docker_version\" is not defined." when: docker_version is not defined -# https://docs.docker.com/engine/installation/linux/ubuntulinux/#/prerequisites-by-ubuntu-version -# - 12.04: Docker requires the 3.13 kernel version. -# Ensure the trusty kernel is installed. -# - 14.04: Support aufs via the linux-image-extra-* kernel package. -# Achieve this with the xenial kernel, which depends on the -# corresponding extra package, to address issues with kernels -# before 3.19 at the same time. -# https://github.com/docker/docker/issues/21704#issuecomment-235365424 -- name: Install HWE kernel on pre-16.04 LTS - apt: - pkg: "{{ item.name }}" - state: latest - update_cache: yes - cache_valid_time: "{{ docker_role_apt_cache_valid_time }}" - with_items: - - name: linux-image-generic-lts-trusty - version: "12.04" - - name: linux-headers-generic-lts-trusty - version: "12.04" - - name: linux-image-generic-lts-xenial - version: "14.04" - register: kernel_result - when: ansible_distribution_version == item.version - -- name: Install latest kernel extras for Ubuntu 13.04, 13.10 - apt: - pkg: "linux-image-extra-{{ ansible_kernel }}" - state: "{{ kernel_pkg_state }}" - update_cache: yes - cache_valid_time: "{{ docker_role_apt_cache_valid_time }}" - when: ansible_distribution_version in ['13.04', '13.10'] - -# Fix for https://github.com/dotcloud/docker/issues/4568 -- name: Install cgroup-lite for Ubuntu 13.10 - apt: - pkg: cgroup-lite - state: "{{ cgroup_lite_pkg_state }}" - update_cache: yes - cache_valid_time: "{{ docker_role_apt_cache_valid_time }}" - register: cgroup_lite_result - when: ansible_distribution_version == '13.10' - -- include: reboot-and-wait.yml - # Newer versions of Docker no longer require apparmor, but it seems like a good thing to have. 
- name: Install apparmor apt: pkg=apparmor state=present +# Newer documentation suggests installing a few other (non kernel) packages +# https://github.com/docker/docker.github.io/blob/9093e0a/engine/installation/linux/ubuntu.md#recommended-extra-packages +# https://github.com/docker/docker.github.io/blob/9093e0a/engine/installation/linux/ubuntu.md#set-up-the-repository +- name: Install a few recommended packages + apt: pkg="{{ item }}" state=present + with_items: + - curl + - apt-transport-https + - ca-certificates + - name: Write /etc/default/docker config file template: src=etc_default_docker.j2 dest=/etc/default/docker register: r_etc_default_docker @@ -73,23 +46,20 @@ # if we changed the config, but didn't reinstall docker, then restart it - name: Restart docker on config change service: name=docker state=restarted - when: r_etc_default_docker|changed and not r_docker_package_install|changed + when: r_etc_default_docker|changed and not r_docker_package_install|changed and not check_mode - name: Ensure that Docker is running service: name="docker" state=started + when: not check_mode - name: Wait until docker daemon is available command: docker info register: r_docker_info # need a special case here, or this always fails in check mode. until: r_docker_info|skipped or r_docker_info.rc == 0 - retries: "{{ docker_daemon_startup_timeout_sec }}" + retries: "{{ docker_daemon_startup_retries }}" delay: 1 -- name: reread docker facts - setup: filter=ansible_docker0 - when: r_etc_default_docker|changed or r_docker_package_install|changed - - name: Check if /etc/default/ufw exists stat: path=/etc/default/ufw register: ufw_default_exists @@ -100,3 +70,8 @@ regexp: "^DEFAULT_FORWARD_POLICY=" line: "DEFAULT_FORWARD_POLICY=\"ACCEPT\"" when: ufw_default_exists.stat.exists + +# this role modifies the host in ways which might have been cached +# let's update all facts here to be sure we have ansible up to date. 
+- name: update facts about host + setup: diff --git a/tasks/reboot-and-wait.yml b/tasks/reboot-and-wait.yml deleted file mode 100644 index a554066..0000000 --- a/tasks/reboot-and-wait.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -# reboot an Ubuntu machine if needed and wait for it to come back -- name: Detect vagrant instance - set_fact: - is_vagrant: "{{is_vagrant | default(ansible_ssh_user == 'vagrant')}}" - -- name: Reboot instance - command: /sbin/shutdown -r now - args: - removes: /var/run/reboot-required - register: reboot_result - -- name: Reload vagrant instance - local_action: command vagrant reload "{{inventory_hostname}}" - when: reboot_result|changed and is_vagrant - become: false - -- name: Wait for instance to come online - local_action: - module: wait_for - host: "{{ ansible_ssh_host|default(inventory_hostname) }}" - port: "{{ ansible_ssh_port|default(ssh_port) }}" - delay: 30 - timeout: 600 - state: started - when: reboot_result|changed - become: false diff --git a/tests/TESTS.md b/tests/TESTS.md index 5de7dd5..e10c984 100644 --- a/tests/TESTS.md +++ b/tests/TESTS.md @@ -4,6 +4,7 @@ Here are some playbooks that support different tests, and some code to support r them in Vagrant. ## Running tests + The tests are written as Ansible playbooks that operate on `hosts: all`. The tests don't assume much about the machine they're operating on, but it should be a clean machine if possible. @@ -17,4 +18,26 @@ To run all tests: To run a specific test: - TESTS=upgrade ./run_tests.sh + TESTS=new_install ./run_tests.sh + +## Testing on vagrant + +If you have issues with restarting vagrant boxes due to mounting issues you might need +to check your virtualbox and vagrant versions are up to date. If that doesn't solve the +issue you could also check your vagrant plugins. 
+ + > vagrant plugin list + vagrant-cachier (1.2.1) + vagrant-hostmanager (1.8.5) + vagrant-multiprovider-snap (0.0.14) + vagrant-share (1.1.6, system) + vagrant-vbguest (0.13.0) + +To prevent having to enter your password for hostmanager configure passwordless sudo: + + * https://github.com/devopsgroup-io/vagrant-hostmanager#passwordless-sudo + +## Ubuntu versions + +If you have a particular version of Ubuntu you need to test with modify the Vagrantfile to +load a box with the version you need to test before running the test script. \ No newline at end of file diff --git a/tests/Vagrantfile b/tests/Vagrantfile index b35bdb3..48e8440 100644 --- a/tests/Vagrantfile +++ b/tests/Vagrantfile @@ -1,14 +1,90 @@ -Vagrant.configure(2) do |config| - - config.vm.box = "bento/ubuntu-14.04" - config.vm.network "private_network", type: "dhcp" - config.vm.provider :virtualbox do |vb| - vb.memory = 1024 - vb.cpus = 1 - vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] +# -*- mode: ruby -*- +# vi: set ft=ruby : + +nodes = [ + { + :hostname => 'vagrant-docker-test-1404', + :domain => '.wavemarket.com', + :ip => '172.19.56.22', + :box => 'bento/ubuntu-14.04', + :ram => '2048', + :cpus => 2, + :autostart => true + }, + { + :hostname => 'vagrant-docker-test-1204', + :domain => '.wavemarket.com', + :ip => '172.19.56.20', + :box => 'bento/ubuntu-12.04', + :ram => '2048', + :cpus => 2, + :autostart => true + }, +] + +VAGRANTFILE_API_VERSION = "2" +VM_DEFAULT_RAM = 512 +VM_DEFAULT_CPUS = 1 +LOCAL_BUILD_DIR="/var/tmp/#{ENV['USER']}/build/" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + + config.ssh.insert_key = false + + if Vagrant.has_plugin?('vagrant-cachier') + config.cache.enable :apt + else + printf("** Install vagrant-cachier plugin to speedup deploy: `vagrant plugin install vagrant-cachier`.**\n") + end + + if Vagrant.has_plugin?('vagrant-hostmanager') + config.hostmanager.enabled = true + config.hostmanager.manage_host = true + else + raise "** Install 
vagrant-hostmanager plugin: `vagrant plugin install vagrant-hostmanager`.**\n" end - config.vm.define "vagrant-docker-test" do |node_config| - node_config.vm.hostname = "vagrant-docker-test" + nodes.each do |node| + config.vm.define node[:hostname], autostart: node[:autostart] do |node_config| + node_config.vm.box = node[:box] + node_config.vm.hostname = node[:hostname] + node[:domain] + node_config.vm.network :private_network, ip: node[:ip] + + # setup a shared folder with the host's build folder + # to simplify installing local builds + if File.directory?(LOCAL_BUILD_DIR) + node_config.vm.synced_folder LOCAL_BUILD_DIR, LOCAL_BUILD_DIR + end + + node_config.vm.provider :virtualbox do |vb| + vb.memory = node[:ram] || VM_DEFAULT_RAM + vb.cpus = node[:cpus] || VM_DEFAULT_CPUS + vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] + end + + # Moved provisioning to run_tests.sh (uncomment if needed) + # Some basic provisioning that will likely be useful in most cases. + # Previously we did this with inline, but using ansible is probably better. + #node_config.vm.provision "ansible", playbook: "ansible_vagrant_provisioning.yml" + + # Running a playbook generates an inventory file each time (**SO RUN THIS PROVISIONING LAST**). + # + # The inventory file will get generated at: + # .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory + # + # Provisioning happens after each node is up and running and will result in partial + # inventories containing only the running nodes until the last node is provisioned. + # We don't need to worry about the partial inventories since the inventory + # file is overridden on each provisioning invocation (i.e. the last invocation will + # generate a full inventory and it will override any existing inventory). 
+ node_config.vm.provision "ansible", run: "always" do |ansible| + ansible.playbook = "noop.yml" + ansible.limit = node[:hostname] + ansible.groups = { + "all" => nodes.map{ |n| n[:hostname] }, + } + end + end end + end diff --git a/tests/ansible.cfg b/tests/ansible.cfg index 357db31..a21c0c1 100644 --- a/tests/ansible.cfg +++ b/tests/ansible.cfg @@ -1,6 +1,10 @@ +# config file for ansible -- http://docs.ansible.com/intro_configuration.html +# =========================================================================== + [defaults] -host_key_checking=False +host_key_checking = False [privilege_escalation] -become=True -become-user=root +# http://docs.ansible.com/ansible/intro_configuration.html#privilege-escalation-settings +# http://docs.ansible.com/ansible/become.html#become-privilege-escalation +become = True diff --git a/tests/ansible_vagrant_provisioning.yml b/tests/ansible_vagrant_provisioning.yml new file mode 100644 index 0000000..7523d0c --- /dev/null +++ b/tests/ansible_vagrant_provisioning.yml @@ -0,0 +1,48 @@ +--- +# A basic provisioning script, update as needed. + +- name: Provision vagrant boxes + hosts: all + vars: + timezone: America/Los_Angeles + apt_cache_valid_time_sec: 86400 # 1 day + + tasks: + - name: update apt cache + apt: + update_cache: yes + cache_valid_time: "{{ apt_cache_valid_time_sec }}" + + # https://help.ubuntu.com/community/UbuntuTime#Using_the_Command_Line_.28unattended.29 + - name: set timezone + copy: + content: "{{ timezone }}\\n" + dest: /etc/timezone + owner: root + group: root + mode: 0644 + backup: yes + notify: + - update timezone + + - name: get vagrant private key + slurp: + src: /home/vagrant/.ssh/authorized_keys + register: authorized_keys + + # https://help.ubuntu.com/community/SSH/OpenSSH/Keys + - name: "create /root/.ssh" + file: + path: /root/.ssh + state: directory + mode: 0700 + + - name: copying vagrant private key to root user... 
+ copy: + content: "{{ authorized_keys['content'] | b64decode }}" + dest: /root/.ssh/authorized_keys + mode: 0600 + + handlers: + - name: update timezone + command: dpkg-reconfigure --frontend noninteractive tzdata diff --git a/tests/busybox.tar b/tests/busybox.tar deleted file mode 100644 index 0cfc115..0000000 Binary files a/tests/busybox.tar and /dev/null differ diff --git a/tests/new_install.yml b/tests/new_install.yml index 8723be4..e13de7a 100644 --- a/tests/new_install.yml +++ b/tests/new_install.yml @@ -2,7 +2,7 @@ - hosts: all vars: - docker_version: 1.12.1 + docker_version: 1.11.2 roles: # install recent version of Docker - role: docker @@ -15,7 +15,7 @@ # variable - before or after the operation - that has '{{' in it. shell: 'OB="{" && docker version -f "$OB$OB .Client.Version }}"' register: r_docker_client_version - - name: get docker client version + - name: get docker server version shell: 'OB="{" && docker version -f "$OB$OB .Server.Version }}"' register: r_docker_server_version - name: check version results @@ -23,3 +23,4 @@ that: - r_docker_client_version.stdout == docker_version - r_docker_server_version.stdout == docker_version + when: not check_mode diff --git a/tests/noop.yml b/tests/noop.yml new file mode 100644 index 0000000..2433876 --- /dev/null +++ b/tests/noop.yml @@ -0,0 +1,22 @@ +--- +# noop playbook used by Vagrantfile to auto generate inventory file. We call it noop +# because ideally it would do nothing, but it is currently adding a variable to the +# inventory in case we have users not yet running vagrant 1.8.0. + +# It is possible to set ansible_ssh_user with ansible but it requires vagrant 1.8.0 +# https://www.vagrantup.com/docs/provisioning/ansible_intro.html +# so for now we run a play in here. + +# We only have a single host group, but we limit the inventory to just the host that +# just got provisioned when we run this playbook. 
+- hosts: all + tasks: + - name: define ansible_ssh_user as vagrant in vagrant_ansible_inventory + lineinfile: + line: | + + [all:vars] + ansible_ssh_user=vagrant + state: present + insertafter: EOF + dest: /vagrant/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory diff --git a/tests/old_install.yml b/tests/old_install.yml index 7f8a604..fc55e2c 100644 --- a/tests/old_install.yml +++ b/tests/old_install.yml @@ -9,6 +9,8 @@ tasks: - name: verify that Docker is running command: docker ps + + # these two will error if no match is found - name: verify Docker client version shell: "docker version | grep 'Client version: {{ docker_version }}'" - name: verify Docker server version diff --git a/tests/requirements.txt b/tests/requirements.txt index 90d4055..8c095a9 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1 +1,8 @@ -ansible +ansible==1.9.6 +ecdsa==0.13 +Jinja2==2.7.3 +MarkupSafe==0.23 +paramiko==1.15.2 +pycrypto==2.6.1 +PyYAML==3.11 +netaddr diff --git a/tests/restart_on_config_change_test.yml b/tests/restart_on_config_change_test.yml index cadf8ac..89c84da 100644 --- a/tests/restart_on_config_change_test.yml +++ b/tests/restart_on_config_change_test.yml @@ -2,13 +2,13 @@ - hosts: all tasks: - name: get docker process PID - command: "pgrep -f '/usr/bin/dockerd --raw-logs'" + command: "pgrep -f '/usr/bin/docker daemon --raw-logs'" register: r_docker_pid - hosts: all vars: # same version as already installed - docker_version: 1.12.1 + docker_version: 1.11.2 # .... 
but different config docker_daemon_flags: "--log-opt max-file=3" roles: @@ -19,7 +19,7 @@ tasks: # will fail if there's no docker process running with matching command line - name: get new docker process PID - command: "pgrep -f '/usr/bin/dockerd --log-opt max-file=3 --raw-logs'" + command: "pgrep -f '/usr/bin/docker daemon --log-opt max-file=3 --raw-logs'" register: r_new_docker_pid - assert: that: r_docker_pid.stdout != r_new_docker_pid.stdout diff --git a/tests/run_tests.sh b/tests/run_tests.sh index 3409667..c48581b 100755 --- a/tests/run_tests.sh +++ b/tests/run_tests.sh @@ -3,6 +3,7 @@ BUILD_DIR=./build VENV_DIR=$BUILD_DIR/test_venv INVENTORY=$BUILD_DIR/inventory +VAGRANT_INVENTORY=.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory function before_run() { if [ "$VIRTUAL_ENV" != "" ] ; then @@ -18,16 +19,30 @@ function before_run() { } function before_test() { + # TODO: + # It might be faster to rollback to a snapshot taken before a run here + # since updating apt cache can take a bit of time. + # Unfortunately, it would require users installing more plugins or + # updating vagrant. Snapshots are built into vagrant 1.8.0: + # https://www.hashicorp.com/blog/vagrant-1-8.html vagrant up - vagrant ssh-config | awk ' - BEGIN { entry = "[all]" }; - /^Host / { print entry; entry = $2 }; - /^ HostName / { entry = entry " ansible_host=" $2 }; - /^ User / { entry = entry " ansible_user=" $2 }; - /^ Port / { entry = entry " ansible_port=" $2 }; - /^ IdentityFile / { entry = entry " ansible_ssh_private_key_file=" $2 }; - END { print entry }' > $INVENTORY + # create a link to generated inventory file for convenience + # don't overwrite in case user wants to use a different inventory + if [ ! -e $INVENTORY ] ; then + ln -s $(readlink -f $VAGRANT_INVENTORY) $INVENTORY + fi + + # This was a part of the Vagrantfile, but moved here for faster results. 
+ provision_vagrant_hosts +} + +function provision_vagrant_hosts() { + echo " ------------------------------ run provisioning script" + ansible-playbook -v -i $INVENTORY ansible_vagrant_provisioning.yml + local result=$? + echo " ------------------------------ return code: $result" + return $result } function run_ansible_playbook() { @@ -43,7 +58,6 @@ function after_test() { } ALL_TESTS="old_install new_install restart_on_config_change" - function run_old_install_test() { run_ansible_playbook old_install.yml } diff --git a/tests/vagrant.inv b/tests/vagrant.inv deleted file mode 100644 index dc3c4cd..0000000 --- a/tests/vagrant.inv +++ /dev/null @@ -1,2 +0,0 @@ -[all] -vagrant-docker-test