import pytest
import time

-from .conftest import GROUP_NAME, LINSTOR_PACKAGE
+from .conftest import GROUP_NAME, LINSTOR_PACKAGE, LINSTOR_RELEASE_PACKAGE
from lib.commands import SSHCommandFailed
from lib.common import wait_for, vm_image
from tests.storage import vdi_is_open
@@ -137,6 +137,95 @@ def test_linstor_sr_expand_disk(self, linstor_sr, provisioning_type, storage_pool_name,
        # Ensure VM is able to start and shutdown on expanded SR
        self.test_start_and_shutdown_VM(vm)

+    @pytest.mark.small_vm
+    def test_linstor_sr_expand_host(self, linstor_sr, host, hostB1, provisioning_type,
+                                    storage_pool_name, vm_on_linstor_sr):
+        """
+        Join a new host to the pool, install the LINSTOR packages on it,
+        detect its free disks, create the LVM layer, and integrate the host
+        into the LINSTOR SR.
+        """
+        sr = linstor_sr
+        vm = vm_on_linstor_sr
+        vm.start()
+        sr_size = sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'})
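+        # 'physical-size' is reported in bytes; keep the pre-expansion value
+        # as the baseline for the final size check.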
+        # Ensure hostB1 is a standalone single-host pool, not part of a multi-host pool.
+        assert len(hostB1.pool.hosts) == 1, "This test requires the second host to be in its own single-host pool"
+        # The second host must have free disks; disk symmetry across the pool does not matter.
+        available_disks = hostB1.available_disks()
+        assert len(available_disks) >= 1, "This test requires the second host to have at least one free disk"
+        if not hostB1.is_package_installed(LINSTOR_PACKAGE):
+            logging.info(f"Installing {LINSTOR_PACKAGE} on host {hostB1}...")
+            hostB1.yum_install([LINSTOR_RELEASE_PACKAGE])
+            hostB1.yum_install([LINSTOR_PACKAGE], enablerepo="xcp-ng-linstor-testing")
+            # Needed because the linstor driver is not in the xapi sm-plugins list
+            # before installing the LINSTOR packages.
+            hostB1.ssh(["systemctl", "restart", "multipathd"])
+            hostB1.restart_toolstack(verify=True)
+
+        devices = [f"/dev/{disk}" for disk in available_disks]
+
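+        # 'pvcreate -ff -y' force-wipes any existing LVM metadata, so
+        # available_disks() is expected to return only disks safe to erase.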
+        for device in devices:
+            logging.info(f"* Preparing disk {device} *")
+            hostB1.ssh(['pvcreate', '-ff', '-y', device])
+
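+        # The VG name must match the GROUP_NAME already used on the existing
+        # pool members, since each LINSTOR node backs the storage pool with a
+        # volume group of that name.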
+        hostB1.ssh(['vgcreate', GROUP_NAME] + devices)
+
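+        # Recompute the LINSTOR group name derived by the driver at SR
+        # creation: "/" flattened to "_", prefixed with "xcp-sr-".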
+        sr_group_name = "xcp-sr-" + storage_pool_name.replace("/", "_")
+        hostname_hostB1 = hostB1.xe('host-param-get', {'uuid': hostB1.uuid,
+                                                       'param-name': 'name-label'})
+
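+        # The controller may run on any pool member, so pass all of them and
+        # let the linstor client find the active one.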
+        controller_option = "--controllers=" + ",".join(
+            member.hostname_or_ip for member in host.pool.hosts)
+
+        hostB1_pool = hostB1.pool  # Save hostB1's pool object; join_pool overwrites it.
+        try:
+            logging.info(f"Join host {hostB1} to pool {host}.")
+            # join_pool repoints hostB1.pool at host.pool, which would break
+            # the next parametrized run without the reference saved above.
+            hostB1.join_pool(host.pool)
| 188 | + logging.info(f"Current list of linstor nodes.") |
| 189 | + logging.info(host.ssh_with_result(["linstor", controller_option, "node", "list"]).stdout) |
| 190 | + logging.info(f"Creating linstor node") |
| 191 | + host.ssh(["linstor", controller_option, "node", "create", "--node-type", "combined", |
| 192 | + "--communication-type", "plain", hostname_hostB1, hostB1.hostname_or_ip]) # Linstor Node Create |
| 193 | + logging.info(hostB1.ssh_with_result(['systemctl', 'restart', 'linstor-satellite.service']).stdout) |
| 194 | + time.sleep(45) # Wait for node to come online |
| 195 | + logging.info(f"New list of linstor nodes.") |
| 196 | + logging.info(host.ssh_with_result(["linstor", controller_option, "node", "list"]).stdout) |
| 197 | + logging.info(f"Expanding with linstor node") |
+
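+            # For thin provisioning, the thin pool LV must exist before the
+            # LINSTOR storage pool is registered; for thick, the VG alone is
+            # enough ("lvmthin" vs "lvm" backend drivers).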
+            if provisioning_type == "thin":
+                hostB1.ssh(['lvcreate', '-l', '+100%FREE', '-T', storage_pool_name])
+                host.ssh(["linstor", controller_option, "storage-pool", "create", "lvmthin",
+                          hostname_hostB1, sr_group_name, storage_pool_name])
+            else:
+                host.ssh(["linstor", controller_option, "storage-pool", "create", "lvm",
+                          hostname_hostB1, sr_group_name, storage_pool_name])
+        except Exception as e:
+            logging.info(f"* Exception: {e} *")
+            # Roll back in reverse order of setup, then re-raise so the test fails.
+            host.ssh(["linstor", controller_option, "node", "delete", hostname_hostB1])
+            host.pool.eject_host(hostB1)
+            hostB1.ssh(['vgremove', '-y', GROUP_NAME])
+            hostB1.ssh(['pvremove', '-y'] + devices)
+            hostB1.yum_remove([LINSTOR_PACKAGE])
+            raise
+
+        sr.scan()
+        new_sr_size = sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'})
+        assert int(new_sr_size) > int(sr_size), \
+            f"Expected SR size to increase but got old size: {sr_size}, new size: {new_sr_size}"
+        logging.info(f"* SR expansion completed from {sr_size} to {new_sr_size} *")
+        vm.shutdown(verify=True)
+        # Ensure VM is able to start and shutdown on expanded SR
+        self.test_start_and_shutdown_VM(vm)
+
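+        # Tear down on success as well, so the next parametrized run starts
+        # from a clean second host.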
| 224 | + host.ssh_with_result(["linstor", controller_option, "node", "delete", hostname_hostB1]).stdout |
| 225 | + host.pool.eject_host(hostB1) |
| 226 | + hostB1.pool = hostB1_pool # Post eject, reset hostB1.pool for next run ("thick") |
| 227 | + hostB1.yum_remove([LINSTOR_PACKAGE]) # Package cleanup |
| 228 | + |
    # *** tests with reboots (longer tests).

    @pytest.mark.reboot