def attach_additional_disk(vm, disksize, targetdev):
    """
    Create a qemu image of the requested size and attach it to a VM.

    :param vm: Libvirt VM object.
    :param disksize: size of attached disk
    :param targetdev: target of disk device
    :return: (True, image path) on success,
             (False, qemu-img output or virsh CmdResult) on failure.
    """
    logging.info("Attaching disk...")
    image_path = os.path.join(data_dir.get_tmp_dir(), targetdev)
    create_cmd = "qemu-img create %s %s" % (image_path, disksize)
    rc, out = commands.getstatusoutput(create_cmd)
    if rc:
        return (False, out)
    # To confirm attached device do not exist.
    virsh.detach_disk(vm.name, targetdev, extra="--config")
    result = virsh.attach_disk(vm.name, image_path, targetdev,
                               extra="--config", debug=True)
    if result.exit_status:
        return (False, result)
    return (True, image_path)
def attach_additional_device(vm_name, targetdev, disk_path, params):
    """
    Build a disk-device XML from params and attach it to the given VM.

    NOTE: ``params`` is mutated in place ('source_file' and 'target_dev'
    are overwritten) before the XML is generated.

    :param vm_name: Libvirt VM name.
    :param targetdev: target of disk device
    :param disk_path: path of attached disk
    :param params: dict include necessary configurations of device
    :return: CmdResult of the ``virsh attach-device --config`` call
    """
    logging.info("Attaching disk...")
    # Update params for source file
    params['source_file'] = disk_path
    params['target_dev'] = targetdev
    # Create a file of device
    xmlfile = create_disk_xml(params)
    # To confirm attached device do not exist.
    virsh.detach_disk(vm_name, targetdev, extra="--config")
    return virsh.attach_device(domain_opt=vm_name, file_opt=xmlfile,
                               flagstr="--config", debug=True)
# NOTE(review): fragment of a larger test function; names such as vm,
# vm_name, device_source, os_type, test_cmd, disks, devices, ... are
# defined outside this excerpt.
vm.start()
vm.wait_for_login()
# Create virtual device file.
create_device_file(device_source)
# Add acpiphp module before testing if VM's os type is rhle5.*
if not acpiphp_module_modprobe(vm, os_type):
    raise error.TestError("Add acpiphp module failed before test.")
# If we are testing cdrom device, we need to detach hdc in VM first.
if device == "cdrom":
    if vm.is_alive():
        vm.destroy(gracefully=False)
    s_detach = virsh.detach_disk(vm_name, device_target, "--config")
    # NOTE(review): virsh.detach_disk() appears to return a CmdResult
    # object (cf. exit_status checks elsewhere in this file), which is
    # always truthy — this error branch can likely never fire; the exit
    # status should probably be checked instead. TODO: confirm and fix.
    if not s_detach:
        logging.error("Detach hdc failed before test.")
    vm.start()
# If we are testing detach-disk, we need to attach certain device first.
if test_cmd == "detach-disk" and no_attach != "yes":
    # IDE does not support hotplug, so attach with the domain down.
    if bus_type == "ide" and vm.is_alive():
        vm.destroy(gracefully=False)
    s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                 "--driver qemu --config").exit_status
    if s_attach != 0:
        logging.error("Attaching device failed before testing detach-disk")
    if vm.is_dead():
        vm.start()
# Check disk save and restore.
if test_disk_save_restore:
    save_file = "/tmp/%s.save" % vm_name
    check_disk_save_restore(save_file, device_targets,
                            startup_policy)
    # Best-effort cleanup of the save file.
    if os.path.exists(save_file):
        os.remove(save_file)
# If we testing hotplug, detach the disk at last.
if device_at_dt_disk:
    for i in range(len(disks)):
        dt_options = ""
        if devices[i] == "cdrom":
            dt_options = "--config"
        ret = virsh.detach_disk(vm_name, device_targets[i],
                                dt_options)
        libvirt.check_exit_status(ret)
    # Check disks in VM after hotunplug.
    if check_patitions_hotunplug:
        if not check_vm_partitions(devices,
                                   device_targets, False):
            raise error.TestFail("See device in VM after hotunplug")
elif hotplug:
    for i in range(len(disks_xml)):
        # Skip devices whose attach was expected to fail.
        if len(device_attach_error) > i:
            if device_attach_error[i] == "yes":
                continue
        ret = virsh.detach_device(vm_name, disks_xml[i].xml,
                                  flagstr=attach_option)
        os.remove(disks_xml[i].xml)
# ......... part of the code omitted here .........
# NOTE(review): fragment — the `finally:` below belongs to a `try:` that
# is not part of this excerpt; snapshot_name, vm, test, image, ... come
# from the enclosing (unseen) test function.
if not libvirt_version.version_compare(1, 2, 3):
    virsh.destroy(vm_name)
# Revert snapshot.
revert_options = ""
if snapshot_revert_paused:
    revert_options += " --paused"
revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                      revert_options,
                                      debug=True)
if revert_result.exit_status:
    # Attempts to revert external snapshots will FAIL with an error
    # "revert to external disk snapshot not supported yet" or "revert
    # to external snapshot not supported yet" since d410e6f. Thus,
    # let's check for that and handle as a SKIP for now. Check bug:
    # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
    # NOTE(review): the pattern should be a raw string (r"...") — the
    # escape `\w` works only by accident here. TODO: confirm and fix.
    if re.search("revert to external \w* ?snapshot not supported yet",
                 revert_result.stderr):
        test.cancel(revert_result.stderr.strip())
    else:
        test.fail("Revert snapshot failed. %s" %
                  revert_result.stderr.strip())
if vm.is_dead():
    test.fail("Revert snapshot failed.")
if snapshot_revert_paused:
    if vm.is_paused():
        vm.resume()
    else:
        # NOTE(review): "successed" is a typo in this runtime message;
        # left untouched here because only comments may change.
        test.fail("Revert command successed, but VM is not "
                  "paused after reverting with --paused"
                  " option.")
# login vm.
session = vm.wait_for_login()
# Check the result of revert.
# `cat` exit status 0 means the temp file still exists, i.e. the
# revert did not roll the disk back.
status, output = session.cmd_status_output("cat %s" % tmp_file_path)
logging.debug("After revert cat file output='%s'", output)
if not status:
    test.fail("Tmp file exists, revert failed.")
# Close the session.
session.close()
# Test delete snapshot without "--metadata", delete external disk
# snapshot will fail for now.
# Only do this when snapshot creat succeed which filtered in cfg file.
if snapshot_del_test:
    if snapshot_name:
        del_result = virsh.snapshot_delete(vm_name, snapshot_name,
                                           debug=True,
                                           ignore_status=True)
        del_status = del_result.exit_status
        snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
        if del_status:
            if not status_error:
                test.fail("Failed to delete snapshot.")
            else:
                if not os.path.exists(snap_xml_path):
                    test.fail("Snapshot xml file %s missing"
                              % snap_xml_path)
        else:
            if status_error:
                err_msg = "Snapshot delete succeed but expect fail."
                test.fail(err_msg)
            else:
                if os.path.exists(snap_xml_path):
                    test.fail("Snapshot xml file %s still"
                              % snap_xml_path + " exist")
finally:
    # Unconditional cleanup: power off, drop attached disk, remove
    # test image, snapshots, gluster/pool resources.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
    if image:
        image.remove()
    # NOTE(review): del_status is only bound when snapshot_del_test ran;
    # presumably it is pre-initialized earlier in the unseen code.
    if del_status and snapshot_name:
        virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
    for disk in snapshot_external_disk:
        if os.path.exists(disk):
            os.remove(disk)
    vmxml_backup.sync("--snapshots-metadata")
    libvirtd = utils_libvirtd.Libvirtd()
    if disk_source_protocol == 'gluster':
        utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
        if multi_gluster_disks:
            brick_path = os.path.join(tmp_dir, "gluster-pool2")
            utlv.setup_or_cleanup_gluster(False, "gluster-vol2", brick_path)
        libvirtd.restart()
    if snapshot_xml_path:
        if os.path.exists(snapshot_xml_path):
            os.unlink(snapshot_xml_path)
    if pvt:
        try:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, source_name=vol_name)
        except exceptions.TestFail as detail:
            # Pool cleanup is best-effort: restart libvirtd and log.
            libvirtd.restart()
            logging.error(str(detail))
def run_svirt_attach_disk(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.

    :param test: test object (standard entry-point signature)
    :param params: test parameter dict
    :param env: test environment object holding the VM
    :raises error.TestFail: when attach/detach fails or the start result
                            contradicts the expected status_error
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in disks.values():
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)
    # Init a QemuImg instance.
    params['image_name'] = img_name
    tmp_dir = data_dir.get_tmp_dir()
    image = qemu_storage.QemuImg(params, tmp_dir, img_name)
    # Create a image.
    img_path, result = image.create(params)
    # Set the context of the image.
    utils_selinux.set_context_of_file(filename=img_path, context=img_label)
    # Set the context of the VM.
    vmxml.set_seclabel(sec_dict)
    vmxml.sync()
    # Do the attach action.
    try:
        virsh.attach_disk(vm_name, source=img_path, target="vdf",
                          extra="--persistent", ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Attach disk %s to vdf on VM %s failed."
                             % (img_path, vm.name))
    # Check result.
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                # Fixed typo: was "Test successed in negative case."
                raise error.TestFail('Test succeeded in negative case.')
        # Fixed Python-2-only "except X, e" syntax; "as" works on 2.6+/3.x.
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
        image.remove()
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
# NOTE(review): fragment of a migration test — the `try:` below has no
# except/finally in this excerpt; options, vm, shared_storage, ... are
# defined in unseen code.
# Get expected cache state for test
attach_scsi_disk = "yes" == params.get("attach_scsi_disk", "no")
disk_cache = params.get("virsh_migrate_disk_cache", "none")
# Migration with a non-"none" cache is only safe with --unsafe.
unsafe_test = False
if options.count("unsafe") and disk_cache != "none":
    unsafe_test = True
exception = False
try:
    # Change the disk of the vm to shared disk
    if vm.is_alive():
        vm.destroy(gracefully=False)
    devices = vm.get_blk_devices()
    for device in devices:
        s_detach = virsh.detach_disk(vm_name, device, "--config", debug=True)
        # NOTE(review): detach_disk appears to return a CmdResult, which
        # is always truthy — this branch can likely never fire; compare
        # s_detach.exit_status instead. TODO: confirm and fix.
        if not s_detach:
            logging.error("Detach vda failed before test.")
    # Re-attach the shared image with the desired cache mode.
    subdriver = utils_test.get_image_info(shared_storage)['format']
    extra_attach = ("--config --driver qemu --subdriver %s --cache %s"
                    % (subdriver, disk_cache))
    s_attach = virsh.attach_disk(vm_name, shared_storage, "vda",
                                 extra_attach, debug=True)
    if s_attach.exit_status != 0:
        logging.error("Attach vda failed before test.")
    # Attach a scsi device for special testcases
    if attach_scsi_disk:
        shared_dir = os.path.dirname(shared_storage)
        scsi_disk = "%s/scsi_test.img" % shared_dir
# ......... part of the code omitted here .........
    # NOTE(review): this block appears to be one branch of an if/elif
    # chain on domain_operation whose opening `if` is not in this
    # excerpt (the dangling `elif domain_operation == "":` below
    # belongs to that chain).
    # snapshot-info, snapshot-dumpxml, snapshot-create
    snapshot_name1 = "snap1"
    snapshot_name2 = "snap2"
    cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                          **virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    try:
        virsh.snapshot_list(vm_name, **virsh_dargs)
    except process.CmdError:
        test.fail("Failed getting snapshots list for %s" % vm_name)
    try:
        virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
    except process.CmdError:
        test.fail("Failed getting snapshots info for %s" % vm_name)
    cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                        **virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    # Create a second, disk-only external snapshot.
    snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2)
    sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                    % (snapshot_name2, disk_target, snapshot_file))
    cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                          **virsh_dargs)
    libvirt.check_exit_status(cmd_result)
    cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                       **virsh_dargs)
    # The second snapshot must still be listed after the revert.
    cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
    if snapshot_name2 not in cmd_result:
        test.error("Snapshot %s not found" % snapshot_name2)
elif domain_operation == "":
    logging.debug("No domain operation provided, so skip it")
else:
    logging.error("Unsupport operation %s in this case, so skip it",
                  domain_operation)
def find_attach_disk(expect=True):
    """
    Find attached disk inside the VM

    Greps /proc/partitions in the guest for disk_target and compares the
    outcome with *expect*; fails the test via test.error on mismatch.
    Closure over vm, vm_name, test, disk_target and on_ppc from the
    enclosing test function.

    :param expect: True if the disk must be visible in the guest,
                   False if it must be absent
    """
    found_disk = False
    if vm.is_dead():
        test.error("Domain %s is not running" % vm_name)
    else:
        try:
            session = vm.wait_for_login()
            # Here the script needs wait for a while for the guest to
            # recognize the hotplugged disk on PPC
            if on_ppc:
                time.sleep(10)
            cmd = "grep %s /proc/partitions" % disk_target
            s, o = session.cmd_status_output(cmd)
            logging.info("%s output: %s", cmd, o)
            session.close()
            # grep exit status 0 means the target showed up.
            if s == 0:
                found_disk = True
        except (LoginError, VMError, ShellError) as e:
            logging.error(str(e))
    if found_disk == expect:
        logging.debug("Check disk inside the VM PASS as expected")
    else:
        test.error("Check disk inside the VM FAIL")
# NOTE(review): fragment — the `finally:` below belongs to a `try:`
# outside this excerpt.
# Check disk inside the VM, expect is False if status_error=True
find_attach_disk(not status_error)
# Detach disk
cmd_result = virsh.detach_disk(vm_name, disk_target)
libvirt.check_exit_status(cmd_result, status_error)
# Check disk inside the VM
find_attach_disk(False)
finally:
    # Clean up snapshot
    # Shut down before cleaning up snapshots
    if vm.is_alive():
        vm.destroy()
    libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
    # Restore vm
    vmxml_backup.sync("--snapshots-metadata")
    # Destroy pool and undefine secret, which may not exist
    try:
        if disk_type == "volume":
            virsh.pool_destroy(disk_src_pool)
        if chap_auth:
            virsh.secret_undefine(secret_uuid)
    except Exception:
        # Deliberate best-effort: these resources may not exist.
        pass
    libvirt.setup_or_cleanup_iscsi(is_setup=False)
# (Page footer from the code-sample website: "Please leave a comment" — not part of the source code.)