From da080c472fc415b0ce918f4dd4a1ab143bb1bca4 Mon Sep 17 00:00:00 2001
From: Philip Hands
Date: Mon, 14 Mar 2016 15:36:16 +0100
Subject: rough attempt to grab the good cucumber bits from recent tails

---
 features/support/helpers/storage_helper.rb | 135 ++++++++++++++++++++++-------
 1 file changed, 104 insertions(+), 31 deletions(-)

(limited to 'features/support/helpers/storage_helper.rb')

diff --git a/features/support/helpers/storage_helper.rb b/features/support/helpers/storage_helper.rb
index 80a1e1e0..21537a92 100644
--- a/features/support/helpers/storage_helper.rb
+++ b/features/support/helpers/storage_helper.rb
@@ -7,30 +7,43 @@
 # sense.

 require 'libvirt'
+require 'guestfs'
 require 'rexml/document'
 require 'etc'

 class VMStorage

-  @@virt = nil
-
   def initialize(virt, xml_path)
-    @@virt ||= virt
+    @virt = virt
     @xml_path = xml_path
     pool_xml = REXML::Document.new(File.read("#{@xml_path}/storage_pool.xml"))
     pool_name = pool_xml.elements['pool/name'].text
+    @pool_path = "#{$config["TMPDIR"]}/#{pool_name}"
     begin
-      @pool = @@virt.lookup_storage_pool_by_name(pool_name)
+      @pool = @virt.lookup_storage_pool_by_name(pool_name)
     rescue Libvirt::RetrieveError
-      # There's no pool with that name, so we don't have to clear it
-    else
+      @pool = nil
+    end
+    if @pool and not(KEEP_SNAPSHOTS)
       VMStorage.clear_storage_pool(@pool)
+      @pool = nil
+    end
+    unless @pool
+      pool_xml.elements['pool/target/path'].text = @pool_path
+      @pool = @virt.define_storage_pool_xml(pool_xml.to_s)
+      if not(Dir.exists?(@pool_path))
+        # We'd like to use @pool.build, which will just create the
+        # @pool_path directory, but it does so with root:root as owner
+        # (at least with libvirt 1.2.21-2). libvirt itself can handle
+        # that situation, but guestfs (at least with <=
+        # 1:1.28.12-1+b3) cannot when invoked by a non-root user,
+        # which we want to support.
+        FileUtils.mkdir(@pool_path)
+        FileUtils.chown(nil, 'libvirt-qemu', @pool_path)
+        FileUtils.chmod("ug+wrx", @pool_path)
+      end
     end
-    @pool_path = "#{$tmp_dir}/#{pool_name}"
-    pool_xml.elements['pool/target/path'].text = @pool_path
-    @pool = @@virt.define_storage_pool_xml(pool_xml.to_s)
-    @pool.build
-    @pool.create
+    @pool.create unless @pool.active?
     @pool.refresh
   end

@@ -65,10 +78,23 @@ class VMStorage
     VMStorage.clear_storage_pool_volumes(@pool)
   end

+  def delete_volume(name)
+    @pool.lookup_volume_by_name(name).delete
+  end
+
   def create_new_disk(name, options = {})
     options[:size] ||= 2
     options[:unit] ||= "GiB"
     options[:type] ||= "qcow2"
+    # Require 'slightly' more space to be available to give a bit more leeway
+    # with rounding, temp file creation, etc.
+    reserved = 500
+    needed = convert_to_MiB(options[:size].to_i, options[:unit])
+    avail = convert_to_MiB(get_free_space('host', @pool_path), "KiB")
+    assert(avail - reserved >= needed,
+           "Error creating disk \"#{name}\" in \"#{@pool_path}\". " \
+           "Need #{needed} MiB but only #{avail} MiB is available of " \
+           "which #{reserved} MiB is reserved for other temporary files.")
     begin
       old_vol = @pool.lookup_volume_by_name(name)
     rescue Libvirt::RetrieveError
@@ -116,28 +142,75 @@ class VMStorage
     @pool.lookup_volume_by_name(name).path
   end

-  # We use parted for the disk_mk* functions since it can format
-  # partitions "inside" the super block device; mkfs.* need a
-  # partition device (think /dev/sdaX), so we'd have to use something
-  # like losetup or kpartx, which would require administrative
-  # privileges. These functions only work for raw disk images.
-
-  # TODO: We should switch to guestfish/libguestfs (which has
-  # ruby-bindings) so we could use qcow2 instead of raw, and more
-  # easily use LVM volumes.
-
-  # For type, see label-type for mklabel in parted(8)
-  def disk_mklabel(name, type)
-    assert_equal("raw", disk_format(name))
-    path = disk_path(name)
-    cmd_helper("/sbin/parted -s '#{path}' mklabel #{type}")
+  def disk_mklabel(name, parttype)
+    disk = {
+      :path => disk_path(name),
+      :opts => {
+        :format => disk_format(name)
+      }
+    }
+    guestfs_disk_helper(disk) do |g, disk_handle|
+      g.part_init(disk_handle, parttype)
+    end
   end

-  # For fstype, see fs-type for mkfs in parted(8)
-  def disk_mkpartfs(name, fstype)
-    assert(disk_format(name), "raw")
-    path = disk_path(name)
-    cmd_helper("/sbin/parted -s '#{path}' mkpartfs primary '#{fstype}' 0% 100%")
+  def disk_mkpartfs(name, parttype, fstype, opts = {})
+    opts[:label] ||= nil
+    opts[:luks_password] ||= nil
+    disk = {
+      :path => disk_path(name),
+      :opts => {
+        :format => disk_format(name)
+      }
+    }
+    guestfs_disk_helper(disk) do |g, disk_handle|
+      g.part_disk(disk_handle, parttype)
+      g.part_set_name(disk_handle, 1, opts[:label]) if opts[:label]
+      primary_partition = g.list_partitions()[0]
+      if opts[:luks_password]
+        g.luks_format(primary_partition, opts[:luks_password], 0)
+        luks_mapping = File.basename(primary_partition) + "_unlocked"
+        g.luks_open(primary_partition, opts[:luks_password], luks_mapping)
+        luks_dev = "/dev/mapper/#{luks_mapping}"
+        g.mkfs(fstype, luks_dev)
+        g.luks_close(luks_dev)
+      else
+        g.mkfs(fstype, primary_partition)
+      end
+    end
+  end
+
+  def disk_mkswap(name, parttype)
+    disk = {
+      :path => disk_path(name),
+      :opts => {
+        :format => disk_format(name)
+      }
+    }
+    guestfs_disk_helper(disk) do |g, disk_handle|
+      g.part_disk(disk_handle, parttype)
+      primary_partition = g.list_partitions()[0]
+      g.mkswap(primary_partition)
+    end
+  end
+
+  def guestfs_disk_helper(*disks)
+    assert(block_given?)
+    g = Guestfs::Guestfs.new()
+    g.set_trace(1)
+    message_callback = Proc.new do |event, _, message, _|
+      debug_log("libguestfs: #{Guestfs.event_to_string(event)}: #{message}")
+    end
+    g.set_event_callback(message_callback,
+                         Guestfs::EVENT_TRACE)
+    g.set_autosync(1)
+    disks.each do |disk|
+      g.add_drive_opts(disk[:path], disk[:opts])
+    end
+    g.launch()
+    yield(g, *g.list_devices())
+  ensure
+    g.close
   end
 end

-- 
cgit v1.2.3-54-g00ecf
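
A minimal standalone sketch of the libguestfs calls these helpers rely on, for readers unfamiliar with the Ruby bindings: it mirrors the add_drive_opts/launch/part_disk/mkfs sequence that guestfs_disk_helper and the non-LUKS branch of disk_mkpartfs wrap, run against a throwaway raw image. The image path, size, partition table type ("mbr") and filesystem ("ext4") are illustrative assumptions, not values taken from the test suite.

    #!/usr/bin/env ruby
    # Sketch: partition and format a scratch raw image with the libguestfs
    # Ruby bindings. Image path and size are arbitrary, for illustration only.
    require 'guestfs'
    require 'fileutils'

    image = "/tmp/example-disk.img"        # hypothetical scratch image
    File.open(image, "wb") { |f| f.truncate(128 * 1024 * 1024) }  # 128 MiB sparse file

    g = Guestfs::Guestfs.new()
    begin
      g.set_autosync(1)                    # sync and unmount automatically on close
      g.add_drive_opts(image, :format => "raw")
      g.launch()
      device = g.list_devices()[0]         # e.g. /dev/sda inside the appliance
      g.part_disk(device, "mbr")           # one primary partition spanning the disk
      partition = g.list_partitions()[0]
      g.mkfs("ext4", partition)            # plain (non-LUKS) filesystem creation
    ensure
      g.close
    end
    FileUtils.rm_f(image)

Because everything runs inside the libguestfs appliance, no root privileges, losetup or kpartx are needed on the host, which is exactly what the removed parted-based helpers could not offer.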