summaryrefslogtreecommitdiffstats
path: root/cucumber/features/support/helpers/storage_helper.rb
blob: 3bbdb69c4645c3d465aff90a614387b5e09d49dd (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
# Helper class for manipulating VM storage *volumes*, i.e. it deals
# only with creation of images and keeps a name => volume path lookup
# table (plugging drives or getting info of plugged devices is done in
# the VM class). We'd like better coupling, but given the ridiculous
# disconnect between Libvirt::StoragePool and Libvirt::Domain (hint:
# they have nothing to do with each other whatsoever) it's what makes
# sense.

require 'etc'
require 'fileutils'
require 'guestfs'
require 'libvirt'
require 'rexml/document'

class VMStorage

  # Sets up (or re-attaches to) the libvirt storage pool that backs all
  # test VM disk images.
  #
  # virt     - a Libvirt::Connect handle.
  # xml_path - directory containing the storage_pool.xml and volume.xml
  #            templates.
  #
  # Any pre-existing pool with our name is torn down unless we are
  # keeping snapshots *and* its backing directory still exists.
  def initialize(virt, xml_path)
    @virt = virt
    @xml_path = xml_path
    pool_xml = REXML::Document.new(File.read("#{@xml_path}/storage_pool.xml"))
    pool_name = LIBVIRT_DOMAIN_NAME
    pool_xml.elements['pool/name'].text = pool_name
    # FIXME: hardwiring the .../vm-pools path seems like a poor effort
    @pool_path = "/srv/lvc/vm-pools/#{pool_name}"
    begin
      @pool = @virt.lookup_storage_pool_by_name(pool_name)
    rescue Libvirt::RetrieveError
      # No pool with that name is defined yet.
      @pool = nil
    end
    # The old pool is stale unless snapshots are kept and its backing
    # directory survived; in that case we must start from scratch.
    if @pool && !(KEEP_SNAPSHOTS && Dir.exist?(@pool_path))
      VMStorage.clear_storage_pool(@pool)
      @pool = nil
    end
    unless Dir.exist?(@pool_path)
      # We'd like to use @pool.build, which will just create the
      # @pool_path directory, but it does so with root:root as owner
      # (at least with libvirt 1.2.21-2). libvirt itself can handle
      # that situation, but guestfs (at least with <=
      # 1:1.28.12-1+b3) cannot when invoked by a non-root user,
      # which we want to support.
      FileUtils.mkdir(@pool_path)
      FileUtils.chown(nil, 'libvirt-qemu', @pool_path)
      FileUtils.chmod("ug+wrx", @pool_path)
    end
    unless @pool
      pool_xml.elements['pool/target/path'].text = @pool_path
      @pool = @virt.define_storage_pool_xml(pool_xml.to_s)
    end
    @pool.create unless @pool.active?
    @pool.refresh
  end

  # Deletes every volume in `pool`, temporarily activating the pool if
  # it is not running (and deactivating it again afterwards).
  def self.clear_storage_pool_volumes(pool)
    was_not_active = !pool.active?
    pool.create if was_not_active
    pool.list_volumes.each do |vol_name|
      pool.lookup_volume_by_name(vol_name).delete
    end
    pool.destroy if was_not_active
  rescue StandardError
    # Some of the above operations can fail if the pool's path was
    # deleted by external means; let's ignore that.
  end

  # Removes `pool` entirely: its volumes, its active state, and its
  # libvirt definition.
  def self.clear_storage_pool(pool)
    clear_storage_pool_volumes(pool)
    pool.destroy if pool.active?
    pool.undefine
  end

  # Tears down this instance's pool completely.
  def clear_pool
    VMStorage.clear_storage_pool(@pool)
  end

  # Deletes all volumes in this instance's pool but keeps the pool.
  def clear_volumes
    VMStorage.clear_storage_pool_volumes(@pool)
  end

  # Names of all volumes currently in the pool.
  def list_volumes
    @pool.list_volumes
  end

  # Deletes the volume called `name`.
  def delete_volume(name)
    @pool.lookup_volume_by_name(name).delete
  end

  # Creates a new empty disk image volume, replacing any existing
  # volume with the same name.
  #
  # options:
  #   :size - capacity (default 2); may be fractional.
  #   :unit - unit for :size, e.g. "GiB" (default) or "MiB".
  #   :type - image format (default "qcow2").
  #
  # Raises (via assert) when the host lacks enough free space.
  def create_new_disk(name, options = {})
    options[:size] ||= 2
    options[:unit] ||= "GiB"
    options[:type] ||= "qcow2"
    # Require 'slightly' more space to be available to give a bit more leeway
    # with rounding, temp file creation, etc.
    reserved = 500
    # Use to_f (not to_i) so fractional sizes are accounted for, matching
    # the capacity computation below.
    needed = convert_to_MiB(options[:size].to_f, options[:unit])
    avail = convert_to_MiB(get_free_space('host', @pool_path), "KiB")
    assert(avail - reserved >= needed,
           "Error creating disk \"#{name}\" in \"#{@pool_path}\". " \
           "Need #{needed} MiB but only #{avail} MiB is available of " \
           "which #{reserved} MiB is reserved for other temporary files.")
    begin
      old_vol = @pool.lookup_volume_by_name(name)
    rescue Libvirt::RetrieveError
      # noop: no previous volume with this name
    else
      old_vol.delete
    end
    # Own the image as the user libvirt runs QEMU under, so the guest
    # can actually use it.
    uid = Etc.getpwnam("libvirt-qemu").uid
    gid = Etc.getgrnam("libvirt-qemu").gid
    vol_xml = REXML::Document.new(File.read("#{@xml_path}/volume.xml"))
    vol_xml.elements['volume/name'].text = name
    size_b = convert_to_bytes(options[:size].to_f, options[:unit])
    vol_xml.elements['volume/capacity'].text = size_b.to_s
    vol_xml.elements['volume/target/format'].attributes["type"] = options[:type]
    vol_xml.elements['volume/target/path'].text = "#{@pool_path}/#{name}"
    vol_xml.elements['volume/target/permissions/owner'].text = uid.to_s
    vol_xml.elements['volume/target/permissions/group'].text = gid.to_s
    @pool.create_volume_xml(vol_xml.to_s)
    @pool.refresh
  end

  # Clones volume `from` into a new volume `to`, replacing any existing
  # volume called `to`.
  def clone_to_new_disk(from, to)
    begin
      old_to_vol = @pool.lookup_volume_by_name(to)
    rescue Libvirt::RetrieveError
      # noop: no previous volume with this name
    else
      old_to_vol.delete
    end
    from_vol = @pool.lookup_volume_by_name(from)
    xml = REXML::Document.new(from_vol.xml_desc)
    pool_path =
      REXML::Document.new(@pool.xml_desc).elements['pool/target/path'].text
    xml.elements['volume/name'].text = to
    xml.elements['volume/target/path'].text = "#{pool_path}/#{to}"
    @pool.create_volume_xml_from(xml.to_s, from_vol)
  end

  # Image format (e.g. "qcow2") of volume `name`, as reported by libvirt.
  def disk_format(name)
    vol = @pool.lookup_volume_by_name(name)
    vol_xml = REXML::Document.new(vol.xml_desc)
    vol_xml.elements['volume/target/format'].attributes["type"]
  end

  # Filesystem path of volume `name`.
  def disk_path(name)
    @pool.lookup_volume_by_name(name).path
  end

  # Writes an empty partition table of type `parttype` (e.g. "msdos",
  # "gpt") to disk `name`.
  def disk_mklabel(name, parttype)
    guestfs_disk_helper(name) do |g, disk_handle|
      g.part_init(disk_handle, parttype)
    end
  end

  # Partitions disk `name` with a single primary partition spanning the
  # whole disk, and creates a filesystem of type `fstype` on it.
  #
  # opts:
  #   :label         - partition name to set (GPT only), or nil.
  #   :luks_password - when given, the filesystem is created inside a
  #                    LUKS container using this passphrase.
  def disk_mkpartfs(name, parttype, fstype, opts = {})
    guestfs_disk_helper(name) do |g, disk_handle|
      g.part_disk(disk_handle, parttype)
      g.part_set_name(disk_handle, 1, opts[:label]) if opts[:label]
      primary_partition = g.list_partitions()[0]
      if opts[:luks_password]
        g.luks_format(primary_partition, opts[:luks_password], 0)
        luks_mapping = File.basename(primary_partition) + "_unlocked"
        g.luks_open(primary_partition, opts[:luks_password], luks_mapping)
        luks_dev = "/dev/mapper/#{luks_mapping}"
        g.mkfs(fstype, luks_dev)
        g.luks_close(luks_dev)
      else
        g.mkfs(fstype, primary_partition)
      end
    end
  end

  # Partitions disk `name` with a single primary partition and formats
  # it as swap.
  def disk_mkswap(name, parttype)
    guestfs_disk_helper(name) do |g, disk_handle|
      g.part_disk(disk_handle, parttype)
      primary_partition = g.list_partitions()[0]
      g.mkswap(primary_partition)
    end
  end

  # Launches a guestfs appliance with the given disks attached and
  # yields the handle plus one device handle per disk.
  #
  # Each element of `disks` is either a volume name (String, looked up
  # in our pool) or a Hash with :path and :opts passed straight to
  # add_drive_opts. The handle is always closed on exit, even on error.
  def guestfs_disk_helper(*disks)
    assert(block_given?)
    g = Guestfs::Guestfs.new()
    g.set_trace(1)
    message_callback = Proc.new do |event, _, message, _|
      debug_log("libguestfs: #{Guestfs.event_to_string(event)}: #{message}")
    end
    g.set_event_callback(message_callback,
                         Guestfs::EVENT_TRACE)
    g.set_autosync(1)
    disks.each do |disk|
      if disk.is_a?(String)
        g.add_drive_opts(disk_path(disk), format: disk_format(disk))
      elsif disk.is_a?(Hash)
        g.add_drive_opts(disk[:path], disk[:opts])
      else
        raise "cannot handle type '#{disk.class}'"
      end
    end
    g.launch()
    yield(g, *g.list_devices())
  ensure
    # Guard: g is nil if the assert or constructor above failed.
    g.close if g
  end

end