From a5d56e3b5443263b53b0487c81125123411bd0cf Mon Sep 17 00:00:00 2001 From: Philip Hands Date: Wed, 11 May 2016 17:11:01 +0200 Subject: move cucumber things under cucumber/ --- .../features/support/helpers/chatbot_helper.rb | 59 ++ cucumber/features/support/helpers/ctcp_helper.rb | 126 ++++ .../features/support/helpers/display_helper.rb | 48 ++ cucumber/features/support/helpers/exec_helper.rb | 79 +++ .../features/support/helpers/firewall_helper.rb | 121 ++++ cucumber/features/support/helpers/misc_helpers.rb | 253 ++++++++ cucumber/features/support/helpers/sikuli_helper.rb | 213 +++++++ .../features/support/helpers/sniffing_helper.rb | 43 ++ cucumber/features/support/helpers/sshd_helper.rb | 67 ++ .../features/support/helpers/storage_helper.rb | 216 +++++++ cucumber/features/support/helpers/vm_helper.rb | 676 +++++++++++++++++++++ 11 files changed, 1901 insertions(+) create mode 100644 cucumber/features/support/helpers/chatbot_helper.rb create mode 100644 cucumber/features/support/helpers/ctcp_helper.rb create mode 100644 cucumber/features/support/helpers/display_helper.rb create mode 100644 cucumber/features/support/helpers/exec_helper.rb create mode 100644 cucumber/features/support/helpers/firewall_helper.rb create mode 100644 cucumber/features/support/helpers/misc_helpers.rb create mode 100644 cucumber/features/support/helpers/sikuli_helper.rb create mode 100644 cucumber/features/support/helpers/sniffing_helper.rb create mode 100644 cucumber/features/support/helpers/sshd_helper.rb create mode 100644 cucumber/features/support/helpers/storage_helper.rb create mode 100644 cucumber/features/support/helpers/vm_helper.rb (limited to 'cucumber/features/support/helpers') diff --git a/cucumber/features/support/helpers/chatbot_helper.rb b/cucumber/features/support/helpers/chatbot_helper.rb new file mode 100644 index 00000000..23ce3e1a --- /dev/null +++ b/cucumber/features/support/helpers/chatbot_helper.rb @@ -0,0 +1,59 @@ +require 'tempfile' + +class ChatBot + + def initialize(account, password, otr_key, opts = Hash.new) + @account = account + @password = password + @otr_key = otr_key + @opts = opts + @pid = nil + @otr_key_file = nil + end + + def start + @otr_key_file = Tempfile.new("otr_key.", $config["TMPDIR"]) + @otr_key_file << @otr_key + @otr_key_file.close + + cmd_helper(['/usr/bin/convertkey', @otr_key_file.path]) + cmd_helper(["mv", "#{@otr_key_file.path}3", @otr_key_file.path]) + + cmd = [ + "#{GIT_DIR}/features/scripts/otr-bot.py", + @account, + @password, + @otr_key_file.path + ] + cmd += ["--connect-server", @opts["connect_server"]] if @opts["connect_server"] + cmd += ["--auto-join"] + @opts["auto_join"] if @opts["auto_join"] + cmd += ["--log-file", DEBUG_LOG_PSEUDO_FIFO] + + job = IO.popen(cmd) + @pid = job.pid + end + + def stop + @otr_key_file.delete + begin + Process.kill("TERM", @pid) + rescue + # noop + end + end + + def active? + begin + ret = Process.kill(0, @pid) + rescue Errno::ESRCH => e + if e.message == "No such process" + return false + else + raise e + end + end + assert_equal(1, ret, "This shouldn't happen") + return true + end + +end diff --git a/cucumber/features/support/helpers/ctcp_helper.rb b/cucumber/features/support/helpers/ctcp_helper.rb new file mode 100644 index 00000000..ee5180ab --- /dev/null +++ b/cucumber/features/support/helpers/ctcp_helper.rb @@ -0,0 +1,126 @@ +require 'net/irc' +require 'timeout' + +class CtcpChecker < Net::IRC::Client + + CTCP_SPAM_DELAY = 5 + + # `spam_target`: the nickname of the IRC user to CTCP spam. 
+ # `ctcp_cmds`: the Array of CTCP commands to send. + # `expected_ctcp_replies`: Hash where the keys are the exact set of replies + # we expect, and their values a regex the reply data must match. + def initialize(host, port, spam_target, ctcp_cmds, expected_ctcp_replies) + @spam_target = spam_target + @ctcp_cmds = ctcp_cmds + @expected_ctcp_replies = expected_ctcp_replies + nickname = self.class.random_irc_nickname + opts = { + :nick => nickname, + :user => nickname, + :real => nickname, + } + opts[:logger] = Logger.new(DEBUG_LOG_PSEUDO_FIFO) + super(host, port, opts) + end + + # Makes sure that only the expected CTCP replies are received. + def verify_ctcp_responses + @sent_ctcp_cmds = Set.new + @received_ctcp_replies = Set.new + + # Give 60 seconds for connecting to the server and other overhead + # beyond the expected time to spam all CTCP commands. + expected_ctcp_spam_time = @ctcp_cmds.length * CTCP_SPAM_DELAY + timeout = expected_ctcp_spam_time + 60 + + begin + Timeout::timeout(timeout) do + start + end + rescue Timeout::Error + # Do nothing as we'll check for errors below. + ensure + finish + end + + ctcp_cmds_not_sent = @ctcp_cmds - @sent_ctcp_cmds.to_a + expected_ctcp_replies_not_received = + @expected_ctcp_replies.keys - @received_ctcp_replies.to_a + + if !ctcp_cmds_not_sent.empty? || !expected_ctcp_replies_not_received.empty? + raise "Failed to spam all CTCP commands and receive the expected " + + "replies within #{timeout} seconds.\n" + + (ctcp_cmds_not_sent.empty? ? "" : + "CTCP commands not sent: #{ctcp_cmds_not_sent}\n") + + (expected_ctcp_replies_not_received.empty? ? "" : + "Expected CTCP replies not received: " + + expected_ctcp_replies_not_received.to_s) + end + + end + + # Generate a random IRC nickname, in this case an alpha-numeric + # string with length 10 to 15. To make it legal, the first character + # is forced to be alpha. + def self.random_irc_nickname + random_alpha_string(1) + random_alnum_string(9, 14) + end + + def spam(spam_target) + post(NOTICE, spam_target, "Hi! I'm gonna test your CTCP capabilities now.") + @ctcp_cmds.each do |cmd| + sleep CTCP_SPAM_DELAY + full_cmd = cmd + case cmd + when "PING" + full_cmd += " #{Time.now.to_i}" + when "ACTION" + full_cmd += " barfs on the floor." + when "ERRMSG" + full_cmd += " Pidgin should not respond to this." + end + post(PRIVMSG, spam_target, ctcp_encode(full_cmd)) + @sent_ctcp_cmds << cmd + end + end + + def on_rpl_welcome(m) + super + Thread.new { spam(@spam_target) } + end + + def on_message(m) + if m.command == ERR_NICKNAMEINUSE + finish + new_nick = self.class.random_irc_nickname + @opts.marshal_load({ + :nick => new_nick, + :user => new_nick, + :real => new_nick, + }) + start + return + end + + if m.ctcp? 
and /^:#{Regexp.escape(@spam_target)}!/.match(m) + m.ctcps.each do |ctcp_reply| + reply_type, _, reply_data = ctcp_reply.partition(" ") + if @expected_ctcp_replies.has_key?(reply_type) + if @expected_ctcp_replies[reply_type].match(reply_data) + @received_ctcp_replies << reply_type + else + raise "Received expected CTCP reply '#{reply_type}' but with " + + "unexpected data '#{reply_data}' " + end + else + raise "Received unexpected CTCP reply '#{reply_type}' with " + + "data '#{reply_data}'" + end + end + end + if Set.new(@ctcp_cmds) == @sent_ctcp_cmds && \ + Set.new(@expected_ctcp_replies.keys) == @received_ctcp_replies + finish + end + end +end diff --git a/cucumber/features/support/helpers/display_helper.rb b/cucumber/features/support/helpers/display_helper.rb new file mode 100644 index 00000000..b4dce733 --- /dev/null +++ b/cucumber/features/support/helpers/display_helper.rb @@ -0,0 +1,48 @@ + +class Display + + def initialize(domain, x_display) + @domain = domain + @x_display = x_display + end + + def active? + p = IO.popen(["xprop", "-display", @x_display, + "-name", "#{@domain} (1) - Virt Viewer", + :err => ["/dev/null", "w"]]) + Process.wait(p.pid) + $?.success? + end + + def start + @virtviewer = IO.popen(["virt-viewer", "--direct", + "--kiosk", + "--reconnect", + "--connect", "qemu:///system", + "--display", @x_display, + @domain, + :err => ["/dev/null", "w"]]) + # We wait for the display to be active to not lose actions + # (e.g. key presses via sikuli) that come immediately after + # starting (or restoring) a vm + try_for(20, { :delay => 0.1, :msg => "virt-viewer failed to start"}) { + active? + } + end + + def stop + return if @virtviewer.nil? + Process.kill("TERM", @virtviewer.pid) + @virtviewer.close + rescue IOError + # IO.pid throws this if the process wasn't started yet. Possibly + # there's a race when doing a start() and then quickly running + # stop(). + end + + def restart + stop + start + end + +end diff --git a/cucumber/features/support/helpers/exec_helper.rb b/cucumber/features/support/helpers/exec_helper.rb new file mode 100644 index 00000000..14e12269 --- /dev/null +++ b/cucumber/features/support/helpers/exec_helper.rb @@ -0,0 +1,79 @@ +require 'json' +require 'socket' +require 'io/wait' + +class VMCommand + + attr_reader :cmd, :returncode, :stdout, :stderr + + def initialize(vm, cmd, options = {}) + @cmd = cmd + @returncode, @stdout, @stderr = VMCommand.execute(vm, cmd, options) + end + + def VMCommand.wait_until_remote_shell_is_up(vm, timeout = 90) + try_for(timeout, :msg => "Remote shell seems to be down") do + sleep(20) + Timeout::timeout(10) do + VMCommand.execute(vm, "echo 'true'") + end + end + end + + # The parameter `cmd` cannot contain newlines. Separate multiple + # commands using ";" instead. + # If `:spawn` is false the server will block until it has finished + # executing `cmd`. If it's true the server won't block, and the + # response will always be [0, "", ""] (only used as an + # ACK). execute() will always block until a response is received, + # though. Spawning is useful when starting processes in the + # background (or running scripts that does the same) like our + # onioncircuits wrapper, or any application we want to interact with. + def VMCommand.execute(vm, cmd, options = {}) + options[:user] ||= "root" + options[:spawn] ||= false + type = options[:spawn] ? 
"spawn" : "call" + socket = TCPSocket.new("127.0.0.1", vm.get_remote_shell_port) + debug_log("#{type}ing as #{options[:user]}: #{cmd}") + begin + #socket.puts(JSON.dump([type, options[:user], cmd])) + socket.puts( "\n") + sleep(1) + socket.puts( "\003") + sleep(1) + socket.puts( cmd + "\n") + sleep(1) + while socket.ready? + s = socket.readline(sep = "\n").chomp("\n") + debug_log("#{type} read: #{s}") if not(options[:spawn]) + if ('true' == s) then + break + end + end + ensure + socket.close + end + if ('true' == s) + return true + else + return VMCommand.execute(vm, cmd, options) + end + end + + def success? + return @returncode == 0 + end + + def failure? + return not(success?) + end + + def to_s + "Return status: #{@returncode}\n" + + "STDOUT:\n" + + @stdout + + "STDERR:\n" + + @stderr + end + +end diff --git a/cucumber/features/support/helpers/firewall_helper.rb b/cucumber/features/support/helpers/firewall_helper.rb new file mode 100644 index 00000000..fce363c5 --- /dev/null +++ b/cucumber/features/support/helpers/firewall_helper.rb @@ -0,0 +1,121 @@ +require 'packetfu' +require 'ipaddr' + +# Extent IPAddr with a private/public address space checks +class IPAddr + PrivateIPv4Ranges = [ + IPAddr.new("10.0.0.0/8"), + IPAddr.new("172.16.0.0/12"), + IPAddr.new("192.168.0.0/16"), + IPAddr.new("255.255.255.255/32") + ] + + PrivateIPv6Ranges = [ + IPAddr.new("fc00::/7") + ] + + def private? + private_ranges = self.ipv4? ? PrivateIPv4Ranges : PrivateIPv6Ranges + private_ranges.any? { |range| range.include?(self) } + end + + def public? + !private? + end +end + +class FirewallLeakCheck + attr_reader :ipv4_tcp_leaks, :ipv4_nontcp_leaks, :ipv6_leaks, :nonip_leaks, :mac_leaks + + def initialize(pcap_file, options = {}) + options[:accepted_hosts] ||= [] + options[:ignore_lan] ||= true + @pcap_file = pcap_file + packets = PacketFu::PcapFile.new.file_to_array(:filename => @pcap_file) + mac_leaks = Set.new + ipv4_tcp_packets = [] + ipv4_nontcp_packets = [] + ipv6_packets = [] + nonip_packets = [] + packets.each do |p| + if PacketFu::EthPacket.can_parse?(p) + packet = PacketFu::EthPacket.parse(p) + mac_leaks << packet.eth_saddr + mac_leaks << packet.eth_daddr + end + + if PacketFu::TCPPacket.can_parse?(p) + ipv4_tcp_packets << PacketFu::TCPPacket.parse(p) + elsif PacketFu::IPPacket.can_parse?(p) + ipv4_nontcp_packets << PacketFu::IPPacket.parse(p) + elsif PacketFu::IPv6Packet.can_parse?(p) + ipv6_packets << PacketFu::IPv6Packet.parse(p) + elsif PacketFu::Packet.can_parse?(p) + nonip_packets << PacketFu::Packet.parse(p) + else + save_pcap_file + raise "Found something in the pcap file that cannot be parsed" + end + end + ipv4_tcp_hosts = filter_hosts_from_ippackets(ipv4_tcp_packets, + options[:ignore_lan]) + accepted = Set.new(options[:accepted_hosts]) + @mac_leaks = mac_leaks + @ipv4_tcp_leaks = ipv4_tcp_hosts.select { |host| !accepted.member?(host) } + @ipv4_nontcp_leaks = filter_hosts_from_ippackets(ipv4_nontcp_packets, + options[:ignore_lan]) + @ipv6_leaks = filter_hosts_from_ippackets(ipv6_packets, + options[:ignore_lan]) + @nonip_leaks = nonip_packets + end + + def save_pcap_file + save_failure_artifact("Network capture", @pcap_file) + end + + # Returns a list of all unique destination IP addresses found in + # `packets`. Exclude LAN hosts if ignore_lan is set. 
+ def filter_hosts_from_ippackets(packets, ignore_lan) + hosts = [] + packets.each do |p| + candidate = nil + if p.kind_of?(PacketFu::IPPacket) + candidate = p.ip_daddr + elsif p.kind_of?(PacketFu::IPv6Packet) + candidate = p.ipv6_header.ipv6_daddr + else + save_pcap_file + raise "Expected an IP{v4,v6} packet, but got something else:\n" + + p.peek_format + end + if candidate != nil and (not(ignore_lan) or IPAddr.new(candidate).public?) + hosts << candidate + end + end + hosts.uniq + end + + def assert_no_leaks + err = "" + if !@ipv4_tcp_leaks.empty? + err += "The following IPv4 TCP non-Tor Internet hosts were " + + "contacted:\n" + ipv4_tcp_leaks.join("\n") + end + if !@ipv4_nontcp_leaks.empty? + err += "The following IPv4 non-TCP Internet hosts were contacted:\n" + + ipv4_nontcp_leaks.join("\n") + end + if !@ipv6_leaks.empty? + err += "The following IPv6 Internet hosts were contacted:\n" + + ipv6_leaks.join("\n") + end + if !@nonip_leaks.empty? + err += "Some non-IP packets were sent\n" + end + if !err.empty? + save_pcap_file + raise err + end + end + +end diff --git a/cucumber/features/support/helpers/misc_helpers.rb b/cucumber/features/support/helpers/misc_helpers.rb new file mode 100644 index 00000000..7e09411f --- /dev/null +++ b/cucumber/features/support/helpers/misc_helpers.rb @@ -0,0 +1,253 @@ +require 'date' +require 'timeout' +require 'test/unit' + +# Test::Unit adds an at_exit hook which, among other things, consumes +# the command-line arguments that were intended for cucumber. If +# e.g. `--format` was passed it will throw an error since it's not a +# valid option for Test::Unit, and it throwing an error at this time +# (at_exit) will make Cucumber think it failed and consequently exit +# with an error. Fooling Test::Unit that this hook has already run +# works around this craziness. +Test::Unit.run = true + +# Make all the assert_* methods easily accessible in any context. +include Test::Unit::Assertions + +def assert_vmcommand_success(p, msg = nil) + assert(p.success?, msg.nil? ? "Command failed: #{p.cmd}\n" + \ + "error code: #{p.returncode}\n" \ + "stderr: #{p.stderr}" : \ + msg) +end + +# It's forbidden to throw this exception (or subclasses) in anything +# but try_for() below. Just don't use it anywhere else! +class UniqueTryForTimeoutError < Exception +end + +# Call block (ignoring any exceptions it may throw) repeatedly with +# one second breaks until it returns true, or until `timeout` seconds have +# passed when we throw a Timeout::Error exception. +def try_for(timeout, options = {}) + options[:delay] ||= 1 + last_exception = nil + # Create a unique exception used only for this particular try_for + # call's Timeout to allow nested try_for:s. If we used the same one, + # the innermost try_for would catch all outer ones', creating a + # really strange situation. + unique_timeout_exception = Class.new(UniqueTryForTimeoutError) + Timeout::timeout(timeout, unique_timeout_exception) do + loop do + begin + return if yield + rescue NameError, UniqueTryForTimeoutError => e + # NameError most likely means typos, and hiding that is rarely + # (never?) a good idea, so we rethrow them. See below why we + # also rethrow *all* the unique exceptions. + raise e + rescue Exception => e + # All other exceptions are ignored while trying the + # block. Well we save the last exception so we can print it in + # case of a timeout. 
+ last_exception = e + end + sleep options[:delay] + end + end + # At this point the block above either succeeded and we'll return, + # or we are throwing an exception. If the latter, we either have a + # NameError that we'll not catch (and will any try_for below us in + # the stack), or we have a unique exception. That can mean one of + # two things: + # 1. it's the one unique to this try_for, and in that case we'll + # catch it, rethrowing it as something that will be ignored by + # inside the blocks of all try_for:s below us in the stack. + # 2. it's an exception unique to another try_for. Assuming that we + # do not throw the unique exceptions in any other place or way + # than we do it in this function, this means that there is a + # try_for below us in the stack to which this exception must be + # unique to. + # Let 1 be the base step, and 2 the inductive step, and we sort of + # an inductive proof for the correctness of try_for when it's + # nested. It shows that for an infinite stack of try_for:s, any of + # the unique exceptions will be caught only by the try_for instance + # it is unique to, and all try_for:s in between will ignore it so it + # ends up there immediately. +rescue unique_timeout_exception => e + msg = options[:msg] || 'try_for() timeout expired' + if last_exception + msg += "\nLast ignored exception was: " + + "#{last_exception.class}: #{last_exception}" + end + raise Timeout::Error.new(msg) +end + +class TorFailure < StandardError +end + +class MaxRetriesFailure < StandardError +end + +# This will retry the block up to MAX_NEW_TOR_CIRCUIT_RETRIES +# times. The block must raise an exception for a run to be considered +# as a failure. After a failure recovery_proc will be called (if +# given) and the intention with it is to bring us back to the state +# expected by the block, so it can be retried. +def retry_tor(recovery_proc = nil, &block) + tor_recovery_proc = Proc.new do + force_new_tor_circuit + recovery_proc.call if recovery_proc + end + + retry_action($config['MAX_NEW_TOR_CIRCUIT_RETRIES'], + :recovery_proc => tor_recovery_proc, + :operation_name => 'Tor operation', &block) +end + +def retry_i2p(recovery_proc = nil, &block) + retry_action(15, :recovery_proc => recovery_proc, + :operation_name => 'I2P operation', &block) +end + +def retry_action(max_retries, options = {}, &block) + assert(max_retries.is_a?(Integer), "max_retries must be an integer") + options[:recovery_proc] ||= nil + options[:operation_name] ||= 'Operation' + + retries = 1 + loop do + begin + block.call + return + rescue Exception => e + if retries <= max_retries + debug_log("#{options[:operation_name]} failed (Try #{retries} of " + + "#{max_retries}) with:\n" + + "#{e.class}: #{e.message}") + options[:recovery_proc].call if options[:recovery_proc] + retries += 1 + else + raise MaxRetriesFailure.new("#{options[:operation_name]} failed (despite retrying " + + "#{max_retries} times) with\n" + + "#{e.class}: #{e.message}") + end + end + end +end + +def wait_until_tor_is_working + try_for(270) { $vm.execute('/usr/local/sbin/tor-has-bootstrapped').success? } +rescue Timeout::Error => e + c = $vm.execute("journalctl SYSLOG_IDENTIFIER=restart-tor") + if c.success? 
+    debug_log("From the journal:\n" + c.stdout.sub(/^/, " "))
+  else
+    debug_log("Nothing was in the journal about 'restart-tor'")
+  end
+  raise e
+end
+
+def convert_bytes_mod(unit)
+  case unit
+  when "bytes", "b" then mod = 1
+  when "KB"         then mod = 10**3
+  when "k", "KiB"   then mod = 2**10
+  when "MB"         then mod = 10**6
+  when "M", "MiB"   then mod = 2**20
+  when "GB"         then mod = 10**9
+  when "G", "GiB"   then mod = 2**30
+  when "TB"         then mod = 10**12
+  when "T", "TiB"   then mod = 2**40
+  else
+    raise "invalid memory unit '#{unit}'"
+  end
+  return mod
+end
+
+def convert_to_bytes(size, unit)
+  return (size*convert_bytes_mod(unit)).to_i
+end
+
+def convert_to_MiB(size, unit)
+  return (size*convert_bytes_mod(unit) / (2**20)).to_i
+end
+
+def convert_from_bytes(size, unit)
+  return size.to_f/convert_bytes_mod(unit).to_f
+end
+
+def cmd_helper(cmd)
+  if cmd.instance_of?(Array)
+    cmd << {:err => [:child, :out]}
+  elsif cmd.instance_of?(String)
+    cmd += " 2>&1"
+  end
+  IO.popen(cmd) do |p|
+    out = p.readlines.join("\n")
+    p.close
+    ret = $?
+    assert_equal(0, ret, "Command failed (returned #{ret}): #{cmd}:\n#{out}")
+    return out
+  end
+end
+
+# This command will grab all router IP addresses from the Tor
+# consensus in the VM + the hardcoded TOR_AUTHORITIES.
+def get_all_tor_nodes
+  cmd = 'awk "/^r/ { print \$6 }" /var/lib/tor/cached-microdesc-consensus'
+  $vm.execute(cmd).stdout.chomp.split("\n") + TOR_AUTHORITIES
+end
+
+def get_free_space(machine, path)
+  case machine
+  when 'host'
+    assert(File.exists?(path), "Path '#{path}' not found on #{machine}.")
+    free = cmd_helper(["df", path])
+  when 'guest'
+    assert($vm.file_exist?(path), "Path '#{path}' not found on #{machine}.")
+    free = $vm.execute_successfully("df '#{path}'")
+  else
+    raise "Unsupported machine type #{machine} passed."
+ end + output = free.split("\n").last + return output.match(/[^\s]\s+[0-9]+\s+[0-9]+\s+([0-9]+)\s+.*/)[1].chomp.to_i +end + +def random_string_from_set(set, min_len, max_len) + len = (min_len..max_len).to_a.sample + len ||= min_len + (0..len-1).map { |n| set.sample }.join +end + +def random_alpha_string(min_len, max_len = 0) + alpha_set = ('A'..'Z').to_a + ('a'..'z').to_a + random_string_from_set(alpha_set, min_len, max_len) +end + +def random_alnum_string(min_len, max_len = 0) + alnum_set = ('A'..'Z').to_a + ('a'..'z').to_a + (0..9).to_a.map { |n| n.to_s } + random_string_from_set(alnum_set, min_len, max_len) +end + +# Sanitize the filename from unix-hostile filename characters +def sanitize_filename(filename, options = {}) + options[:replacement] ||= '_' + bad_unix_filename_chars = Regexp.new("[^A-Za-z0-9_\\-.,+:]") + filename.gsub(bad_unix_filename_chars, options[:replacement]) +end + +def info_log_artifact_location(type, path) + if $config['ARTIFACTS_BASE_URI'] + # Remove any trailing slashes, we'll add one ourselves + base_url = $config['ARTIFACTS_BASE_URI'].gsub(/\/*$/, "") + path = "#{base_url}/#{File.basename(path)}" + end + info_log("#{type.capitalize}: #{path}") +end + +def pause(message = "Paused") + STDERR.puts + STDERR.puts "#{message} (Press ENTER to continue!)" + STDIN.gets +end diff --git a/cucumber/features/support/helpers/sikuli_helper.rb b/cucumber/features/support/helpers/sikuli_helper.rb new file mode 100644 index 00000000..486b0e2e --- /dev/null +++ b/cucumber/features/support/helpers/sikuli_helper.rb @@ -0,0 +1,213 @@ +require 'rjb' +require 'rjbextension' +$LOAD_PATH << ENV['SIKULI_HOME'] +require 'sikuli-script.jar' +Rjb::load + +package_members = [ + "java.io.FileOutputStream", + "java.io.PrintStream", + "java.lang.System", + "org.sikuli.script.Finder", + "org.sikuli.script.Key", + "org.sikuli.script.KeyModifier", + "org.sikuli.script.Location", + "org.sikuli.script.Match", + "org.sikuli.script.Pattern", + "org.sikuli.script.Region", + "org.sikuli.script.Screen", + "org.sikuli.script.Settings", + ] + +translations = Hash[ + "org.sikuli.script", "Sikuli", + "java.lang", "Java::Lang", + "java.io", "Java::Io", + ] + +for p in package_members + imported_class = Rjb::import(p) + package, ignore, class_name = p.rpartition(".") + next if ! translations.include? package + mod_name = translations[package] + mod = mod_name.split("::").inject(Object) do |parent_obj, child_name| + if parent_obj.const_defined?(child_name, false) + parent_obj.const_get(child_name, false) + else + child_obj = Module.new + parent_obj.const_set(child_name, child_obj) + end + end + mod.const_set(class_name, imported_class) +end + +# Bind Java's stdout to debug_log() via our magical pseudo fifo +# logger. +def bind_java_to_pseudo_fifo_logger + file_output_stream = Java::Io::FileOutputStream.new(DEBUG_LOG_PSEUDO_FIFO) + print_stream = Java::Io::PrintStream.new(file_output_stream) + Java::Lang::System.setOut(print_stream) +end + +def findfailed_hook(pic) + pause("FindFailed for: '#{pic}'") +end + +# Since rjb imports Java classes without creating a corresponding +# Ruby class (it's just an instance of Rjb_JavaProxy) we can't +# monkey patch any class, so additional methods must be added +# to each Screen object. +# +# All Java classes' methods are immediately available in the proxied +# Ruby classes, but care has to be given to match their type. For a +# list of methods, see: . +# The type "PRSML" is a union of Pattern, Region, Screen, Match and +# Location. 
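+# For instance, the wrapped Screen#wait defined further down is still
+# called the usual way, e.g. screen.wait('some_image.png', 10) (the image
+# name here is made up); the wrapper only adds a single retry after a
+# FindFailed when SIKULI_RETRY_FINDFAILED is set.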
+# +# Also, due to limitations in Ruby's syntax we can't do: +# def Sikuli::Screen.new +# so we work around it with the following vairable. +sikuli_script_proxy = Sikuli::Screen +$_original_sikuli_screen_new ||= Sikuli::Screen.method :new + +# For waitAny()/findAny() we are forced to throw this exception since +# Rjb::throw doesn't block until the Java exception has been received +# by Ruby, so strange things can happen. +class FindAnyFailed < StandardError +end + +def sikuli_script_proxy.new(*args) + s = $_original_sikuli_screen_new.call(*args) + + if $config["SIKULI_RETRY_FINDFAILED"] + # The usage of `_invoke()` below exemplifies how one can wrap + # around Java objects' methods when they're imported using RJB. It + # isn't pretty. The seconds argument is the parameter signature, + # which can be obtained by creating the intended Java object using + # RJB, and then calling its `java_methods` method. + + def s.wait(pic, time) + self._invoke('wait', 'Ljava.lang.Object;D', pic, time) + rescue FindFailed => e + findfailed_hook(pic) + self._invoke('wait', 'Ljava.lang.Object;D', pic, time) + end + + def s.find(pic) + self._invoke('find', 'Ljava.lang.Object;', pic) + rescue FindFailed => e + findfailed_hook(pic) + self._invoke('find', 'Ljava.lang.Object;', pic) + end + + def s.waitVanish(pic, time) + self._invoke('waitVanish', 'Ljava.lang.Object;D', pic, time) + rescue FindFailed => e + findfailed_hook(pic) + self._invoke('waitVanish', 'Ljava.lang.Object;D', pic, time) + end + + def s.click(pic) + self._invoke('click', 'Ljava.lang.Object;', pic) + rescue FindFailed => e + findfailed_hook(pic) + self._invoke('click', 'Ljava.lang.Object;', pic) + end + end + + def s.click_point(x, y) + self.click(Sikuli::Location.new(x, y)) + end + + def s.doubleClick_point(x, y) + self.doubleClick(Sikuli::Location.new(x, y)) + end + + def s.click_mid_right_edge(pic) + r = self.find(pic) + top_right = r.getTopRight() + x = top_right.getX + y = top_right.getY + r.getH/2 + self.click_point(x, y) + end + + def s.wait_and_click(pic, time) + self.click(self.wait(pic, time)) + end + + def s.wait_and_double_click(pic, time) + self.doubleClick(self.wait(pic, time)) + end + + def s.wait_and_right_click(pic, time) + self.rightClick(self.wait(pic, time)) + end + + def s.wait_and_hover(pic, time) + self.hover(self.wait(pic, time)) + end + + def s.existsAny(images) + images.each do |image| + region = self.exists(image) + return [image, region] if region + end + return nil + end + + def s.findAny(images) + images.each do |image| + begin + return [image, self.find(image)] + rescue FindFailed + # Ignore. We deal we'll throw an appropriate exception after + # having looped through all images and found none of them. + end + end + # If we've reached this point, none of the images could be found. + raise FindAnyFailed.new("can not find any of the images #{images} on the " + + "screen") + end + + def s.waitAny(images, time) + Timeout::timeout(time) do + loop do + result = self.existsAny(images) + return result if result + end + end + rescue Timeout::Error + raise FindAnyFailed.new("can not find any of the images #{images} on the " + + "screen") + end + + def s.hover_point(x, y) + self.hover(Sikuli::Location.new(x, y)) + end + + def s.hide_cursor + self.hover_point(self.w, self.h/2) + end + + s +end + +# Configure sikuli + +# ruby and rjb doesn't play well together when it comes to static +# fields (and possibly methods) so we instantiate and access the field +# via objects instead. 
It actually works inside this file, but when +# it's required from "outside", and the file has been completely +# required, ruby's require method complains that the method for the +# field accessor is missing. +sikuli_settings = Sikuli::Settings.new +sikuli_settings.OcrDataPath = $config["TMPDIR"] +# sikuli_ruby, which we used before, defaulted to 0.9 minimum +# similarity, so all our current images are adapted to that value. +# Also, Sikuli's default of 0.7 is simply too low (many false +# positives). +sikuli_settings.MinSimilarity = 0.9 +sikuli_settings.ActionLogs = true +sikuli_settings.DebugLogs = false +sikuli_settings.InfoLogs = true +sikuli_settings.ProfileLogs = true diff --git a/cucumber/features/support/helpers/sniffing_helper.rb b/cucumber/features/support/helpers/sniffing_helper.rb new file mode 100644 index 00000000..213411eb --- /dev/null +++ b/cucumber/features/support/helpers/sniffing_helper.rb @@ -0,0 +1,43 @@ +# +# Sniffer is a very dumb wrapper to start and stop tcpdumps instances, possibly +# with customized filters. Captured traffic is stored in files whose name +# depends on the sniffer name. The resulting captured packets for each sniffers +# can be accessed as an array through its `packets` method. +# +# Use of more rubyish internal ways to sniff a network like with pcap-able gems +# is waaay to much resource consumming, notmuch reliable and soooo slow. Let's +# not bother too much with that. :) +# +# Should put all that in a Module. + +class Sniffer + + attr_reader :name, :pcap_file, :pid + + def initialize(name, vmnet) + @name = name + @vmnet = vmnet + pcap_name = sanitize_filename("#{name}.pcap") + @pcap_file = "#{$config["TMPDIR"]}/#{pcap_name}" + end + + def capture(filter="not ether src host #{@vmnet.bridge_mac} and not ether proto \\arp and not ether proto \\rarp") + job = IO.popen(["/usr/sbin/tcpdump", "-n", "-i", @vmnet.bridge_name, "-w", + @pcap_file, "-U", filter, :err => ["/dev/null", "w"]]) + @pid = job.pid + end + + def stop + begin + Process.kill("TERM", @pid) + rescue + # noop + end + end + + def clear + if File.exist?(@pcap_file) + File.delete(@pcap_file) + end + end +end diff --git a/cucumber/features/support/helpers/sshd_helper.rb b/cucumber/features/support/helpers/sshd_helper.rb new file mode 100644 index 00000000..2e0069c0 --- /dev/null +++ b/cucumber/features/support/helpers/sshd_helper.rb @@ -0,0 +1,67 @@ +require 'tempfile' + +class SSHServer + def initialize(sshd_host, sshd_port, authorized_keys = nil) + @sshd_host = sshd_host + @sshd_port = sshd_port + @authorized_keys = authorized_keys + @pid = nil + end + + def start + @sshd_key_file = Tempfile.new("ssh_host_rsa_key", $config["TMPDIR"]) + # 'hack' to prevent ssh-keygen from prompting to overwrite the file + File.delete(@sshd_key_file.path) + cmd_helper(['ssh-keygen', '-t', 'rsa', '-N', "", '-f', "#{@sshd_key_file.path}"]) + @sshd_key_file.close + + sshd_config =< e + if e.message == "No such process" + return false + else + raise e + end + end + assert_equal(1, ret, "This shouldn't happen") + return true + end +end diff --git a/cucumber/features/support/helpers/storage_helper.rb b/cucumber/features/support/helpers/storage_helper.rb new file mode 100644 index 00000000..21537a92 --- /dev/null +++ b/cucumber/features/support/helpers/storage_helper.rb @@ -0,0 +1,216 @@ +# Helper class for manipulating VM storage *volumes*, i.e. 
it deals +# only with creation of images and keeps a name => volume path lookup +# table (plugging drives or getting info of plugged devices is done in +# the VM class). We'd like better coupling, but given the ridiculous +# disconnect between Libvirt::StoragePool and Libvirt::Domain (hint: +# they have nothing with each other to do whatsoever) it's what makes +# sense. + +require 'libvirt' +require 'guestfs' +require 'rexml/document' +require 'etc' + +class VMStorage + + def initialize(virt, xml_path) + @virt = virt + @xml_path = xml_path + pool_xml = REXML::Document.new(File.read("#{@xml_path}/storage_pool.xml")) + pool_name = pool_xml.elements['pool/name'].text + @pool_path = "#{$config["TMPDIR"]}/#{pool_name}" + begin + @pool = @virt.lookup_storage_pool_by_name(pool_name) + rescue Libvirt::RetrieveError + @pool = nil + end + if @pool and not(KEEP_SNAPSHOTS) + VMStorage.clear_storage_pool(@pool) + @pool = nil + end + unless @pool + pool_xml.elements['pool/target/path'].text = @pool_path + @pool = @virt.define_storage_pool_xml(pool_xml.to_s) + if not(Dir.exists?(@pool_path)) + # We'd like to use @pool.build, which will just create the + # @pool_path directory, but it does so with root:root as owner + # (at least with libvirt 1.2.21-2). libvirt itself can handle + # that situation, but guestfs (at least with <= + # 1:1.28.12-1+b3) cannot when invoked by a non-root user, + # which we want to support. + FileUtils.mkdir(@pool_path) + FileUtils.chown(nil, 'libvirt-qemu', @pool_path) + FileUtils.chmod("ug+wrx", @pool_path) + end + end + @pool.create unless @pool.active? + @pool.refresh + end + + def VMStorage.clear_storage_pool_volumes(pool) + was_not_active = !pool.active? + if was_not_active + pool.create + end + pool.list_volumes.each do |vol_name| + vol = pool.lookup_volume_by_name(vol_name) + vol.delete + end + if was_not_active + pool.destroy + end + rescue + # Some of the above operations can fail if the pool's path was + # deleted by external means; let's ignore that. + end + + def VMStorage.clear_storage_pool(pool) + VMStorage.clear_storage_pool_volumes(pool) + pool.destroy if pool.active? + pool.undefine + end + + def clear_pool + VMStorage.clear_storage_pool(@pool) + end + + def clear_volumes + VMStorage.clear_storage_pool_volumes(@pool) + end + + def delete_volume(name) + @pool.lookup_volume_by_name(name).delete + end + + def create_new_disk(name, options = {}) + options[:size] ||= 2 + options[:unit] ||= "GiB" + options[:type] ||= "qcow2" + # Require 'slightly' more space to be available to give a bit more leeway + # with rounding, temp file creation, etc. + reserved = 500 + needed = convert_to_MiB(options[:size].to_i, options[:unit]) + avail = convert_to_MiB(get_free_space('host', @pool_path), "KiB") + assert(avail - reserved >= needed, + "Error creating disk \"#{name}\" in \"#{@pool_path}\". 
" \ + "Need #{needed} MiB but only #{avail} MiB is available of " \ + "which #{reserved} MiB is reserved for other temporary files.") + begin + old_vol = @pool.lookup_volume_by_name(name) + rescue Libvirt::RetrieveError + # noop + else + old_vol.delete + end + uid = Etc::getpwnam("libvirt-qemu").uid + gid = Etc::getgrnam("libvirt-qemu").gid + vol_xml = REXML::Document.new(File.read("#{@xml_path}/volume.xml")) + vol_xml.elements['volume/name'].text = name + size_b = convert_to_bytes(options[:size].to_f, options[:unit]) + vol_xml.elements['volume/capacity'].text = size_b.to_s + vol_xml.elements['volume/target/format'].attributes["type"] = options[:type] + vol_xml.elements['volume/target/path'].text = "#{@pool_path}/#{name}" + vol_xml.elements['volume/target/permissions/owner'].text = uid.to_s + vol_xml.elements['volume/target/permissions/group'].text = gid.to_s + vol = @pool.create_volume_xml(vol_xml.to_s) + @pool.refresh + end + + def clone_to_new_disk(from, to) + begin + old_to_vol = @pool.lookup_volume_by_name(to) + rescue Libvirt::RetrieveError + # noop + else + old_to_vol.delete + end + from_vol = @pool.lookup_volume_by_name(from) + xml = REXML::Document.new(from_vol.xml_desc) + pool_path = REXML::Document.new(@pool.xml_desc).elements['pool/target/path'].text + xml.elements['volume/name'].text = to + xml.elements['volume/target/path'].text = "#{pool_path}/#{to}" + @pool.create_volume_xml_from(xml.to_s, from_vol) + end + + def disk_format(name) + vol = @pool.lookup_volume_by_name(name) + vol_xml = REXML::Document.new(vol.xml_desc) + return vol_xml.elements['volume/target/format'].attributes["type"] + end + + def disk_path(name) + @pool.lookup_volume_by_name(name).path + end + + def disk_mklabel(name, parttype) + disk = { + :path => disk_path(name), + :opts => { + :format => disk_format(name) + } + } + guestfs_disk_helper(disk) do |g, disk_handle| + g.part_init(disk_handle, parttype) + end + end + + def disk_mkpartfs(name, parttype, fstype, opts = {}) + opts[:label] ||= nil + opts[:luks_password] ||= nil + disk = { + :path => disk_path(name), + :opts => { + :format => disk_format(name) + } + } + guestfs_disk_helper(disk) do |g, disk_handle| + g.part_disk(disk_handle, parttype) + g.part_set_name(disk_handle, 1, opts[:label]) if opts[:label] + primary_partition = g.list_partitions()[0] + if opts[:luks_password] + g.luks_format(primary_partition, opts[:luks_password], 0) + luks_mapping = File.basename(primary_partition) + "_unlocked" + g.luks_open(primary_partition, opts[:luks_password], luks_mapping) + luks_dev = "/dev/mapper/#{luks_mapping}" + g.mkfs(fstype, luks_dev) + g.luks_close(luks_dev) + else + g.mkfs(fstype, primary_partition) + end + end + end + + def disk_mkswap(name, parttype) + disk = { + :path => disk_path(name), + :opts => { + :format => disk_format(name) + } + } + guestfs_disk_helper(disk) do |g, disk_handle| + g.part_disk(disk_handle, parttype) + primary_partition = g.list_partitions()[0] + g.mkswap(primary_partition) + end + end + + def guestfs_disk_helper(*disks) + assert(block_given?) 
+ g = Guestfs::Guestfs.new() + g.set_trace(1) + message_callback = Proc.new do |event, _, message, _| + debug_log("libguestfs: #{Guestfs.event_to_string(event)}: #{message}") + end + g.set_event_callback(message_callback, + Guestfs::EVENT_TRACE) + g.set_autosync(1) + disks.each do |disk| + g.add_drive_opts(disk[:path], disk[:opts]) + end + g.launch() + yield(g, *g.list_devices()) + ensure + g.close + end + +end diff --git a/cucumber/features/support/helpers/vm_helper.rb b/cucumber/features/support/helpers/vm_helper.rb new file mode 100644 index 00000000..6d7204d4 --- /dev/null +++ b/cucumber/features/support/helpers/vm_helper.rb @@ -0,0 +1,676 @@ +require 'libvirt' +require 'rexml/document' + +class ExecutionFailedInVM < StandardError +end + +class VMNet + + attr_reader :net_name, :net + + def initialize(virt, xml_path) + @virt = virt + @net_name = LIBVIRT_NETWORK_NAME + net_xml = File.read("#{xml_path}/default_net.xml") + rexml = REXML::Document.new(net_xml) + rexml.elements['network'].add_element('name') + rexml.elements['network/name'].text = @net_name + rexml.elements['network'].add_element('uuid') + rexml.elements['network/uuid'].text = LIBVIRT_NETWORK_UUID + update(rexml.to_s) + rescue Exception => e + destroy_and_undefine + raise e + end + + # We lookup by name so we also catch networks from previous test + # suite runs that weren't properly cleaned up (e.g. aborted). + def destroy_and_undefine + begin + old_net = @virt.lookup_network_by_name(@net_name) + old_net.destroy if old_net.active? + old_net.undefine + rescue + end + end + + def update(xml) + destroy_and_undefine + @net = @virt.define_network_xml(xml) + @net.create + end + + def bridge_name + @net.bridge_name + end + + def bridge_ip_addr + net_xml = REXML::Document.new(@net.xml_desc) + IPAddr.new(net_xml.elements['network/ip'].attributes['address']).to_s + end + + def guest_real_mac + net_xml = REXML::Document.new(@net.xml_desc) + net_xml.elements['network/ip/dhcp/host/'].attributes['mac'] + end + + def bridge_mac + File.open("/sys/class/net/#{bridge_name}/address", "rb").read.chomp + end +end + + +class VM + + attr_reader :domain, :display, :vmnet, :storage + + def initialize(virt, xml_path, vmnet, storage, x_display) + @virt = virt + @xml_path = xml_path + @vmnet = vmnet + @storage = storage + @domain_name = LIBVIRT_DOMAIN_NAME + default_domain_xml = File.read("#{@xml_path}/default.xml") + rexml = REXML::Document.new(default_domain_xml) + rexml.elements['domain'].add_element('name') + rexml.elements['domain/name'].text = @domain_name + rexml.elements['domain'].add_element('uuid') + rexml.elements['domain/uuid'].text = LIBVIRT_DOMAIN_UUID + update(rexml.to_s) + @display = Display.new(@domain_name, x_display) + set_cdrom_boot(TAILS_ISO) + plug_network + rescue Exception => e + destroy_and_undefine + raise e + end + + def update(xml) + destroy_and_undefine + @domain = @virt.define_domain_xml(xml) + end + + # We lookup by name so we also catch domains from previous test + # suite runs that weren't properly cleaned up (e.g. aborted). + def destroy_and_undefine + @display.stop if @display && @display.active? + begin + old_domain = @virt.lookup_domain_by_name(@domain_name) + old_domain.destroy if old_domain.active? 
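+      # destroy() only stops the domain if it happens to be running;
+      # undefine() below also removes its libvirt definition, so stale
+      # domains left over from aborted runs are cleaned up completely.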
+ old_domain.undefine + rescue + end + end + + def real_mac + @vmnet.guest_real_mac + end + + def set_hardware_clock(time) + assert(not(is_running?), 'The hardware clock cannot be set when the ' + + 'VM is running') + assert(time.instance_of?(Time), "Argument must be of type 'Time'") + adjustment = (time - Time.now).to_i + domain_rexml = REXML::Document.new(@domain.xml_desc) + clock_rexml_element = domain_rexml.elements['domain'].add_element('clock') + clock_rexml_element.add_attributes('offset' => 'variable', + 'basis' => 'utc', + 'adjustment' => adjustment.to_s) + update(domain_rexml.to_s) + end + + def set_network_link_state(state) + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements['domain/devices/interface/link'].attributes['state'] = state + if is_running? + @domain.update_device(domain_xml.elements['domain/devices/interface'].to_s) + else + update(domain_xml.to_s) + end + end + + def plug_network + set_network_link_state('up') + end + + def unplug_network + set_network_link_state('down') + end + + def set_boot_device(dev) + if is_running? + raise "boot settings can only be set for inactive vms" + end + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements['domain/os/boot'].attributes['dev'] = dev + update(domain_xml.to_s) + end + + def set_cdrom_image(image) + image = nil if image == '' + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements.each('domain/devices/disk') do |e| + if e.attribute('device').to_s == "cdrom" + if image.nil? + e.elements.delete('source') + else + if ! e.elements['source'] + e.add_element('source') + end + e.elements['source'].attributes['file'] = image + end + if is_running? + @domain.update_device(e.to_s) + else + update(domain_xml.to_s) + end + end + end + end + + def remove_cdrom + set_cdrom_image(nil) + rescue Libvirt::Error => e + # While the CD-ROM is removed successfully we still get this + # error, so let's ignore it. + acceptable_error = + "Call to virDomainUpdateDeviceFlags failed: internal error: unable to " + + "execute QEMU command 'eject': (Tray of device '.*' is not open|" + + "Device '.*' is locked)" + raise e if not(Regexp.new(acceptable_error).match(e.to_s)) + end + + def set_cdrom_boot(image) + if is_running? + raise "boot settings can only be set for inactive vms" + end + set_boot_device('cdrom') + set_cdrom_image(image) + end + + def list_disk_devs + ret = [] + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements.each('domain/devices/disk') do |e| + ret << e.elements['target'].attribute('dev').to_s + end + return ret + end + + def plug_drive(name, type) + if disk_plugged?(name) + raise "disk '#{name}' already plugged" + end + removable_usb = nil + case type + when "removable usb", "usb" + type = "usb" + removable_usb = "on" + when "non-removable usb" + type = "usb" + removable_usb = "off" + end + # Get the next free /dev/sdX on guest + letter = 'a' + dev = "sd" + letter + while list_disk_devs.include?(dev) + letter = (letter[0].ord + 1).chr + dev = "sd" + letter + end + assert letter <= 'z' + + xml = REXML::Document.new(File.read("#{@xml_path}/disk.xml")) + xml.elements['disk/source'].attributes['file'] = @storage.disk_path(name) + xml.elements['disk/driver'].attributes['type'] = @storage.disk_format(name) + xml.elements['disk/target'].attributes['dev'] = dev + xml.elements['disk/target'].attributes['bus'] = type + xml.elements['disk/target'].attributes['removable'] = removable_usb if removable_usb + + if is_running? 
+ @domain.attach_device(xml.to_s) + else + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements['domain/devices'].add_element(xml) + update(domain_xml.to_s) + end + end + + def disk_xml_desc(name) + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements.each('domain/devices/disk') do |e| + begin + if e.elements['source'].attribute('file').to_s == @storage.disk_path(name) + return e.to_s + end + rescue + next + end + end + return nil + end + + def disk_rexml_desc(name) + xml = disk_xml_desc(name) + if xml + return REXML::Document.new(xml) + else + return nil + end + end + + def unplug_drive(name) + xml = disk_xml_desc(name) + @domain.detach_device(xml) + end + + def disk_type(dev) + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements.each('domain/devices/disk') do |e| + if e.elements['target'].attribute('dev').to_s == dev + return e.elements['driver'].attribute('type').to_s + end + end + raise "No such disk device '#{dev}'" + end + + def disk_dev(name) + rexml = disk_rexml_desc(name) or return nil + return "/dev/" + rexml.elements['disk/target'].attribute('dev').to_s + end + + def disk_name(dev) + dev = File.basename(dev) + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements.each('domain/devices/disk') do |e| + if /^#{e.elements['target'].attribute('dev').to_s}/.match(dev) + return File.basename(e.elements['source'].attribute('file').to_s) + end + end + raise "No such disk device '#{dev}'" + end + + def udisks_disk_dev(name) + return disk_dev(name).gsub('/dev/', '/org/freedesktop/UDisks/devices/') + end + + def disk_detected?(name) + dev = disk_dev(name) or return false + return execute("test -b #{dev}").success? + end + + def disk_plugged?(name) + return not(disk_xml_desc(name).nil?) + end + + def set_disk_boot(name, type) + if is_running? + raise "boot settings can only be set for inactive vms" + end + plug_drive(name, type) if not(disk_plugged?(name)) + set_boot_device('hd') + # For some reason setting the boot device doesn't prevent cdrom + # boot unless it's empty + remove_cdrom + end + + # XXX-9p: Shares don't work together with snapshot save+restore. See + # XXX-9p in common_steps.rb for more information. + def add_share(source, tag) + if is_running? + raise "shares can only be added to inactive vms" + end + # The complete source directory must be group readable by the user + # running the virtual machine, and world readable so the user inside + # the VM can access it (since we use the passthrough security model). + FileUtils.chown_R(nil, "libvirt-qemu", source) + FileUtils.chmod_R("go+rX", source) + xml = REXML::Document.new(File.read("#{@xml_path}/fs_share.xml")) + xml.elements['filesystem/source'].attributes['dir'] = source + xml.elements['filesystem/target'].attributes['dir'] = tag + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements['domain/devices'].add_element(xml) + update(domain_xml.to_s) + end + + def list_shares + list = [] + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements.each('domain/devices/filesystem') do |e| + list << e.elements['target'].attribute('dir').to_s + end + return list + end + + def set_ram_size(size, unit = "KiB") + raise "System memory can only be added to inactive vms" if is_running? 
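+    # Both the <memory> and <currentMemory> elements are rewritten below
+    # so that the new size (e.g. set_ram_size(2, "GiB")) takes effect when
+    # update() redefines the domain at the end of this method.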
+ domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements['domain/memory'].text = size + domain_xml.elements['domain/memory'].attributes['unit'] = unit + domain_xml.elements['domain/currentMemory'].text = size + domain_xml.elements['domain/currentMemory'].attributes['unit'] = unit + update(domain_xml.to_s) + end + + def get_ram_size_in_bytes + domain_xml = REXML::Document.new(@domain.xml_desc) + unit = domain_xml.elements['domain/memory'].attribute('unit').to_s + size = domain_xml.elements['domain/memory'].text.to_i + return convert_to_bytes(size, unit) + end + + def set_arch(arch) + raise "System architecture can only be set to inactive vms" if is_running? + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements['domain/os/type'].attributes['arch'] = arch + update(domain_xml.to_s) + end + + def add_hypervisor_feature(feature) + raise "Hypervisor features can only be added to inactive vms" if is_running? + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements['domain/features'].add_element(feature) + update(domain_xml.to_s) + end + + def drop_hypervisor_feature(feature) + raise "Hypervisor features can only be fropped from inactive vms" if is_running? + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements['domain/features'].delete_element(feature) + update(domain_xml.to_s) + end + + def disable_pae_workaround + # add_hypervisor_feature("nonpae") results in a libvirt error, and + # drop_hypervisor_feature("pae") alone won't disable pae. Hence we + # use this workaround. + xml = < + + + +EOF + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements['domain'].add_element(REXML::Document.new(xml)) + update(domain_xml.to_s) + end + + def set_os_loader(type) + if is_running? + raise "boot settings can only be set for inactive vms" + end + if type == 'UEFI' + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements['domain/os'].add_element(REXML::Document.new( + '/usr/share/ovmf/OVMF.fd' + )) + update(domain_xml.to_s) + else + raise "unsupported OS loader type" + end + end + + def is_running? + begin + return @domain.active? + rescue + return false + end + end + + def execute(cmd, options = {}) + options[:user] ||= "root" + options[:spawn] ||= false + if options[:libs] + libs = options[:libs] + options.delete(:libs) + libs = [libs] if not(libs.methods.include? :map) + cmds = libs.map do |lib_name| + ". /usr/local/lib/tails-shell-library/#{lib_name}.sh" + end + cmds << cmd + cmd = cmds.join(" && ") + end + return VMCommand.new(self, cmd, options) + end + + def execute_successfully(*args) + p = execute(*args) + begin + assert_vmcommand_success(p) + rescue Test::Unit::AssertionFailedError => e + raise ExecutionFailedInVM.new(e) + end + return p + end + + def spawn(cmd, options = {}) + options[:spawn] = true + return execute(cmd, options) + end + + def wait_until_remote_shell_is_up(timeout = 90) + VMCommand.wait_until_remote_shell_is_up(self, timeout) + end + + def host_to_guest_time_sync + host_time= DateTime.now.strftime("%s").to_s + execute("date -s '@#{host_time}'").success? + end + + def has_network? + return execute("/sbin/ifconfig eth0 | grep -q 'inet addr'").success? + end + + def has_process?(process) + return execute("pidof -x -o '%PPID' " + process).success? 
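+    # pidof -x also matches scripts, and -o '%PPID' omits pidof's own
+    # parent process (the invoking shell) from the matches; pidof() just
+    # below reuses the same flags.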
+ end + + def pidof(process) + return execute("pidof -x -o '%PPID' " + process).stdout.chomp.split + end + + def select_virtual_desktop(desktop_number, user = LIVE_USER) + assert(desktop_number >= 0 && desktop_number <=3, + "Only values between 0 and 3 are valid virtual desktop numbers") + execute_successfully( + "xdotool set_desktop '#{desktop_number}'", + :user => user + ) + end + + def focus_window(window_title, user = LIVE_USER) + def do_focus(window_title, user) + execute_successfully( + "xdotool search --name '#{window_title}' windowactivate --sync", + :user => user + ) + end + + begin + do_focus(window_title, user) + rescue ExecutionFailedInVM + # Often when xdotool fails to focus a window it'll work when retried + # after redrawing the screen. Switching to a new virtual desktop then + # back seems to be a reliable way to handle this. + select_virtual_desktop(3) + select_virtual_desktop(0) + sleep 5 # there aren't any visual indicators which can be used here + do_focus(window_title, user) + end + end + + def file_exist?(file) + execute("test -e '#{file}'").success? + end + + def directory_exist?(directory) + execute("test -d '#{directory}'").success? + end + + def file_content(file, user = 'root') + # We don't quote #{file} on purpose: we sometimes pass environment variables + # or globs that we want to be interpreted by the shell. + cmd = execute("cat #{file}", :user => user) + assert(cmd.success?, + "Could not cat '#{file}':\n#{cmd.stdout}\n#{cmd.stderr}") + return cmd.stdout + end + + def file_append(file, lines, user = 'root') + lines = lines.split("\n") if lines.class == String + lines.each do |line| + cmd = execute("echo '#{line}' >> '#{file}'", :user => user) + assert(cmd.success?, + "Could not append to '#{file}':\n#{cmd.stdout}\n#{cmd.stderr}") + end + end + + def set_clipboard(text) + execute_successfully("echo -n '#{text}' | xsel --input --clipboard", + :user => LIVE_USER) + end + + def get_clipboard + execute_successfully("xsel --output --clipboard", :user => LIVE_USER).stdout + end + + def internal_snapshot_xml(name) + disk_devs = list_disk_devs + disks_xml = " \n" + for dev in disk_devs + snapshot_type = disk_type(dev) == "qcow2" ? 'internal' : 'no' + disks_xml += + " \n" + end + disks_xml += " " + return <<-EOF + + #{name} + Snapshot for #{name} +#{disks_xml} + +EOF + end + + def VM.ram_only_snapshot_path(name) + return "#{$config["TMPDIR"]}/#{name}-snapshot.memstate" + end + + def save_snapshot(name) + # If we have no qcow2 disk device, we'll use "memory state" + # snapshots, and if we have at least one qcow2 disk device, we'll + # use internal "system checkpoint" (memory + disks) snapshots. We + # have to do this since internal snapshots don't work when no + # such disk is available. We can do this with external snapshots, + # which are better in many ways, but libvirt doesn't know how to + # restore (revert back to) them yet. + # WARNING: If only transient disks, i.e. disks that were plugged + # after starting the domain, are used then the memory state will + # be dropped. External snapshots would also fix this. + internal_snapshot = false + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements.each('domain/devices/disk') do |e| + if e.elements['driver'].attribute('type').to_s == "qcow2" + internal_snapshot = true + break + end + end + + # Note: In this case the "opposite" of `internal_snapshot` is not + # anything relating to external snapshots, but actually "memory + # state"(-only) snapshots. 
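+    # In short: a qcow2 disk was found => snapshot_create_xml() with an
+    # internal "system checkpoint"; no qcow2 disk => domain.save() to a
+    # memstate file, followed immediately by restore_snapshot().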
+ if internal_snapshot + xml = internal_snapshot_xml(name) + @domain.snapshot_create_xml(xml) + else + snapshot_path = VM.ram_only_snapshot_path(name) + @domain.save(snapshot_path) + # For consistency with the internal snapshot case (which is + # "live", so the domain doesn't go down) we immediately restore + # the snapshot. + # Assumption: that *immediate* save + restore doesn't mess up + # with network state and similar, and is fast enough to not make + # the clock drift too much. + restore_snapshot(name) + end + end + + def restore_snapshot(name) + @domain.destroy if is_running? + @display.stop if @display and @display.active? + # See comment in save_snapshot() for details on why we use two + # different type of snapshots. + potential_ram_only_snapshot_path = VM.ram_only_snapshot_path(name) + if File.exist?(potential_ram_only_snapshot_path) + Libvirt::Domain::restore(@virt, potential_ram_only_snapshot_path) + @domain = @virt.lookup_domain_by_name(@domain_name) + else + begin + potential_internal_snapshot = @domain.lookup_snapshot_by_name(name) + @domain.revert_to_snapshot(potential_internal_snapshot) + rescue Libvirt::RetrieveError + raise "No such (internal nor external) snapshot #{name}" + end + end + @display.start + end + + def VM.remove_snapshot(name) + old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME) + potential_ram_only_snapshot_path = VM.ram_only_snapshot_path(name) + if File.exist?(potential_ram_only_snapshot_path) + File.delete(potential_ram_only_snapshot_path) + else + snapshot = old_domain.lookup_snapshot_by_name(name) + snapshot.delete + end + end + + def VM.snapshot_exists?(name) + return true if File.exist?(VM.ram_only_snapshot_path(name)) + old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME) + snapshot = old_domain.lookup_snapshot_by_name(name) + return snapshot != nil + rescue Libvirt::RetrieveError + return false + end + + def VM.remove_all_snapshots + Dir.glob("#{$config["TMPDIR"]}/*-snapshot.memstate").each do |file| + File.delete(file) + end + old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME) + old_domain.list_all_snapshots.each { |snapshot| snapshot.delete } + rescue Libvirt::RetrieveError + # No such domain, so no snapshots either. + end + + def start + return if is_running? + @domain.create + @display.start + end + + def reset + @domain.reset if is_running? + end + + def power_off + @domain.destroy if is_running? + @display.stop + end + + def take_screenshot(description) + @display.take_screenshot(description) + end + + def get_remote_shell_port + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements.each('domain/devices/serial') do |e| + if e.attribute('type').to_s == "tcp" + return e.elements['source'].attribute('service').to_s.to_i + end + end + end + +end -- cgit v1.2.3-54-g00ecf
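For orientation, a minimal sketch (not part of the patch) of how the moved helpers could be wired together in a Cucumber support file. It assumes the suite's usual globals and constants (GIT_DIR, TAILS_ISO, the LIBVIRT_* names, $config) are already defined elsewhere, and the "features/domains" template path is a guess:

    require 'libvirt'

    $virt      = Libvirt::open("qemu:///system")        # connection shared by all helpers
    xml_path   = "#{GIT_DIR}/features/domains"           # assumed location of the XML templates

    $vmnet     = VMNet.new($virt, xml_path)                          # isolated test network
    $vmstorage = VMStorage.new($virt, xml_path)                      # per-scenario storage pool
    $vm        = VM.new($virt, xml_path, $vmnet, $vmstorage, ":0")   # domain + virt-viewer display

    $vm.start
    $vm.wait_until_remote_shell_is_up
    puts $vm.execute_successfully("echo hi").stdout                  # runs inside the guest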