author      Philip Hands <phil@hands.com>            2016-03-14 15:36:16 +0100
committer   Holger Levsen <holger@layer-acht.org>    2016-04-28 21:52:10 +0200
commit      da080c472fc415b0ce918f4dd4a1ab143bb1bca4 (patch)
tree        bf63179f32f0eda0c2d5796e3e31c18c3c1185cf /features/support
parent      26a9e8ec2bcae03db4d663d87b44d8708d64fdc2 (diff)
download    jenkins.debian.net-da080c472fc415b0ce918f4dd4a1ab143bb1bca4.tar.xz
Rough attempt to grab the good cucumber bits from recent Tails
Diffstat (limited to 'features/support')
-rw-r--r--  features/support/config.rb                        96
-rw-r--r--  features/support/env.rb                           39
-rw-r--r--  features/support/extra_hooks.rb                  144
-rw-r--r--  features/support/helpers/chatbot_helper.rb        59
-rw-r--r--  features/support/helpers/ctcp_helper.rb          126
-rw-r--r--  features/support/helpers/display_helper.rb        51
-rw-r--r--  features/support/helpers/exec_helper.rb           30
-rw-r--r--  features/support/helpers/firewall_helper.rb       87
-rw-r--r--  features/support/helpers/misc_helpers.rb         228
-rw-r--r--  features/support/helpers/sikuli_helper.rb         91
-rw-r--r--  features/support/helpers/sniffing_helper.rb       13   (renamed from features/support/helpers/net_helper.rb)
-rw-r--r--  features/support/helpers/sshd_helper.rb           67
-rw-r--r--  features/support/helpers/storage_helper.rb       135
-rw-r--r--  features/support/helpers/vm_helper.rb            532
-rw-r--r--  features/support/hooks.rb                        306
15 files changed, 1579 insertions, 425 deletions
diff --git a/features/support/config.rb b/features/support/config.rb index 66542cde..9db15929 100644 --- a/features/support/config.rb +++ b/features/support/config.rb @@ -1,5 +1,14 @@ require 'fileutils' -require "features/support/helpers/misc_helpers.rb" +require 'yaml' +require "#{Dir.pwd}/features/support/helpers/misc_helpers.rb" + +# These files deal with options like some of the settings passed +# to the `run_test_suite` script, and "secrets" like credentials +# (passwords, SSH keys) to be used in tests. +CONFIG_DIR = "#{Dir.pwd}/features/config" +DEFAULTS_CONFIG_FILE = "#{CONFIG_DIR}/defaults.yml" +LOCAL_CONFIG_FILE = "#{CONFIG_DIR}/local.yml" +LOCAL_CONFIG_DIRS_FILES_GLOB = "#{CONFIG_DIR}/*.d/*.yml" # Dynamic $tails_iso = ENV['ISO'] || get_newest_iso @@ -15,20 +24,85 @@ $time_at_start = Time.now $live_user = "user" $sikuli_retry_findfailed = !ENV['SIKULI_RETRY_FINDFAILED'].nil? -# Static -$configured_keyserver_hostname = 'hkps.pool.sks-keyservers.net' -$services_expected_on_all_ifaces = +assert File.exists?(DEFAULTS_CONFIG_FILE) +$config = YAML.load(File.read(DEFAULTS_CONFIG_FILE)) +config_files = Dir.glob(LOCAL_CONFIG_DIRS_FILES_GLOB).sort +config_files.insert(0, LOCAL_CONFIG_FILE) if File.exists?(LOCAL_CONFIG_FILE) +config_files.each do |config_file| + yaml_struct = YAML.load(File.read(config_file)) || Hash.new + if not(yaml_struct.instance_of?(Hash)) + raise "Local configuration file '#{config_file}' is malformed" + end + $config.merge!(yaml_struct) +end +# Options passed to the `run_test_suite` script will always take +# precedence. The way we import these keys is only safe for values +# with types boolean or string. If we need more, we'll have to invoke +# YAML's type autodetection on ENV some how. +$config.merge!(ENV) + +# Export TMPDIR back to the environment for subprocesses that we start +# (e.g. guestfs). Note that this export will only make a difference if +# TMPDIR wasn't already set and --tmpdir wasn't passed, i.e. only when +# we use the default. +ENV['TMPDIR'] = $config['TMPDIR'] + +# Dynamic constants initialized through the environment or similar, +# e.g. options we do not want to be configurable through the YAML +# configuration files. +DEBUG_LOG_PSEUDO_FIFO = "#{$config["TMPDIR"]}/debug_log_pseudo_fifo" +DISPLAY = ENV['DISPLAY'] +GIT_DIR = ENV['PWD'] +KEEP_SNAPSHOTS = !ENV['KEEP_SNAPSHOTS'].nil? +LIVE_USER = cmd_helper(". config/chroot_local-includes/etc/live/config.d/username.conf; echo ${LIVE_USERNAME}").chomp +TAILS_ISO = ENV['TAILS_ISO'] +OLD_TAILS_ISO = ENV['OLD_TAILS_ISO'] || TAILS_ISO +TIME_AT_START = Time.now +loop do + ARTIFACTS_DIR = $config['TMPDIR'] + "/run-" + + sanitize_filename(TIME_AT_START.to_s) + "-" + + [ + "git", + sanitize_filename(describe_git_head, + :replacement => '-'), + current_short_commit + ].reject(&:empty?).join("_") + "-" + + random_alnum_string(6) + if not(File.exist?(ARTIFACTS_DIR)) + FileUtils.mkdir_p(ARTIFACTS_DIR) + break + end +end + +# Constants that are statically initialized. 
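As an aside on the configuration loading in config.rb above: the merge order means defaults.yml is read first, then local.yml, then the sorted *.d/*.yml drop-ins, and finally the environment set up by the run_test_suite script, so later sources win. A minimal sketch of that precedence, with real keys but made-up values and file names:

    require 'yaml'

    # Stand-ins for the real files; the keys exist in this commit, the
    # values and the drop-in file name are made up for illustration.
    defaults = YAML.load("TMPDIR: /tmp/TailsToaster\nMAX_NEW_TOR_CIRCUIT_RETRIES: 10\n")
    local    = { 'TMPDIR' => '/srv/big-tmp' }           # features/config/local.yml
    drop_in  = { 'MAX_NEW_TOR_CIRCUIT_RETRIES' => 5 }   # features/config/example.d/override.yml

    config = defaults
    [local, drop_in].each { |overrides| config.merge!(overrides) }
    config.merge!(ENV)  # e.g. running with TMPDIR=/other overrides all of the above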
+CONFIGURED_KEYSERVER_HOSTNAME = 'hkps.pool.sks-keyservers.net' +LIBVIRT_DOMAIN_NAME = "TailsToaster" +LIBVIRT_DOMAIN_UUID = "203552d5-819c-41f3-800e-2c8ef2545404" +LIBVIRT_NETWORK_NAME = "TailsToasterNet" +LIBVIRT_NETWORK_UUID = "f2305af3-2a64-4f16-afe6-b9dbf02a597e" +MISC_FILES_DIR = "#{Dir.pwd}/features/misc_files" +SERVICES_EXPECTED_ON_ALL_IFACES = [ ["cupsd", "0.0.0.0", "631"], ["dhclient", "0.0.0.0", "*"] ] -$tor_authorities = +# OpenDNS +SOME_DNS_SERVER = "208.67.222.222" +TOR_AUTHORITIES = # List grabbed from Tor's sources, src/or/config.c:~750. [ - "128.31.0.39", "86.59.21.38", "194.109.206.212", - "82.94.251.203", "76.73.17.194", "212.112.245.170", - "193.23.244.244", "208.83.223.34", "171.25.193.9", - "154.35.32.5" + "86.59.21.38", + "128.31.0.39", + "194.109.206.212", + "82.94.251.203", + "199.254.238.52", + "131.188.40.189", + "193.23.244.244", + "208.83.223.34", + "171.25.193.9", + "154.35.175.225", ] -# OpenDNS -$some_dns_server = "208.67.222.222" +VM_XML_PATH = "#{Dir.pwd}/features/domains" + +TAILS_SIGNING_KEY = cmd_helper(". #{Dir.pwd}/config/amnesia; echo ${AMNESIA_DEV_KEYID}").tr(' ', '').chomp +TAILS_DEBIAN_REPO_KEY = "221F9A3C6FA3E09E182E060BC7988EA7A358D82E" diff --git a/features/support/env.rb b/features/support/env.rb index 3fa5c371..2e17ae76 100644 --- a/features/support/env.rb +++ b/features/support/env.rb @@ -3,6 +3,12 @@ require "features/support/extra_hooks.rb" require 'time' require 'rspec' +# Force UTF-8. Ruby will default to the system locale, and if it is +# non-UTF-8, String-methods will fail when operating on non-ASCII +# strings. +Encoding.default_external = Encoding::UTF_8 +Encoding.default_internal = Encoding::UTF_8 + def fatal_system(str) unless system(str) raise StandardError.new("Command exited with #{$?}") @@ -14,6 +20,9 @@ def git_exists? end def create_git + Dir.mkdir 'config' + FileUtils.touch('config/base_branch') + Dir.mkdir('config/APT_overlays.d') Dir.mkdir 'debian' File.open('debian/changelog', 'w') do |changelog| changelog.write(<<END_OF_CHANGELOG) @@ -33,7 +42,35 @@ END_OF_CHANGELOG fatal_system "git branch -M stable" fatal_system "git branch testing stable" fatal_system "git branch devel stable" - fatal_system "git branch experimental devel" + fatal_system "git branch feature/jessie devel" +end + +def current_branch + cmd = 'git rev-parse --symbolic-full-name --abbrev-ref HEAD'.split + branch = cmd_helper(cmd).strip + assert_not_equal("HEAD", branch, "We are in 'detached HEAD' state") + return branch +end + +# In order: if git HEAD is tagged, return its name; if a branch is +# checked out, return its name; otherwise we are in 'detached HEAD' +# state, and we return the empty string. +def describe_git_head + cmd_helper("git describe --tags --exact-match #{current_commit}".split).strip +rescue Test::Unit::AssertionFailedError + begin + current_branch + rescue Test::Unit::AssertionFailedError + "" + end +end + +def current_commit + cmd_helper('git rev-parse HEAD'.split).strip +end + +def current_short_commit + current_commit[0, 7] end RSpec::Matchers.define :have_suite do |suite| diff --git a/features/support/extra_hooks.rb b/features/support/extra_hooks.rb index a8addb35..16196a55 100644 --- a/features/support/extra_hooks.rb +++ b/features/support/extra_hooks.rb @@ -1,8 +1,28 @@ -require 'cucumber/formatter/pretty' +# Make the code below work with cucumber >= 2.0. Once we stop +# supporting <2.0 we should probably do this differently, but this way +# we can easily support both at the same time. 
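To make the new git helpers in env.rb concrete, this is roughly how their output ends up in the per-run artifacts directory name assembled in config.rb (all values below are illustrative, not real output):

    describe_git_head     # => "feature/jessie" when that branch is checked out,
                          #    a tag name when HEAD is exactly on a tag, "" if detached
    current_short_commit  # => "da080c4"
    sanitize_filename(describe_git_head, :replacement => '-')  # => "feature-jessie"
    # config.rb combines these into something like:
    #   <TMPDIR>/run-2016-03-14_15:36:16_+0100-git_feature-jessie_da080c4-<6 random chars>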
+begin + if not(Cucumber::Core::Ast::Feature.instance_methods.include?(:accept_hook?)) + require 'gherkin/tag_expression' + class Cucumber::Core::Ast::Feature + # Code inspired by Cucumber::Core::Test::Case.match_tags?() in + # cucumber-ruby-core 1.1.3, lib/cucumber/core/test/case.rb:~59. + def accept_hook?(hook) + tag_expr = Gherkin::TagExpression.new(hook.tag_expressions.flatten) + tags = @tags.map do |t| + Gherkin::Formatter::Model::Tag.new(t.name, t.line) + end + tag_expr.evaluate(tags) + end + end + end +rescue NameError => e + raise e if e.to_s != "uninitialized constant Cucumber::Core" +end -# Sort of inspired by Cucumber::RbSupport::RbHook, but really we just -# want an object with a 'tag_expressions' attribute to make -# accept_hook?() (used below) happy. +# Sort of inspired by Cucumber::RbSupport::RbHook (from cucumber +# < 2.0) but really we just want an object with a 'tag_expressions' +# attribute to make accept_hook?() (used below) happy. class SimpleHook attr_reader :tag_expressions @@ -26,20 +46,120 @@ def AfterFeature(*tag_expressions, &block) $after_feature_hooks << SimpleHook.new(tag_expressions, block) end -module ExtraHooks - class Pretty < Cucumber::Formatter::Pretty +require 'cucumber/formatter/console' +if not($at_exit_print_artifacts_dir_patching_done) + module Cucumber::Formatter::Console + if method_defined?(:print_stats) + alias old_print_stats print_stats + end + def print_stats(*args) + if Dir.exists?(ARTIFACTS_DIR) and Dir.entries(ARTIFACTS_DIR).size > 2 + @io.puts "Artifacts directory: #{ARTIFACTS_DIR}" + @io.puts + end + if self.class.method_defined?(:old_print_stats) + old_print_stats(*args) + end + end + end + $at_exit_print_artifacts_dir_patching_done = true +end + +def info_log(message = "", options = {}) + options[:color] = :clear + # This trick allows us to use a module's (~private) method on a + # one-off basis. + cucumber_console = Class.new.extend(Cucumber::Formatter::Console) + puts cucumber_console.format_string(message, options[:color]) +end + +def debug_log(message, options = {}) + $debug_log_fns.each { |fn| fn.call(message, options) } if $debug_log_fns +end + +require 'cucumber/formatter/pretty' +# Backport part of commit af940a8 from the cucumber-ruby repo. This +# fixes the "out hook output" for the Pretty formatter so stuff +# written via `puts` after a Scenario has run its last step will be +# written, instead of delayed to the next Feature/Scenario (if any) or +# dropped completely (if not). +# XXX: This can be removed once we stop supporting Debian Jessie +# around when Debian Stretch is released. +if Gem::Version.new(Cucumber::VERSION) < Gem::Version.new('2.0.0.beta.4') + module Cucumber + module Formatter + class Pretty + def after_feature_element(feature_element) + print_messages + @io.puts + @io.flush + end + end + end + end +end + +module ExtraFormatters + # This is a null formatter in the sense that it doesn't ever output + # anything. We only use it do hook into the correct events so we can + # add our extra hooks. + class ExtraHooks + def initialize(*args) + # We do not care about any of the arguments. 
+ end + def before_feature(feature) - for hook in $before_feature_hooks do - hook.invoke(feature) if feature.accept_hook?(hook) + if $before_feature_hooks + $before_feature_hooks.each do |hook| + hook.invoke(feature) if feature.accept_hook?(hook) + end end - super if defined?(super) end def after_feature(feature) - for hook in $after_feature_hooks do - hook.invoke(feature) if feature.accept_hook?(hook) + if $after_feature_hooks + $after_feature_hooks.reverse.each do |hook| + hook.invoke(feature) if feature.accept_hook?(hook) + end end - super if defined?(super) end end + + # The pretty formatter with debug logging mixed into its output. + class PrettyDebug < Cucumber::Formatter::Pretty + def initialize(*args) + super(*args) + $debug_log_fns ||= [] + $debug_log_fns << self.method(:debug_log) + end + + def debug_log(message, options) + options[:color] ||= :blue + @io.puts(format_string(message, options[:color])) + @io.flush + end + end + +end + +module Cucumber + module Cli + class Options + BUILTIN_FORMATS['pretty_debug'] = + [ + 'ExtraFormatters::PrettyDebug', + 'Prints the feature with debugging information - in colours.' + ] + BUILTIN_FORMATS['debug'] = BUILTIN_FORMATS['pretty_debug'] + end + end +end + +AfterConfiguration do |config| + # Cucumber may read this file multiple times, and hence run this + # AfterConfiguration hook multiple times. We only want our + # ExtraHooks formatter to be loaded once, otherwise the hooks would + # be run miltiple times. + extra_hooks = ['ExtraFormatters::ExtraHooks', '/dev/null'] + config.formats << extra_hooks if not(config.formats.include?(extra_hooks)) end diff --git a/features/support/helpers/chatbot_helper.rb b/features/support/helpers/chatbot_helper.rb new file mode 100644 index 00000000..23ce3e1a --- /dev/null +++ b/features/support/helpers/chatbot_helper.rb @@ -0,0 +1,59 @@ +require 'tempfile' + +class ChatBot + + def initialize(account, password, otr_key, opts = Hash.new) + @account = account + @password = password + @otr_key = otr_key + @opts = opts + @pid = nil + @otr_key_file = nil + end + + def start + @otr_key_file = Tempfile.new("otr_key.", $config["TMPDIR"]) + @otr_key_file << @otr_key + @otr_key_file.close + + cmd_helper(['/usr/bin/convertkey', @otr_key_file.path]) + cmd_helper(["mv", "#{@otr_key_file.path}3", @otr_key_file.path]) + + cmd = [ + "#{GIT_DIR}/features/scripts/otr-bot.py", + @account, + @password, + @otr_key_file.path + ] + cmd += ["--connect-server", @opts["connect_server"]] if @opts["connect_server"] + cmd += ["--auto-join"] + @opts["auto_join"] if @opts["auto_join"] + cmd += ["--log-file", DEBUG_LOG_PSEUDO_FIFO] + + job = IO.popen(cmd) + @pid = job.pid + end + + def stop + @otr_key_file.delete + begin + Process.kill("TERM", @pid) + rescue + # noop + end + end + + def active? + begin + ret = Process.kill(0, @pid) + rescue Errno::ESRCH => e + if e.message == "No such process" + return false + else + raise e + end + end + assert_equal(1, ret, "This shouldn't happen") + return true + end + +end diff --git a/features/support/helpers/ctcp_helper.rb b/features/support/helpers/ctcp_helper.rb new file mode 100644 index 00000000..ee5180ab --- /dev/null +++ b/features/support/helpers/ctcp_helper.rb @@ -0,0 +1,126 @@ +require 'net/irc' +require 'timeout' + +class CtcpChecker < Net::IRC::Client + + CTCP_SPAM_DELAY = 5 + + # `spam_target`: the nickname of the IRC user to CTCP spam. + # `ctcp_cmds`: the Array of CTCP commands to send. 
+ # `expected_ctcp_replies`: Hash where the keys are the exact set of replies + # we expect, and their values a regex the reply data must match. + def initialize(host, port, spam_target, ctcp_cmds, expected_ctcp_replies) + @spam_target = spam_target + @ctcp_cmds = ctcp_cmds + @expected_ctcp_replies = expected_ctcp_replies + nickname = self.class.random_irc_nickname + opts = { + :nick => nickname, + :user => nickname, + :real => nickname, + } + opts[:logger] = Logger.new(DEBUG_LOG_PSEUDO_FIFO) + super(host, port, opts) + end + + # Makes sure that only the expected CTCP replies are received. + def verify_ctcp_responses + @sent_ctcp_cmds = Set.new + @received_ctcp_replies = Set.new + + # Give 60 seconds for connecting to the server and other overhead + # beyond the expected time to spam all CTCP commands. + expected_ctcp_spam_time = @ctcp_cmds.length * CTCP_SPAM_DELAY + timeout = expected_ctcp_spam_time + 60 + + begin + Timeout::timeout(timeout) do + start + end + rescue Timeout::Error + # Do nothing as we'll check for errors below. + ensure + finish + end + + ctcp_cmds_not_sent = @ctcp_cmds - @sent_ctcp_cmds.to_a + expected_ctcp_replies_not_received = + @expected_ctcp_replies.keys - @received_ctcp_replies.to_a + + if !ctcp_cmds_not_sent.empty? || !expected_ctcp_replies_not_received.empty? + raise "Failed to spam all CTCP commands and receive the expected " + + "replies within #{timeout} seconds.\n" + + (ctcp_cmds_not_sent.empty? ? "" : + "CTCP commands not sent: #{ctcp_cmds_not_sent}\n") + + (expected_ctcp_replies_not_received.empty? ? "" : + "Expected CTCP replies not received: " + + expected_ctcp_replies_not_received.to_s) + end + + end + + # Generate a random IRC nickname, in this case an alpha-numeric + # string with length 10 to 15. To make it legal, the first character + # is forced to be alpha. + def self.random_irc_nickname + random_alpha_string(1) + random_alnum_string(9, 14) + end + + def spam(spam_target) + post(NOTICE, spam_target, "Hi! I'm gonna test your CTCP capabilities now.") + @ctcp_cmds.each do |cmd| + sleep CTCP_SPAM_DELAY + full_cmd = cmd + case cmd + when "PING" + full_cmd += " #{Time.now.to_i}" + when "ACTION" + full_cmd += " barfs on the floor." + when "ERRMSG" + full_cmd += " Pidgin should not respond to this." + end + post(PRIVMSG, spam_target, ctcp_encode(full_cmd)) + @sent_ctcp_cmds << cmd + end + end + + def on_rpl_welcome(m) + super + Thread.new { spam(@spam_target) } + end + + def on_message(m) + if m.command == ERR_NICKNAMEINUSE + finish + new_nick = self.class.random_irc_nickname + @opts.marshal_load({ + :nick => new_nick, + :user => new_nick, + :real => new_nick, + }) + start + return + end + + if m.ctcp? 
and /^:#{Regexp.escape(@spam_target)}!/.match(m) + m.ctcps.each do |ctcp_reply| + reply_type, _, reply_data = ctcp_reply.partition(" ") + if @expected_ctcp_replies.has_key?(reply_type) + if @expected_ctcp_replies[reply_type].match(reply_data) + @received_ctcp_replies << reply_type + else + raise "Received expected CTCP reply '#{reply_type}' but with " + + "unexpected data '#{reply_data}' " + end + else + raise "Received unexpected CTCP reply '#{reply_type}' with " + + "data '#{reply_data}'" + end + end + end + if Set.new(@ctcp_cmds) == @sent_ctcp_cmds && \ + Set.new(@expected_ctcp_replies.keys) == @received_ctcp_replies + finish + end + end +end diff --git a/features/support/helpers/display_helper.rb b/features/support/helpers/display_helper.rb index 354935f0..b4dce733 100644 --- a/features/support/helpers/display_helper.rb +++ b/features/support/helpers/display_helper.rb @@ -6,8 +6,22 @@ class Display @x_display = x_display end + def active? + p = IO.popen(["xprop", "-display", @x_display, + "-name", "#{@domain} (1) - Virt Viewer", + :err => ["/dev/null", "w"]]) + Process.wait(p.pid) + $?.success? + end + def start - start_virtviewer(@domain) + @virtviewer = IO.popen(["virt-viewer", "--direct", + "--kiosk", + "--reconnect", + "--connect", "qemu:///system", + "--display", @x_display, + @domain, + :err => ["/dev/null", "w"]]) # We wait for the display to be active to not lose actions # (e.g. key presses via sikuli) that come immediately after # starting (or restoring) a vm @@ -17,35 +31,18 @@ class Display end def stop - stop_virtviewer + return if @virtviewer.nil? + Process.kill("TERM", @virtviewer.pid) + @virtviewer.close + rescue IOError + # IO.pid throws this if the process wasn't started yet. Possibly + # there's a race when doing a start() and then quickly running + # stop(). end def restart - stop_virtviewer - start_virtviewer(@domain) - end - - def start_virtviewer(domain) - # virt-viewer forks, so we cannot (easily) get the child pid - # and use it in active? and stop_virtviewer below... - IO.popen(["virt-viewer", "-d", - "-f", - "-r", - "-c", "qemu:///system", - ["--display=", @x_display].join(''), - domain, - "&"].join(' ')) + stop + start end - def active? - p = IO.popen("xprop -display #{@x_display} " + - "-name '#{@domain} (1) - Virt Viewer' 2>/dev/null") - Process.wait(p.pid) - p.close - $? == 0 - end - - def stop_virtviewer - system("killall virt-viewer") - end end diff --git a/features/support/helpers/exec_helper.rb b/features/support/helpers/exec_helper.rb index b0d3a9cd..42f6532a 100644 --- a/features/support/helpers/exec_helper.rb +++ b/features/support/helpers/exec_helper.rb @@ -10,13 +10,11 @@ class VMCommand @returncode, @stdout, @stderr = VMCommand.execute(vm, cmd, options) end - def VMCommand.wait_until_remote_shell_is_up(vm, timeout = 30) - begin - Timeout::timeout(timeout) do - VMCommand.execute(vm, "true", { :user => "root", :spawn => false }) + def VMCommand.wait_until_remote_shell_is_up(vm, timeout = 90) + try_for(timeout, :msg => "Remote shell seems to be down") do + Timeout::timeout(3) do + VMCommand.execute(vm, "echo 'hello?'") end - rescue Timeout::Error - raise "Remote shell seems to be down" end end @@ -27,21 +25,21 @@ class VMCommand # response will always be [0, "", ""] (only used as an # ACK). execute() will always block until a response is received, # though. Spawning is useful when starting processes in the - # background (or running scripts that does the same) like the - # vidalia-wrapper, or any application we want to interact with. 
+ # background (or running scripts that does the same) like our + # onioncircuits wrapper, or any application we want to interact with. def VMCommand.execute(vm, cmd, options = {}) options[:user] ||= "root" options[:spawn] ||= false type = options[:spawn] ? "spawn" : "call" socket = TCPSocket.new("127.0.0.1", vm.get_remote_shell_port) - STDERR.puts "#{type}ing as #{options[:user]}: #{cmd}" if $debug + debug_log("#{type}ing as #{options[:user]}: #{cmd}") begin socket.puts(JSON.dump([type, options[:user], cmd])) s = socket.readline(sep = "\0").chomp("\0") ensure socket.close end - STDERR.puts "#{type} returned: #{s}" if $debug + debug_log("#{type} returned: #{s}") if not(options[:spawn]) begin return JSON.load(s) rescue JSON::ParserError @@ -58,4 +56,16 @@ class VMCommand return @returncode == 0 end + def failure? + return not(success?) + end + + def to_s + "Return status: #{@returncode}\n" + + "STDOUT:\n" + + @stdout + + "STDERR:\n" + + @stderr + end + end diff --git a/features/support/helpers/firewall_helper.rb b/features/support/helpers/firewall_helper.rb index 400965a5..fce363c5 100644 --- a/features/support/helpers/firewall_helper.rb +++ b/features/support/helpers/firewall_helper.rb @@ -11,21 +11,12 @@ class IPAddr ] PrivateIPv6Ranges = [ - IPAddr.new("fc00::/7"), # private + IPAddr.new("fc00::/7") ] def private? - if self.ipv4? - PrivateIPv4Ranges.each do |ipr| - return true if ipr.include?(self) - end - return false - else - PrivateIPv6Ranges.each do |ipr| - return true if ipr.include?(self) - end - return false - end + private_ranges = self.ipv4? ? PrivateIPv4Ranges : PrivateIPv6Ranges + private_ranges.any? { |range| range.include?(self) } end def public? @@ -34,16 +25,25 @@ class IPAddr end class FirewallLeakCheck - attr_reader :ipv4_tcp_leaks, :ipv4_nontcp_leaks, :ipv6_leaks, :nonip_leaks + attr_reader :ipv4_tcp_leaks, :ipv4_nontcp_leaks, :ipv6_leaks, :nonip_leaks, :mac_leaks - def initialize(pcap_file, tor_relays) - packets = PacketFu::PcapFile.new.file_to_array(:filename => pcap_file) - @tor_relays = tor_relays + def initialize(pcap_file, options = {}) + options[:accepted_hosts] ||= [] + options[:ignore_lan] ||= true + @pcap_file = pcap_file + packets = PacketFu::PcapFile.new.file_to_array(:filename => @pcap_file) + mac_leaks = Set.new ipv4_tcp_packets = [] ipv4_nontcp_packets = [] ipv6_packets = [] nonip_packets = [] packets.each do |p| + if PacketFu::EthPacket.can_parse?(p) + packet = PacketFu::EthPacket.parse(p) + mac_leaks << packet.eth_saddr + mac_leaks << packet.eth_daddr + end + if PacketFu::TCPPacket.can_parse?(p) ipv4_tcp_packets << PacketFu::TCPPacket.parse(p) elsif PacketFu::IPPacket.can_parse?(p) @@ -57,17 +57,25 @@ class FirewallLeakCheck raise "Found something in the pcap file that cannot be parsed" end end - ipv4_tcp_hosts = get_public_hosts_from_ippackets ipv4_tcp_packets - tor_nodes = Set.new(get_all_tor_contacts) - @ipv4_tcp_leaks = ipv4_tcp_hosts.select{|host| !tor_nodes.member?(host)} - @ipv4_nontcp_leaks = get_public_hosts_from_ippackets ipv4_nontcp_packets - @ipv6_leaks = get_public_hosts_from_ippackets ipv6_packets + ipv4_tcp_hosts = filter_hosts_from_ippackets(ipv4_tcp_packets, + options[:ignore_lan]) + accepted = Set.new(options[:accepted_hosts]) + @mac_leaks = mac_leaks + @ipv4_tcp_leaks = ipv4_tcp_hosts.select { |host| !accepted.member?(host) } + @ipv4_nontcp_leaks = filter_hosts_from_ippackets(ipv4_nontcp_packets, + options[:ignore_lan]) + @ipv6_leaks = filter_hosts_from_ippackets(ipv6_packets, + options[:ignore_lan]) @nonip_leaks = nonip_packets end - 
# Returns a list of all unique non-LAN destination IP addresses - # found in `packets`. - def get_public_hosts_from_ippackets(packets) + def save_pcap_file + save_failure_artifact("Network capture", @pcap_file) + end + + # Returns a list of all unique destination IP addresses found in + # `packets`. Exclude LAN hosts if ignore_lan is set. + def filter_hosts_from_ippackets(packets, ignore_lan) hosts = [] packets.each do |p| candidate = nil @@ -80,21 +88,34 @@ class FirewallLeakCheck raise "Expected an IP{v4,v6} packet, but got something else:\n" + p.peek_format end - if candidate != nil and IPAddr.new(candidate).public? + if candidate != nil and (not(ignore_lan) or IPAddr.new(candidate).public?) hosts << candidate end end hosts.uniq end - # Returns an array of all Tor relays and authorities, i.e. all - # Internet hosts Tails ever should contact. - def get_all_tor_contacts - @tor_relays + $tor_authorities - end - - def empty? - @ipv4_tcp_leaks.empty? and @ipv4_nontcp_leaks.empty? and @ipv6_leaks.empty? and @nonip_leaks.empty? + def assert_no_leaks + err = "" + if !@ipv4_tcp_leaks.empty? + err += "The following IPv4 TCP non-Tor Internet hosts were " + + "contacted:\n" + ipv4_tcp_leaks.join("\n") + end + if !@ipv4_nontcp_leaks.empty? + err += "The following IPv4 non-TCP Internet hosts were contacted:\n" + + ipv4_nontcp_leaks.join("\n") + end + if !@ipv6_leaks.empty? + err += "The following IPv6 Internet hosts were contacted:\n" + + ipv6_leaks.join("\n") + end + if !@nonip_leaks.empty? + err += "Some non-IP packets were sent\n" + end + if !err.empty? + save_pcap_file + raise err + end end end diff --git a/features/support/helpers/misc_helpers.rb b/features/support/helpers/misc_helpers.rb index caf64b80..7e09411f 100644 --- a/features/support/helpers/misc_helpers.rb +++ b/features/support/helpers/misc_helpers.rb @@ -2,6 +2,15 @@ require 'date' require 'timeout' require 'test/unit' +# Test::Unit adds an at_exit hook which, among other things, consumes +# the command-line arguments that were intended for cucumber. If +# e.g. `--format` was passed it will throw an error since it's not a +# valid option for Test::Unit, and it throwing an error at this time +# (at_exit) will make Cucumber think it failed and consequently exit +# with an error. Fooling Test::Unit that this hook has already run +# works around this craziness. +Test::Unit.run = true + # Make all the assert_* methods easily accessible in any context. include Test::Unit::Assertions @@ -12,41 +21,131 @@ def assert_vmcommand_success(p, msg = nil) msg) end -# Call block (ignoring any exceptions it may throw) repeatedly with one -# second breaks until it returns true, or until `t` seconds have -# passed when we throw Timeout::Error. As a precondition, the code -# block cannot throw Timeout::Error. -def try_for(t, options = {}) +# It's forbidden to throw this exception (or subclasses) in anything +# but try_for() below. Just don't use it anywhere else! +class UniqueTryForTimeoutError < Exception +end + +# Call block (ignoring any exceptions it may throw) repeatedly with +# one second breaks until it returns true, or until `timeout` seconds have +# passed when we throw a Timeout::Error exception. 
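A sketch of how the reworked leak checker above is meant to be driven (the sniffer instance variable is an assumption here; get_all_tor_nodes() is added to misc_helpers.rb in this same commit):

    # @sniffer is assumed to be a Sniffer (see sniffing_helper.rb below).
    leak_check = FirewallLeakCheck.new(@sniffer.pcap_file,
                                       :accepted_hosts => get_all_tor_nodes)
    # If any non-accepted IPv4 TCP host, any IPv4 non-TCP or IPv6 host, or
    # non-IP traffic was seen, this saves the pcap as a failure artifact
    # and raises with a description of the leaks.
    leak_check.assert_no_leaks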
+def try_for(timeout, options = {}) options[:delay] ||= 1 - begin - Timeout::timeout(t) do - loop do - begin - return true if yield - rescue Timeout::Error => e - if options[:msg] - raise RuntimeError, options[:msg], caller - else - raise e - end - rescue Exception - # noop - end - sleep options[:delay] + last_exception = nil + # Create a unique exception used only for this particular try_for + # call's Timeout to allow nested try_for:s. If we used the same one, + # the innermost try_for would catch all outer ones', creating a + # really strange situation. + unique_timeout_exception = Class.new(UniqueTryForTimeoutError) + Timeout::timeout(timeout, unique_timeout_exception) do + loop do + begin + return if yield + rescue NameError, UniqueTryForTimeoutError => e + # NameError most likely means typos, and hiding that is rarely + # (never?) a good idea, so we rethrow them. See below why we + # also rethrow *all* the unique exceptions. + raise e + rescue Exception => e + # All other exceptions are ignored while trying the + # block. Well we save the last exception so we can print it in + # case of a timeout. + last_exception = e end + sleep options[:delay] end - rescue Timeout::Error => e - if options[:msg] - raise RuntimeError, options[:msg], caller - else - raise e + end + # At this point the block above either succeeded and we'll return, + # or we are throwing an exception. If the latter, we either have a + # NameError that we'll not catch (and will any try_for below us in + # the stack), or we have a unique exception. That can mean one of + # two things: + # 1. it's the one unique to this try_for, and in that case we'll + # catch it, rethrowing it as something that will be ignored by + # inside the blocks of all try_for:s below us in the stack. + # 2. it's an exception unique to another try_for. Assuming that we + # do not throw the unique exceptions in any other place or way + # than we do it in this function, this means that there is a + # try_for below us in the stack to which this exception must be + # unique to. + # Let 1 be the base step, and 2 the inductive step, and we sort of + # an inductive proof for the correctness of try_for when it's + # nested. It shows that for an infinite stack of try_for:s, any of + # the unique exceptions will be caught only by the try_for instance + # it is unique to, and all try_for:s in between will ignore it so it + # ends up there immediately. +rescue unique_timeout_exception => e + msg = options[:msg] || 'try_for() timeout expired' + if last_exception + msg += "\nLast ignored exception was: " + + "#{last_exception.class}: #{last_exception}" + end + raise Timeout::Error.new(msg) +end + +class TorFailure < StandardError +end + +class MaxRetriesFailure < StandardError +end + +# This will retry the block up to MAX_NEW_TOR_CIRCUIT_RETRIES +# times. The block must raise an exception for a run to be considered +# as a failure. After a failure recovery_proc will be called (if +# given) and the intention with it is to bring us back to the state +# expected by the block, so it can be retried. 
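A sketch of how retry_tor is intended to be called from step definitions (the step texts are hypothetical): the block raises on failure, the recovery proc restores the state the block expects, and force_new_tor_circuit is run before each retry, at most $config['MAX_NEW_TOR_CIRCUIT_RETRIES'] times.

    # Hypothetical steps, for illustration only.
    recovery = Proc.new do
      step 'I close all open windows'
    end
    retry_tor(recovery) do
      step 'I fetch a page over Tor'
    end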
+def retry_tor(recovery_proc = nil, &block) + tor_recovery_proc = Proc.new do + force_new_tor_circuit + recovery_proc.call if recovery_proc + end + + retry_action($config['MAX_NEW_TOR_CIRCUIT_RETRIES'], + :recovery_proc => tor_recovery_proc, + :operation_name => 'Tor operation', &block) +end + +def retry_i2p(recovery_proc = nil, &block) + retry_action(15, :recovery_proc => recovery_proc, + :operation_name => 'I2P operation', &block) +end + +def retry_action(max_retries, options = {}, &block) + assert(max_retries.is_a?(Integer), "max_retries must be an integer") + options[:recovery_proc] ||= nil + options[:operation_name] ||= 'Operation' + + retries = 1 + loop do + begin + block.call + return + rescue Exception => e + if retries <= max_retries + debug_log("#{options[:operation_name]} failed (Try #{retries} of " + + "#{max_retries}) with:\n" + + "#{e.class}: #{e.message}") + options[:recovery_proc].call if options[:recovery_proc] + retries += 1 + else + raise MaxRetriesFailure.new("#{options[:operation_name]} failed (despite retrying " + + "#{max_retries} times) with\n" + + "#{e.class}: #{e.message}") + end end end end def wait_until_tor_is_working - try_for(240) { @vm.execute( - '. /usr/local/lib/tails-shell-library/tor.sh; tor_is_working').success? } + try_for(270) { $vm.execute('/usr/local/sbin/tor-has-bootstrapped').success? } +rescue Timeout::Error => e + c = $vm.execute("journalctl SYSLOG_IDENTIFIER=restart-tor") + if c.success? + debug_log("From the journal:\n" + c.stdout.sub(/^/, " ")) + else + debug_log("Nothing was in the journal about 'restart-tor'") + end + raise e end def convert_bytes_mod(unit) @@ -79,7 +178,12 @@ def convert_from_bytes(size, unit) end def cmd_helper(cmd) - IO.popen(cmd + " 2>&1") do |p| + if cmd.instance_of?(Array) + cmd << {:err => [:child, :out]} + elsif cmd.instance_of?(String) + cmd += " 2>&1" + end + IO.popen(cmd) do |p| out = p.readlines.join("\n") p.close ret = $? @@ -88,34 +192,62 @@ def cmd_helper(cmd) end end -def tails_iso_creation_date(path) - label = cmd_helper("/sbin/blkid -p -s LABEL -o value #{path}") - assert(label[/^TAILS \d+(\.\d+)+(~rc\d+)? - \d+$/], - "Got invalid label '#{label}' from Tails image '#{path}'") - return label[/\d+$/] +# This command will grab all router IP addresses from the Tor +# consensus in the VM + the hardcoded TOR_AUTHORITIES. +def get_all_tor_nodes + cmd = 'awk "/^r/ { print \$6 }" /var/lib/tor/cached-microdesc-consensus' + $vm.execute(cmd).stdout.chomp.split("\n") + TOR_AUTHORITIES +end + +def get_free_space(machine, path) + case machine + when 'host' + assert(File.exists?(path), "Path '#{path}' not found on #{machine}.") + free = cmd_helper(["df", path]) + when 'guest' + assert($vm.file_exist?(path), "Path '#{path}' not found on #{machine}.") + free = $vm.execute_successfully("df '#{path}'") + else + raise 'Unsupported machine type #{machine} passed.' 
+ end + output = free.split("\n").last + return output.match(/[^\s]\s+[0-9]+\s+[0-9]+\s+([0-9]+)\s+.*/)[1].chomp.to_i +end + +def random_string_from_set(set, min_len, max_len) + len = (min_len..max_len).to_a.sample + len ||= min_len + (0..len-1).map { |n| set.sample }.join end -def sort_isos_by_creation_date - Dir.glob("#{Dir.pwd}/*.iso").sort_by {|f| tails_iso_creation_date(f)} +def random_alpha_string(min_len, max_len = 0) + alpha_set = ('A'..'Z').to_a + ('a'..'z').to_a + random_string_from_set(alpha_set, min_len, max_len) end -def get_newest_iso - return sort_isos_by_creation_date.last +def random_alnum_string(min_len, max_len = 0) + alnum_set = ('A'..'Z').to_a + ('a'..'z').to_a + (0..9).to_a.map { |n| n.to_s } + random_string_from_set(alnum_set, min_len, max_len) end -def get_oldest_iso - return sort_isos_by_creation_date.first +# Sanitize the filename from unix-hostile filename characters +def sanitize_filename(filename, options = {}) + options[:replacement] ||= '_' + bad_unix_filename_chars = Regexp.new("[^A-Za-z0-9_\\-.,+:]") + filename.gsub(bad_unix_filename_chars, options[:replacement]) end -# This command will grab all router IP addresses from the Tor -# consensus in the VM. -def get_tor_relays - cmd = 'awk "/^r/ { print \$6 }" /var/lib/tor/cached-microdesc-consensus' - @vm.execute(cmd).stdout.chomp.split("\n") +def info_log_artifact_location(type, path) + if $config['ARTIFACTS_BASE_URI'] + # Remove any trailing slashes, we'll add one ourselves + base_url = $config['ARTIFACTS_BASE_URI'].gsub(/\/*$/, "") + path = "#{base_url}/#{File.basename(path)}" + end + info_log("#{type.capitalize}: #{path}") end -def save_pcap_file - pcap_copy = "#{$tmp_dir}/pcap_with_leaks-#{DateTime.now}" - FileUtils.cp(@sniffer.pcap_file, pcap_copy) - puts "Full network capture available at: #{pcap_copy}" +def pause(message = "Paused") + STDERR.puts + STDERR.puts "#{message} (Press ENTER to continue!)" + STDIN.gets end diff --git a/features/support/helpers/sikuli_helper.rb b/features/support/helpers/sikuli_helper.rb index 503e08b3..938f4851 100644 --- a/features/support/helpers/sikuli_helper.rb +++ b/features/support/helpers/sikuli_helper.rb @@ -5,6 +5,9 @@ require 'sikuli-script.jar' Rjb::load package_members = [ + "java.io.FileOutputStream", + "java.io.PrintStream", + "java.lang.System", "org.sikuli.script.Finder", "org.sikuli.script.Key", "org.sikuli.script.KeyModifier", @@ -18,6 +21,8 @@ package_members = [ translations = Hash[ "org.sikuli.script", "Sikuli", + "java.lang", "Java::Lang", + "java.io", "Java::Io", ] for p in package_members @@ -36,12 +41,16 @@ for p in package_members mod.const_set(class_name, imported_class) end +# Bind Java's stdout to debug_log() via our magical pseudo fifo +# logger. +def bind_java_to_pseudo_fifo_logger + file_output_stream = Java::Io::FileOutputStream.new(DEBUG_LOG_PSEUDO_FIFO) + print_stream = Java::Io::PrintStream.new(file_output_stream) + Java::Lang::System.setOut(print_stream) +end + def findfailed_hook(pic) - STDERR.puts "" - STDERR.puts "FindFailed for: #{pic}" - STDERR.puts "" - STDERR.puts "Update the image and press RETURN to retry" - STDIN.gets + pause("FindFailed for: '#{pic}'") end # Since rjb imports Java classes without creating a corresponding @@ -61,10 +70,16 @@ end sikuli_script_proxy = Sikuli::Screen $_original_sikuli_screen_new ||= Sikuli::Screen.method :new +# For waitAny()/findAny() we are forced to throw this exception since +# Rjb::throw doesn't block until the Java exception has been received +# by Ruby, so strange things can happen. 
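The small string helpers above are used elsewhere in this commit; their behaviour, with example outputs (the random ones obviously vary):

    sanitize_filename("TailsToaster 2016-03-14 15:36")
    # => "TailsToaster_2016-03-14_15:36"   (default replacement is '_')

    random_alnum_string(6)                               # => e.g. "x4T9qZ"  (artifacts dir suffix)
    random_alpha_string(1) + random_alnum_string(9, 14)  # => e.g. "kXhQvrLmp3"
    # ^ a 10- to 15-character string starting with a letter, as CtcpChecker
    #   uses for legal IRC nicknames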
+class FindAnyFailed < StandardError +end + def sikuli_script_proxy.new(*args) s = $_original_sikuli_screen_new.call(*args) - if $sikuli_retry_findfailed + if $config["SIKULI_RETRY_FINDFAILED"] # The usage of `_invoke()` below exemplifies how one can wrap # around Java objects' methods when they're imported using RJB. It # isn't pretty. The seconds argument is the parameter signature, @@ -104,6 +119,18 @@ def sikuli_script_proxy.new(*args) self.click(Sikuli::Location.new(x, y)) end + def s.doubleClick_point(x, y) + self.doubleClick(Sikuli::Location.new(x, y)) + end + + def s.click_mid_right_edge(pic) + r = self.find(pic) + top_right = r.getTopRight() + x = top_right.getX + y = top_right.getY + r.getH/2 + self.click_point(x, y) + end + def s.wait_and_click(pic, time) self.click(self.wait(pic, time)) end @@ -112,6 +139,48 @@ def sikuli_script_proxy.new(*args) self.doubleClick(self.wait(pic, time)) end + def s.wait_and_right_click(pic, time) + self.rightClick(self.wait(pic, time)) + end + + def s.wait_and_hover(pic, time) + self.hover(self.wait(pic, time)) + end + + def s.existsAny(images) + images.each do |image| + region = self.exists(image) + return [image, region] if region + end + return nil + end + + def s.findAny(images) + images.each do |image| + begin + return [image, self.find(image)] + rescue FindFailed + # Ignore. We deal we'll throw an appropriate exception after + # having looped through all images and found none of them. + end + end + # If we've reached this point, none of the images could be found. + raise FindAnyFailed.new("can not find any of the images #{images} on the " + + "screen") + end + + def s.waitAny(images, time) + Timeout::timeout(time) do + loop do + result = self.existsAny(images) + return result if result + end + end + rescue Timeout::Error + raise FindAnyFailed.new("can not find any of the images #{images} on the " + + "screen") + end + def s.hover_point(x, y) self.hover(Sikuli::Location.new(x, y)) end @@ -132,13 +201,13 @@ end # required, ruby's require method complains that the method for the # field accessor is missing. sikuli_settings = Sikuli::Settings.new -sikuli_settings.OcrDataPath = $tmp_dir +sikuli_settings.OcrDataPath = $config["TMPDIR"] # sikuli_ruby, which we used before, defaulted to 0.9 minimum # similarity, so all our current images are adapted to that value. # Also, Sikuli's default of 0.7 is simply too low (many false # positives). 
sikuli_settings.MinSimilarity = 0.9 -sikuli_settings.ActionLogs = $debug -sikuli_settings.DebugLogs = $debug -sikuli_settings.InfoLogs = $debug -sikuli_settings.ProfileLogs = $debug +sikuli_settings.ActionLogs = true +sikuli_settings.DebugLogs = true +sikuli_settings.InfoLogs = true +sikuli_settings.ProfileLogs = true diff --git a/features/support/helpers/net_helper.rb b/features/support/helpers/sniffing_helper.rb index 29119195..213411eb 100644 --- a/features/support/helpers/net_helper.rb +++ b/features/support/helpers/sniffing_helper.rb @@ -14,15 +14,16 @@ class Sniffer attr_reader :name, :pcap_file, :pid - def initialize(name, bridge_name) + def initialize(name, vmnet) @name = name - @bridge_name = bridge_name - @bridge_mac = File.open("/sys/class/net/#{@bridge_name}/address", "rb").read.chomp - @pcap_file = "#{$tmp_dir}/#{name}.pcap" + @vmnet = vmnet + pcap_name = sanitize_filename("#{name}.pcap") + @pcap_file = "#{$config["TMPDIR"]}/#{pcap_name}" end - def capture(filter="not ether src host #{@bridge_mac} and not ether proto \\arp and not ether proto \\rarp") - job = IO.popen("/usr/sbin/tcpdump -n -i #{@bridge_name} -w #{@pcap_file} -U '#{filter}' >/dev/null 2>&1") + def capture(filter="not ether src host #{@vmnet.bridge_mac} and not ether proto \\arp and not ether proto \\rarp") + job = IO.popen(["/usr/sbin/tcpdump", "-n", "-i", @vmnet.bridge_name, "-w", + @pcap_file, "-U", filter, :err => ["/dev/null", "w"]]) @pid = job.pid end diff --git a/features/support/helpers/sshd_helper.rb b/features/support/helpers/sshd_helper.rb new file mode 100644 index 00000000..2e0069c0 --- /dev/null +++ b/features/support/helpers/sshd_helper.rb @@ -0,0 +1,67 @@ +require 'tempfile' + +class SSHServer + def initialize(sshd_host, sshd_port, authorized_keys = nil) + @sshd_host = sshd_host + @sshd_port = sshd_port + @authorized_keys = authorized_keys + @pid = nil + end + + def start + @sshd_key_file = Tempfile.new("ssh_host_rsa_key", $config["TMPDIR"]) + # 'hack' to prevent ssh-keygen from prompting to overwrite the file + File.delete(@sshd_key_file.path) + cmd_helper(['ssh-keygen', '-t', 'rsa', '-N', "", '-f', "#{@sshd_key_file.path}"]) + @sshd_key_file.close + + sshd_config =<<EOF +Port #{@sshd_port} +ListenAddress #{@sshd_host} +UsePrivilegeSeparation no +HostKey #{@sshd_key_file.path} +Pidfile #{$config['TMPDIR']}/ssh.pid +EOF + + @sshd_config_file = Tempfile.new("sshd_config", $config["TMPDIR"]) + @sshd_config_file.write(sshd_config) + + if @authorized_keys + @authorized_keys_file = Tempfile.new("authorized_keys", $config['TMPDIR']) + @authorized_keys_file.write(@authorized_keys) + @authorized_keys_file.close + @sshd_config_file.write("AuthorizedKeysFile #{@authorized_keys_file.path}") + end + + @sshd_config_file.close + + cmd = ["/usr/sbin/sshd", "-4", "-f", @sshd_config_file.path, "-D"] + + job = IO.popen(cmd) + @pid = job.pid + end + + def stop + File.delete("#{@sshd_key_file.path}.pub") + File.delete("#{$config['TMPDIR']}/ssh.pid") + begin + Process.kill("TERM", @pid) + rescue + # noop + end + end + + def active? + begin + ret = Process.kill(0, @pid) + rescue Errno::ESRCH => e + if e.message == "No such process" + return false + else + raise e + end + end + assert_equal(1, ret, "This shouldn't happen") + return true + end +end diff --git a/features/support/helpers/storage_helper.rb b/features/support/helpers/storage_helper.rb index 80a1e1e0..21537a92 100644 --- a/features/support/helpers/storage_helper.rb +++ b/features/support/helpers/storage_helper.rb @@ -7,30 +7,43 @@ # sense. 
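For the new sshd and sniffing helpers above, the intended call pattern looks roughly like this (addresses, port and key material are placeholders):

    sshd = SSHServer.new('10.2.1.1', 2222, 'ssh-rsa AAAA... test@example.org')
    sshd.start
    # ... exercise the SSH client inside the VM ...
    sshd.stop if sshd.active?

    sniffer = Sniffer.new("TailsToaster", vmnet)  # vmnet: a VMNet, see vm_helper.rb below
    sniffer.capture  # tcpdump on the bridge, ignoring the bridge's own traffic and ARP/RARP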
require 'libvirt' +require 'guestfs' require 'rexml/document' require 'etc' class VMStorage - @@virt = nil - def initialize(virt, xml_path) - @@virt ||= virt + @virt = virt @xml_path = xml_path pool_xml = REXML::Document.new(File.read("#{@xml_path}/storage_pool.xml")) pool_name = pool_xml.elements['pool/name'].text + @pool_path = "#{$config["TMPDIR"]}/#{pool_name}" begin - @pool = @@virt.lookup_storage_pool_by_name(pool_name) + @pool = @virt.lookup_storage_pool_by_name(pool_name) rescue Libvirt::RetrieveError - # There's no pool with that name, so we don't have to clear it - else + @pool = nil + end + if @pool and not(KEEP_SNAPSHOTS) VMStorage.clear_storage_pool(@pool) + @pool = nil + end + unless @pool + pool_xml.elements['pool/target/path'].text = @pool_path + @pool = @virt.define_storage_pool_xml(pool_xml.to_s) + if not(Dir.exists?(@pool_path)) + # We'd like to use @pool.build, which will just create the + # @pool_path directory, but it does so with root:root as owner + # (at least with libvirt 1.2.21-2). libvirt itself can handle + # that situation, but guestfs (at least with <= + # 1:1.28.12-1+b3) cannot when invoked by a non-root user, + # which we want to support. + FileUtils.mkdir(@pool_path) + FileUtils.chown(nil, 'libvirt-qemu', @pool_path) + FileUtils.chmod("ug+wrx", @pool_path) + end end - @pool_path = "#{$tmp_dir}/#{pool_name}" - pool_xml.elements['pool/target/path'].text = @pool_path - @pool = @@virt.define_storage_pool_xml(pool_xml.to_s) - @pool.build - @pool.create + @pool.create unless @pool.active? @pool.refresh end @@ -65,10 +78,23 @@ class VMStorage VMStorage.clear_storage_pool_volumes(@pool) end + def delete_volume(name) + @pool.lookup_volume_by_name(name).delete + end + def create_new_disk(name, options = {}) options[:size] ||= 2 options[:unit] ||= "GiB" options[:type] ||= "qcow2" + # Require 'slightly' more space to be available to give a bit more leeway + # with rounding, temp file creation, etc. + reserved = 500 + needed = convert_to_MiB(options[:size].to_i, options[:unit]) + avail = convert_to_MiB(get_free_space('host', @pool_path), "KiB") + assert(avail - reserved >= needed, + "Error creating disk \"#{name}\" in \"#{@pool_path}\". " \ + "Need #{needed} MiB but only #{avail} MiB is available of " \ + "which #{reserved} MiB is reserved for other temporary files.") begin old_vol = @pool.lookup_volume_by_name(name) rescue Libvirt::RetrieveError @@ -116,28 +142,75 @@ class VMStorage @pool.lookup_volume_by_name(name).path end - # We use parted for the disk_mk* functions since it can format - # partitions "inside" the super block device; mkfs.* need a - # partition device (think /dev/sdaX), so we'd have to use something - # like losetup or kpartx, which would require administrative - # privileges. These functions only work for raw disk images. - - # TODO: We should switch to guestfish/libguestfs (which has - # ruby-bindings) so we could use qcow2 instead of raw, and more - # easily use LVM volumes. 
- - # For type, see label-type for mklabel in parted(8) - def disk_mklabel(name, type) - assert_equal("raw", disk_format(name)) - path = disk_path(name) - cmd_helper("/sbin/parted -s '#{path}' mklabel #{type}") + def disk_mklabel(name, parttype) + disk = { + :path => disk_path(name), + :opts => { + :format => disk_format(name) + } + } + guestfs_disk_helper(disk) do |g, disk_handle| + g.part_init(disk_handle, parttype) + end end - # For fstype, see fs-type for mkfs in parted(8) - def disk_mkpartfs(name, fstype) - assert(disk_format(name), "raw") - path = disk_path(name) - cmd_helper("/sbin/parted -s '#{path}' mkpartfs primary '#{fstype}' 0% 100%") + def disk_mkpartfs(name, parttype, fstype, opts = {}) + opts[:label] ||= nil + opts[:luks_password] ||= nil + disk = { + :path => disk_path(name), + :opts => { + :format => disk_format(name) + } + } + guestfs_disk_helper(disk) do |g, disk_handle| + g.part_disk(disk_handle, parttype) + g.part_set_name(disk_handle, 1, opts[:label]) if opts[:label] + primary_partition = g.list_partitions()[0] + if opts[:luks_password] + g.luks_format(primary_partition, opts[:luks_password], 0) + luks_mapping = File.basename(primary_partition) + "_unlocked" + g.luks_open(primary_partition, opts[:luks_password], luks_mapping) + luks_dev = "/dev/mapper/#{luks_mapping}" + g.mkfs(fstype, luks_dev) + g.luks_close(luks_dev) + else + g.mkfs(fstype, primary_partition) + end + end + end + + def disk_mkswap(name, parttype) + disk = { + :path => disk_path(name), + :opts => { + :format => disk_format(name) + } + } + guestfs_disk_helper(disk) do |g, disk_handle| + g.part_disk(disk_handle, parttype) + primary_partition = g.list_partitions()[0] + g.mkswap(primary_partition) + end + end + + def guestfs_disk_helper(*disks) + assert(block_given?) + g = Guestfs::Guestfs.new() + g.set_trace(1) + message_callback = Proc.new do |event, _, message, _| + debug_log("libguestfs: #{Guestfs.event_to_string(event)}: #{message}") + end + g.set_event_callback(message_callback, + Guestfs::EVENT_TRACE) + g.set_autosync(1) + disks.each do |disk| + g.add_drive_opts(disk[:path], disk[:opts]) + end + g.launch() + yield(g, *g.list_devices()) + ensure + g.close end end diff --git a/features/support/helpers/vm_helper.rb b/features/support/helpers/vm_helper.rb index 2b5ad291..6d7204d4 100644 --- a/features/support/helpers/vm_helper.rb +++ b/features/support/helpers/vm_helper.rb @@ -1,79 +1,122 @@ require 'libvirt' require 'rexml/document' -class VM +class ExecutionFailedInVM < StandardError +end + +class VMNet - # These class attributes will be lazily initialized during the first - # instantiation: - # This is the libvirt connection, of which we only want one and - # which can persist for different VM instances (even in parallel) - @@virt = nil - # This is a storage helper that deals with volume manipulation. The - # storage it deals with persists across VMs, by necessity. 
- @@storage = nil + attr_reader :net_name, :net - def VM.storage - return @@storage + def initialize(virt, xml_path) + @virt = virt + @net_name = LIBVIRT_NETWORK_NAME + net_xml = File.read("#{xml_path}/default_net.xml") + rexml = REXML::Document.new(net_xml) + rexml.elements['network'].add_element('name') + rexml.elements['network/name'].text = @net_name + rexml.elements['network'].add_element('uuid') + rexml.elements['network/uuid'].text = LIBVIRT_NETWORK_UUID + update(rexml.to_s) + rescue Exception => e + destroy_and_undefine + raise e end - def storage - return @@storage + # We lookup by name so we also catch networks from previous test + # suite runs that weren't properly cleaned up (e.g. aborted). + def destroy_and_undefine + begin + old_net = @virt.lookup_network_by_name(@net_name) + old_net.destroy if old_net.active? + old_net.undefine + rescue + end end - attr_reader :domain, :display, :ip, :net + def update(xml) + destroy_and_undefine + @net = @virt.define_network_xml(xml) + @net.create + end + + def bridge_name + @net.bridge_name + end + + def bridge_ip_addr + net_xml = REXML::Document.new(@net.xml_desc) + IPAddr.new(net_xml.elements['network/ip'].attributes['address']).to_s + end + + def guest_real_mac + net_xml = REXML::Document.new(@net.xml_desc) + net_xml.elements['network/ip/dhcp/host/'].attributes['mac'] + end - def initialize(xml_path, x_display) - @@virt ||= Libvirt::open("qemu:///system") + def bridge_mac + File.open("/sys/class/net/#{bridge_name}/address", "rb").read.chomp + end +end + + +class VM + + attr_reader :domain, :display, :vmnet, :storage + + def initialize(virt, xml_path, vmnet, storage, x_display) + @virt = virt @xml_path = xml_path + @vmnet = vmnet + @storage = storage + @domain_name = LIBVIRT_DOMAIN_NAME default_domain_xml = File.read("#{@xml_path}/default.xml") - update_domain(default_domain_xml) - default_net_xml = File.read("#{@xml_path}/default_net.xml") - update_net(default_net_xml) + rexml = REXML::Document.new(default_domain_xml) + rexml.elements['domain'].add_element('name') + rexml.elements['domain/name'].text = @domain_name + rexml.elements['domain'].add_element('uuid') + rexml.elements['domain/uuid'].text = LIBVIRT_DOMAIN_UUID + update(rexml.to_s) @display = Display.new(@domain_name, x_display) - set_cdrom_boot($tails_iso) + set_cdrom_boot(TAILS_ISO) plug_network - # unlike the domain and net the storage pool should survive VM - # teardown (so a new instance can use e.g. a previously created - # USB drive), so we only create a new one if there is none. - @@storage ||= VMStorage.new(@@virt, xml_path) rescue Exception => e - clean_up_net - clean_up_domain + destroy_and_undefine raise e end - def update_domain(xml) - domain_xml = REXML::Document.new(xml) - @domain_name = domain_xml.elements['domain/name'].text - clean_up_domain - @domain = @@virt.define_domain_xml(xml) - end - - def update_net(xml) - net_xml = REXML::Document.new(xml) - @net_name = net_xml.elements['network/name'].text - @ip = net_xml.elements['network/ip/dhcp/host/'].attributes['ip'] - clean_up_net - @net = @@virt.define_network_xml(xml) - @net.create + def update(xml) + destroy_and_undefine + @domain = @virt.define_domain_xml(xml) end - def clean_up_domain + # We lookup by name so we also catch domains from previous test + # suite runs that weren't properly cleaned up (e.g. aborted). + def destroy_and_undefine + @display.stop if @display && @display.active? begin - domain = @@virt.lookup_domain_by_name(@domain_name) - domain.destroy if domain.active? 
- domain.undefine + old_domain = @virt.lookup_domain_by_name(@domain_name) + old_domain.destroy if old_domain.active? + old_domain.undefine rescue end end - def clean_up_net - begin - net = @@virt.lookup_network_by_name(@net_name) - net.destroy if net.active? - net.undefine - rescue - end + def real_mac + @vmnet.guest_real_mac + end + + def set_hardware_clock(time) + assert(not(is_running?), 'The hardware clock cannot be set when the ' + + 'VM is running') + assert(time.instance_of?(Time), "Argument must be of type 'Time'") + adjustment = (time - Time.now).to_i + domain_rexml = REXML::Document.new(@domain.xml_desc) + clock_rexml_element = domain_rexml.elements['domain'].add_element('clock') + clock_rexml_element.add_attributes('offset' => 'variable', + 'basis' => 'utc', + 'adjustment' => adjustment.to_s) + update(domain_rexml.to_s) end def set_network_link_state(state) @@ -82,7 +125,7 @@ class VM if is_running? @domain.update_device(domain_xml.elements['domain/devices/interface'].to_s) else - update_domain(domain_xml.to_s) + update(domain_xml.to_s) end end @@ -94,97 +137,101 @@ class VM set_network_link_state('down') end - def set_cdrom_tray_state(state) - domain_xml = REXML::Document.new(@domain.xml_desc) - domain_xml.elements.each('domain/devices/disk') do |e| - if e.attribute('device').to_s == "cdrom" - e.elements['target'].attributes['tray'] = state - if is_running? - @domain.update_device(e.to_s) - else - update_domain(domain_xml.to_s) - end - end - end - end - - def eject_cdrom - set_cdrom_tray_state('open') - end - - def close_cdrom - set_cdrom_tray_state('closed') - end - def set_boot_device(dev) if is_running? raise "boot settings can only be set for inactive vms" end domain_xml = REXML::Document.new(@domain.xml_desc) domain_xml.elements['domain/os/boot'].attributes['dev'] = dev - update_domain(domain_xml.to_s) + update(domain_xml.to_s) end def set_cdrom_image(image) + image = nil if image == '' domain_xml = REXML::Document.new(@domain.xml_desc) domain_xml.elements.each('domain/devices/disk') do |e| if e.attribute('device').to_s == "cdrom" - if ! e.elements['source'] - e.add_element('source') + if image.nil? + e.elements.delete('source') + else + if ! e.elements['source'] + e.add_element('source') + end + e.elements['source'].attributes['file'] = image end - e.elements['source'].attributes['file'] = image if is_running? - @domain.update_device(e.to_s, Libvirt::Domain::DEVICE_MODIFY_FORCE) + @domain.update_device(e.to_s) else - update_domain(domain_xml.to_s) + update(domain_xml.to_s) end end end end def remove_cdrom - set_cdrom_image('') + set_cdrom_image(nil) + rescue Libvirt::Error => e + # While the CD-ROM is removed successfully we still get this + # error, so let's ignore it. + acceptable_error = + "Call to virDomainUpdateDeviceFlags failed: internal error: unable to " + + "execute QEMU command 'eject': (Tray of device '.*' is not open|" + + "Device '.*' is locked)" + raise e if not(Regexp.new(acceptable_error).match(e.to_s)) end def set_cdrom_boot(image) if is_running? 
- raise "boot settings can only be set for inactice vms" + raise "boot settings can only be set for inactive vms" end set_boot_device('cdrom') set_cdrom_image(image) - close_cdrom end - def plug_drive(name, type) - # Get the next free /dev/sdX on guest - used_devs = [] + def list_disk_devs + ret = [] domain_xml = REXML::Document.new(@domain.xml_desc) - domain_xml.elements.each('domain/devices/disk/target') do |e| - used_devs <<= e.attribute('dev').to_s + domain_xml.elements.each('domain/devices/disk') do |e| + ret << e.elements['target'].attribute('dev').to_s + end + return ret + end + + def plug_drive(name, type) + if disk_plugged?(name) + raise "disk '#{name}' already plugged" end + removable_usb = nil + case type + when "removable usb", "usb" + type = "usb" + removable_usb = "on" + when "non-removable usb" + type = "usb" + removable_usb = "off" + end + # Get the next free /dev/sdX on guest letter = 'a' dev = "sd" + letter - while used_devs.include? dev + while list_disk_devs.include?(dev) letter = (letter[0].ord + 1).chr dev = "sd" + letter end assert letter <= 'z' xml = REXML::Document.new(File.read("#{@xml_path}/disk.xml")) - xml.elements['disk/source'].attributes['file'] = @@storage.disk_path(name) - xml.elements['disk/driver'].attributes['type'] = @@storage.disk_format(name) + xml.elements['disk/source'].attributes['file'] = @storage.disk_path(name) + xml.elements['disk/driver'].attributes['type'] = @storage.disk_format(name) xml.elements['disk/target'].attributes['dev'] = dev xml.elements['disk/target'].attributes['bus'] = type - if type == "usb" - xml.elements['disk/target'].attributes['removable'] = 'on' - end + xml.elements['disk/target'].attributes['removable'] = removable_usb if removable_usb if is_running? @domain.attach_device(xml.to_s) else domain_xml = REXML::Document.new(@domain.xml_desc) domain_xml.elements['domain/devices'].add_element(xml) - update_domain(domain_xml.to_s) + update(domain_xml.to_s) end end @@ -192,7 +239,7 @@ class VM domain_xml = REXML::Document.new(@domain.xml_desc) domain_xml.elements.each('domain/devices/disk') do |e| begin - if e.elements['source'].attribute('file').to_s == @@storage.disk_path(name) + if e.elements['source'].attribute('file').to_s == @storage.disk_path(name) return e.to_s end rescue @@ -202,25 +249,64 @@ class VM return nil end + def disk_rexml_desc(name) + xml = disk_xml_desc(name) + if xml + return REXML::Document.new(xml) + else + return nil + end + end + def unplug_drive(name) xml = disk_xml_desc(name) @domain.detach_device(xml) end + def disk_type(dev) + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements.each('domain/devices/disk') do |e| + if e.elements['target'].attribute('dev').to_s == dev + return e.elements['driver'].attribute('type').to_s + end + end + raise "No such disk device '#{dev}'" + end + def disk_dev(name) - xml = REXML::Document.new(disk_xml_desc(name)) - return "/dev/" + xml.elements['disk/target'].attribute('dev').to_s + rexml = disk_rexml_desc(name) or return nil + return "/dev/" + rexml.elements['disk/target'].attribute('dev').to_s + end + + def disk_name(dev) + dev = File.basename(dev) + domain_xml = REXML::Document.new(@domain.xml_desc) + domain_xml.elements.each('domain/devices/disk') do |e| + if /^#{e.elements['target'].attribute('dev').to_s}/.match(dev) + return File.basename(e.elements['source'].attribute('file').to_s) + end + end + raise "No such disk device '#{dev}'" + end + + def udisks_disk_dev(name) + return disk_dev(name).gsub('/dev/', '/org/freedesktop/UDisks/devices/') 
 end

 def disk_detected?(name)
- return execute("test -b #{disk_dev(name)}").success?
+ dev = disk_dev(name) or return false
+ return execute("test -b #{dev}").success?
+ end
+
+ def disk_plugged?(name)
+ return not(disk_xml_desc(name).nil?)
 end

 def set_disk_boot(name, type)
 if is_running?
 raise "boot settings can only be set for inactive vms"
 end
- plug_drive(name, type)
+ plug_drive(name, type) if not(disk_plugged?(name))
 set_boot_device('hd')
 # For some reason setting the boot device doesn't prevent cdrom
 # boot unless it's empty
@@ -231,14 +317,19 @@ class VM
 # XXX-9p in common_steps.rb for more information.
 def add_share(source, tag)
 if is_running?
- raise "shares can only be added to inactice vms"
+ raise "shares can only be added to inactive vms"
 end
+ # The complete source directory must be group readable by the user
+ # running the virtual machine, and world readable so the user inside
+ # the VM can access it (since we use the passthrough security model).
+ FileUtils.chown_R(nil, "libvirt-qemu", source)
+ FileUtils.chmod_R("go+rX", source)
 xml = REXML::Document.new(File.read("#{@xml_path}/fs_share.xml"))
 xml.elements['filesystem/source'].attributes['dir'] = source
 xml.elements['filesystem/target'].attributes['dir'] = tag
 domain_xml = REXML::Document.new(@domain.xml_desc)
 domain_xml.elements['domain/devices'].add_element(xml)
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
 end

 def list_shares
@@ -251,13 +342,13 @@ class VM
 end

 def set_ram_size(size, unit = "KiB")
- raise "System memory can only be added to inactice vms" if is_running?
+ raise "System memory can only be added to inactive vms" if is_running?
 domain_xml = REXML::Document.new(@domain.xml_desc)
 domain_xml.elements['domain/memory'].text = size
 domain_xml.elements['domain/memory'].attributes['unit'] = unit
 domain_xml.elements['domain/currentMemory'].text = size
 domain_xml.elements['domain/currentMemory'].attributes['unit'] = unit
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
 end

 def get_ram_size_in_bytes
@@ -268,24 +359,24 @@ class VM
 end

 def set_arch(arch)
- raise "System architecture can only be set to inactice vms" if is_running?
+ raise "System architecture can only be set to inactive vms" if is_running?
 domain_xml = REXML::Document.new(@domain.xml_desc)
 domain_xml.elements['domain/os/type'].attributes['arch'] = arch
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
 end

 def add_hypervisor_feature(feature)
- raise "Hypervisor features can only be added to inactice vms" if is_running?
+ raise "Hypervisor features can only be added to inactive vms" if is_running?
 domain_xml = REXML::Document.new(@domain.xml_desc)
 domain_xml.elements['domain/features'].add_element(feature)
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
 end

 def drop_hypervisor_feature(feature)
- raise "Hypervisor features can only be fropped from inactice vms" if is_running?
+ raise "Hypervisor features can only be dropped from inactive vms" if is_running?
domain_xml = REXML::Document.new(@domain.xml_desc) domain_xml.elements['domain/features'].delete_element(feature) - update_domain(domain_xml.to_s) + update(domain_xml.to_s) end def disable_pae_workaround @@ -295,24 +386,24 @@ class VM xml = <<EOF <qemu:commandline xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'> <qemu:arg value='-cpu'/> - <qemu:arg value='pentium,-pae'/> + <qemu:arg value='qemu32,-pae'/> </qemu:commandline> EOF domain_xml = REXML::Document.new(@domain.xml_desc) domain_xml.elements['domain'].add_element(REXML::Document.new(xml)) - update_domain(domain_xml.to_s) + update(domain_xml.to_s) end def set_os_loader(type) if is_running? - raise "boot settings can only be set for inactice vms" + raise "boot settings can only be set for inactive vms" end if type == 'UEFI' domain_xml = REXML::Document.new(@domain.xml_desc) domain_xml.elements['domain/os'].add_element(REXML::Document.new( '<loader>/usr/share/ovmf/OVMF.fd</loader>' )) - update_domain(domain_xml.to_s) + update(domain_xml.to_s) else raise "unsupported OS loader type" end @@ -326,21 +417,38 @@ EOF end end - def execute(cmd, user = "root") - return VMCommand.new(self, cmd, { :user => user, :spawn => false }) + def execute(cmd, options = {}) + options[:user] ||= "root" + options[:spawn] ||= false + if options[:libs] + libs = options[:libs] + options.delete(:libs) + libs = [libs] if not(libs.methods.include? :map) + cmds = libs.map do |lib_name| + ". /usr/local/lib/tails-shell-library/#{lib_name}.sh" + end + cmds << cmd + cmd = cmds.join(" && ") + end + return VMCommand.new(self, cmd, options) end - def execute_successfully(cmd, user = "root") - p = execute(cmd, user) - assert_vmcommand_success(p) + def execute_successfully(*args) + p = execute(*args) + begin + assert_vmcommand_success(p) + rescue Test::Unit::AssertionFailedError => e + raise ExecutionFailedInVM.new(e) + end return p end - def spawn(cmd, user = "root") - return VMCommand.new(self, cmd, { :user => user, :spawn => true }) + def spawn(cmd, options = {}) + options[:spawn] = true + return execute(cmd, options) end - def wait_until_remote_shell_is_up(timeout = 30) + def wait_until_remote_shell_is_up(timeout = 90) VMCommand.wait_until_remote_shell_is_up(self, timeout) end @@ -361,32 +469,182 @@ EOF return execute("pidof -x -o '%PPID' " + process).stdout.chomp.split end + def select_virtual_desktop(desktop_number, user = LIVE_USER) + assert(desktop_number >= 0 && desktop_number <=3, + "Only values between 0 and 3 are valid virtual desktop numbers") + execute_successfully( + "xdotool set_desktop '#{desktop_number}'", + :user => user + ) + end + + def focus_window(window_title, user = LIVE_USER) + def do_focus(window_title, user) + execute_successfully( + "xdotool search --name '#{window_title}' windowactivate --sync", + :user => user + ) + end + + begin + do_focus(window_title, user) + rescue ExecutionFailedInVM + # Often when xdotool fails to focus a window it'll work when retried + # after redrawing the screen. Switching to a new virtual desktop then + # back seems to be a reliable way to handle this. + select_virtual_desktop(3) + select_virtual_desktop(0) + sleep 5 # there aren't any visual indicators which can be used here + do_focus(window_title, user) + end + end + def file_exist?(file) - execute("test -e #{file}").success? + execute("test -e '#{file}'").success? + end + + def directory_exist?(directory) + execute("test -d '#{directory}'").success? 
 end

 def file_content(file, user = 'root')
 # We don't quote #{file} on purpose: we sometimes pass environment variables
 # or globs that we want to be interpreted by the shell.
- cmd = execute("cat #{file}", user)
+ cmd = execute("cat #{file}", :user => user)
 assert(cmd.success?, "Could not cat '#{file}':\n#{cmd.stdout}\n#{cmd.stderr}")
 return cmd.stdout
 end

- def save_snapshot(path)
- @domain.save(path)
- @display.stop
+ def file_append(file, lines, user = 'root')
+ lines = lines.split("\n") if lines.class == String
+ lines.each do |line|
+ cmd = execute("echo '#{line}' >> '#{file}'", :user => user)
+ assert(cmd.success?,
+ "Could not append to '#{file}':\n#{cmd.stdout}\n#{cmd.stderr}")
+ end
+ end
+
+ def set_clipboard(text)
+ execute_successfully("echo -n '#{text}' | xsel --input --clipboard",
+ :user => LIVE_USER)
 end

- def restore_snapshot(path)
- # Clean up current domain so its snapshot can be restored
- clean_up_domain
- Libvirt::Domain::restore(@@virt, path)
- @domain = @@virt.lookup_domain_by_name(@domain_name)
+ def get_clipboard
+ execute_successfully("xsel --output --clipboard", :user => LIVE_USER).stdout
+ end
+
+ def internal_snapshot_xml(name)
+ disk_devs = list_disk_devs
+ disks_xml = " <disks>\n"
+ for dev in disk_devs
+ snapshot_type = disk_type(dev) == "qcow2" ? 'internal' : 'no'
+ disks_xml +=
+ " <disk name='#{dev}' snapshot='#{snapshot_type}'></disk>\n"
+ end
+ disks_xml += " </disks>"
+ return <<-EOF
+<domainsnapshot>
+ <name>#{name}</name>
+ <description>Snapshot for #{name}</description>
+#{disks_xml}
+ </domainsnapshot>
+EOF
+ end
+
+ def VM.ram_only_snapshot_path(name)
+ return "#{$config["TMPDIR"]}/#{name}-snapshot.memstate"
+ end
+
+ def save_snapshot(name)
+ # If we have no qcow2 disk device, we'll use "memory state"
+ # snapshots, and if we have at least one qcow2 disk device, we'll
+ # use internal "system checkpoint" (memory + disks) snapshots. We
+ # have to do this since internal snapshots don't work when no
+ # such disk is available. We can do this with external snapshots,
+ # which are better in many ways, but libvirt doesn't know how to
+ # restore (revert back to) them yet.
+ # WARNING: If only transient disks, i.e. disks that were plugged
+ # after starting the domain, are used then the memory state will
+ # be dropped. External snapshots would also fix this.
+ internal_snapshot = false
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ if e.elements['driver'].attribute('type').to_s == "qcow2"
+ internal_snapshot = true
+ break
+ end
+ end
+
+ # Note: In this case the "opposite" of `internal_snapshot` is not
+ # anything relating to external snapshots, but actually "memory
+ # state"(-only) snapshots.
+ if internal_snapshot
+ xml = internal_snapshot_xml(name)
+ @domain.snapshot_create_xml(xml)
+ else
+ snapshot_path = VM.ram_only_snapshot_path(name)
+ @domain.save(snapshot_path)
+ # For consistency with the internal snapshot case (which is
+ # "live", so the domain doesn't go down) we immediately restore
+ # the snapshot.
+ # Assumption: that *immediate* save + restore doesn't mess up
+ # network state and similar, and is fast enough to not make
+ # the clock drift too much.
+ restore_snapshot(name)
+ end
+ end
+
+ def restore_snapshot(name)
+ @domain.destroy if is_running?
+ @display.stop if @display and @display.active?
+ # See comment in save_snapshot() for details on why we use two
+ # different types of snapshots.
+ potential_ram_only_snapshot_path = VM.ram_only_snapshot_path(name) + if File.exist?(potential_ram_only_snapshot_path) + Libvirt::Domain::restore(@virt, potential_ram_only_snapshot_path) + @domain = @virt.lookup_domain_by_name(@domain_name) + else + begin + potential_internal_snapshot = @domain.lookup_snapshot_by_name(name) + @domain.revert_to_snapshot(potential_internal_snapshot) + rescue Libvirt::RetrieveError + raise "No such (internal nor external) snapshot #{name}" + end + end @display.start end + def VM.remove_snapshot(name) + old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME) + potential_ram_only_snapshot_path = VM.ram_only_snapshot_path(name) + if File.exist?(potential_ram_only_snapshot_path) + File.delete(potential_ram_only_snapshot_path) + else + snapshot = old_domain.lookup_snapshot_by_name(name) + snapshot.delete + end + end + + def VM.snapshot_exists?(name) + return true if File.exist?(VM.ram_only_snapshot_path(name)) + old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME) + snapshot = old_domain.lookup_snapshot_by_name(name) + return snapshot != nil + rescue Libvirt::RetrieveError + return false + end + + def VM.remove_all_snapshots + Dir.glob("#{$config["TMPDIR"]}/*-snapshot.memstate").each do |file| + File.delete(file) + end + old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME) + old_domain.list_all_snapshots.each { |snapshot| snapshot.delete } + rescue Libvirt::RetrieveError + # No such domain, so no snapshots either. + end + def start return if is_running? @domain.create @@ -394,9 +652,7 @@ EOF end def reset - # ruby-libvirt 0.4 does not support the reset method. - # XXX: Once we use Jessie, use @domain.reset instead. - system("virsh -c qemu:///system reset " + @domain_name) if is_running? + @domain.reset if is_running? end def power_off @@ -404,12 +660,6 @@ EOF @display.stop end - def destroy - clean_up_domain - clean_up_net - power_off - end - def take_screenshot(description) @display.take_screenshot(description) end diff --git a/features/support/hooks.rb b/features/support/hooks.rb index d9dc03a7..be8a0235 100644 --- a/features/support/hooks.rb +++ b/features/support/hooks.rb @@ -1,55 +1,145 @@ require 'fileutils' +require 'rb-inotify' require 'time' require 'tmpdir' -# For @product tests -#################### - -def delete_snapshot(snapshot) - if snapshot and File.exist?(snapshot) - File.delete(snapshot) +# Run once, before any feature +AfterConfiguration do |config| + # Reorder the execution of some features. As we progress through a + # run we accumulate more and more snapshots and hence use more and + # more disk space, but some features will leave nothing behind + # and/or possibly use large amounts of disk space temporarily for + # various reasons. By running these first we minimize the amount of + # disk space needed. + prioritized_features = [ + # Features not using snapshots but using large amounts of scratch + # space for other reasons: + 'features/erase_memory.feature', + 'features/untrusted_partitions.feature', + # Features using temporary snapshots: + 'features/apt.feature', + 'features/i2p.feature', + 'features/root_access_control.feature', + 'features/time_syncing.feature', + 'features/tor_bridges.feature', + # This feature needs the almost biggest snapshot (USB install, + # excluding persistence) and will create yet another disk and + # install Tails on it. This should be the peak of disk usage. 
+ 'features/usb_install.feature',
+ ]
+ feature_files = config.feature_files
+ # The &-intersection is specified to keep the element ordering of
+ # the *left* operand.
+ intersection = prioritized_features & feature_files
+ if not intersection.empty?
+ feature_files -= intersection
+ feature_files = intersection + feature_files
+ config.define_singleton_method(:feature_files) { feature_files }
 end
-rescue Errno::EACCES => e
- STDERR.puts "Couldn't delete background snapshot: #{e.to_s}"
-end
-def delete_all_snapshots
- Dir.glob("#{$tmp_dir}/*.state").each do |snapshot|
- delete_snapshot(snapshot)
- end
-end
+ # Used to keep track of when we start our first @product feature, when
+ # we'll do some special things.
+ $started_first_product_feature = false

-BeforeFeature('@product') do |feature|
- if File.exist?($tmp_dir)
- if !File.directory?($tmp_dir)
- raise "Temporary directory '#{$tmp_dir}' exists but is not a " +
+ if File.exist?($config["TMPDIR"])
+ if !File.directory?($config["TMPDIR"])
+ raise "Temporary directory '#{$config["TMPDIR"]}' exists but is not a " +
 "directory"
 end
- if !File.owned?($tmp_dir)
- raise "Temporary directory '#{$tmp_dir}' must be owned by the " +
+ if !File.owned?($config["TMPDIR"])
+ raise "Temporary directory '#{$config["TMPDIR"]}' must be owned by the " +
 "current user"
 end
- FileUtils.chmod(0755, $tmp_dir)
+ FileUtils.chmod(0755, $config["TMPDIR"])
 else
 begin
- Dir.mkdir($tmp_dir)
+ FileUtils.mkdir_p($config["TMPDIR"])
 rescue Errno::EACCES => e
 raise "Cannot create temporary directory: #{e.to_s}"
 end
 end
- delete_all_snapshots if !$keep_snapshots
- if $tails_iso.nil?
+
+ # Start a thread that monitors a pseudo fifo file and debug_log():s
+ # anything written to it "immediately" (well, as fast as inotify
+ # detects it). We're forced to a convoluted solution like this
+ # because CRuby's thread support is horrible as soon as IO is mixed
+ # in (other threads get blocked).
+ FileUtils.rm(DEBUG_LOG_PSEUDO_FIFO) if File.exist?(DEBUG_LOG_PSEUDO_FIFO)
+ FileUtils.touch(DEBUG_LOG_PSEUDO_FIFO)
+ at_exit do
+ FileUtils.rm(DEBUG_LOG_PSEUDO_FIFO) if File.exist?(DEBUG_LOG_PSEUDO_FIFO)
+ end
+ Thread.new do
+ File.open(DEBUG_LOG_PSEUDO_FIFO) do |fd|
+ watcher = INotify::Notifier.new
+ watcher.watch(DEBUG_LOG_PSEUDO_FIFO, :modify) do
+ line = fd.read.chomp
+ debug_log(line) if line and line.length > 0
+ end
+ watcher.run
+ end
+ end
+ # Fix Sikuli's debug_log():ing.
+ bind_java_to_pseudo_fifo_logger
+end
+
+# Common
+########
+
+After do
+ if @after_scenario_hooks
+ @after_scenario_hooks.each { |block| block.call }
+ end
+ @after_scenario_hooks = Array.new
+end
+
+BeforeFeature('@product', '@source') do |feature|
+ raise "Feature #{feature.file} is tagged both @product and @source, " +
+ "which is an impossible combination"
+end
+
+at_exit do
+ $vm.destroy_and_undefine if $vm
+ if $virt
+ unless KEEP_SNAPSHOTS
+ VM.remove_all_snapshots
+ $vmstorage.clear_pool
+ end
+ $vmnet.destroy_and_undefine
+ $virt.close
+ end
+ # The artifacts directory is empty (and useless) if it contains
+ # nothing but the mandatory . and ..
+ if Dir.entries(ARTIFACTS_DIR).size <= 2
+ FileUtils.rmdir(ARTIFACTS_DIR)
+ end
+end
+
+# For @product tests
+####################
+
+def add_after_scenario_hook(&block)
+ @after_scenario_hooks ||= Array.new
+ @after_scenario_hooks << block
+end
+
+def save_failure_artifact(type, path)
+ $failure_artifacts << [type, path]
+end
+
+BeforeFeature('@product') do |feature|
+ if TAILS_ISO.nil?
raise "No Tails ISO image specified, and none could be found in the " + "current directory" end - if File.exist?($tails_iso) + if File.exist?(TAILS_ISO) # Workaround: when libvirt takes ownership of the ISO image it may # become unreadable for the live user inside the guest in the # host-to-guest share used for some tests. - if !File.world_readable?($tails_iso) - if File.owned?($tails_iso) - File.chmod(0644, $tails_iso) + if !File.world_readable?(TAILS_ISO) + if File.owned?(TAILS_ISO) + File.chmod(0644, TAILS_ISO) else raise "warning: the Tails ISO image must be world readable or be " + "owned by the current user to be available inside the guest " + @@ -57,78 +147,120 @@ BeforeFeature('@product') do |feature| end end else - raise "The specified Tails ISO image '#{$tails_iso}' does not exist" + raise "The specified Tails ISO image '#{TAILS_ISO}' does not exist" + end + if !File.exist?(OLD_TAILS_ISO) + raise "The specified old Tails ISO image '#{OLD_TAILS_ISO}' does not exist" + end + if not($started_first_product_feature) + $virt = Libvirt::open("qemu:///system") + VM.remove_all_snapshots if !KEEP_SNAPSHOTS + $vmnet = VMNet.new($virt, VM_XML_PATH) + $vmstorage = VMStorage.new($virt, VM_XML_PATH) + $started_first_product_feature = true end - puts "Testing ISO image: #{File.basename($tails_iso)}" - base = File.basename(feature.file, ".feature").to_s - $background_snapshot = "#{$tmp_dir}/#{base}_background.state" end AfterFeature('@product') do - delete_snapshot($background_snapshot) if !$keep_snapshots - VM.storage.clear_volumes if VM.storage -end - -BeforeFeature('@product', '@old_iso') do - if $old_tails_iso.nil? - raise "No old Tails ISO image specified, and none could be found in the " + - "current directory" - end - if !File.exist?($old_tails_iso) - raise "The specified old Tails ISO image '#{$old_tails_iso}' does not exist" - end - if $tails_iso == $old_tails_iso - raise "The old Tails ISO is the same as the Tails ISO we're testing" + unless KEEP_SNAPSHOTS + checkpoints.each do |name, vals| + if vals[:temporary] and VM.snapshot_exists?(name) + VM.remove_snapshot(name) + end + end end - puts "Using old ISO image: #{File.basename($old_tails_iso)}" end -# BeforeScenario -Before('@product') do - @screen = Sikuli::Screen.new - if File.size?($background_snapshot) - @skip_steps_while_restoring_background = true - else - @skip_steps_while_restoring_background = false +# Cucumber Before hooks are executed in the order they are listed, and +# we want this hook to always run first, so it must always be the +# *first* Before hook matching @product listed in this file. 
+Before('@product') do |scenario| + $failure_artifacts = Array.new + if $config["CAPTURE"] + video_name = sanitize_filename("#{scenario.name}.mkv") + @video_path = "#{ARTIFACTS_DIR}/#{video_name}" + capture = IO.popen(['avconv', + '-f', 'x11grab', + '-s', '1024x768', + '-r', '15', + '-i', "#{$config['DISPLAY']}.0", + '-an', + '-c:v', 'libx264', + '-y', + @video_path, + :err => ['/dev/null', 'w'], + ]) + @video_capture_pid = capture.pid end - @theme = "gnome" + @screen = Sikuli::Screen.new + # English will be assumed if this is not overridden + @language = "" @os_loader = "MBR" + @sudo_password = "asdf" + @persistence_password = "asdf" end -# AfterScenario +# Cucumber After hooks are executed in the *reverse* order they are +# listed, and we want this hook to always run second last, so it must always +# be the *second* After hook matching @product listed in this file -- +# hooks added dynamically via add_after_scenario_hook() are supposed to +# truly be last. After('@product') do |scenario| - if (scenario.status != :passed) - time_of_fail = Time.now - $time_at_start + if @video_capture_pid + # We can be incredibly fast at detecting errors sometimes, so the + # screen barely "settles" when we end up here and kill the video + # capture. Let's wait a few seconds more to make it easier to see + # what the error was. + sleep 3 if scenario.failed? + Process.kill("INT", @video_capture_pid) + save_failure_artifact("Video", @video_path) + end + if scenario.failed? + time_of_fail = Time.now - TIME_AT_START secs = "%02d" % (time_of_fail % 60) mins = "%02d" % ((time_of_fail / 60) % 60) hrs = "%02d" % (time_of_fail / (60*60)) - STDERR.puts "Scenario failed at time #{hrs}:#{mins}:#{secs}" - base = File.basename(scenario.feature.file, ".feature").to_s - tmp = @screen.capture.getFilename - out = "#{$tmp_dir}/#{base}-#{DateTime.now}.png" - jenkins_live_screenshot = "#{$tmp_dir}/screenshot.png" - jenkins_live_thumb = "#{$tmp_dir}/screenshot-thumb.png" - FileUtils.mv(tmp, out) - FileUtils.cp(out, jenkins_live_screenshot) - STDERR.puts("Took screenshot \"#{out}\"") - if $pause_on_fail - STDERR.puts "" - STDERR.puts "Press ENTER to continue running the test suite" - STDIN.gets + elapsed = "#{hrs}:#{mins}:#{secs}" + info_log("Scenario failed at time #{elapsed}") + screen_capture = @screen.capture + save_failure_artifact("Screenshot", screen_capture.getFilename) + $failure_artifacts.sort! + $failure_artifacts.each do |type, file| + artifact_name = sanitize_filename("#{elapsed}_#{scenario.name}#{File.extname(file)}") + artifact_path = "#{ARTIFACTS_DIR}/#{artifact_name}" + assert(File.exist?(file)) + FileUtils.mv(file, artifact_path) + info_log + info_log_artifact_location(type, artifact_path) + end + pause("Scenario failed") if $config["PAUSE_ON_FAIL"] + else + if @video_path && File.exist?(@video_path) && not($config['CAPTURE_ALL']) + FileUtils.rm(@video_path) end end - unless system("convert #{jenkins_live_screenshot} -adaptive-resize 128x96 #{jenkins_live_thumb}") - raise StandardError.new("convert command exited with #{$?}") - end - if @sniffer - @sniffer.stop - @sniffer.clear +end + +Before('@product', '@check_tor_leaks') do |scenario| + @tor_leaks_sniffer = Sniffer.new(sanitize_filename(scenario.name), $vmnet) + @tor_leaks_sniffer.capture + add_after_scenario_hook do + @tor_leaks_sniffer.clear end - @vm.destroy if @vm end -After('@product', '~@keep_volumes') do - VM.storage.clear_volumes +After('@product', '@check_tor_leaks') do |scenario| + @tor_leaks_sniffer.stop + if scenario.passed? 
+ if @bridge_hosts.nil?
+ expected_tor_nodes = get_all_tor_nodes
+ else
+ expected_tor_nodes = @bridge_hosts
+ end
+ leaks = FirewallLeakCheck.new(@tor_leaks_sniffer.pcap_file,
+ :accepted_hosts => expected_tor_nodes)
+ leaks.assert_no_leaks
+ end
 end

 # For @source tests
@@ -146,17 +278,3 @@ After('@source') do
 Dir.chdir @orig_pwd
 FileUtils.remove_entry_secure @git_clone
 end
-
-
-# Common
-########
-
-BeforeFeature('@product', '@source') do |feature|
- raise "Feature #{feature.file} is tagged both @product and @source, " +
- "which is an impossible combination"
-end
-
-at_exit do
- delete_all_snapshots if !$keep_snapshots
- VM.storage.clear_pool if VM.storage
-end
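
Editor's note: as a reading aid for the reworked command helpers in vm_helper.rb above (VM#execute, #execute_successfully and #spawn now take an options hash), here is a rough usage sketch in Ruby. It is not part of the commit; the commands, the 'tor' shell-library name and the tor_bootstrap_progress function are assumptions made for illustration, while the :user, :spawn and :libs options themselves come straight from the diff.

# Hypothetical usage of the new options-hash API (sketch only).
# Run a command as the unprivileged live user instead of root:
$vm.execute_successfully('id -un', :user => LIVE_USER)
# :libs sources /usr/local/lib/tails-shell-library/<name>.sh before the
# command, so shell functions defined there can be called; 'tor' and
# 'tor_bootstrap_progress' are assumed names, not taken from this commit.
progress = $vm.execute_successfully('tor_bootstrap_progress',
                                    :libs => 'tor').stdout.chomp
# #spawn is execute() with :spawn => true: start a long-running command
# inside the VM without waiting for it to finish.
$vm.spawn('sleep 3600', :user => LIVE_USER)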
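
In the same spirit, a minimal sketch of how the two-tier snapshot scheme above (RAM-only .memstate files vs. internal qcow2 snapshots) might be driven from the test suite. The reach_checkpoint helper, the checkpoint name and the step text are hypothetical; only VM.snapshot_exists?, #save_snapshot and #restore_snapshot come from the diff.

# Hypothetical checkpoint helper (sketch only).
def reach_checkpoint(name)
  if VM.snapshot_exists?(name)
    # restore_snapshot() transparently picks the RAM-only or the
    # internal snapshot, whichever kind save_snapshot() created.
    $vm.restore_snapshot(name)
  else
    yield                    # run the steps that build up the desired state
    $vm.save_snapshot(name)
  end
end

# Usage (checkpoint name and step text are made up):
# reach_checkpoint('booted-from-dvd') { step 'I start Tails from DVD' }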