path: root/cucumber/features/support
author    Philip Hands <phil@hands.com>    2016-05-11 17:11:01 +0200
committer Philip Hands <phil@hands.com>    2016-05-11 17:11:01 +0200
commit    a5d56e3b5443263b53b0487c81125123411bd0cf (patch)
tree      71b1bdafc0a5978bca9073609eff33e228e29a12 /cucumber/features/support
parent    555d9414f758cc0062eff700a0352ae177fd9be5 (diff)
download  jenkins.debian.net-a5d56e3b5443263b53b0487c81125123411bd0cf.tar.xz
move cucumber things under cucumber/
Diffstat (limited to 'cucumber/features/support')
-rw-r--r--  cucumber/features/support/config.rb                    100
-rw-r--r--  cucumber/features/support/env.rb                        90
-rw-r--r--  cucumber/features/support/extra_hooks.rb               165
-rw-r--r--  cucumber/features/support/helpers/chatbot_helper.rb     59
-rw-r--r--  cucumber/features/support/helpers/ctcp_helper.rb       126
-rw-r--r--  cucumber/features/support/helpers/display_helper.rb     48
-rw-r--r--  cucumber/features/support/helpers/exec_helper.rb        79
-rw-r--r--  cucumber/features/support/helpers/firewall_helper.rb   121
-rw-r--r--  cucumber/features/support/helpers/misc_helpers.rb      253
-rw-r--r--  cucumber/features/support/helpers/sikuli_helper.rb     213
-rw-r--r--  cucumber/features/support/helpers/sniffing_helper.rb    43
-rw-r--r--  cucumber/features/support/helpers/sshd_helper.rb        67
-rw-r--r--  cucumber/features/support/helpers/storage_helper.rb    216
-rw-r--r--  cucumber/features/support/helpers/vm_helper.rb         676
-rw-r--r--  cucumber/features/support/hooks.rb                     280
15 files changed, 2536 insertions, 0 deletions
diff --git a/cucumber/features/support/config.rb b/cucumber/features/support/config.rb
new file mode 100644
index 00000000..25c107b4
--- /dev/null
+++ b/cucumber/features/support/config.rb
@@ -0,0 +1,100 @@
+require 'fileutils'
+require 'yaml'
+require "features/support/helpers/misc_helpers.rb"
+
+# These files deal with options like some of the settings passed
+# to the `run_test_suite` script, and "secrets" like credentials
+# (passwords, SSH keys) to be used in tests.
+CONFIG_DIR = "/srv/jenkins/features/config"
+DEFAULTS_CONFIG_FILE = "#{CONFIG_DIR}/defaults.yml"
+LOCAL_CONFIG_FILE = "#{CONFIG_DIR}/local.yml"
+LOCAL_CONFIG_DIRS_FILES_GLOB = "#{CONFIG_DIR}/*.d/*.yml"
+
+# Dynamic
+$tails_iso = ENV['ISO'] || get_newest_iso
+$old_tails_iso = ENV['OLD_ISO'] || get_oldest_iso
+$tmp_dir = ENV['PWD']
+$vm_xml_path = ENV['VM_XML_PATH']
+$misc_files_dir = "features/misc_files"
+$keep_snapshots = !ENV['KEEP_SNAPSHOTS'].nil?
+$x_display = ENV['DISPLAY']
+$debug = !ENV['DEBUG'].nil?
+$pause_on_fail = !ENV['PAUSE_ON_FAIL'].nil?
+$time_at_start = Time.now
+$live_user = "user"
+$sikuli_retry_findfailed = !ENV['SIKULI_RETRY_FINDFAILED'].nil?
+
+assert File.exists?(DEFAULTS_CONFIG_FILE)
+$config = YAML.load(File.read(DEFAULTS_CONFIG_FILE))
+config_files = Dir.glob(LOCAL_CONFIG_DIRS_FILES_GLOB).sort
+config_files.insert(0, LOCAL_CONFIG_FILE) if File.exists?(LOCAL_CONFIG_FILE)
+config_files.each do |config_file|
+ yaml_struct = YAML.load(File.read(config_file)) || Hash.new
+ if not(yaml_struct.instance_of?(Hash))
+ raise "Local configuration file '#{config_file}' is malformed"
+ end
+ $config.merge!(yaml_struct)
+end
+# Options passed to the `run_test_suite` script will always take
+# precedence. The way we import these keys is only safe for values
+# with types boolean or string. If we need more, we'll have to invoke
+# YAML's type autodetection on ENV somehow.
+$config.merge!(ENV)
+
+# Export TMPDIR back to the environment for subprocesses that we start
+# (e.g. guestfs). Note that this export will only make a difference if
+# TMPDIR wasn't already set and --tmpdir wasn't passed, i.e. only when
+# we use the default.
+ENV['TMPDIR'] = $config['TMPDIR']
+
+# Dynamic constants initialized through the environment or similar,
+# e.g. options we do not want to be configurable through the YAML
+# configuration files.
+DEBUG_LOG_PSEUDO_FIFO = "#{$config["TMPDIR"]}/debug_log_pseudo_fifo"
+DISPLAY = ENV['DISPLAY']
+GIT_DIR = ENV['PWD']
+KEEP_SNAPSHOTS = !ENV['KEEP_SNAPSHOTS'].nil?
+LIVE_USER = "live_user"
+TAILS_ISO = ENV['ISO']
+OLD_TAILS_ISO = ENV['OLD_ISO'] || TAILS_ISO
+TIME_AT_START = Time.now
+# Create the artifacts directory unless it already exists. (A plain
+# conditional is enough; a loop would never terminate when the
+# directory is already there.)
+ARTIFACTS_DIR = $config['TMPDIR'] + "/results"
+if not(File.exist?(ARTIFACTS_DIR))
+  FileUtils.mkdir_p(ARTIFACTS_DIR)
+end
+
+# Constants that are statically initialized.
+CONFIGURED_KEYSERVER_HOSTNAME = 'hkps.pool.sks-keyservers.net'
+LIBVIRT_DOMAIN_NAME = "DebianToaster"
+LIBVIRT_DOMAIN_UUID = "203552d5-819c-41f3-800e-2c8ef2545404"
+LIBVIRT_NETWORK_NAME = "DebianToasterNet"
+LIBVIRT_NETWORK_UUID = "f2305af3-2a64-4f16-afe6-b9dbf02a597e"
+MISC_FILES_DIR = "/srv/jenkins/features/misc_files"
+SERVICES_EXPECTED_ON_ALL_IFACES =
+ [
+ ["cupsd", "0.0.0.0", "631"],
+ ["dhclient", "0.0.0.0", "*"]
+ ]
+# OpenDNS
+SOME_DNS_SERVER = "208.67.222.222"
+TOR_AUTHORITIES =
+ # List grabbed from Tor's sources, src/or/config.c:~750.
+ [
+ "86.59.21.38",
+ "128.31.0.39",
+ "194.109.206.212",
+ "82.94.251.203",
+ "199.254.238.52",
+ "131.188.40.189",
+ "193.23.244.244",
+ "208.83.223.34",
+ "171.25.193.9",
+ "154.35.175.225",
+ ]
+VM_XML_PATH = "/srv/jenkins/features/domains"
+
+#TAILS_SIGNING_KEY = cmd_helper(". #{Dir.pwd}/config/amnesia; echo ${AMNESIA_DEV_KEYID}").tr(' ', '').chomp
+TAILS_DEBIAN_REPO_KEY = "221F9A3C6FA3E09E182E060BC7988EA7A358D82E"
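For illustration, a minimal sketch of how a setting is resolved under the merge order implemented above (the key name is taken from misc_helpers.rb further down; the file contents and command line are assumptions):

    # defaults.yml (assumed):   MAX_NEW_TOR_CIRCUIT_RETRIES: 10
    # local.yml (assumed):      MAX_NEW_TOR_CIRCUIT_RETRIES: 5
    # environment (assumed):    MAX_NEW_TOR_CIRCUIT_RETRIES=2 ./run_test_suite ...
    #
    # defaults.yml is loaded first, then local.yml and *.d/*.yml override it,
    # and ENV is merged last, so the environment value wins:
    $config['MAX_NEW_TOR_CIRCUIT_RETRIES']   # => "2" (a String, since it came from ENV)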
diff --git a/cucumber/features/support/env.rb b/cucumber/features/support/env.rb
new file mode 100644
index 00000000..2e17ae76
--- /dev/null
+++ b/cucumber/features/support/env.rb
@@ -0,0 +1,90 @@
+require 'rubygems'
+require "features/support/extra_hooks.rb"
+require 'time'
+require 'rspec'
+
+# Force UTF-8. Ruby will default to the system locale, and if it is
+# non-UTF-8, String-methods will fail when operating on non-ASCII
+# strings.
+Encoding.default_external = Encoding::UTF_8
+Encoding.default_internal = Encoding::UTF_8
+
+def fatal_system(str)
+ unless system(str)
+ raise StandardError.new("Command exited with #{$?}")
+ end
+end
+
+def git_exists?
+ File.exists? '.git'
+end
+
+def create_git
+ Dir.mkdir 'config'
+ FileUtils.touch('config/base_branch')
+ Dir.mkdir('config/APT_overlays.d')
+ Dir.mkdir 'debian'
+ File.open('debian/changelog', 'w') do |changelog|
+ changelog.write(<<END_OF_CHANGELOG)
+tails (0) stable; urgency=low
+
+ * First release.
+
+ -- Tails developers <tails@boum.org> Mon, 30 Jan 2012 01:00:00 +0000
+END_OF_CHANGELOG
+ end
+
+ fatal_system "git init --quiet"
+ fatal_system "git config user.email 'tails@boum.org'"
+ fatal_system "git config user.name 'Tails developers'"
+ fatal_system "git add debian/changelog"
+ fatal_system "git commit --quiet debian/changelog -m 'First release'"
+ fatal_system "git branch -M stable"
+ fatal_system "git branch testing stable"
+ fatal_system "git branch devel stable"
+ fatal_system "git branch feature/jessie devel"
+end
+
+def current_branch
+ cmd = 'git rev-parse --symbolic-full-name --abbrev-ref HEAD'.split
+ branch = cmd_helper(cmd).strip
+ assert_not_equal("HEAD", branch, "We are in 'detached HEAD' state")
+ return branch
+end
+
+# In order: if git HEAD is tagged, return its name; if a branch is
+# checked out, return its name; otherwise we are in 'detached HEAD'
+# state, and we return the empty string.
+def describe_git_head
+ cmd_helper("git describe --tags --exact-match #{current_commit}".split).strip
+rescue Test::Unit::AssertionFailedError
+ begin
+ current_branch
+ rescue Test::Unit::AssertionFailedError
+ ""
+ end
+end
+
+def current_commit
+ cmd_helper('git rev-parse HEAD'.split).strip
+end
+
+def current_short_commit
+ current_commit[0, 7]
+end
+
+RSpec::Matchers.define :have_suite do |suite|
+ match do |string|
+ # e.g.: `deb http://deb.tails.boum.org/ 0.10 main contrib non-free`
+ %r{^deb +http://deb\.tails\.boum\.org/ +#{Regexp.escape(suite)} main}.match(string)
+ end
+ failure_message_for_should do |string|
+ "expected the sources to include #{suite}\nCurrent sources : #{string}"
+ end
+ failure_message_for_should_not do |string|
+ "expected the sources to exclude #{suite}\nCurrent sources : #{string}"
+ end
+ description do
+ "expected an output with #{suite}"
+ end
+end
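A minimal sketch of how the have_suite matcher above might be used, assuming rspec-expectations' expect syntax is available in the calling context (the sources string is an assumption):

    sources = 'deb http://deb.tails.boum.org/ 0.10 main contrib non-free'
    expect(sources).to have_suite('0.10')       # matches
    expect(sources).to_not have_suite('0.11')   # does not match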
diff --git a/cucumber/features/support/extra_hooks.rb b/cucumber/features/support/extra_hooks.rb
new file mode 100644
index 00000000..16196a55
--- /dev/null
+++ b/cucumber/features/support/extra_hooks.rb
@@ -0,0 +1,165 @@
+# Make the code below work with cucumber >= 2.0. Once we stop
+# supporting <2.0 we should probably do this differently, but this way
+# we can easily support both at the same time.
+begin
+ if not(Cucumber::Core::Ast::Feature.instance_methods.include?(:accept_hook?))
+ require 'gherkin/tag_expression'
+ class Cucumber::Core::Ast::Feature
+ # Code inspired by Cucumber::Core::Test::Case.match_tags?() in
+ # cucumber-ruby-core 1.1.3, lib/cucumber/core/test/case.rb:~59.
+ def accept_hook?(hook)
+ tag_expr = Gherkin::TagExpression.new(hook.tag_expressions.flatten)
+ tags = @tags.map do |t|
+ Gherkin::Formatter::Model::Tag.new(t.name, t.line)
+ end
+ tag_expr.evaluate(tags)
+ end
+ end
+ end
+rescue NameError => e
+ raise e if e.to_s != "uninitialized constant Cucumber::Core"
+end
+
+# Sort of inspired by Cucumber::RbSupport::RbHook (from cucumber
+# < 2.0) but really we just want an object with a 'tag_expressions'
+# attribute to make accept_hook?() (used below) happy.
+class SimpleHook
+ attr_reader :tag_expressions
+
+ def initialize(tag_expressions, proc)
+ @tag_expressions = tag_expressions
+ @proc = proc
+ end
+
+ def invoke(arg)
+ @proc.call(arg)
+ end
+end
+
+def BeforeFeature(*tag_expressions, &block)
+ $before_feature_hooks ||= []
+ $before_feature_hooks << SimpleHook.new(tag_expressions, block)
+end
+
+def AfterFeature(*tag_expressions, &block)
+ $after_feature_hooks ||= []
+ $after_feature_hooks << SimpleHook.new(tag_expressions, block)
+end
+
+require 'cucumber/formatter/console'
+if not($at_exit_print_artifacts_dir_patching_done)
+ module Cucumber::Formatter::Console
+ if method_defined?(:print_stats)
+ alias old_print_stats print_stats
+ end
+ def print_stats(*args)
+ if Dir.exists?(ARTIFACTS_DIR) and Dir.entries(ARTIFACTS_DIR).size > 2
+ @io.puts "Artifacts directory: #{ARTIFACTS_DIR}"
+ @io.puts
+ end
+ if self.class.method_defined?(:old_print_stats)
+ old_print_stats(*args)
+ end
+ end
+ end
+ $at_exit_print_artifacts_dir_patching_done = true
+end
+
+def info_log(message = "", options = {})
+ options[:color] = :clear
+ # This trick allows us to use a module's (~private) method on a
+ # one-off basis.
+ cucumber_console = Class.new.extend(Cucumber::Formatter::Console)
+ puts cucumber_console.format_string(message, options[:color])
+end
+
+def debug_log(message, options = {})
+ $debug_log_fns.each { |fn| fn.call(message, options) } if $debug_log_fns
+end
+
+require 'cucumber/formatter/pretty'
+# Backport part of commit af940a8 from the cucumber-ruby repo. This
+# fixes the "out hook output" for the Pretty formatter so stuff
+# written via `puts` after a Scenario has run its last step will be
+# written, instead of delayed to the next Feature/Scenario (if any) or
+# dropped completely (if not).
+# XXX: This can be removed once we stop supporting Debian Jessie
+# around when Debian Stretch is released.
+if Gem::Version.new(Cucumber::VERSION) < Gem::Version.new('2.0.0.beta.4')
+ module Cucumber
+ module Formatter
+ class Pretty
+ def after_feature_element(feature_element)
+ print_messages
+ @io.puts
+ @io.flush
+ end
+ end
+ end
+ end
+end
+
+module ExtraFormatters
+ # This is a null formatter in the sense that it doesn't ever output
+ # anything. We only use it to hook into the correct events so we can
+ # add our extra hooks.
+ class ExtraHooks
+ def initialize(*args)
+ # We do not care about any of the arguments.
+ end
+
+ def before_feature(feature)
+ if $before_feature_hooks
+ $before_feature_hooks.each do |hook|
+ hook.invoke(feature) if feature.accept_hook?(hook)
+ end
+ end
+ end
+
+ def after_feature(feature)
+ if $after_feature_hooks
+ $after_feature_hooks.reverse.each do |hook|
+ hook.invoke(feature) if feature.accept_hook?(hook)
+ end
+ end
+ end
+ end
+
+ # The pretty formatter with debug logging mixed into its output.
+ class PrettyDebug < Cucumber::Formatter::Pretty
+ def initialize(*args)
+ super(*args)
+ $debug_log_fns ||= []
+ $debug_log_fns << self.method(:debug_log)
+ end
+
+ def debug_log(message, options)
+ options[:color] ||= :blue
+ @io.puts(format_string(message, options[:color]))
+ @io.flush
+ end
+ end
+
+end
+
+module Cucumber
+ module Cli
+ class Options
+ BUILTIN_FORMATS['pretty_debug'] =
+ [
+ 'ExtraFormatters::PrettyDebug',
+ 'Prints the feature with debugging information - in colours.'
+ ]
+ BUILTIN_FORMATS['debug'] = BUILTIN_FORMATS['pretty_debug']
+ end
+ end
+end
+
+AfterConfiguration do |config|
+ # Cucumber may read this file multiple times, and hence run this
+ # AfterConfiguration hook multiple times. We only want our
+ # ExtraHooks formatter to be loaded once, otherwise the hooks would
+ # be run multiple times.
+ extra_hooks = ['ExtraFormatters::ExtraHooks', '/dev/null']
+ config.formats << extra_hooks if not(config.formats.include?(extra_hooks))
+end
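A sketch of how the feature-level hooks provided above are meant to be used from support code; plain cucumber only offers scenario-level Before/After, which is why they exist (the tag and hook bodies are assumptions):

    BeforeFeature('@product') do |feature|
      info_log("Setting up for a @product feature")
    end

    AfterFeature('@product') do |feature|
      info_log("Tearing down after a @product feature")
    end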
diff --git a/cucumber/features/support/helpers/chatbot_helper.rb b/cucumber/features/support/helpers/chatbot_helper.rb
new file mode 100644
index 00000000..23ce3e1a
--- /dev/null
+++ b/cucumber/features/support/helpers/chatbot_helper.rb
@@ -0,0 +1,59 @@
+require 'tempfile'
+
+class ChatBot
+
+ def initialize(account, password, otr_key, opts = Hash.new)
+ @account = account
+ @password = password
+ @otr_key = otr_key
+ @opts = opts
+ @pid = nil
+ @otr_key_file = nil
+ end
+
+ def start
+ @otr_key_file = Tempfile.new("otr_key.", $config["TMPDIR"])
+ @otr_key_file << @otr_key
+ @otr_key_file.close
+
+ cmd_helper(['/usr/bin/convertkey', @otr_key_file.path])
+ cmd_helper(["mv", "#{@otr_key_file.path}3", @otr_key_file.path])
+
+ cmd = [
+ "#{GIT_DIR}/features/scripts/otr-bot.py",
+ @account,
+ @password,
+ @otr_key_file.path
+ ]
+ cmd += ["--connect-server", @opts["connect_server"]] if @opts["connect_server"]
+ cmd += ["--auto-join"] + @opts["auto_join"] if @opts["auto_join"]
+ cmd += ["--log-file", DEBUG_LOG_PSEUDO_FIFO]
+
+ job = IO.popen(cmd)
+ @pid = job.pid
+ end
+
+ def stop
+ @otr_key_file.delete
+ begin
+ Process.kill("TERM", @pid)
+ rescue
+ # noop
+ end
+ end
+
+ def active?
+ begin
+ ret = Process.kill(0, @pid)
+ rescue Errno::ESRCH => e
+ if e.message == "No such process"
+ return false
+ else
+ raise e
+ end
+ end
+ assert_equal(1, ret, "This shouldn't happen")
+ return true
+ end
+
+end
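A sketch of how ChatBot might be driven from a step definition (the account, password, server and room are placeholders, and otr_key is assumed to come from the test suite's secrets):

    bot = ChatBot.new('testbot@example.com', 'secret', otr_key,
                      'connect_server' => 'chat.example.com',
                      'auto_join'      => ['testroom@conference.example.com'])
    bot.start
    # ... exercise the instant messaging client under test ...
    bot.stop if bot.active?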
diff --git a/cucumber/features/support/helpers/ctcp_helper.rb b/cucumber/features/support/helpers/ctcp_helper.rb
new file mode 100644
index 00000000..ee5180ab
--- /dev/null
+++ b/cucumber/features/support/helpers/ctcp_helper.rb
@@ -0,0 +1,126 @@
+require 'net/irc'
+require 'timeout'
+
+class CtcpChecker < Net::IRC::Client
+
+ CTCP_SPAM_DELAY = 5
+
+ # `spam_target`: the nickname of the IRC user to CTCP spam.
+ # `ctcp_cmds`: the Array of CTCP commands to send.
+ # `expected_ctcp_replies`: Hash where the keys are the exact set of replies
+ # we expect, and their values a regex the reply data must match.
+ def initialize(host, port, spam_target, ctcp_cmds, expected_ctcp_replies)
+ @spam_target = spam_target
+ @ctcp_cmds = ctcp_cmds
+ @expected_ctcp_replies = expected_ctcp_replies
+ nickname = self.class.random_irc_nickname
+ opts = {
+ :nick => nickname,
+ :user => nickname,
+ :real => nickname,
+ }
+ opts[:logger] = Logger.new(DEBUG_LOG_PSEUDO_FIFO)
+ super(host, port, opts)
+ end
+
+ # Makes sure that only the expected CTCP replies are received.
+ def verify_ctcp_responses
+ @sent_ctcp_cmds = Set.new
+ @received_ctcp_replies = Set.new
+
+ # Give 60 seconds for connecting to the server and other overhead
+ # beyond the expected time to spam all CTCP commands.
+ expected_ctcp_spam_time = @ctcp_cmds.length * CTCP_SPAM_DELAY
+ timeout = expected_ctcp_spam_time + 60
+
+ begin
+ Timeout::timeout(timeout) do
+ start
+ end
+ rescue Timeout::Error
+ # Do nothing as we'll check for errors below.
+ ensure
+ finish
+ end
+
+ ctcp_cmds_not_sent = @ctcp_cmds - @sent_ctcp_cmds.to_a
+ expected_ctcp_replies_not_received =
+ @expected_ctcp_replies.keys - @received_ctcp_replies.to_a
+
+ if !ctcp_cmds_not_sent.empty? || !expected_ctcp_replies_not_received.empty?
+ raise "Failed to spam all CTCP commands and receive the expected " +
+ "replies within #{timeout} seconds.\n" +
+ (ctcp_cmds_not_sent.empty? ? "" :
+ "CTCP commands not sent: #{ctcp_cmds_not_sent}\n") +
+ (expected_ctcp_replies_not_received.empty? ? "" :
+ "Expected CTCP replies not received: " +
+ expected_ctcp_replies_not_received.to_s)
+ end
+
+ end
+
+ # Generate a random IRC nickname, in this case an alpha-numeric
+ # string with length 10 to 15. To make it legal, the first character
+ # is forced to be alpha.
+ def self.random_irc_nickname
+ random_alpha_string(1) + random_alnum_string(9, 14)
+ end
+
+ def spam(spam_target)
+ post(NOTICE, spam_target, "Hi! I'm gonna test your CTCP capabilities now.")
+ @ctcp_cmds.each do |cmd|
+ sleep CTCP_SPAM_DELAY
+ full_cmd = cmd
+ case cmd
+ when "PING"
+ full_cmd += " #{Time.now.to_i}"
+ when "ACTION"
+ full_cmd += " barfs on the floor."
+ when "ERRMSG"
+ full_cmd += " Pidgin should not respond to this."
+ end
+ post(PRIVMSG, spam_target, ctcp_encode(full_cmd))
+ @sent_ctcp_cmds << cmd
+ end
+ end
+
+ def on_rpl_welcome(m)
+ super
+ Thread.new { spam(@spam_target) }
+ end
+
+ def on_message(m)
+ if m.command == ERR_NICKNAMEINUSE
+ finish
+ new_nick = self.class.random_irc_nickname
+ @opts.marshal_load({
+ :nick => new_nick,
+ :user => new_nick,
+ :real => new_nick,
+ })
+ start
+ return
+ end
+
+ if m.ctcp? and /^:#{Regexp.escape(@spam_target)}!/.match(m)
+ m.ctcps.each do |ctcp_reply|
+ reply_type, _, reply_data = ctcp_reply.partition(" ")
+ if @expected_ctcp_replies.has_key?(reply_type)
+ if @expected_ctcp_replies[reply_type].match(reply_data)
+ @received_ctcp_replies << reply_type
+ else
+ raise "Received expected CTCP reply '#{reply_type}' but with " +
+ "unexpected data '#{reply_data}' "
+ end
+ else
+ raise "Received unexpected CTCP reply '#{reply_type}' with " +
+ "data '#{reply_data}'"
+ end
+ end
+ end
+ if Set.new(@ctcp_cmds) == @sent_ctcp_cmds && \
+ Set.new(@expected_ctcp_replies.keys) == @received_ctcp_replies
+ finish
+ end
+ end
+end
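A sketch of how CtcpChecker might be used (server, port, nickname and the expected replies are placeholders):

    expected_replies = {
      'PING'    => /^\d+$/,
      'VERSION' => /^Pidgin \d+/,
    }
    checker = CtcpChecker.new('irc.example.com', 6667, 'some_nick',
                              expected_replies.keys, expected_replies)
    checker.verify_ctcp_responses   # raises if commands or replies go missing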
diff --git a/cucumber/features/support/helpers/display_helper.rb b/cucumber/features/support/helpers/display_helper.rb
new file mode 100644
index 00000000..b4dce733
--- /dev/null
+++ b/cucumber/features/support/helpers/display_helper.rb
@@ -0,0 +1,48 @@
+
+class Display
+
+ def initialize(domain, x_display)
+ @domain = domain
+ @x_display = x_display
+ end
+
+ def active?
+ p = IO.popen(["xprop", "-display", @x_display,
+ "-name", "#{@domain} (1) - Virt Viewer",
+ :err => ["/dev/null", "w"]])
+ Process.wait(p.pid)
+ $?.success?
+ end
+
+ def start
+ @virtviewer = IO.popen(["virt-viewer", "--direct",
+ "--kiosk",
+ "--reconnect",
+ "--connect", "qemu:///system",
+ "--display", @x_display,
+ @domain,
+ :err => ["/dev/null", "w"]])
+ # We wait for the display to be active to not lose actions
+ # (e.g. key presses via sikuli) that come immediately after
+ # starting (or restoring) a vm
+ try_for(20, { :delay => 0.1, :msg => "virt-viewer failed to start"}) {
+ active?
+ }
+ end
+
+ def stop
+ return if @virtviewer.nil?
+ Process.kill("TERM", @virtviewer.pid)
+ @virtviewer.close
+ rescue IOError
+ # IO.pid throws this if the process wasn't started yet. Possibly
+ # there's a race when doing a start() and then quickly running
+ # stop().
+ end
+
+ def restart
+ stop
+ start
+ end
+
+end
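A sketch of how Display is typically used together with the constants from config.rb:

    display = Display.new(LIBVIRT_DOMAIN_NAME, ENV['DISPLAY'])
    display.start      # blocks until the virt-viewer window is up
    # ... drive the VM via sikuli ...
    display.stop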
diff --git a/cucumber/features/support/helpers/exec_helper.rb b/cucumber/features/support/helpers/exec_helper.rb
new file mode 100644
index 00000000..14e12269
--- /dev/null
+++ b/cucumber/features/support/helpers/exec_helper.rb
@@ -0,0 +1,79 @@
+require 'json'
+require 'socket'
+require 'io/wait'
+
+class VMCommand
+
+ attr_reader :cmd, :returncode, :stdout, :stderr
+
+ def initialize(vm, cmd, options = {})
+ @cmd = cmd
+ @returncode, @stdout, @stderr = VMCommand.execute(vm, cmd, options)
+ end
+
+ def VMCommand.wait_until_remote_shell_is_up(vm, timeout = 90)
+ try_for(timeout, :msg => "Remote shell seems to be down") do
+ sleep(20)
+ Timeout::timeout(10) do
+ VMCommand.execute(vm, "echo 'true'")
+ end
+ end
+ end
+
+ # The parameter `cmd` cannot contain newlines. Separate multiple
+ # commands using ";" instead.
+ # If `:spawn` is false the server will block until it has finished
+ # executing `cmd`. If it's true the server won't block, and the
+ # response will always be [0, "", ""] (only used as an
+ # ACK). execute() will always block until a response is received,
+ # though. Spawning is useful when starting processes in the
+ # background (or running scripts that do the same) like our
+ # onioncircuits wrapper, or any application we want to interact with.
+ def VMCommand.execute(vm, cmd, options = {})
+ options[:user] ||= "root"
+ options[:spawn] ||= false
+ type = options[:spawn] ? "spawn" : "call"
+ socket = TCPSocket.new("127.0.0.1", vm.get_remote_shell_port)
+ debug_log("#{type}ing as #{options[:user]}: #{cmd}")
+ begin
+ #socket.puts(JSON.dump([type, options[:user], cmd]))
+ socket.puts( "\n")
+ sleep(1)
+ socket.puts( "\003")
+ sleep(1)
+ socket.puts( cmd + "\n")
+ sleep(1)
+ while socket.ready?
+ s = socket.readline(sep = "\n").chomp("\n")
+ debug_log("#{type} read: #{s}") if not(options[:spawn])
+ if ('true' == s) then
+ break
+ end
+ end
+ ensure
+ socket.close
+ end
+ if ('true' == s)
+ return true
+ else
+ return VMCommand.execute(vm, cmd, options)
+ end
+ end
+
+ def success?
+ return @returncode == 0
+ end
+
+ def failure?
+ return not(success?)
+ end
+
+ def to_s
+ "Return status: #{@returncode}\n" +
+ "STDOUT:\n" +
+ @stdout +
+ "STDERR:\n" +
+ @stderr
+ end
+
+end
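In this tree execute() has been adapted to talk to a serial-console-style remote shell and only reports whether the command was acknowledged; a minimal sketch of the intended call pattern (the command is an assumption):

    VMCommand.wait_until_remote_shell_is_up($vm)
    VMCommand.execute($vm, "echo 'true'")   # returns true once the shell responds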
diff --git a/cucumber/features/support/helpers/firewall_helper.rb b/cucumber/features/support/helpers/firewall_helper.rb
new file mode 100644
index 00000000..fce363c5
--- /dev/null
+++ b/cucumber/features/support/helpers/firewall_helper.rb
@@ -0,0 +1,121 @@
+require 'packetfu'
+require 'ipaddr'
+
+# Extend IPAddr with private/public address space checks
+class IPAddr
+ PrivateIPv4Ranges = [
+ IPAddr.new("10.0.0.0/8"),
+ IPAddr.new("172.16.0.0/12"),
+ IPAddr.new("192.168.0.0/16"),
+ IPAddr.new("255.255.255.255/32")
+ ]
+
+ PrivateIPv6Ranges = [
+ IPAddr.new("fc00::/7")
+ ]
+
+ def private?
+ private_ranges = self.ipv4? ? PrivateIPv4Ranges : PrivateIPv6Ranges
+ private_ranges.any? { |range| range.include?(self) }
+ end
+
+ def public?
+ !private?
+ end
+end
+
+class FirewallLeakCheck
+ attr_reader :ipv4_tcp_leaks, :ipv4_nontcp_leaks, :ipv6_leaks, :nonip_leaks, :mac_leaks
+
+ def initialize(pcap_file, options = {})
+ options[:accepted_hosts] ||= []
+ options[:ignore_lan] ||= true
+ @pcap_file = pcap_file
+ packets = PacketFu::PcapFile.new.file_to_array(:filename => @pcap_file)
+ mac_leaks = Set.new
+ ipv4_tcp_packets = []
+ ipv4_nontcp_packets = []
+ ipv6_packets = []
+ nonip_packets = []
+ packets.each do |p|
+ if PacketFu::EthPacket.can_parse?(p)
+ packet = PacketFu::EthPacket.parse(p)
+ mac_leaks << packet.eth_saddr
+ mac_leaks << packet.eth_daddr
+ end
+
+ if PacketFu::TCPPacket.can_parse?(p)
+ ipv4_tcp_packets << PacketFu::TCPPacket.parse(p)
+ elsif PacketFu::IPPacket.can_parse?(p)
+ ipv4_nontcp_packets << PacketFu::IPPacket.parse(p)
+ elsif PacketFu::IPv6Packet.can_parse?(p)
+ ipv6_packets << PacketFu::IPv6Packet.parse(p)
+ elsif PacketFu::Packet.can_parse?(p)
+ nonip_packets << PacketFu::Packet.parse(p)
+ else
+ save_pcap_file
+ raise "Found something in the pcap file that cannot be parsed"
+ end
+ end
+ ipv4_tcp_hosts = filter_hosts_from_ippackets(ipv4_tcp_packets,
+ options[:ignore_lan])
+ accepted = Set.new(options[:accepted_hosts])
+ @mac_leaks = mac_leaks
+ @ipv4_tcp_leaks = ipv4_tcp_hosts.select { |host| !accepted.member?(host) }
+ @ipv4_nontcp_leaks = filter_hosts_from_ippackets(ipv4_nontcp_packets,
+ options[:ignore_lan])
+ @ipv6_leaks = filter_hosts_from_ippackets(ipv6_packets,
+ options[:ignore_lan])
+ @nonip_leaks = nonip_packets
+ end
+
+ def save_pcap_file
+ save_failure_artifact("Network capture", @pcap_file)
+ end
+
+ # Returns a list of all unique destination IP addresses found in
+ # `packets`. Exclude LAN hosts if ignore_lan is set.
+ def filter_hosts_from_ippackets(packets, ignore_lan)
+ hosts = []
+ packets.each do |p|
+ candidate = nil
+ if p.kind_of?(PacketFu::IPPacket)
+ candidate = p.ip_daddr
+ elsif p.kind_of?(PacketFu::IPv6Packet)
+ candidate = p.ipv6_header.ipv6_daddr
+ else
+ save_pcap_file
+ raise "Expected an IP{v4,v6} packet, but got something else:\n" +
+ p.peek_format
+ end
+ if candidate != nil and (not(ignore_lan) or IPAddr.new(candidate).public?)
+ hosts << candidate
+ end
+ end
+ hosts.uniq
+ end
+
+ def assert_no_leaks
+ err = ""
+ if !@ipv4_tcp_leaks.empty?
+ err += "The following IPv4 TCP non-Tor Internet hosts were " +
+ "contacted:\n" + ipv4_tcp_leaks.join("\n")
+ end
+ if !@ipv4_nontcp_leaks.empty?
+ err += "The following IPv4 non-TCP Internet hosts were contacted:\n" +
+ ipv4_nontcp_leaks.join("\n")
+ end
+ if !@ipv6_leaks.empty?
+ err += "The following IPv6 Internet hosts were contacted:\n" +
+ ipv6_leaks.join("\n")
+ end
+ if !@nonip_leaks.empty?
+ err += "Some non-IP packets were sent\n"
+ end
+ if !err.empty?
+ save_pcap_file
+ raise err
+ end
+ end
+
+end
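A sketch of how FirewallLeakCheck ties together with the Sniffer and get_all_tor_nodes helpers defined elsewhere in this directory (the sniffer variable is an assumption):

    leak_check = FirewallLeakCheck.new(sniffer.pcap_file,
                                       :accepted_hosts => get_all_tor_nodes)
    leak_check.assert_no_leaks   # raises and saves the pcap on any leak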
diff --git a/cucumber/features/support/helpers/misc_helpers.rb b/cucumber/features/support/helpers/misc_helpers.rb
new file mode 100644
index 00000000..7e09411f
--- /dev/null
+++ b/cucumber/features/support/helpers/misc_helpers.rb
@@ -0,0 +1,253 @@
+require 'date'
+require 'timeout'
+require 'test/unit'
+
+# Test::Unit adds an at_exit hook which, among other things, consumes
+# the command-line arguments that were intended for cucumber. If
+# e.g. `--format` was passed it will throw an error since it's not a
+# valid option for Test::Unit, and it throwing an error at this time
+# (at_exit) will make Cucumber think it failed and consequently exit
+# with an error. Fooling Test::Unit into thinking this hook has already run
+# works around this craziness.
+Test::Unit.run = true
+
+# Make all the assert_* methods easily accessible in any context.
+include Test::Unit::Assertions
+
+def assert_vmcommand_success(p, msg = nil)
+ assert(p.success?, msg.nil? ? "Command failed: #{p.cmd}\n" + \
+ "error code: #{p.returncode}\n" \
+ "stderr: #{p.stderr}" : \
+ msg)
+end
+
+# It's forbidden to throw this exception (or subclasses) in anything
+# but try_for() below. Just don't use it anywhere else!
+class UniqueTryForTimeoutError < Exception
+end
+
+# Call the block (ignoring any exceptions it may throw) repeatedly,
+# with a one-second pause between attempts (configurable via
+# options[:delay]), until it returns true, or raise a Timeout::Error
+# once `timeout` seconds have passed.
+def try_for(timeout, options = {})
+ options[:delay] ||= 1
+ last_exception = nil
+ # Create a unique exception used only for this particular try_for
+ # call's Timeout to allow nested try_for:s. If we used the same one,
+ # the innermost try_for would catch all outer ones', creating a
+ # really strange situation.
+ unique_timeout_exception = Class.new(UniqueTryForTimeoutError)
+ Timeout::timeout(timeout, unique_timeout_exception) do
+ loop do
+ begin
+ return if yield
+ rescue NameError, UniqueTryForTimeoutError => e
+ # NameError most likely means typos, and hiding that is rarely
+ # (never?) a good idea, so we rethrow them. See below why we
+ # also rethrow *all* the unique exceptions.
+ raise e
+ rescue Exception => e
+ # All other exceptions are ignored while trying the
+ # block. Well we save the last exception so we can print it in
+ # case of a timeout.
+ last_exception = e
+ end
+ sleep options[:delay]
+ end
+ end
+ # At this point the block above either succeeded and we'll return,
+ # or we are raising an exception. In the latter case we either have
+ # a NameError that we won't catch (and neither will any try_for
+ # below us in the stack), or we have a unique exception. That can
+ # mean one of two things:
+ # 1. it's the one unique to this try_for, and in that case we'll
+ #    catch it, rethrowing it as something that will be ignored
+ #    inside the blocks of all try_for:s below us in the stack.
+ # 2. it's an exception unique to another try_for. Assuming that we
+ #    do not throw the unique exceptions in any other place or way
+ #    than we do it in this function, this means that there is a
+ #    try_for below us in the stack to which this exception must be
+ #    unique.
+ # With 1 as the base step and 2 as the inductive step, we have a
+ # sort of inductive proof for the correctness of try_for when it's
+ # nested: for an arbitrarily deep stack of try_for:s, any of the
+ # unique exceptions will be caught only by the try_for instance it
+ # is unique to, and all try_for:s in between will ignore it so it
+ # ends up there immediately.
+rescue unique_timeout_exception => e
+ msg = options[:msg] || 'try_for() timeout expired'
+ if last_exception
+ msg += "\nLast ignored exception was: " +
+ "#{last_exception.class}: #{last_exception}"
+ end
+ raise Timeout::Error.new(msg)
+end
+
+class TorFailure < StandardError
+end
+
+class MaxRetriesFailure < StandardError
+end
+
+# This will retry the block up to MAX_NEW_TOR_CIRCUIT_RETRIES
+# times. The block must raise an exception for a run to be considered
+# as a failure. After a failure recovery_proc will be called (if
+# given) and the intention with it is to bring us back to the state
+# expected by the block, so it can be retried.
+def retry_tor(recovery_proc = nil, &block)
+ tor_recovery_proc = Proc.new do
+ force_new_tor_circuit
+ recovery_proc.call if recovery_proc
+ end
+
+ retry_action($config['MAX_NEW_TOR_CIRCUIT_RETRIES'],
+ :recovery_proc => tor_recovery_proc,
+ :operation_name => 'Tor operation', &block)
+end
+
+def retry_i2p(recovery_proc = nil, &block)
+ retry_action(15, :recovery_proc => recovery_proc,
+ :operation_name => 'I2P operation', &block)
+end
+
+def retry_action(max_retries, options = {}, &block)
+ assert(max_retries.is_a?(Integer), "max_retries must be an integer")
+ options[:recovery_proc] ||= nil
+ options[:operation_name] ||= 'Operation'
+
+ retries = 1
+ loop do
+ begin
+ block.call
+ return
+ rescue Exception => e
+ if retries <= max_retries
+ debug_log("#{options[:operation_name]} failed (Try #{retries} of " +
+ "#{max_retries}) with:\n" +
+ "#{e.class}: #{e.message}")
+ options[:recovery_proc].call if options[:recovery_proc]
+ retries += 1
+ else
+ raise MaxRetriesFailure.new("#{options[:operation_name]} failed (despite retrying " +
+ "#{max_retries} times) with\n" +
+ "#{e.class}: #{e.message}")
+ end
+ end
+ end
+end
+
+def wait_until_tor_is_working
+ try_for(270) { $vm.execute('/usr/local/sbin/tor-has-bootstrapped').success? }
+rescue Timeout::Error => e
+ c = $vm.execute("journalctl SYSLOG_IDENTIFIER=restart-tor")
+ if c.success?
+ debug_log("From the journal:\n" + c.stdout.sub(/^/, " "))
+ else
+ debug_log("Nothing was in the journal about 'restart-tor'")
+ end
+ raise e
+end
+
+def convert_bytes_mod(unit)
+ case unit
+ when "bytes", "b" then mod = 1
+ when "KB" then mod = 10**3
+ when "k", "KiB" then mod = 2**10
+ when "MB" then mod = 10**6
+ when "M", "MiB" then mod = 2**20
+ when "GB" then mod = 10**9
+ when "G", "GiB" then mod = 2**30
+ when "TB" then mod = 10**12
+ when "T", "TiB" then mod = 2**40
+ else
+ raise "invalid memory unit '#{unit}'"
+ end
+ return mod
+end
+
+def convert_to_bytes(size, unit)
+ return (size*convert_bytes_mod(unit)).to_i
+end
+
+def convert_to_MiB(size, unit)
+ return (size*convert_bytes_mod(unit) / (2**20)).to_i
+end
+
+def convert_from_bytes(size, unit)
+ return size.to_f/convert_bytes_mod(unit).to_f
+end
+
+def cmd_helper(cmd)
+ if cmd.instance_of?(Array)
+ cmd << {:err => [:child, :out]}
+ elsif cmd.instance_of?(String)
+ cmd += " 2>&1"
+ end
+ IO.popen(cmd) do |p|
+ out = p.readlines.join("\n")
+ p.close
+ ret = $?
+ assert_equal(0, ret, "Command failed (returned #{ret}): #{cmd}:\n#{out}")
+ return out
+ end
+end
+
+# This command will grab all router IP addresses from the Tor
+# consensus in the VM + the hardcoded TOR_AUTHORITIES.
+def get_all_tor_nodes
+ cmd = 'awk "/^r/ { print \$6 }" /var/lib/tor/cached-microdesc-consensus'
+ $vm.execute(cmd).stdout.chomp.split("\n") + TOR_AUTHORITIES
+end
+
+def get_free_space(machine, path)
+ case machine
+ when 'host'
+ assert(File.exists?(path), "Path '#{path}' not found on #{machine}.")
+ free = cmd_helper(["df", path])
+ when 'guest'
+ assert($vm.file_exist?(path), "Path '#{path}' not found on #{machine}.")
+ free = $vm.execute_successfully("df '#{path}'")
+ else
+ raise "Unsupported machine type #{machine} passed."
+ end
+ output = free.split("\n").last
+ return output.match(/[^\s]\s+[0-9]+\s+[0-9]+\s+([0-9]+)\s+.*/)[1].chomp.to_i
+end
+
+def random_string_from_set(set, min_len, max_len)
+ len = (min_len..max_len).to_a.sample
+ len ||= min_len
+ (0..len-1).map { |n| set.sample }.join
+end
+
+def random_alpha_string(min_len, max_len = 0)
+ alpha_set = ('A'..'Z').to_a + ('a'..'z').to_a
+ random_string_from_set(alpha_set, min_len, max_len)
+end
+
+def random_alnum_string(min_len, max_len = 0)
+ alnum_set = ('A'..'Z').to_a + ('a'..'z').to_a + (0..9).to_a.map { |n| n.to_s }
+ random_string_from_set(alnum_set, min_len, max_len)
+end
+
+# Sanitize the filename by replacing characters that are hostile to Unix filenames
+def sanitize_filename(filename, options = {})
+ options[:replacement] ||= '_'
+ bad_unix_filename_chars = Regexp.new("[^A-Za-z0-9_\\-.,+:]")
+ filename.gsub(bad_unix_filename_chars, options[:replacement])
+end
+
+def info_log_artifact_location(type, path)
+ if $config['ARTIFACTS_BASE_URI']
+ # Remove any trailing slashes, we'll add one ourselves
+ base_url = $config['ARTIFACTS_BASE_URI'].gsub(/\/*$/, "")
+ path = "#{base_url}/#{File.basename(path)}"
+ end
+ info_log("#{type.capitalize}: #{path}")
+end
+
+def pause(message = "Paused")
+ STDERR.puts
+ STDERR.puts "#{message} (Press ENTER to continue!)"
+ STDIN.gets
+end
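Two minimal sketches of the helpers above in use from inside a step definition (the path and step names are assumptions):

    # Poll every 2 seconds, for at most 60 seconds, until a file shows up
    # inside the guest:
    try_for(60, :delay => 2, :msg => "the file never appeared") do
      $vm.file_exist?('/tmp/some_file')
    end

    # Retry a flaky Tor-dependent action, requesting a new circuit and
    # re-running a recovery step between attempts:
    retry_tor(Proc.new { step 'I reload the page' }) do
      step 'the page is loaded'
    end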
diff --git a/cucumber/features/support/helpers/sikuli_helper.rb b/cucumber/features/support/helpers/sikuli_helper.rb
new file mode 100644
index 00000000..486b0e2e
--- /dev/null
+++ b/cucumber/features/support/helpers/sikuli_helper.rb
@@ -0,0 +1,213 @@
+require 'rjb'
+require 'rjbextension'
+$LOAD_PATH << ENV['SIKULI_HOME']
+require 'sikuli-script.jar'
+Rjb::load
+
+package_members = [
+ "java.io.FileOutputStream",
+ "java.io.PrintStream",
+ "java.lang.System",
+ "org.sikuli.script.Finder",
+ "org.sikuli.script.Key",
+ "org.sikuli.script.KeyModifier",
+ "org.sikuli.script.Location",
+ "org.sikuli.script.Match",
+ "org.sikuli.script.Pattern",
+ "org.sikuli.script.Region",
+ "org.sikuli.script.Screen",
+ "org.sikuli.script.Settings",
+ ]
+
+translations = Hash[
+ "org.sikuli.script", "Sikuli",
+ "java.lang", "Java::Lang",
+ "java.io", "Java::Io",
+ ]
+
+for p in package_members
+ imported_class = Rjb::import(p)
+ package, ignore, class_name = p.rpartition(".")
+ next if ! translations.include? package
+ mod_name = translations[package]
+ mod = mod_name.split("::").inject(Object) do |parent_obj, child_name|
+ if parent_obj.const_defined?(child_name, false)
+ parent_obj.const_get(child_name, false)
+ else
+ child_obj = Module.new
+ parent_obj.const_set(child_name, child_obj)
+ end
+ end
+ mod.const_set(class_name, imported_class)
+end
+
+# Bind Java's stdout to debug_log() via our magical pseudo fifo
+# logger.
+def bind_java_to_pseudo_fifo_logger
+ file_output_stream = Java::Io::FileOutputStream.new(DEBUG_LOG_PSEUDO_FIFO)
+ print_stream = Java::Io::PrintStream.new(file_output_stream)
+ Java::Lang::System.setOut(print_stream)
+end
+
+def findfailed_hook(pic)
+ pause("FindFailed for: '#{pic}'")
+end
+
+# Since rjb imports Java classes without creating a corresponding
+# Ruby class (it's just an instance of Rjb_JavaProxy) we can't
+# monkey patch any class, so additional methods must be added
+# to each Screen object.
+#
+# All Java classes' methods are immediately available in the proxied
+# Ruby classes, but care has to be given to match their type. For a
+# list of methods, see: <http://doc.sikuli.org/javadoc/index.html>.
+# The type "PRSML" is a union of Pattern, Region, Screen, Match and
+# Location.
+#
+# Also, due to limitations in Ruby's syntax we can't do:
+# def Sikuli::Screen.new
+# so we work around it with the following variable.
+sikuli_script_proxy = Sikuli::Screen
+$_original_sikuli_screen_new ||= Sikuli::Screen.method :new
+
+# For waitAny()/findAny() we are forced to throw this exception since
+# Rjb::throw doesn't block until the Java exception has been received
+# by Ruby, so strange things can happen.
+class FindAnyFailed < StandardError
+end
+
+def sikuli_script_proxy.new(*args)
+ s = $_original_sikuli_screen_new.call(*args)
+
+ if $config["SIKULI_RETRY_FINDFAILED"]
+ # The usage of `_invoke()` below exemplifies how one can wrap
+ # around Java objects' methods when they're imported using RJB. It
+ # isn't pretty. The second argument is the parameter signature,
+ # which can be obtained by creating the intended Java object using
+ # RJB, and then calling its `java_methods` method.
+
+ def s.wait(pic, time)
+ self._invoke('wait', 'Ljava.lang.Object;D', pic, time)
+ rescue FindFailed => e
+ findfailed_hook(pic)
+ self._invoke('wait', 'Ljava.lang.Object;D', pic, time)
+ end
+
+ def s.find(pic)
+ self._invoke('find', 'Ljava.lang.Object;', pic)
+ rescue FindFailed => e
+ findfailed_hook(pic)
+ self._invoke('find', 'Ljava.lang.Object;', pic)
+ end
+
+ def s.waitVanish(pic, time)
+ self._invoke('waitVanish', 'Ljava.lang.Object;D', pic, time)
+ rescue FindFailed => e
+ findfailed_hook(pic)
+ self._invoke('waitVanish', 'Ljava.lang.Object;D', pic, time)
+ end
+
+ def s.click(pic)
+ self._invoke('click', 'Ljava.lang.Object;', pic)
+ rescue FindFailed => e
+ findfailed_hook(pic)
+ self._invoke('click', 'Ljava.lang.Object;', pic)
+ end
+ end
+
+ def s.click_point(x, y)
+ self.click(Sikuli::Location.new(x, y))
+ end
+
+ def s.doubleClick_point(x, y)
+ self.doubleClick(Sikuli::Location.new(x, y))
+ end
+
+ def s.click_mid_right_edge(pic)
+ r = self.find(pic)
+ top_right = r.getTopRight()
+ x = top_right.getX
+ y = top_right.getY + r.getH/2
+ self.click_point(x, y)
+ end
+
+ def s.wait_and_click(pic, time)
+ self.click(self.wait(pic, time))
+ end
+
+ def s.wait_and_double_click(pic, time)
+ self.doubleClick(self.wait(pic, time))
+ end
+
+ def s.wait_and_right_click(pic, time)
+ self.rightClick(self.wait(pic, time))
+ end
+
+ def s.wait_and_hover(pic, time)
+ self.hover(self.wait(pic, time))
+ end
+
+ def s.existsAny(images)
+ images.each do |image|
+ region = self.exists(image)
+ return [image, region] if region
+ end
+ return nil
+ end
+
+ def s.findAny(images)
+ images.each do |image|
+ begin
+ return [image, self.find(image)]
+ rescue FindFailed
+ # Ignore. We'll throw an appropriate exception after having
+ # looped through all images and found none of them.
+ end
+ end
+ # If we've reached this point, none of the images could be found.
+ raise FindAnyFailed.new("can not find any of the images #{images} on the " +
+ "screen")
+ end
+
+ def s.waitAny(images, time)
+ Timeout::timeout(time) do
+ loop do
+ result = self.existsAny(images)
+ return result if result
+ end
+ end
+ rescue Timeout::Error
+ raise FindAnyFailed.new("can not find any of the images #{images} on the " +
+ "screen")
+ end
+
+ def s.hover_point(x, y)
+ self.hover(Sikuli::Location.new(x, y))
+ end
+
+ def s.hide_cursor
+ self.hover_point(self.w, self.h/2)
+ end
+
+ s
+end
+
+# Configure sikuli
+
+# Ruby and RJB don't play well together when it comes to static
+# fields (and possibly methods) so we instantiate and access the field
+# via objects instead. It actually works inside this file, but when
+# it's required from "outside", and the file has been completely
+# required, ruby's require method complains that the method for the
+# field accessor is missing.
+sikuli_settings = Sikuli::Settings.new
+sikuli_settings.OcrDataPath = $config["TMPDIR"]
+# sikuli_ruby, which we used before, defaulted to 0.9 minimum
+# similarity, so all our current images are adapted to that value.
+# Also, Sikuli's default of 0.7 is simply too low (many false
+# positives).
+sikuli_settings.MinSimilarity = 0.9
+sikuli_settings.ActionLogs = true
+sikuli_settings.DebugLogs = false
+sikuli_settings.InfoLogs = true
+sikuli_settings.ProfileLogs = true
diff --git a/cucumber/features/support/helpers/sniffing_helper.rb b/cucumber/features/support/helpers/sniffing_helper.rb
new file mode 100644
index 00000000..213411eb
--- /dev/null
+++ b/cucumber/features/support/helpers/sniffing_helper.rb
@@ -0,0 +1,43 @@
+#
+# Sniffer is a very dumb wrapper to start and stop tcpdump instances,
+# possibly with customized filters. Captured traffic is stored in files
+# whose names depend on the sniffer name. The resulting pcap file for
+# each sniffer can be accessed through its `pcap_file` method.
+#
+# Using more Ruby-ish ways to sniff the network, e.g. via pcap-capable
+# gems, is way too resource-consuming, less reliable and much slower,
+# so let's not bother with that. :)
+#
+# Should put all that in a Module.
+
+class Sniffer
+
+ attr_reader :name, :pcap_file, :pid
+
+ def initialize(name, vmnet)
+ @name = name
+ @vmnet = vmnet
+ pcap_name = sanitize_filename("#{name}.pcap")
+ @pcap_file = "#{$config["TMPDIR"]}/#{pcap_name}"
+ end
+
+ def capture(filter="not ether src host #{@vmnet.bridge_mac} and not ether proto \\arp and not ether proto \\rarp")
+ job = IO.popen(["/usr/sbin/tcpdump", "-n", "-i", @vmnet.bridge_name, "-w",
+ @pcap_file, "-U", filter, :err => ["/dev/null", "w"]])
+ @pid = job.pid
+ end
+
+ def stop
+ begin
+ Process.kill("TERM", @pid)
+ rescue
+ # noop
+ end
+ end
+
+ def clear
+ if File.exist?(@pcap_file)
+ File.delete(@pcap_file)
+ end
+ end
+end
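A sketch of how a Sniffer is combined with FirewallLeakCheck from firewall_helper.rb ($vmnet stands for a VMNet instance from vm_helper.rb and is an assumption):

    sniffer = Sniffer.new("all_traffic", $vmnet)
    sniffer.capture                  # default filter drops ARP/RARP and traffic
                                     # sourced from the bridge itself
    # ... run the scenario ...
    sniffer.stop
    FirewallLeakCheck.new(sniffer.pcap_file).assert_no_leaks
    sniffer.clear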
diff --git a/cucumber/features/support/helpers/sshd_helper.rb b/cucumber/features/support/helpers/sshd_helper.rb
new file mode 100644
index 00000000..2e0069c0
--- /dev/null
+++ b/cucumber/features/support/helpers/sshd_helper.rb
@@ -0,0 +1,67 @@
+require 'tempfile'
+
+class SSHServer
+ def initialize(sshd_host, sshd_port, authorized_keys = nil)
+ @sshd_host = sshd_host
+ @sshd_port = sshd_port
+ @authorized_keys = authorized_keys
+ @pid = nil
+ end
+
+ def start
+ @sshd_key_file = Tempfile.new("ssh_host_rsa_key", $config["TMPDIR"])
+ # 'hack' to prevent ssh-keygen from prompting to overwrite the file
+ File.delete(@sshd_key_file.path)
+ cmd_helper(['ssh-keygen', '-t', 'rsa', '-N', "", '-f', "#{@sshd_key_file.path}"])
+ @sshd_key_file.close
+
+ sshd_config =<<EOF
+Port #{@sshd_port}
+ListenAddress #{@sshd_host}
+UsePrivilegeSeparation no
+HostKey #{@sshd_key_file.path}
+Pidfile #{$config['TMPDIR']}/ssh.pid
+EOF
+
+ @sshd_config_file = Tempfile.new("sshd_config", $config["TMPDIR"])
+ @sshd_config_file.write(sshd_config)
+
+ if @authorized_keys
+ @authorized_keys_file = Tempfile.new("authorized_keys", $config['TMPDIR'])
+ @authorized_keys_file.write(@authorized_keys)
+ @authorized_keys_file.close
+ @sshd_config_file.write("AuthorizedKeysFile #{@authorized_keys_file.path}")
+ end
+
+ @sshd_config_file.close
+
+ cmd = ["/usr/sbin/sshd", "-4", "-f", @sshd_config_file.path, "-D"]
+
+ job = IO.popen(cmd)
+ @pid = job.pid
+ end
+
+ def stop
+ File.delete("#{@sshd_key_file.path}.pub")
+ File.delete("#{$config['TMPDIR']}/ssh.pid")
+ begin
+ Process.kill("TERM", @pid)
+ rescue
+ # noop
+ end
+ end
+
+ def active?
+ begin
+ ret = Process.kill(0, @pid)
+ rescue Errno::ESRCH => e
+ if e.message == "No such process"
+ return false
+ else
+ raise e
+ end
+ end
+ assert_equal(1, ret, "This shouldn't happen")
+ return true
+ end
+end
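A sketch of how SSHServer might be used in a scenario (address, port and public key are placeholders):

    sshd = SSHServer.new('10.2.1.1', 2222, 'ssh-rsa AAAA... test@example.org')
    sshd.start
    try_for(10, :msg => "sshd did not come up") { sshd.active? }
    # ... point the SSH client under test at 10.2.1.1:2222 ...
    sshd.stop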
diff --git a/cucumber/features/support/helpers/storage_helper.rb b/cucumber/features/support/helpers/storage_helper.rb
new file mode 100644
index 00000000..21537a92
--- /dev/null
+++ b/cucumber/features/support/helpers/storage_helper.rb
@@ -0,0 +1,216 @@
+# Helper class for manipulating VM storage *volumes*, i.e. it deals
+# only with creation of images and keeps a name => volume path lookup
+# table (plugging drives or getting info of plugged devices is done in
+# the VM class). We'd like better coupling, but given the ridiculous
+# disconnect between Libvirt::StoragePool and Libvirt::Domain (hint:
+# they have nothing to do with each other whatsoever) this is what
+# makes sense.
+
+require 'libvirt'
+require 'guestfs'
+require 'rexml/document'
+require 'etc'
+
+class VMStorage
+
+ def initialize(virt, xml_path)
+ @virt = virt
+ @xml_path = xml_path
+ pool_xml = REXML::Document.new(File.read("#{@xml_path}/storage_pool.xml"))
+ pool_name = pool_xml.elements['pool/name'].text
+ @pool_path = "#{$config["TMPDIR"]}/#{pool_name}"
+ begin
+ @pool = @virt.lookup_storage_pool_by_name(pool_name)
+ rescue Libvirt::RetrieveError
+ @pool = nil
+ end
+ if @pool and not(KEEP_SNAPSHOTS)
+ VMStorage.clear_storage_pool(@pool)
+ @pool = nil
+ end
+ unless @pool
+ pool_xml.elements['pool/target/path'].text = @pool_path
+ @pool = @virt.define_storage_pool_xml(pool_xml.to_s)
+ if not(Dir.exists?(@pool_path))
+ # We'd like to use @pool.build, which will just create the
+ # @pool_path directory, but it does so with root:root as owner
+ # (at least with libvirt 1.2.21-2). libvirt itself can handle
+ # that situation, but guestfs (at least with <=
+ # 1:1.28.12-1+b3) cannot when invoked by a non-root user,
+ # which we want to support.
+ FileUtils.mkdir(@pool_path)
+ FileUtils.chown(nil, 'libvirt-qemu', @pool_path)
+ FileUtils.chmod("ug+wrx", @pool_path)
+ end
+ end
+ @pool.create unless @pool.active?
+ @pool.refresh
+ end
+
+ def VMStorage.clear_storage_pool_volumes(pool)
+ was_not_active = !pool.active?
+ if was_not_active
+ pool.create
+ end
+ pool.list_volumes.each do |vol_name|
+ vol = pool.lookup_volume_by_name(vol_name)
+ vol.delete
+ end
+ if was_not_active
+ pool.destroy
+ end
+ rescue
+ # Some of the above operations can fail if the pool's path was
+ # deleted by external means; let's ignore that.
+ end
+
+ def VMStorage.clear_storage_pool(pool)
+ VMStorage.clear_storage_pool_volumes(pool)
+ pool.destroy if pool.active?
+ pool.undefine
+ end
+
+ def clear_pool
+ VMStorage.clear_storage_pool(@pool)
+ end
+
+ def clear_volumes
+ VMStorage.clear_storage_pool_volumes(@pool)
+ end
+
+ def delete_volume(name)
+ @pool.lookup_volume_by_name(name).delete
+ end
+
+ def create_new_disk(name, options = {})
+ options[:size] ||= 2
+ options[:unit] ||= "GiB"
+ options[:type] ||= "qcow2"
+ # Require 'slightly' more space to be available to give a bit more leeway
+ # with rounding, temp file creation, etc.
+ reserved = 500
+ needed = convert_to_MiB(options[:size].to_i, options[:unit])
+ avail = convert_to_MiB(get_free_space('host', @pool_path), "KiB")
+ assert(avail - reserved >= needed,
+ "Error creating disk \"#{name}\" in \"#{@pool_path}\". " \
+ "Need #{needed} MiB but only #{avail} MiB is available of " \
+ "which #{reserved} MiB is reserved for other temporary files.")
+ begin
+ old_vol = @pool.lookup_volume_by_name(name)
+ rescue Libvirt::RetrieveError
+ # noop
+ else
+ old_vol.delete
+ end
+ uid = Etc::getpwnam("libvirt-qemu").uid
+ gid = Etc::getgrnam("libvirt-qemu").gid
+ vol_xml = REXML::Document.new(File.read("#{@xml_path}/volume.xml"))
+ vol_xml.elements['volume/name'].text = name
+ size_b = convert_to_bytes(options[:size].to_f, options[:unit])
+ vol_xml.elements['volume/capacity'].text = size_b.to_s
+ vol_xml.elements['volume/target/format'].attributes["type"] = options[:type]
+ vol_xml.elements['volume/target/path'].text = "#{@pool_path}/#{name}"
+ vol_xml.elements['volume/target/permissions/owner'].text = uid.to_s
+ vol_xml.elements['volume/target/permissions/group'].text = gid.to_s
+ vol = @pool.create_volume_xml(vol_xml.to_s)
+ @pool.refresh
+ end
+
+ def clone_to_new_disk(from, to)
+ begin
+ old_to_vol = @pool.lookup_volume_by_name(to)
+ rescue Libvirt::RetrieveError
+ # noop
+ else
+ old_to_vol.delete
+ end
+ from_vol = @pool.lookup_volume_by_name(from)
+ xml = REXML::Document.new(from_vol.xml_desc)
+ pool_path = REXML::Document.new(@pool.xml_desc).elements['pool/target/path'].text
+ xml.elements['volume/name'].text = to
+ xml.elements['volume/target/path'].text = "#{pool_path}/#{to}"
+ @pool.create_volume_xml_from(xml.to_s, from_vol)
+ end
+
+ def disk_format(name)
+ vol = @pool.lookup_volume_by_name(name)
+ vol_xml = REXML::Document.new(vol.xml_desc)
+ return vol_xml.elements['volume/target/format'].attributes["type"]
+ end
+
+ def disk_path(name)
+ @pool.lookup_volume_by_name(name).path
+ end
+
+ def disk_mklabel(name, parttype)
+ disk = {
+ :path => disk_path(name),
+ :opts => {
+ :format => disk_format(name)
+ }
+ }
+ guestfs_disk_helper(disk) do |g, disk_handle|
+ g.part_init(disk_handle, parttype)
+ end
+ end
+
+ def disk_mkpartfs(name, parttype, fstype, opts = {})
+ opts[:label] ||= nil
+ opts[:luks_password] ||= nil
+ disk = {
+ :path => disk_path(name),
+ :opts => {
+ :format => disk_format(name)
+ }
+ }
+ guestfs_disk_helper(disk) do |g, disk_handle|
+ g.part_disk(disk_handle, parttype)
+ g.part_set_name(disk_handle, 1, opts[:label]) if opts[:label]
+ primary_partition = g.list_partitions()[0]
+ if opts[:luks_password]
+ g.luks_format(primary_partition, opts[:luks_password], 0)
+ luks_mapping = File.basename(primary_partition) + "_unlocked"
+ g.luks_open(primary_partition, opts[:luks_password], luks_mapping)
+ luks_dev = "/dev/mapper/#{luks_mapping}"
+ g.mkfs(fstype, luks_dev)
+ g.luks_close(luks_dev)
+ else
+ g.mkfs(fstype, primary_partition)
+ end
+ end
+ end
+
+ def disk_mkswap(name, parttype)
+ disk = {
+ :path => disk_path(name),
+ :opts => {
+ :format => disk_format(name)
+ }
+ }
+ guestfs_disk_helper(disk) do |g, disk_handle|
+ g.part_disk(disk_handle, parttype)
+ primary_partition = g.list_partitions()[0]
+ g.mkswap(primary_partition)
+ end
+ end
+
+ def guestfs_disk_helper(*disks)
+ assert(block_given?)
+ g = Guestfs::Guestfs.new()
+ g.set_trace(1)
+ message_callback = Proc.new do |event, _, message, _|
+ debug_log("libguestfs: #{Guestfs.event_to_string(event)}: #{message}")
+ end
+ g.set_event_callback(message_callback,
+ Guestfs::EVENT_TRACE)
+ g.set_autosync(1)
+ disks.each do |disk|
+ g.add_drive_opts(disk[:path], disk[:opts])
+ end
+ g.launch()
+ yield(g, *g.list_devices())
+ ensure
+ g.close
+ end
+
+end
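A sketch of how VMStorage is typically driven ($virt stands for the Libvirt connection created elsewhere; the volume name, size and partitioning are assumptions):

    storage = VMStorage.new($virt, VM_XML_PATH)
    storage.create_new_disk('test_usb', :size => 4, :unit => 'GiB', :type => 'qcow2')
    storage.disk_mkpartfs('test_usb', 'msdos', 'ext4', :label => 'TestData')
    # The VM class defined below can then plug it:
    #   $vm.plug_drive('test_usb', 'removable usb')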
diff --git a/cucumber/features/support/helpers/vm_helper.rb b/cucumber/features/support/helpers/vm_helper.rb
new file mode 100644
index 00000000..6d7204d4
--- /dev/null
+++ b/cucumber/features/support/helpers/vm_helper.rb
@@ -0,0 +1,676 @@
+require 'libvirt'
+require 'rexml/document'
+
+class ExecutionFailedInVM < StandardError
+end
+
+class VMNet
+
+ attr_reader :net_name, :net
+
+ def initialize(virt, xml_path)
+ @virt = virt
+ @net_name = LIBVIRT_NETWORK_NAME
+ net_xml = File.read("#{xml_path}/default_net.xml")
+ rexml = REXML::Document.new(net_xml)
+ rexml.elements['network'].add_element('name')
+ rexml.elements['network/name'].text = @net_name
+ rexml.elements['network'].add_element('uuid')
+ rexml.elements['network/uuid'].text = LIBVIRT_NETWORK_UUID
+ update(rexml.to_s)
+ rescue Exception => e
+ destroy_and_undefine
+ raise e
+ end
+
+ # We lookup by name so we also catch networks from previous test
+ # suite runs that weren't properly cleaned up (e.g. aborted).
+ def destroy_and_undefine
+ begin
+ old_net = @virt.lookup_network_by_name(@net_name)
+ old_net.destroy if old_net.active?
+ old_net.undefine
+ rescue
+ end
+ end
+
+ def update(xml)
+ destroy_and_undefine
+ @net = @virt.define_network_xml(xml)
+ @net.create
+ end
+
+ def bridge_name
+ @net.bridge_name
+ end
+
+ def bridge_ip_addr
+ net_xml = REXML::Document.new(@net.xml_desc)
+ IPAddr.new(net_xml.elements['network/ip'].attributes['address']).to_s
+ end
+
+ def guest_real_mac
+ net_xml = REXML::Document.new(@net.xml_desc)
+ net_xml.elements['network/ip/dhcp/host/'].attributes['mac']
+ end
+
+ def bridge_mac
+ File.open("/sys/class/net/#{bridge_name}/address", "rb").read.chomp
+ end
+end
+
+
+class VM
+
+ attr_reader :domain, :display, :vmnet, :storage
+
+ def initialize(virt, xml_path, vmnet, storage, x_display)
+ @virt = virt
+ @xml_path = xml_path
+ @vmnet = vmnet
+ @storage = storage
+ @domain_name = LIBVIRT_DOMAIN_NAME
+ default_domain_xml = File.read("#{@xml_path}/default.xml")
+ rexml = REXML::Document.new(default_domain_xml)
+ rexml.elements['domain'].add_element('name')
+ rexml.elements['domain/name'].text = @domain_name
+ rexml.elements['domain'].add_element('uuid')
+ rexml.elements['domain/uuid'].text = LIBVIRT_DOMAIN_UUID
+ update(rexml.to_s)
+ @display = Display.new(@domain_name, x_display)
+ set_cdrom_boot(TAILS_ISO)
+ plug_network
+ rescue Exception => e
+ destroy_and_undefine
+ raise e
+ end
+
+ def update(xml)
+ destroy_and_undefine
+ @domain = @virt.define_domain_xml(xml)
+ end
+
+ # We lookup by name so we also catch domains from previous test
+ # suite runs that weren't properly cleaned up (e.g. aborted).
+ def destroy_and_undefine
+ @display.stop if @display && @display.active?
+ begin
+ old_domain = @virt.lookup_domain_by_name(@domain_name)
+ old_domain.destroy if old_domain.active?
+ old_domain.undefine
+ rescue
+ end
+ end
+
+ def real_mac
+ @vmnet.guest_real_mac
+ end
+
+ def set_hardware_clock(time)
+ assert(not(is_running?), 'The hardware clock cannot be set when the ' +
+ 'VM is running')
+ assert(time.instance_of?(Time), "Argument must be of type 'Time'")
+ adjustment = (time - Time.now).to_i
+ domain_rexml = REXML::Document.new(@domain.xml_desc)
+ clock_rexml_element = domain_rexml.elements['domain'].add_element('clock')
+ clock_rexml_element.add_attributes('offset' => 'variable',
+ 'basis' => 'utc',
+ 'adjustment' => adjustment.to_s)
+ update(domain_rexml.to_s)
+ end
+
+ def set_network_link_state(state)
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements['domain/devices/interface/link'].attributes['state'] = state
+ if is_running?
+ @domain.update_device(domain_xml.elements['domain/devices/interface'].to_s)
+ else
+ update(domain_xml.to_s)
+ end
+ end
+
+ def plug_network
+ set_network_link_state('up')
+ end
+
+ def unplug_network
+ set_network_link_state('down')
+ end
+
+ def set_boot_device(dev)
+ if is_running?
+ raise "boot settings can only be set for inactive vms"
+ end
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements['domain/os/boot'].attributes['dev'] = dev
+ update(domain_xml.to_s)
+ end
+
+ def set_cdrom_image(image)
+ image = nil if image == ''
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ if e.attribute('device').to_s == "cdrom"
+ if image.nil?
+ e.elements.delete('source')
+ else
+ if ! e.elements['source']
+ e.add_element('source')
+ end
+ e.elements['source'].attributes['file'] = image
+ end
+ if is_running?
+ @domain.update_device(e.to_s)
+ else
+ update(domain_xml.to_s)
+ end
+ end
+ end
+ end
+
+ def remove_cdrom
+ set_cdrom_image(nil)
+ rescue Libvirt::Error => e
+ # Even though the CD-ROM is removed successfully, we still get this
+ # error, so let's ignore it.
+ acceptable_error =
+ "Call to virDomainUpdateDeviceFlags failed: internal error: unable to " +
+ "execute QEMU command 'eject': (Tray of device '.*' is not open|" +
+ "Device '.*' is locked)"
+ raise e if not(Regexp.new(acceptable_error).match(e.to_s))
+ end
+
+ def set_cdrom_boot(image)
+ if is_running?
+ raise "boot settings can only be set for inactive vms"
+ end
+ set_boot_device('cdrom')
+ set_cdrom_image(image)
+ end
+
+ def list_disk_devs
+ ret = []
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ ret << e.elements['target'].attribute('dev').to_s
+ end
+ return ret
+ end
+
+ def plug_drive(name, type)
+ if disk_plugged?(name)
+ raise "disk '#{name}' already plugged"
+ end
+ removable_usb = nil
+ case type
+ when "removable usb", "usb"
+ type = "usb"
+ removable_usb = "on"
+ when "non-removable usb"
+ type = "usb"
+ removable_usb = "off"
+ end
+ # Get the next free /dev/sdX on guest
+ letter = 'a'
+ dev = "sd" + letter
+ while list_disk_devs.include?(dev)
+ letter = (letter[0].ord + 1).chr
+ dev = "sd" + letter
+ end
+ assert letter <= 'z'
+
+ xml = REXML::Document.new(File.read("#{@xml_path}/disk.xml"))
+ xml.elements['disk/source'].attributes['file'] = @storage.disk_path(name)
+ xml.elements['disk/driver'].attributes['type'] = @storage.disk_format(name)
+ xml.elements['disk/target'].attributes['dev'] = dev
+ xml.elements['disk/target'].attributes['bus'] = type
+ xml.elements['disk/target'].attributes['removable'] = removable_usb if removable_usb
+
+ if is_running?
+ @domain.attach_device(xml.to_s)
+ else
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements['domain/devices'].add_element(xml)
+ update(domain_xml.to_s)
+ end
+ end
+
+ def disk_xml_desc(name)
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ begin
+ if e.elements['source'].attribute('file').to_s == @storage.disk_path(name)
+ return e.to_s
+ end
+ rescue
+ next
+ end
+ end
+ return nil
+ end
+
+ def disk_rexml_desc(name)
+ xml = disk_xml_desc(name)
+ if xml
+ return REXML::Document.new(xml)
+ else
+ return nil
+ end
+ end
+
+ def unplug_drive(name)
+ xml = disk_xml_desc(name)
+ @domain.detach_device(xml)
+ end
+
+ def disk_type(dev)
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ if e.elements['target'].attribute('dev').to_s == dev
+ return e.elements['driver'].attribute('type').to_s
+ end
+ end
+ raise "No such disk device '#{dev}'"
+ end
+
+ def disk_dev(name)
+ rexml = disk_rexml_desc(name) or return nil
+ return "/dev/" + rexml.elements['disk/target'].attribute('dev').to_s
+ end
+
+ def disk_name(dev)
+ dev = File.basename(dev)
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ if /^#{e.elements['target'].attribute('dev').to_s}/.match(dev)
+ return File.basename(e.elements['source'].attribute('file').to_s)
+ end
+ end
+ raise "No such disk device '#{dev}'"
+ end
+
+ def udisks_disk_dev(name)
+ return disk_dev(name).gsub('/dev/', '/org/freedesktop/UDisks/devices/')
+ end
+
+ def disk_detected?(name)
+ dev = disk_dev(name) or return false
+ return execute("test -b #{dev}").success?
+ end
+
+ def disk_plugged?(name)
+ return not(disk_xml_desc(name).nil?)
+ end
+
+ def set_disk_boot(name, type)
+ if is_running?
+ raise "boot settings can only be set for inactive vms"
+ end
+ plug_drive(name, type) if not(disk_plugged?(name))
+ set_boot_device('hd')
+ # For some reason setting the boot device doesn't prevent CD-ROM
+ # boot unless the CD-ROM drive is empty
+ remove_cdrom
+ end
+
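+ # Illustrative usage (a sketch, not exercised by the suite itself; assumes
+ # `vm` is a powered-off instance of this class, and that the guest later
+ # mounts the tag itself, e.g. via a 9p/virtio mount):
+ #
+ #   vm.add_share('/some/host/dir', 'shared_dir')
+ #   vm.list_shares  # => ["shared_dir"]
+ #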
+ # XXX-9p: Shares don't work together with snapshot save+restore. See
+ # XXX-9p in common_steps.rb for more information.
+ def add_share(source, tag)
+ if is_running?
+ raise "shares can only be added to inactive vms"
+ end
+ # The complete source directory must be group readable by the user
+ # running the virtual machine, and world readable so the user inside
+ # the VM can access it (since we use the passthrough security model).
+ FileUtils.chown_R(nil, "libvirt-qemu", source)
+ FileUtils.chmod_R("go+rX", source)
+ xml = REXML::Document.new(File.read("#{@xml_path}/fs_share.xml"))
+ xml.elements['filesystem/source'].attributes['dir'] = source
+ xml.elements['filesystem/target'].attributes['dir'] = tag
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements['domain/devices'].add_element(xml)
+ update(domain_xml.to_s)
+ end
+
+ def list_shares
+ list = []
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/filesystem') do |e|
+ list << e.elements['target'].attribute('dir').to_s
+ end
+ return list
+ end
+
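+ # Illustrative usage (a sketch; assumes `vm` is a powered-off instance of
+ # this class):
+ #
+ #   vm.set_ram_size(2 * 1024 * 1024)  # 2 GiB, expressed in the default KiB
+ #   vm.get_ram_size_in_bytes          # => 2147483648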
+ def set_ram_size(size, unit = "KiB")
+ raise "System memory can only be added to inactive vms" if is_running?
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements['domain/memory'].text = size
+ domain_xml.elements['domain/memory'].attributes['unit'] = unit
+ domain_xml.elements['domain/currentMemory'].text = size
+ domain_xml.elements['domain/currentMemory'].attributes['unit'] = unit
+ update(domain_xml.to_s)
+ end
+
+ def get_ram_size_in_bytes
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ unit = domain_xml.elements['domain/memory'].attribute('unit').to_s
+ size = domain_xml.elements['domain/memory'].text.to_i
+ return convert_to_bytes(size, unit)
+ end
+
+ def set_arch(arch)
+ raise "System architecture can only be set to inactive vms" if is_running?
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements['domain/os/type'].attributes['arch'] = arch
+ update(domain_xml.to_s)
+ end
+
+ def add_hypervisor_feature(feature)
+ raise "Hypervisor features can only be added to inactive vms" if is_running?
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements['domain/features'].add_element(feature)
+ update(domain_xml.to_s)
+ end
+
+ def drop_hypervisor_feature(feature)
+ raise "Hypervisor features can only be fropped from inactive vms" if is_running?
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements['domain/features'].delete_element(feature)
+ update(domain_xml.to_s)
+ end
+
+ def disable_pae_workaround
+ # add_hypervisor_feature("nonpae") results in a libvirt error, and
+ # drop_hypervisor_feature("pae") alone won't disable pae. Hence we
+ # use this workaround.
+ xml = <<EOF
+ <qemu:commandline xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
+ <qemu:arg value='-cpu'/>
+ <qemu:arg value='qemu32,-pae'/>
+ </qemu:commandline>
+EOF
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements['domain'].add_element(REXML::Document.new(xml))
+ update(domain_xml.to_s)
+ end
+
+ def set_os_loader(type)
+ if is_running?
+ raise "boot settings can only be set for inactive vms"
+ end
+ if type == 'UEFI'
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements['domain/os'].add_element(REXML::Document.new(
+ '<loader>/usr/share/ovmf/OVMF.fd</loader>'
+ ))
+ update(domain_xml.to_s)
+ else
+ raise "unsupported OS loader type"
+ end
+ end
+
+ def is_running?
+ begin
+ return @domain.active?
+ rescue
+ return false
+ end
+ end
+
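+ # Illustrative use of the remote shell wrappers below (a sketch, not
+ # exercised by the suite itself; assumes `vm` is a running instance of
+ # this class; the shell library name and function are hypothetical):
+ #
+ #   vm.execute("ls /home/#{LIVE_USER}", :user => LIVE_USER).stdout
+ #   vm.execute_successfully("some_shell_function", :libs => 'some_library')
+ #     # the :libs option sources
+ #     # /usr/local/lib/tails-shell-library/some_library.sh first
+ #   vm.spawn("sleep 60")  # returns immediately; the command keeps running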
+ def execute(cmd, options = {})
+ options[:user] ||= "root"
+ options[:spawn] ||= false
+ if options[:libs]
+ libs = options[:libs]
+ options.delete(:libs)
+ libs = [libs] if not(libs.methods.include? :map)
+ cmds = libs.map do |lib_name|
+ ". /usr/local/lib/tails-shell-library/#{lib_name}.sh"
+ end
+ cmds << cmd
+ cmd = cmds.join(" && ")
+ end
+ return VMCommand.new(self, cmd, options)
+ end
+
+ def execute_successfully(*args)
+ p = execute(*args)
+ begin
+ assert_vmcommand_success(p)
+ rescue Test::Unit::AssertionFailedError => e
+ raise ExecutionFailedInVM.new(e)
+ end
+ return p
+ end
+
+ def spawn(cmd, options = {})
+ options[:spawn] = true
+ return execute(cmd, options)
+ end
+
+ def wait_until_remote_shell_is_up(timeout = 90)
+ VMCommand.wait_until_remote_shell_is_up(self, timeout)
+ end
+
+ def host_to_guest_time_sync
+ host_time = DateTime.now.strftime("%s")
+ execute("date -s '@#{host_time}'").success?
+ end
+
+ def has_network?
+ return execute("/sbin/ifconfig eth0 | grep -q 'inet addr'").success?
+ end
+
+ def has_process?(process)
+ return execute("pidof -x -o '%PPID' " + process).success?
+ end
+
+ def pidof(process)
+ return execute("pidof -x -o '%PPID' " + process).stdout.chomp.split
+ end
+
+ def select_virtual_desktop(desktop_number, user = LIVE_USER)
+ assert(desktop_number >= 0 && desktop_number <= 3,
+ "Only values between 0 and 3 are valid virtual desktop numbers")
+ execute_successfully(
+ "xdotool set_desktop '#{desktop_number}'",
+ :user => user
+ )
+ end
+
+ def focus_window(window_title, user = LIVE_USER)
+ def do_focus(window_title, user)
+ execute_successfully(
+ "xdotool search --name '#{window_title}' windowactivate --sync",
+ :user => user
+ )
+ end
+
+ begin
+ do_focus(window_title, user)
+ rescue ExecutionFailedInVM
+ # Often when xdotool fails to focus a window it'll work when retried
+ # after redrawing the screen. Switching to a new virtual desktop then
+ # back seems to be a reliable way to handle this.
+ select_virtual_desktop(3)
+ select_virtual_desktop(0)
+ sleep 5 # there aren't any visual indicators which can be used here
+ do_focus(window_title, user)
+ end
+ end
+
+ def file_exist?(file)
+ execute("test -e '#{file}'").success?
+ end
+
+ def directory_exist?(directory)
+ execute("test -d '#{directory}'").success?
+ end
+
+ def file_content(file, user = 'root')
+ # We don't quote #{file} on purpose: we sometimes pass environment variables
+ # or globs that we want to be interpreted by the shell.
+ cmd = execute("cat #{file}", :user => user)
+ assert(cmd.success?,
+ "Could not cat '#{file}':\n#{cmd.stdout}\n#{cmd.stderr}")
+ return cmd.stdout
+ end
+
+ def file_append(file, lines, user = 'root')
+ lines = lines.split("\n") if lines.class == String
+ lines.each do |line|
+ cmd = execute("echo '#{line}' >> '#{file}'", :user => user)
+ assert(cmd.success?,
+ "Could not append to '#{file}':\n#{cmd.stdout}\n#{cmd.stderr}")
+ end
+ end
+
+ def set_clipboard(text)
+ execute_successfully("echo -n '#{text}' | xsel --input --clipboard",
+ :user => LIVE_USER)
+ end
+
+ def get_clipboard
+ execute_successfully("xsel --output --clipboard", :user => LIVE_USER).stdout
+ end
+
+ def internal_snapshot_xml(name)
+ disk_devs = list_disk_devs
+ disks_xml = " <disks>\n"
+ for dev in disk_devs
+ snapshot_type = disk_type(dev) == "qcow2" ? 'internal' : 'no'
+ disks_xml +=
+ " <disk name='#{dev}' snapshot='#{snapshot_type}'></disk>\n"
+ end
+ disks_xml += " </disks>"
+ return <<-EOF
+<domainsnapshot>
+ <name>#{name}</name>
+ <description>Snapshot for #{name}</description>
+#{disks_xml}
+ </domainsnapshot>
+EOF
+ end
+
+ def VM.ram_only_snapshot_path(name)
+ return "#{$config["TMPDIR"]}/#{name}-snapshot.memstate"
+ end
+
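+ # Illustrative snapshot round trip (a sketch, not exercised by the suite
+ # itself; assumes `vm` is a running instance of this class and
+ # "some_checkpoint" is a hypothetical snapshot name):
+ #
+ #   vm.save_snapshot("some_checkpoint")
+ #   # ... run steps that modify the system under test ...
+ #   vm.restore_snapshot("some_checkpoint")
+ #   VM.remove_snapshot("some_checkpoint") unless KEEP_SNAPSHOTS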
+ def save_snapshot(name)
+ # If we have no qcow2 disk device, we'll use "memory state"
+ # snapshots, and if we have at least one qcow2 disk device, we'll
+ # use internal "system checkpoint" (memory + disks) snapshots. We
+ # have to do this since internal snapshots don't work when no
+ # such disk is available. We could use external snapshots instead,
+ # which are better in many ways, but libvirt doesn't yet know how
+ # to restore (revert back to) them.
+ # WARNING: If only transient disks, i.e. disks that were plugged
+ # after starting the domain, are used then the memory state will
+ # be dropped. External snapshots would also fix this.
+ internal_snapshot = false
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ if e.elements['driver'].attribute('type').to_s == "qcow2"
+ internal_snapshot = true
+ break
+ end
+ end
+
+ # Note: In this case the "opposite" of `internal_snapshot` is not
+ # anything relating to external snapshots, but actually "memory
+ # state"(-only) snapshots.
+ if internal_snapshot
+ xml = internal_snapshot_xml(name)
+ @domain.snapshot_create_xml(xml)
+ else
+ snapshot_path = VM.ram_only_snapshot_path(name)
+ @domain.save(snapshot_path)
+ # For consistency with the internal snapshot case (which is
+ # "live", so the domain doesn't go down) we immediately restore
+ # the snapshot.
+ # Assumption: that *immediate* save + restore doesn't mess up
+ # with network state and similar, and is fast enough to not make
+ # the clock drift too much.
+ restore_snapshot(name)
+ end
+ end
+
+ def restore_snapshot(name)
+ @domain.destroy if is_running?
+ @display.stop if @display and @display.active?
+ # See comment in save_snapshot() for details on why we use two
+ # different types of snapshots.
+ potential_ram_only_snapshot_path = VM.ram_only_snapshot_path(name)
+ if File.exist?(potential_ram_only_snapshot_path)
+ Libvirt::Domain::restore(@virt, potential_ram_only_snapshot_path)
+ @domain = @virt.lookup_domain_by_name(@domain_name)
+ else
+ begin
+ potential_internal_snapshot = @domain.lookup_snapshot_by_name(name)
+ @domain.revert_to_snapshot(potential_internal_snapshot)
+ rescue Libvirt::RetrieveError
+ raise "No such (internal nor external) snapshot #{name}"
+ end
+ end
+ @display.start
+ end
+
+ def VM.remove_snapshot(name)
+ old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME)
+ potential_ram_only_snapshot_path = VM.ram_only_snapshot_path(name)
+ if File.exist?(potential_ram_only_snapshot_path)
+ File.delete(potential_ram_only_snapshot_path)
+ else
+ snapshot = old_domain.lookup_snapshot_by_name(name)
+ snapshot.delete
+ end
+ end
+
+ def VM.snapshot_exists?(name)
+ return true if File.exist?(VM.ram_only_snapshot_path(name))
+ old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME)
+ snapshot = old_domain.lookup_snapshot_by_name(name)
+ return snapshot != nil
+ rescue Libvirt::RetrieveError
+ return false
+ end
+
+ def VM.remove_all_snapshots
+ Dir.glob("#{$config["TMPDIR"]}/*-snapshot.memstate").each do |file|
+ File.delete(file)
+ end
+ old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME)
+ old_domain.list_all_snapshots.each { |snapshot| snapshot.delete }
+ rescue Libvirt::RetrieveError
+ # No such domain, so no snapshots either.
+ end
+
+ def start
+ return if is_running?
+ @domain.create
+ @display.start
+ end
+
+ def reset
+ @domain.reset if is_running?
+ end
+
+ def power_off
+ @domain.destroy if is_running?
+ @display.stop
+ end
+
+ def take_screenshot(description)
+ @display.take_screenshot(description)
+ end
+
+ def get_remote_shell_port
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/serial') do |e|
+ if e.attribute('type').to_s == "tcp"
+ return e.elements['source'].attribute('service').to_s.to_i
+ end
+ end
+ end
+
+end
diff --git a/cucumber/features/support/hooks.rb b/cucumber/features/support/hooks.rb
new file mode 100644
index 00000000..b3bdecef
--- /dev/null
+++ b/cucumber/features/support/hooks.rb
@@ -0,0 +1,280 @@
+require 'fileutils'
+require 'rb-inotify'
+require 'time'
+require 'tmpdir'
+
+# Run once, before any feature
+AfterConfiguration do |config|
+ # Reorder the execution of some features. As we progress through a
+ # run we accumulate more and more snapshots and hence use more and
+ # more disk space, but some features will leave nothing behind
+ # and/or possibly use large amounts of disk space temporarily for
+ # various reasons. By running these first we minimize the amount of
+ # disk space needed.
+ prioritized_features = [
+ # Features not using snapshots but using large amounts of scratch
+ # space for other reasons:
+ 'features/erase_memory.feature',
+ 'features/untrusted_partitions.feature',
+ # Features using temporary snapshots:
+ 'features/apt.feature',
+ 'features/i2p.feature',
+ 'features/root_access_control.feature',
+ 'features/time_syncing.feature',
+ 'features/tor_bridges.feature',
+ # This feature needs almost the biggest snapshot (USB install,
+ # excluding persistence) and will create yet another disk and
+ # install Tails on it. This should be the peak of disk usage.
+ 'features/usb_install.feature',
+ ]
+ feature_files = config.feature_files
+ # The &-intersection is specified to keep the element ordering of
+ # the *left* operand.
+ intersection = prioritized_features & feature_files
+ if not intersection.empty?
+ feature_files -= intersection
+ feature_files = intersection + feature_files
+ config.define_singleton_method(:feature_files) { feature_files }
+ end
+
+ # Used to keep track of when we start our first @product feature, when
+ # we'll do some special things.
+ $started_first_product_feature = false
+
+ if File.exist?($config["TMPDIR"])
+ if !File.directory?($config["TMPDIR"])
+ raise "Temporary directory '#{$config["TMPDIR"]}' exists but is not a " +
+ "directory"
+ end
+ if !File.owned?($config["TMPDIR"])
+ raise "Temporary directory '#{$config["TMPDIR"]}' must be owned by the " +
+ "current user"
+ end
+ FileUtils.chmod(0755, $config["TMPDIR"])
+ else
+ begin
+ FileUtils.mkdir_p($config["TMPDIR"])
+ rescue Errno::EACCES => e
+ raise "Cannot create temporary directory: #{e.to_s}"
+ end
+ end
+
+ # Start a thread that monitors a pseudo fifo file and debug_log():s
+ # anything written to it "immediately" (well, as fast as inotify
+ # detects it). We're forced to a convoluted solution like this
+ # because CRuby's thread support is horrible as soon as IO is mixed
+ # in (other threads get blocked).
+ FileUtils.rm(DEBUG_LOG_PSEUDO_FIFO) if File.exist?(DEBUG_LOG_PSEUDO_FIFO)
+ FileUtils.touch(DEBUG_LOG_PSEUDO_FIFO)
+ at_exit do
+ FileUtils.rm(DEBUG_LOG_PSEUDO_FIFO) if File.exist?(DEBUG_LOG_PSEUDO_FIFO)
+ end
+ Thread.new do
+ File.open(DEBUG_LOG_PSEUDO_FIFO) do |fd|
+ watcher = INotify::Notifier.new
+ watcher.watch(DEBUG_LOG_PSEUDO_FIFO, :modify) do
+ line = fd.read.chomp
+ debug_log(line) if line and line.length > 0
+ end
+ watcher.run
+ end
+ end
+ # Fix Sikuli's debug_log():ing.
+ bind_java_to_pseudo_fifo_logger
+end
+
+# Common
+########
+
+After do
+ if @after_scenario_hooks
+ @after_scenario_hooks.each { |block| block.call }
+ end
+ @after_scenario_hooks = Array.new
+end
+
+BeforeFeature('@product', '@source') do |feature|
+ raise "Feature #{feature.file} is tagged both @product and @source, " +
+ "which is an impossible combination"
+end
+
+at_exit do
+ $vm.destroy_and_undefine if $vm
+ if $virt
+ unless KEEP_SNAPSHOTS
+ VM.remove_all_snapshots
+ $vmstorage.clear_pool
+ end
+ $vmnet.destroy_and_undefine
+ $virt.close
+ end
+ # The artifacts directory is empty (and useless) if it contains
+ # nothing but the mandatory . and ..
+ if Dir.entries(ARTIFACTS_DIR).size <= 2
+ FileUtils.rmdir(ARTIFACTS_DIR)
+ end
+end
+
+# For @product tests
+####################
+
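+# Illustrative usage from a step definition (a sketch; the cleanup command
+# and path are hypothetical):
+#
+#   add_after_scenario_hook { $vm.execute("rm -f /tmp/some_test_artifact") }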
+def add_after_scenario_hook(&block)
+ @after_scenario_hooks ||= Array.new
+ @after_scenario_hooks << block
+end
+
+def save_failure_artifact(type, path)
+ $failure_artifacts << [type, path]
+end
+
+BeforeFeature('@product') do |feature|
+ if TAILS_ISO.nil?
+ raise "No ISO image specified, and none could be found in the " +
+ "current directory"
+ end
+ if File.exist?(TAILS_ISO)
+ # Workaround: when libvirt takes ownership of the ISO image it may
+ # become unreadable for the live user inside the guest in the
+ # host-to-guest share used for some tests.
+
+ if !File.world_readable?(TAILS_ISO)
+ if File.owned?(TAILS_ISO)
+ File.chmod(0644, TAILS_ISO)
+ else
+ raise "warning: the Tails ISO image must be world readable or be " +
+ "owned by the current user to be available inside the guest " +
+ "VM via host-to-guest shares, which is required by some tests"
+ end
+ end
+ else
+ raise "The specified Tails ISO image '#{TAILS_ISO}' does not exist"
+ end
+ if !File.exist?(OLD_TAILS_ISO)
+ raise "The specified old Tails ISO image '#{OLD_TAILS_ISO}' does not exist"
+ end
+ if not($started_first_product_feature)
+ $virt = Libvirt::open("qemu:///system")
+ VM.remove_all_snapshots if !KEEP_SNAPSHOTS
+ $vmnet = VMNet.new($virt, VM_XML_PATH)
+ $vmstorage = VMStorage.new($virt, VM_XML_PATH)
+ $started_first_product_feature = true
+ end
+end
+
+AfterFeature('@product') do
+ unless KEEP_SNAPSHOTS
+ checkpoints.each do |name, vals|
+ if vals[:temporary] and VM.snapshot_exists?(name)
+ VM.remove_snapshot(name)
+ end
+ end
+ end
+end
+
+# Cucumber Before hooks are executed in the order they are listed, and
+# we want this hook to always run first, so it must always be the
+# *first* Before hook matching @product listed in this file.
+Before('@product') do |scenario|
+ $failure_artifacts = Array.new
+ if $config["CAPTURE"]
+ video_name = sanitize_filename("#{scenario.name}.mkv")
+ @video_path = "#{ARTIFACTS_DIR}/#{video_name}"
+ capture = IO.popen(['avconv',
+ '-f', 'x11grab',
+ '-s', '1024x768',
+ '-r', '15',
+ '-i', "#{$config['DISPLAY']}.0",
+ '-an',
+ '-c:v', 'libx264',
+ '-y',
+ @video_path,
+ :err => ['/dev/null', 'w'],
+ ])
+ @video_capture_pid = capture.pid
+ end
+ @screen = Sikuli::Screen.new
+ # English will be assumed if this is not overridden
+ @language = ""
+ @os_loader = "MBR"
+ @sudo_password = "asdf"
+ @persistence_password = "asdf"
+end
+
+# Cucumber After hooks are executed in the *reverse* order they are
+# listed, and we want this hook to always run second last, so it must always
+# be the *second* After hook matching @product listed in this file --
+# hooks added dynamically via add_after_scenario_hook() are supposed to
+# truly be last.
+After('@product') do |scenario|
+ if @video_capture_pid
+ # We can be incredibly fast at detecting errors sometimes, so the
+ # screen barely "settles" when we end up here and kill the video
+ # capture. Let's wait a few seconds more to make it easier to see
+ # what the error was.
+ sleep 3 if scenario.failed?
+ Process.kill("INT", @video_capture_pid)
+ save_failure_artifact("Video", @video_path)
+ end
+ if scenario.failed?
+ time_of_fail = Time.now - TIME_AT_START
+ secs = "%02d" % (time_of_fail % 60)
+ mins = "%02d" % ((time_of_fail / 60) % 60)
+ hrs = "%02d" % (time_of_fail / (60*60))
+ elapsed = "#{hrs}:#{mins}:#{secs}"
+ info_log("Scenario failed at time #{elapsed}")
+ screen_capture = @screen.capture
+ save_failure_artifact("Screenshot", screen_capture.getFilename)
+ $failure_artifacts.sort!
+ $failure_artifacts.each do |type, file|
+ artifact_name = sanitize_filename("#{elapsed}_#{scenario.name}#{File.extname(file)}")
+ artifact_path = "#{ARTIFACTS_DIR}/#{artifact_name}"
+ assert(File.exist?(file))
+ FileUtils.mv(file, artifact_path)
+ info_log
+ info_log_artifact_location(type, artifact_path)
+ end
+ pause("Scenario failed") if $config["PAUSE_ON_FAIL"]
+ else
+ if @video_path && File.exist?(@video_path) && not($config['CAPTURE_ALL'])
+ FileUtils.rm(@video_path)
+ end
+ end
+end
+
+Before('@product', '@check_tor_leaks') do |scenario|
+ @tor_leaks_sniffer = Sniffer.new(sanitize_filename(scenario.name), $vmnet)
+ @tor_leaks_sniffer.capture
+ add_after_scenario_hook do
+ @tor_leaks_sniffer.clear
+ end
+end
+
+After('@product', '@check_tor_leaks') do |scenario|
+ @tor_leaks_sniffer.stop
+ if scenario.passed?
+ if @bridge_hosts.nil?
+ expected_tor_nodes = get_all_tor_nodes
+ else
+ expected_tor_nodes = @bridge_hosts
+ end
+ leaks = FirewallLeakCheck.new(@tor_leaks_sniffer.pcap_file,
+ :accepted_hosts => expected_tor_nodes)
+ leaks.assert_no_leaks
+ end
+end
+
+# For @source tests
+###################
+
+# BeforeScenario
+Before('@source') do
+ @orig_pwd = Dir.pwd
+ @git_clone = Dir.mktmpdir 'tails-apt-tests'
+ Dir.chdir @git_clone
+end
+
+# AfterScenario
+After('@source') do
+ Dir.chdir @orig_pwd
+ FileUtils.remove_entry_secure @git_clone
+end