path: root/cucumber/features/support
Diffstat (limited to 'cucumber/features/support')
-rw-r--r--  cucumber/features/support/config.rb                     85
-rw-r--r--  cucumber/features/support/env.rb                       126
-rw-r--r--  cucumber/features/support/extra_hooks.rb               187
-rw-r--r--  cucumber/features/support/helpers/chatbot_helper.rb     59
-rw-r--r--  cucumber/features/support/helpers/ctcp_helper.rb       126
-rw-r--r--  cucumber/features/support/helpers/display_helper.rb     48
-rw-r--r--  cucumber/features/support/helpers/dogtail.rb           233
-rw-r--r--  cucumber/features/support/helpers/firewall_helper.rb    94
-rw-r--r--  cucumber/features/support/helpers/misc_helpers.rb      344
-rw-r--r--  cucumber/features/support/helpers/remote_shell.rb      171
-rw-r--r--  cucumber/features/support/helpers/sikuli_helper.rb     238
-rw-r--r--  cucumber/features/support/helpers/sniffing_helper.rb    53
-rw-r--r--  cucumber/features/support/helpers/sshd_helper.rb        67
-rw-r--r--  cucumber/features/support/helpers/storage_helper.rb    210
-rw-r--r--  cucumber/features/support/helpers/vm_helper.rb         703
-rw-r--r--  cucumber/features/support/hooks.rb                     313
16 files changed, 0 insertions, 3057 deletions
diff --git a/cucumber/features/support/config.rb b/cucumber/features/support/config.rb
deleted file mode 100644
index 54a0f1cd..00000000
--- a/cucumber/features/support/config.rb
+++ /dev/null
@@ -1,85 +0,0 @@
-require 'fileutils'
-require 'yaml'
-require "cucumber/features/support/helpers/misc_helpers.rb"
-
-# These files deal with options like some of the settings passed
-# to the `run_test_suite` script, and "secrets" like credentials
-# (passwords, SSH keys) to be used in tests.
-CONFIG_DIR = "/srv/jenkins/cucumber/features/config"
-DEFAULTS_CONFIG_FILE = "#{CONFIG_DIR}/defaults.yml"
-LOCAL_CONFIG_FILE = "#{CONFIG_DIR}/local.yml"
-LOCAL_CONFIG_DIRS_FILES_GLOB = "#{CONFIG_DIR}/*.d/*.yml"
-
-# Dynamic
-$tails_iso = ENV['ISO'] || get_newest_iso
-$old_tails_iso = ENV['OLD_ISO'] || get_oldest_iso
-$tmp_dir = ENV['PWD']
-$vm_xml_path = ENV['VM_XML_PATH']
-$misc_files_dir = "cucumber/features/misc_files"
-$keep_snapshots = !ENV['KEEP_SNAPSHOTS'].nil?
-$x_display = ENV['DISPLAY']
-$debug = !ENV['DEBUG'].nil?
-$pause_on_fail = !ENV['PAUSE_ON_FAIL'].nil?
-$time_at_start = Time.now
-$live_user = "user"
-$sikuli_retry_findfailed = !ENV['SIKULI_RETRY_FINDFAILED'].nil?
-$executor_number = ENV['EXECUTOR_NUMBER']
-
-assert File.exists?(DEFAULTS_CONFIG_FILE)
-$config = YAML.load(File.read(DEFAULTS_CONFIG_FILE))
-config_files = Dir.glob(LOCAL_CONFIG_DIRS_FILES_GLOB).sort
-config_files.insert(0, LOCAL_CONFIG_FILE) if File.exists?(LOCAL_CONFIG_FILE)
-config_files.each do |config_file|
- yaml_struct = YAML.load(File.read(config_file)) || Hash.new
- if not(yaml_struct.instance_of?(Hash))
- raise "Local configuration file '#{config_file}' is malformed"
- end
- $config.merge!(yaml_struct)
-end
-# Options passed to the `run_test_suite` script will always take
-# precedence. The way we import these keys is only safe for values
-# with types boolean or string. If we need more, we'll have to invoke
-# YAML's type autodetection on ENV somehow.
-$config.merge!(ENV)
-
-# Export TMPDIR back to the environment for subprocesses that we start
-# (e.g. guestfs). Note that this export will only make a difference if
-# TMPDIR wasn't already set and --tmpdir wasn't passed, i.e. only when
-# we use the default.
-ENV['TMPDIR'] = $config['TMPDIR']
-
-# Dynamic constants initialized through the environment or similar,
-# e.g. options we do not want to be configurable through the YAML
-# configuration files.
-DEBUG_LOG_PSEUDO_FIFO = "#{$config["TMPDIR"]}/debug_log_pseudo_fifo"
-DISPLAY = ENV['DISPLAY']
-GIT_DIR = ENV['PWD']
-KEEP_SNAPSHOTS = !ENV['KEEP_SNAPSHOTS'].nil?
-LIVE_USER = "live_user"
-TAILS_ISO = ENV['ISO']
-OLD_TAILS_ISO = ENV['OLD_ISO'] || TAILS_ISO
-TIME_AT_START = Time.now
-ARTIFACTS_DIR = $config['TMPDIR'] + "/results"
-JOB_NAME = ENV['JOB_NAME']
-BUILD_ID = ENV['BUILD_ID']
-PATIENCE = ENV['PATIENCE'] || 1
-
-# Constants that are statically initialized.
-CONFIGURED_KEYSERVER_HOSTNAME = 'hkps.pool.sks-keyservers.net'
-LIBVIRT_DOMAIN_NAME = "lvcVM-" + JOB_NAME
-#LIBVIRT_DOMAIN_UUID = "203552d5-819c-41f3-800e-2c8ef2546%03d" % $executor_number
-LIBVIRT_NETWORK_NAME = "lvcNET"
-#LIBVIRT_NETWORK_UUID = "f2305af3-2a64-4f16-afe6-b9dbf02a597e"
-LIBVIRT_REMOTE_SHELL_PORT = 13370 + Integer($executor_number)
-MISC_FILES_DIR = "/srv/jenkins/cucumber/features/misc_files"
-SERVICES_EXPECTED_ON_ALL_IFACES =
- [
- ["cupsd", "*", "631"],
- ["dhclient", "*", "*"]
- ]
-# OpenDNS
-SOME_DNS_SERVER = "208.67.222.222"
-VM_XML_PATH = "/srv/jenkins/cucumber/features/domains"
-
-#TAILS_SIGNING_KEY = cmd_helper(". #{Dir.pwd}/config/amnesia; echo ${AMNESIA_DEV_KEYID}").tr(' ', '').chomp
-TAILS_DEBIAN_REPO_KEY = "221F9A3C6FA3E09E182E060BC7988EA7A358D82E"
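As a tiny illustration (with hypothetical values) of the precedence implemented above: later sources win over earlier ones, and the environment wins over all YAML files.

    defaults = { 'TMPDIR' => '/tmp/TailsToaster', 'PAUSE_ON_FAIL' => nil }
    local    = { 'TMPDIR' => '/srv/tmp' }        # e.g. from local.yml
    env      = { 'PAUSE_ON_FAIL' => 'yes' }      # e.g. exported by run_test_suite
    config   = defaults.merge(local).merge(env)
    # => {"TMPDIR"=>"/srv/tmp", "PAUSE_ON_FAIL"=>"yes"}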
diff --git a/cucumber/features/support/env.rb b/cucumber/features/support/env.rb
deleted file mode 100644
index c52affff..00000000
--- a/cucumber/features/support/env.rb
+++ /dev/null
@@ -1,126 +0,0 @@
-require 'rubygems'
-require "cucumber/features/support/extra_hooks.rb"
-require 'time'
-require 'rspec'
-
-# Force UTF-8. Ruby will default to the system locale, and if it is
-# non-UTF-8, String-methods will fail when operating on non-ASCII
-# strings.
-Encoding.default_external = Encoding::UTF_8
-Encoding.default_internal = Encoding::UTF_8
-
-def fatal_system(str)
- unless system(str)
- raise StandardError.new("Command exited with #{$?}")
- end
-end
-
-def git_exists?
- File.exists? '.git'
-end
-
-def create_git
- Dir.mkdir 'config'
- FileUtils.touch('config/base_branch')
- Dir.mkdir('config/APT_overlays.d')
- Dir.mkdir('config/APT_snapshots.d')
- ['debian', 'debian-security', 'torproject'].map do |origin|
- Dir.mkdir("config/APT_snapshots.d/#{origin}")
- end
- Dir.mkdir 'debian'
- File.open('debian/changelog', 'w') do |changelog|
- changelog.write(<<END_OF_CHANGELOG)
-tails (0) stable; urgency=low
-
- * First release.
-
- -- Tails developers <tails@boum.org> Mon, 30 Jan 2012 01:00:00 +0000
-END_OF_CHANGELOG
- end
-
- fatal_system "git init --quiet"
- fatal_system "git config user.email 'tails@boum.org'"
- fatal_system "git config user.name 'Tails developers'"
- fatal_system "git add debian/changelog"
- fatal_system "git commit --quiet debian/changelog -m 'First release'"
- fatal_system "git branch -M stable"
- fatal_system "git branch testing stable"
- fatal_system "git branch devel stable"
- fatal_system "git branch feature/jessie devel"
-end
-
-def current_branch
- cmd = 'git rev-parse --symbolic-full-name --abbrev-ref HEAD'.split
- branch = cmd_helper(cmd).strip
- assert_not_equal("HEAD", branch, "We are in 'detached HEAD' state")
- return branch
-end
-
-# In order: if git HEAD is tagged, return its name; if a branch is
-# checked out, return its name; otherwise we are in 'detached HEAD'
-# state, and we return the empty string.
-def describe_git_head
- cmd_helper("git describe --tags --exact-match #{current_commit}".split).strip
-rescue Test::Unit::AssertionFailedError
- begin
- current_branch
- rescue Test::Unit::AssertionFailedError
- ""
- end
-end
-
-def current_commit
- cmd_helper('git rev-parse HEAD'.split).strip
-end
-
-def current_short_commit
- current_commit[0, 7]
-end
-
-RSpec::Matchers.define :have_suite do |suite|
- match do |string|
- # e.g.: `deb http://deb.tails.boum.org/ 0.10 main contrib non-free`
- %r{^deb +http://deb\.tails\.boum\.org/ +#{Regexp.escape(suite)} main}.match(string)
- end
- failure_message_for_should do |string|
- "expected the sources to include #{suite}\nCurrent sources : #{string}"
- end
- failure_message_for_should_not do |string|
- "expected the sources to exclude #{suite}\nCurrent sources : #{string}"
- end
- description do
- "expected an output with #{suite}"
- end
-end
-
-RSpec::Matchers.define :have_tagged_snapshot do |tag|
- match do |string|
- # e.g.: `http://tagged.snapshots.deb.tails.boum.org/0.10`
- %r{^http://tagged\.snapshots\.deb\.tails\.boum\.org/#{Regexp.escape(tag)}/[a-z-]+$}.match(string)
- end
- failure_message_for_should do |string|
- "expected the mirror to be #{tag}\nCurrent mirror: #{string}"
- end
- failure_message_for_should_not do |string|
- "expected the mirror not to be #{tag}\nCurrent mirror: #{string}"
- end
- description do
- "expected an output with #{tag}"
- end
-end
-
-RSpec::Matchers.define :have_time_based_snapshot do |tag|
- match do |string|
- # e.g.: `http://time-based.snapshots.deb.tails.boum.org/debian/2016060602`
- %r{^http://time\-based\.snapshots\.deb\.tails\.boum\.org/[^/]+/\d+}.match(string)
- end
- failure_message_for_should do |string|
- "expected the mirror to be a time-based snapshot\nCurrent mirror: #{string}"
- end
- failure_message_for_should_not do |string|
- "expected the mirror not to be a time-based snapshot\nCurrent mirror: #{string}"
- end
- description do
- "expected a time-based snapshot"
- end
-end
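For illustration only, these matchers are meant to be used in step definitions on strings read from the system under test; the concrete suite and tag values below are just examples:

    sources = "deb http://deb.tails.boum.org/ 0.10 main contrib non-free\n"
    expect(sources).to have_suite('0.10')

    mirror = "http://tagged.snapshots.deb.tails.boum.org/2.7.1/debian"
    expect(mirror).to have_tagged_snapshot('2.7.1')
    expect(mirror).not_to have_time_based_snapshot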
diff --git a/cucumber/features/support/extra_hooks.rb b/cucumber/features/support/extra_hooks.rb
deleted file mode 100644
index c2c57494..00000000
--- a/cucumber/features/support/extra_hooks.rb
+++ /dev/null
@@ -1,187 +0,0 @@
-# Make the code below work with cucumber >= 2.0. Once we stop
-# supporting <2.0 we should probably do this differently, but this way
-# we can easily support both at the same time.
-
-begin
- if not(Cucumber::Core::Ast::Feature.instance_methods.include?(:accept_hook?))
- if Gem::Version.new(Cucumber::VERSION) >= Gem::Version.new('2.4.0')
- require 'cucumber/core/gherkin/tag_expression'
- else
- require 'gherkin/tag_expression'
- Cucumber::Core::Gherkin = Gherkin
- end
- class Cucumber::Core::Ast::Feature
- # Code inspired by Cucumber::Core::Test::Case.match_tags?() in
- # cucumber-ruby-core 1.1.3, lib/cucumber/core/test/case.rb:~59.
- def accept_hook?(hook)
- tag_expr = Cucumber::Core::Gherkin::TagExpression.new(hook.tag_expressions.flatten)
- tag_expr.evaluate(@tags)
- end
- end
- end
-rescue NameError => e
- raise e if e.to_s != "uninitialized constant Cucumber::Core"
-end
-
-# Sort of inspired by Cucumber::RbSupport::RbHook (from cucumber
-# < 2.0) but really we just want an object with a 'tag_expressions'
-# attribute to make accept_hook?() (used below) happy.
-class SimpleHook
- attr_reader :tag_expressions
-
- def initialize(tag_expressions, proc)
- @tag_expressions = tag_expressions
- @proc = proc
- end
-
- def invoke(arg)
- @proc.call(arg)
- end
-end
-
-def BeforeFeature(*tag_expressions, &block)
- $before_feature_hooks ||= []
- $before_feature_hooks << SimpleHook.new(tag_expressions, block)
-end
-
-def AfterFeature(*tag_expressions, &block)
- $after_feature_hooks ||= []
- $after_feature_hooks << SimpleHook.new(tag_expressions, block)
-end
-
-require 'cucumber/formatter/console'
-if not($at_exit_print_artifacts_dir_patching_done)
- module Cucumber::Formatter::Console
- if method_defined?(:print_stats)
- alias old_print_stats print_stats
- end
- def print_stats(*args)
- @io.puts "Artifacts directory: #{ARTIFACTS_DIR}"
- @io.puts
- @io.puts "Debug log: #{ARTIFACTS_DIR}/debug.log"
- @io.puts
- if self.class.method_defined?(:old_print_stats)
- old_print_stats(*args)
- end
- end
- end
- $at_exit_print_artifacts_dir_patching_done = true
-end
-
-def info_log(message = "", options = {})
- options[:color] = :clear
- # This trick allows us to use a module's (~private) method on a
- # one-off basis.
- cucumber_console = Class.new.extend(Cucumber::Formatter::Console)
- puts cucumber_console.format_string(message, options[:color])
-end
-
-def debug_log(message, options = {})
- options[:timestamp] = true unless options.has_key?(:timestamp)
- if $debug_log_fns
- if options[:timestamp]
- # Force UTC so the local timezone difference vs UTC won't be
- # added to the result.
- elapsed = (Time.now - TIME_AT_START.to_f).utc.strftime("%H:%M:%S.%9N")
- message = "#{elapsed}: #{message}"
- end
- $debug_log_fns.each { |fn| fn.call(message, options) }
- end
-end
-
-require 'cucumber/formatter/pretty'
-# Backport part of commit af940a8 from the cucumber-ruby repo. This
-# fixes the "out hook output" for the Pretty formatter so stuff
-# written via `puts` after a Scenario has run its last step will be
-# written, instead of delayed to the next Feature/Scenario (if any) or
-# dropped completely (if not).
-# XXX: This can be removed once we stop supporting Debian Jessie
-# around when Debian Stretch is released.
-if Gem::Version.new(Cucumber::VERSION) < Gem::Version.new('2.0.0.beta.4')
- module Cucumber
- module Formatter
- class Pretty
- def after_feature_element(feature_element)
- print_messages
- @io.puts
- @io.flush
- end
- end
- end
- end
-end
-
-module ExtraFormatters
- # This is a null formatter in the sense that it doesn't ever output
-  # anything. We only use it to hook into the correct events so we can
- # add our extra hooks.
- class ExtraHooks
- def initialize(runtime, io, options)
- # We do not care about any of the arguments.
- # XXX: We should be able to just have `*args` for the arguments
- # in the prototype, but since moving to cucumber 2.4 that breaks
- # this formatter for some unknown reason.
- end
-
- def before_feature(feature)
- if $before_feature_hooks
- $before_feature_hooks.each do |hook|
- hook.invoke(feature) if feature.accept_hook?(hook)
- end
- end
- end
-
- def after_feature(feature)
- if $after_feature_hooks
- $after_feature_hooks.reverse.each do |hook|
- hook.invoke(feature) if feature.accept_hook?(hook)
- end
- end
- end
- end
-
- # The pretty formatter with debug logging mixed into its output.
- class PrettyDebug < Cucumber::Formatter::Pretty
- def initialize(runtime, io, options)
- super(runtime, io, options)
- $debug_log_fns ||= []
- $debug_log_fns << self.method(:debug_log)
- end
-
- def debug_log(message, options)
- options[:color] ||= :blue
- @io.puts(format_string(message, options[:color]))
- @io.flush
- end
- end
-
-end
-
-module Cucumber
- module Cli
- class Options
- BUILTIN_FORMATS['pretty_debug'] =
- [
- 'ExtraFormatters::PrettyDebug',
- 'Prints the feature with debugging information - in colours.'
- ]
- BUILTIN_FORMATS['debug'] = BUILTIN_FORMATS['pretty_debug']
- end
- end
-end
-
-AfterConfiguration do |config|
- # Cucumber may read this file multiple times, and hence run this
- # AfterConfiguration hook multiple times. We only want our
- # ExtraHooks formatter to be loaded once, otherwise the hooks would
-  # be run multiple times.
- extra_hooks = [
- ['ExtraFormatters::ExtraHooks', '/dev/null'],
- ['Cucumber::Formatter::Pretty', "#{ARTIFACTS_DIR}/pretty.log"],
- ['Cucumber::Formatter::Json', "#{ARTIFACTS_DIR}/cucumber.json"],
- ['ExtraFormatters::PrettyDebug', "#{ARTIFACTS_DIR}/debug.log"],
- ]
- extra_hooks.each do |hook|
- config.formats << hook if not(config.formats.include?(hook))
- end
-end
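A hypothetical use of the feature-level hooks defined above (the @product tag and log messages are made up for illustration):

    BeforeFeature('@product') do |feature|
      # Runs once before the first scenario of a matching feature.
      info_log("Starting feature: #{feature.name}")
    end

    AfterFeature('@product') do |feature|
      # Runs once after the last scenario of a matching feature.
      info_log("Finished feature: #{feature.name}")
    end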
diff --git a/cucumber/features/support/helpers/chatbot_helper.rb b/cucumber/features/support/helpers/chatbot_helper.rb
deleted file mode 100644
index 23ce3e1a..00000000
--- a/cucumber/features/support/helpers/chatbot_helper.rb
+++ /dev/null
@@ -1,59 +0,0 @@
-require 'tempfile'
-
-class ChatBot
-
- def initialize(account, password, otr_key, opts = Hash.new)
- @account = account
- @password = password
- @otr_key = otr_key
- @opts = opts
- @pid = nil
- @otr_key_file = nil
- end
-
- def start
- @otr_key_file = Tempfile.new("otr_key.", $config["TMPDIR"])
- @otr_key_file << @otr_key
- @otr_key_file.close
-
- cmd_helper(['/usr/bin/convertkey', @otr_key_file.path])
- cmd_helper(["mv", "#{@otr_key_file.path}3", @otr_key_file.path])
-
- cmd = [
- "#{GIT_DIR}/features/scripts/otr-bot.py",
- @account,
- @password,
- @otr_key_file.path
- ]
- cmd += ["--connect-server", @opts["connect_server"]] if @opts["connect_server"]
- cmd += ["--auto-join"] + @opts["auto_join"] if @opts["auto_join"]
- cmd += ["--log-file", DEBUG_LOG_PSEUDO_FIFO]
-
- job = IO.popen(cmd)
- @pid = job.pid
- end
-
- def stop
- @otr_key_file.delete
- begin
- Process.kill("TERM", @pid)
- rescue
- # noop
- end
- end
-
- def active?
- begin
- ret = Process.kill(0, @pid)
- rescue Errno::ESRCH => e
- if e.message == "No such process"
- return false
- else
- raise e
- end
- end
- assert_equal(1, ret, "This shouldn't happen")
- return true
- end
-
-end
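Hypothetical usage of the ChatBot helper above; account, password, OTR key and chat room are placeholders, and note that the options hash uses string keys:

    otr_key_material = '...'  # placeholder: libotr private key blob
    bot = ChatBot.new('bot@example.com', 'secret', otr_key_material,
                      'auto_join' => ['chat@conference.example.com'])
    bot.start
    # ... exercise the messaging client under test ...
    bot.stop if bot.active?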
diff --git a/cucumber/features/support/helpers/ctcp_helper.rb b/cucumber/features/support/helpers/ctcp_helper.rb
deleted file mode 100644
index ee5180ab..00000000
--- a/cucumber/features/support/helpers/ctcp_helper.rb
+++ /dev/null
@@ -1,126 +0,0 @@
-require 'net/irc'
-require 'timeout'
-
-class CtcpChecker < Net::IRC::Client
-
- CTCP_SPAM_DELAY = 5
-
- # `spam_target`: the nickname of the IRC user to CTCP spam.
- # `ctcp_cmds`: the Array of CTCP commands to send.
- # `expected_ctcp_replies`: Hash where the keys are the exact set of replies
- # we expect, and their values a regex the reply data must match.
- def initialize(host, port, spam_target, ctcp_cmds, expected_ctcp_replies)
- @spam_target = spam_target
- @ctcp_cmds = ctcp_cmds
- @expected_ctcp_replies = expected_ctcp_replies
- nickname = self.class.random_irc_nickname
- opts = {
- :nick => nickname,
- :user => nickname,
- :real => nickname,
- }
- opts[:logger] = Logger.new(DEBUG_LOG_PSEUDO_FIFO)
- super(host, port, opts)
- end
-
- # Makes sure that only the expected CTCP replies are received.
- def verify_ctcp_responses
- @sent_ctcp_cmds = Set.new
- @received_ctcp_replies = Set.new
-
- # Give 60 seconds for connecting to the server and other overhead
- # beyond the expected time to spam all CTCP commands.
- expected_ctcp_spam_time = @ctcp_cmds.length * CTCP_SPAM_DELAY
- timeout = expected_ctcp_spam_time + 60
-
- begin
- Timeout::timeout(timeout) do
- start
- end
- rescue Timeout::Error
- # Do nothing as we'll check for errors below.
- ensure
- finish
- end
-
- ctcp_cmds_not_sent = @ctcp_cmds - @sent_ctcp_cmds.to_a
- expected_ctcp_replies_not_received =
- @expected_ctcp_replies.keys - @received_ctcp_replies.to_a
-
- if !ctcp_cmds_not_sent.empty? || !expected_ctcp_replies_not_received.empty?
- raise "Failed to spam all CTCP commands and receive the expected " +
- "replies within #{timeout} seconds.\n" +
- (ctcp_cmds_not_sent.empty? ? "" :
- "CTCP commands not sent: #{ctcp_cmds_not_sent}\n") +
- (expected_ctcp_replies_not_received.empty? ? "" :
- "Expected CTCP replies not received: " +
- expected_ctcp_replies_not_received.to_s)
- end
-
- end
-
- # Generate a random IRC nickname, in this case an alpha-numeric
- # string with length 10 to 15. To make it legal, the first character
- # is forced to be alpha.
- def self.random_irc_nickname
- random_alpha_string(1) + random_alnum_string(9, 14)
- end
-
- def spam(spam_target)
- post(NOTICE, spam_target, "Hi! I'm gonna test your CTCP capabilities now.")
- @ctcp_cmds.each do |cmd|
- sleep CTCP_SPAM_DELAY
- full_cmd = cmd
- case cmd
- when "PING"
- full_cmd += " #{Time.now.to_i}"
- when "ACTION"
- full_cmd += " barfs on the floor."
- when "ERRMSG"
- full_cmd += " Pidgin should not respond to this."
- end
- post(PRIVMSG, spam_target, ctcp_encode(full_cmd))
- @sent_ctcp_cmds << cmd
- end
- end
-
- def on_rpl_welcome(m)
- super
- Thread.new { spam(@spam_target) }
- end
-
- def on_message(m)
- if m.command == ERR_NICKNAMEINUSE
- finish
- new_nick = self.class.random_irc_nickname
- @opts.marshal_load({
- :nick => new_nick,
- :user => new_nick,
- :real => new_nick,
- })
- start
- return
- end
-
- if m.ctcp? and /^:#{Regexp.escape(@spam_target)}!/.match(m)
- m.ctcps.each do |ctcp_reply|
- reply_type, _, reply_data = ctcp_reply.partition(" ")
- if @expected_ctcp_replies.has_key?(reply_type)
- if @expected_ctcp_replies[reply_type].match(reply_data)
- @received_ctcp_replies << reply_type
- else
- raise "Received expected CTCP reply '#{reply_type}' but with " +
- "unexpected data '#{reply_data}' "
- end
- else
- raise "Received unexpected CTCP reply '#{reply_type}' with " +
- "data '#{reply_data}'"
- end
- end
- end
- if Set.new(@ctcp_cmds) == @sent_ctcp_cmds && \
- Set.new(@expected_ctcp_replies.keys) == @received_ctcp_replies
- finish
- end
- end
-end
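A sketch of how the checker above is driven; the host, nickname and expected reply patterns are placeholders:

    expected_replies = {
      'PING'    => /^\d+$/,        # our own timestamp echoed back
      'VERSION' => /^Purple IRC/,  # placeholder client version string
    }
    checker = CtcpChecker.new('irc.example.com', 6667, 'target_nick',
                              expected_replies.keys, expected_replies)
    checker.verify_ctcp_responses  # raises on missing or unexpected replies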
diff --git a/cucumber/features/support/helpers/display_helper.rb b/cucumber/features/support/helpers/display_helper.rb
deleted file mode 100644
index b4dce733..00000000
--- a/cucumber/features/support/helpers/display_helper.rb
+++ /dev/null
@@ -1,48 +0,0 @@
-
-class Display
-
- def initialize(domain, x_display)
- @domain = domain
- @x_display = x_display
- end
-
- def active?
- p = IO.popen(["xprop", "-display", @x_display,
- "-name", "#{@domain} (1) - Virt Viewer",
- :err => ["/dev/null", "w"]])
- Process.wait(p.pid)
- $?.success?
- end
-
- def start
- @virtviewer = IO.popen(["virt-viewer", "--direct",
- "--kiosk",
- "--reconnect",
- "--connect", "qemu:///system",
- "--display", @x_display,
- @domain,
- :err => ["/dev/null", "w"]])
- # We wait for the display to be active to not lose actions
- # (e.g. key presses via sikuli) that come immediately after
- # starting (or restoring) a vm
- try_for(20, { :delay => 0.1, :msg => "virt-viewer failed to start"}) {
- active?
- }
- end
-
- def stop
- return if @virtviewer.nil?
- Process.kill("TERM", @virtviewer.pid)
- @virtviewer.close
- rescue IOError
- # IO.pid throws this if the process wasn't started yet. Possibly
- # there's a race when doing a start() and then quickly running
- # stop().
- end
-
- def restart
- stop
- start
- end
-
-end
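Illustrative usage of the Display wrapper above, assuming the globals defined in config.rb:

    display = Display.new(LIBVIRT_DOMAIN_NAME, $x_display)
    display.start   # waits until virt-viewer shows the domain
    # ... drive the VM's screen, e.g. via Sikuli ...
    display.stop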
diff --git a/cucumber/features/support/helpers/dogtail.rb b/cucumber/features/support/helpers/dogtail.rb
deleted file mode 100644
index 2a92649b..00000000
--- a/cucumber/features/support/helpers/dogtail.rb
+++ /dev/null
@@ -1,233 +0,0 @@
-module Dogtail
- module Mouse
- LEFT_CLICK = 1
- MIDDLE_CLICK = 2
- RIGHT_CLICK = 3
- end
-
- TREE_API_NODE_SEARCHES = [
- :button,
- :child,
- :childLabelled,
- :childNamed,
- :dialog,
- :menu,
- :menuItem,
- :tab,
- :textentry,
- ]
-
- TREE_API_NODE_SEARCH_FIELDS = [
- :parent,
- ]
-
- TREE_API_NODE_ACTIONS = [
- :click,
- :doubleClick,
- :grabFocus,
- :keyCombo,
- :point,
- :typeText,
- ]
-
- TREE_API_APP_SEARCHES = TREE_API_NODE_SEARCHES + [
- :dialog,
- :window,
- ]
-
- # We want to keep this class immutable so that handles always are
- # left intact when doing new (proxied) method calls. This way we
- # can support stuff like:
- #
- # app = Dogtail::Application.new('gedit')
- # menu = app.menu('Menu')
- # menu.click()
- # menu.something_else()
- # menu.click()
- #
- # i.e. the object referenced by `menu` is never modified by method
- # calls and can be used as expected.
-
- class Application
- @@node_counter ||= 0
-
- def initialize(app_name, opts = {})
- @var = "node#{@@node_counter += 1}"
- @app_name = app_name
- @opts = opts
- @opts[:user] ||= LIVE_USER
- @find_code = "dogtail.tree.root.application('#{@app_name}')"
- script_lines = [
- "import dogtail.config",
- "import dogtail.tree",
- "import dogtail.predicate",
- "dogtail.config.logDebugToFile = False",
- "dogtail.config.logDebugToStdOut = False",
- "dogtail.config.blinkOnActions = True",
- "dogtail.config.searchShowingOnly = True",
- "#{@var} = #{@find_code}",
- ]
- run(script_lines)
- end
-
- def to_s
- @var
- end
-
- def run(code)
- code = code.join("\n") if code.class == Array
- c = RemoteShell::PythonCommand.new($vm, code, user: @opts[:user])
- if c.failure?
- raise RuntimeError.new("The Dogtail script raised: #{c.exception}")
- end
- return c
- end
-
- def child?(*args)
- !!child(*args)
- rescue
- false
- end
-
- def exist?
- run("dogtail.config.searchCutoffCount = 0")
- run(@find_code)
- return true
- rescue
- return false
- ensure
- run("dogtail.config.searchCutoffCount = 20")
- end
-
- def self.value_to_s(v)
- if v == true
- 'True'
- elsif v == false
- 'False'
- elsif v.class == String
- "'#{v}'"
- elsif [Fixnum, Float].include?(v.class)
- v.to_s
- else
- raise "#{self.class.name} does not know how to handle argument type '#{v.class}'"
- end
- end
-
- # Generates a Python-style parameter list from `args`. If the last
- # element of `args` is a Hash, it's used as Python's kwargs dict.
- # In the end, the resulting string should be possible to copy-paste
- # into the parentheses of a Python function call.
- # Example: [42, {:foo => 'bar'}] => "42, foo = 'bar'"
- def self.args_to_s(args)
- return "" if args.size == 0
- args_list = args
- args_hash = nil
- if args_list.class == Array && args_list.last.class == Hash
- *args_list, args_hash = args_list
- end
- (
- (args_list.nil? ? [] : args_list.map { |e| self.value_to_s(e) }) +
- (args_hash.nil? ? [] : args_hash.map { |k, v| "#{k}=#{self.value_to_s(v)}" })
- ).join(', ')
- end
-
- # Equivalent to the Tree API's Node.findChildren(), with the
- # arguments constructing a GenericPredicate to use as parameter.
- def children(*args)
- non_predicates = [:recursive, :showingOnly]
- findChildren_opts = []
- findChildren_opts_hash = Hash.new
- if args.last.class == Hash
- args_hash = args.last
- non_predicates.each do |opt|
- if args_hash.has_key?(opt)
- findChildren_opts_hash[opt] = args_hash[opt]
- args_hash.delete(opt)
- end
- end
- end
- findChildren_opts = ""
- if findChildren_opts_hash.size > 0
- findChildren_opts = ", " + self.class.args_to_s([findChildren_opts_hash])
- end
- predicate_opts = self.class.args_to_s(args)
- nodes_var = "nodes#{@@node_counter += 1}"
- find_script_lines = [
- "#{nodes_var} = #{@var}.findChildren(dogtail.predicate.GenericPredicate(#{predicate_opts})#{findChildren_opts})",
- "print(len(#{nodes_var}))",
- ]
- size = run(find_script_lines).stdout.chomp.to_i
- return size.times.map do |i|
- Node.new("#{nodes_var}[#{i}]", @opts)
- end
- end
-
- def get_field(key)
- run("print(#{@var}.#{key})").stdout.chomp
- end
-
- def set_field(key, value)
- run("#{@var}.#{key} = #{self.class.value_to_s(value)}")
- end
-
- def text
- get_field('text')
- end
-
- def text=(value)
- set_field('text', value)
- end
-
- def name
- get_field('name')
- end
-
- def roleName
- get_field('roleName')
- end
-
- TREE_API_APP_SEARCHES.each do |method|
- define_method(method) do |*args|
- args_str = self.class.args_to_s(args)
- method_call = "#{method.to_s}(#{args_str})"
- Node.new("#{@var}.#{method_call}", @opts)
- end
- end
-
- TREE_API_NODE_SEARCH_FIELDS.each do |field|
- define_method(field) do
- Node.new("#{@var}.#{field}", @opts)
- end
- end
-
- end
-
- class Node < Application
-
- def initialize(expr, opts = {})
- @expr = expr
- @opts = opts
- @opts[:user] ||= LIVE_USER
- @find_code = expr
- @var = "node#{@@node_counter += 1}"
- run("#{@var} = #{@find_code}")
- end
-
- TREE_API_NODE_SEARCHES.each do |method|
- define_method(method) do |*args|
- args_str = self.class.args_to_s(args)
- method_call = "#{method.to_s}(#{args_str})"
- Node.new("#{@var}.#{method_call}", @opts)
- end
- end
-
- TREE_API_NODE_ACTIONS.each do |method|
- define_method(method) do |*args|
- args_str = self.class.args_to_s(args)
- method_call = "#{method.to_s}(#{args_str})"
- run("#{@var}.#{method_call}")
- end
- end
-
- end
-end
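An illustrative session with the proxy classes above; the application and widget names are hypothetical:

    app = Dogtail::Application.new('gedit')
    app.child('Open', roleName: 'push button').click
    app.children(roleName: 'text', recursive: true).each do |node|
      puts node.name
    end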
diff --git a/cucumber/features/support/helpers/firewall_helper.rb b/cucumber/features/support/helpers/firewall_helper.rb
deleted file mode 100644
index f88091de..00000000
--- a/cucumber/features/support/helpers/firewall_helper.rb
+++ /dev/null
@@ -1,94 +0,0 @@
-require 'packetfu'
-
-def looks_like_dhcp_packet?(eth_packet, protocol, sport, dport, ip_packet)
- protocol == "udp" && sport == 68 && dport == 67 &&
- eth_packet.eth_daddr == "ff:ff:ff:ff:ff:ff" &&
- ip_packet && ip_packet.ip_daddr == "255.255.255.255"
-end
-
-# Returns the unique edges (based on protocol, source/destination
-# address/port) in the graph of all network flows.
-def pcap_connections_helper(pcap_file, opts = {})
- opts[:ignore_dhcp] = true unless opts.has_key?(:ignore_dhcp)
- connections = Array.new
- packets = PacketFu::PcapFile.new.file_to_array(:filename => pcap_file)
- packets.each do |p|
- if PacketFu::EthPacket.can_parse?(p)
- eth_packet = PacketFu::EthPacket.parse(p)
- else
- raise 'Found something that is not an ethernet packet'
- end
- sport = nil
- dport = nil
- if PacketFu::IPv6Packet.can_parse?(p)
- ip_packet = PacketFu::IPv6Packet.parse(p)
- protocol = 'ipv6'
- elsif PacketFu::TCPPacket.can_parse?(p)
- ip_packet = PacketFu::TCPPacket.parse(p)
- protocol = 'tcp'
- sport = ip_packet.tcp_sport
- dport = ip_packet.tcp_dport
- elsif PacketFu::UDPPacket.can_parse?(p)
- ip_packet = PacketFu::UDPPacket.parse(p)
- protocol = 'udp'
- sport = ip_packet.udp_sport
- dport = ip_packet.udp_dport
- elsif PacketFu::ICMPPacket.can_parse?(p)
- ip_packet = PacketFu::ICMPPacket.parse(p)
- protocol = 'icmp'
- elsif PacketFu::IPPacket.can_parse?(p)
- ip_packet = PacketFu::IPPacket.parse(p)
- protocol = 'ip'
- else
- raise "Found something that cannot be parsed"
- end
-
- next if opts[:ignore_dhcp] &&
- looks_like_dhcp_packet?(eth_packet, protocol,
- sport, dport, ip_packet)
-
- packet_info = {
- mac_saddr: eth_packet.eth_saddr,
- mac_daddr: eth_packet.eth_daddr,
- protocol: protocol,
- sport: sport,
- dport: dport,
- }
-
- begin
- packet_info[:saddr] = ip_packet.ip_saddr
- packet_info[:daddr] = ip_packet.ip_daddr
- rescue NoMethodError, NameError
- begin
- packet_info[:saddr] = ip_packet.ipv6_saddr
- packet_info[:daddr] = ip_packet.ipv6_daddr
- rescue NoMethodError, NameError
- puts "We were hit by #11508. PacketFu bug? Packet info: #{ip_packet}"
- packet_info[:saddr] = nil
- packet_info[:daddr] = nil
- end
- end
- connections << packet_info
- end
- connections.uniq.map { |p| OpenStruct.new(p) }
-end
-
-class FirewallAssertionFailedError < Test::Unit::AssertionFailedError
-end
-
-# These assertions are made from the perspective of the system under
-# testing when it comes to the concepts of "source" and "destination".
-def assert_all_connections(pcap_file, opts = {}, &block)
- all = pcap_connections_helper(pcap_file, opts)
- good = all.find_all(&block)
- bad = all - good
- unless bad.empty?
- raise FirewallAssertionFailedError.new(
- "Unexpected connections were made:\n" +
- bad.map { |e| " #{e}" } .join("\n"))
- end
-end
-
-def assert_no_connections(pcap_file, opts = {}, &block)
- assert_all_connections(pcap_file, opts) { |*args| not(block.call(*args)) }
-end
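A sketch of how these assertions are typically used on a capture file; pcap_file and the addresses below are placeholders:

    # Every captured connection must be TCP to the expected bridge.
    assert_all_connections(pcap_file) do |c|
      c.protocol == 'tcp' && c.daddr == '10.2.1.1' && c.dport == 443
    end

    # And nothing may talk to the LAN directly.
    assert_no_connections(pcap_file) do |c|
      c.daddr == '192.168.0.42'
    end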
diff --git a/cucumber/features/support/helpers/misc_helpers.rb b/cucumber/features/support/helpers/misc_helpers.rb
deleted file mode 100644
index 865d2978..00000000
--- a/cucumber/features/support/helpers/misc_helpers.rb
+++ /dev/null
@@ -1,344 +0,0 @@
-require 'date'
-require 'io/console'
-require 'pry'
-require 'timeout'
-require 'test/unit'
-
-# Test::Unit adds an at_exit hook which, among other things, consumes
-# the command-line arguments that were intended for cucumber. If
-# e.g. `--format` was passed it will throw an error since it's not a
-# valid option for Test::Unit, and it throwing an error at this time
-# (at_exit) will make Cucumber think it failed and consequently exit
-# with an error. Fooling Test::Unit that this hook has already run
-# works around this craziness.
-Test::Unit.run = true
-
-# Make all the assert_* methods easily accessible in any context.
-include Test::Unit::Assertions
-
-def assert_vmcommand_success(p, msg = nil)
- assert(p.success?, msg.nil? ? "Command failed: #{p.cmd}\n" + \
- "error code: #{p.returncode}\n" \
- "stderr: #{p.stderr}" : \
- msg)
-end
-
-# It's forbidden to throw this exception (or subclasses) in anything
-# but try_for() below. Just don't use it anywhere else!
-class UniqueTryForTimeoutError < Exception
-end
-
-# Call block (ignoring any exceptions it may throw) repeatedly with
-# one second breaks until it returns true, or until `timeout` seconds have
-# passed when we throw a Timeout::Error exception. If `timeout` is `nil`,
-# then we just run the code block with no timeout.
-def try_for(timeout, options = {})
- if block_given? && timeout.nil?
- return yield
- end
- options[:delay] ||= 1
- last_exception = nil
- # Create a unique exception used only for this particular try_for
- # call's Timeout to allow nested try_for:s. If we used the same one,
- # the innermost try_for would catch all outer ones', creating a
- # really strange situation.
- unique_timeout_exception = Class.new(UniqueTryForTimeoutError)
- Timeout::timeout(timeout, unique_timeout_exception) do
- loop do
- begin
- return if yield
- rescue NameError, UniqueTryForTimeoutError => e
- # NameError most likely means typos, and hiding that is rarely
- # (never?) a good idea, so we rethrow them. See below why we
- # also rethrow *all* the unique exceptions.
- raise e
- rescue Exception => e
- # All other exceptions are ignored while trying the
-      # block. Well, we save the last exception so we can print it in
- # case of a timeout.
- last_exception = e
- end
- sleep options[:delay]
- end
- end
- # At this point the block above either succeeded and we'll return,
- # or we are throwing an exception. If the latter, we either have a
-  # NameError that we'll not catch (and neither will any try_for below us in
- # the stack), or we have a unique exception. That can mean one of
- # two things:
- # 1. it's the one unique to this try_for, and in that case we'll
- # catch it, rethrowing it as something that will be ignored by
- # inside the blocks of all try_for:s below us in the stack.
- # 2. it's an exception unique to another try_for. Assuming that we
- # do not throw the unique exceptions in any other place or way
- # than we do it in this function, this means that there is a
- # try_for below us in the stack to which this exception must be
-  #    unique.
- # Let 1 be the base step, and 2 the inductive step, and we sort of
-  # have an inductive proof for the correctness of try_for when it's
- # nested. It shows that for an infinite stack of try_for:s, any of
- # the unique exceptions will be caught only by the try_for instance
- # it is unique to, and all try_for:s in between will ignore it so it
- # ends up there immediately.
-rescue unique_timeout_exception => e
- msg = options[:msg] || 'try_for() timeout expired'
- exc_class = options[:exception] || Timeout::Error
- if last_exception
- msg += "\nLast ignored exception was: " +
- "#{last_exception.class}: #{last_exception}"
- end
- raise exc_class.new(msg)
-end
-
-class TorFailure < StandardError
-end
-
-class MaxRetriesFailure < StandardError
-end
-
-def force_new_tor_circuit()
- debug_log("Forcing new Tor circuit...")
- # Tor rate limits NEWNYM to at most one per 10 second period.
- interval = 10
- if $__last_newnym
- elapsed = Time.now - $__last_newnym
- # We sleep an extra second to avoid tight timings.
- sleep interval - elapsed + 1 if 0 < elapsed && elapsed < interval
- end
- $vm.execute_successfully('tor_control_send "signal NEWNYM"', :libs => 'tor')
- $__last_newnym = Time.now
-end
-
-# This will retry the block up to MAX_NEW_TOR_CIRCUIT_RETRIES
-# times. The block must raise an exception for a run to be considered
-# as a failure. After a failure recovery_proc will be called (if
-# given) and the intention with it is to bring us back to the state
-# expected by the block, so it can be retried.
-def retry_tor(recovery_proc = nil, &block)
- tor_recovery_proc = Proc.new do
- force_new_tor_circuit
- recovery_proc.call if recovery_proc
- end
-
- retry_action($config['MAX_NEW_TOR_CIRCUIT_RETRIES'],
- :recovery_proc => tor_recovery_proc,
- :operation_name => 'Tor operation', &block)
-end
-
-def retry_action(max_retries, options = {}, &block)
- assert(max_retries.is_a?(Integer), "max_retries must be an integer")
- options[:recovery_proc] ||= nil
- options[:operation_name] ||= 'Operation'
-
- retries = 1
- loop do
- begin
- block.call
- return
- rescue NameError => e
- # NameError most likely means typos, and hiding that is rarely
- # (never?) a good idea, so we rethrow them.
- raise e
- rescue Exception => e
- if retries <= max_retries
- debug_log("#{options[:operation_name]} failed (Try #{retries} of " +
- "#{max_retries}) with:\n" +
- "#{e.class}: #{e.message}")
- options[:recovery_proc].call if options[:recovery_proc]
- retries += 1
- else
- raise MaxRetriesFailure.new("#{options[:operation_name]} failed (despite retrying " +
- "#{max_retries} times) with\n" +
- "#{e.class}: #{e.message}")
- end
- end
- end
-end
-
-alias :retry_times :retry_action
-
-class TorBootstrapFailure < StandardError
-end
-
-def wait_until_tor_is_working
- try_for(270) { $vm.execute('/usr/local/sbin/tor-has-bootstrapped').success? }
-rescue Timeout::Error
- raise TorBootstrapFailure.new('Tor failed to bootstrap')
-end
-
-def convert_bytes_mod(unit)
- case unit
- when "bytes", "b" then mod = 1
- when "KB" then mod = 10**3
- when "k", "KiB" then mod = 2**10
- when "MB" then mod = 10**6
- when "M", "MiB" then mod = 2**20
- when "GB" then mod = 10**9
- when "G", "GiB" then mod = 2**30
- when "TB" then mod = 10**12
- when "T", "TiB" then mod = 2**40
- else
- raise "invalid memory unit '#{unit}'"
- end
- return mod
-end
-
-def convert_to_bytes(size, unit)
- return (size*convert_bytes_mod(unit)).to_i
-end
-
-def convert_to_MiB(size, unit)
- return (size*convert_bytes_mod(unit) / (2**20)).to_i
-end
-
-def convert_from_bytes(size, unit)
- return size.to_f/convert_bytes_mod(unit).to_f
-end
-
-def cmd_helper(cmd, env = {})
- if cmd.instance_of?(Array)
- cmd << {:err => [:child, :out]}
- elsif cmd.instance_of?(String)
- cmd += " 2>&1"
- end
- env = ENV.to_h.merge(env)
- IO.popen(env, cmd) do |p|
- out = p.readlines.join("\n")
- p.close
- ret = $?
- assert_equal(0, ret, "Command failed (returned #{ret}): #{cmd}:\n#{out}")
- return out
- end
-end
-
-def all_tor_hosts
- nodes = Array.new
- chutney_torrcs = Dir.glob(
- "#{$config['TMPDIR']}/chutney-data/nodes/*/torrc"
- )
- chutney_torrcs.each do |torrc|
- open(torrc) do |f|
- nodes += f.grep(/^(Or|Dir)Port\b/).map do |line|
- { address: $vmnet.bridge_ip_addr, port: line.split.last.to_i }
- end
- end
- end
- return nodes
-end
-
-def allowed_hosts_under_tor_enforcement
- all_tor_hosts + @lan_hosts
-end
-
-def get_free_space(machine, path)
- case machine
- when 'host'
- assert(File.exists?(path), "Path '#{path}' not found on #{machine}.")
- free = cmd_helper(["df", path])
- when 'guest'
- assert($vm.file_exist?(path), "Path '#{path}' not found on #{machine}.")
- free = $vm.execute_successfully("df '#{path}'")
- else
-    raise "Unsupported machine type #{machine} passed."
- end
- output = free.split("\n").last
- return output.match(/[^\s]\s+[0-9]+\s+[0-9]+\s+([0-9]+)\s+.*/)[1].chomp.to_i
-end
-
-def random_string_from_set(set, min_len, max_len)
- len = (min_len..max_len).to_a.sample
- len ||= min_len
- (0..len-1).map { |n| set.sample }.join
-end
-
-def random_alpha_string(min_len, max_len = 0)
- alpha_set = ('A'..'Z').to_a + ('a'..'z').to_a
- random_string_from_set(alpha_set, min_len, max_len)
-end
-
-def random_alnum_string(min_len, max_len = 0)
- alnum_set = ('A'..'Z').to_a + ('a'..'z').to_a + (0..9).to_a.map { |n| n.to_s }
- random_string_from_set(alnum_set, min_len, max_len)
-end
-
-# Sanitize the filename from unix-hostile filename characters
-def sanitize_filename(filename, options = {})
- options[:replacement] ||= '_'
- bad_unix_filename_chars = Regexp.new("[^A-Za-z0-9_\\-.,+:]")
- filename.gsub(bad_unix_filename_chars, options[:replacement])
-end
-
-def info_log_artifact_location(type, path)
- if $config['ARTIFACTS_BASE_URI']
- # Remove any trailing slashes, we'll add one ourselves
- base_url = $config['ARTIFACTS_BASE_URI'].gsub(/\/*$/, "")
- path = "#{base_url}/#{File.basename(path)}"
- end
- info_log("#{type.capitalize}: #{path}")
-end
-
-def notify_user(message)
- alarm_script = $config['NOTIFY_USER_COMMAND']
- return if alarm_script.nil? || alarm_script.empty?
- cmd_helper(alarm_script.gsub('%m', message))
-end
-
-def pause(message = "Paused")
- notify_user(message)
- STDERR.puts
- STDERR.puts message
- # Ring the ASCII bell for a helpful notification in most terminal
- # emulators.
- STDOUT.write "\a"
- STDERR.puts
- loop do
- STDERR.puts "Return: Continue; d: Debugging REPL"
- c = STDIN.getch
- case c
- when "\r"
- return
- when "d"
- binding.pry(quiet: true)
- end
- end
-end
-
-def dbus_send(service, object_path, method, *args, **opts)
- opts ||= {}
- ruby_type_to_dbus_type = {
- String => 'string',
- Fixnum => 'int32',
- }
- typed_args = args.map do |arg|
- type = ruby_type_to_dbus_type[arg.class]
- assert_not_nil(type, "No DBus type conversion for Ruby type '#{arg.class}'")
- "#{type}:#{arg}"
- end
- ret = $vm.execute_successfully(
- "dbus-send --print-reply --dest=#{service} #{object_path} " +
- " #{method} #{typed_args.join(' ')}",
- **opts
- ).stdout.lines
- # The first line written is about timings and other stuff we don't
- # care about; we only care about the return values.
- ret.shift
- ret.map! do |s|
- type, val = /^\s*(\S+)\s+(\S+)$/.match(s)[1,2]
- case type
- when 'string'
- # Unquote
- val[1, val.length - 2]
- when 'int32'
- val.to_i
- else
- raise "No Ruby type conversion for DBus type '#{type}'"
- end
- end
- if ret.size == 0
- return nil
- elsif ret.size == 1
- return ret.first
- else
- return ret
- end
-end
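Minimal usage sketches for two of the helpers above; the polled condition and the recovery body are placeholders:

    # Poll (every 2 s, for up to 30 s) until a condition becomes true.
    try_for(30, :delay => 2, :msg => 'flag file never appeared') do
      File.exist?('/tmp/some-flag-file')
    end

    # Retry a Tor-dependent block, forcing a new circuit between attempts.
    recovery = Proc.new { debug_log('recovering before the next attempt') }
    retry_tor(recovery) do
      wait_until_tor_is_working
    end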
diff --git a/cucumber/features/support/helpers/remote_shell.rb b/cucumber/features/support/helpers/remote_shell.rb
deleted file mode 100644
index b890578b..00000000
--- a/cucumber/features/support/helpers/remote_shell.rb
+++ /dev/null
@@ -1,171 +0,0 @@
-require 'base64'
-require 'json'
-require 'socket'
-require 'timeout'
-
-module RemoteShell
- class ServerFailure < StandardError
- end
-
- # Used to differentiate vs Timeout::Error, which is thrown by
- # try_for() (by default) and often wraps around remote shell usage
- # -- in that case we don't want to catch that "outer" exception in
- # our handling of remote shell timeouts below.
- class Timeout < ServerFailure
- end
-
- DEFAULT_TIMEOUT = 20*60
-
- # Counter providing unique id:s for each communicate() call.
- @@request_id ||= 0
-
- def communicate(vm, *args, **opts)
- opts[:timeout] ||= DEFAULT_TIMEOUT
- socket = TCPSocket.new("127.0.0.1", vm.get_remote_shell_port)
- id = (@@request_id += 1)
- # Since we already have defined our own Timeout in the current
- # scope, we have to be more careful when referring to the Timeout
- # class from the 'timeout' module. However, note that we want it
- # to throw our own Timeout exception.
- Object::Timeout.timeout(opts[:timeout], Timeout) do
- socket.puts(JSON.dump([id] + args))
- socket.flush
- loop do
- line = socket.readline("\n").chomp("\n")
- response_id, status, *rest = JSON.load(line)
- if response_id == id
- if status != "success"
- if status == "error" and rest.class == Array and rest.size == 1
- msg = rest.first
- raise ServerFailure.new("#{msg}")
- else
- raise ServerFailure.new("Uncaught exception: #{status}: #{rest}")
- end
- end
- return rest
- else
- debug_log("Dropped out-of-order remote shell response: " +
- "got id #{response_id} but expected id #{id}")
- end
- end
- end
- ensure
- socket.close if defined?(socket) && socket
- end
-
- module_function :communicate
- private :communicate
-
- class ShellCommand
- # If `:spawn` is false the server will block until it has finished
- # executing `cmd`. If it's true the server won't block, and the
- # response will always be [0, "", ""] (only used as an
- # ACK). execute() will always block until a response is received,
- # though. Spawning is useful when starting processes in the
-    # background (or running scripts that do the same) or any
- # application we want to interact with.
- def self.execute(vm, cmd, **opts)
- opts[:user] ||= "root"
- opts[:spawn] = false unless opts.has_key?(:spawn)
- type = opts[:spawn] ? "spawn" : "call"
- debug_log("#{type}ing as #{opts[:user]}: #{cmd}")
- ret = RemoteShell.communicate(vm, 'sh_' + type, opts[:user], cmd, **opts)
- debug_log("#{type} returned: #{ret}") if not(opts[:spawn])
- return ret
- end
-
- attr_reader :cmd, :returncode, :stdout, :stderr
-
- def initialize(vm, cmd, **opts)
- @cmd = cmd
- @returncode, @stdout, @stderr = self.class.execute(vm, cmd, **opts)
- end
-
- def success?
- return @returncode == 0
- end
-
- def failure?
- return not(success?)
- end
-
- def to_s
- "Return status: #{@returncode}\n" +
- "STDOUT:\n" +
- @stdout +
- "STDERR:\n" +
- @stderr
- end
- end
-
- class PythonCommand
- def self.execute(vm, code, **opts)
- opts[:user] ||= "root"
- show_code = code.chomp
- if show_code["\n"]
- show_code = "\n" + show_code.lines.map { |l| " "*4 + l.chomp } .join("\n")
- end
- debug_log("executing Python as #{opts[:user]}: #{show_code}")
- ret = RemoteShell.communicate(
- vm, 'python_execute', opts[:user], code, **opts
- )
- debug_log("execution complete")
- return ret
- end
-
- attr_reader :code, :exception, :stdout, :stderr
-
- def initialize(vm, code, **opts)
- @code = code
- @exception, @stdout, @stderr = self.class.execute(vm, code, **opts)
- end
-
- def success?
- return @exception == nil
- end
-
- def failure?
- return not(success?)
- end
-
- def to_s
- "Exception: #{@exception}\n" +
- "STDOUT:\n" +
- @stdout +
- "STDERR:\n" +
- @stderr
- end
- end
-
- # An IO-like object that is more or less equivalent to a File object
- # opened in rw mode.
- class File
- def self.open(vm, mode, path, *args, **opts)
- debug_log("opening file #{path} in '#{mode}' mode")
- ret = RemoteShell.communicate(vm, 'file_' + mode, path, *args, **opts)
- if ret.size != 1
- raise ServerFailure.new("expected 1 value but got #{ret.size}")
- end
- debug_log("#{mode} complete")
- return ret.first
- end
-
- attr_reader :vm, :path
-
- def initialize(vm, path)
- @vm, @path = vm, path
- end
-
- def read()
- Base64.decode64(self.class.open(@vm, 'read', @path))
- end
-
- def write(data)
- self.class.open(@vm, 'write', @path, Base64.encode64(data))
- end
-
- def append(data)
- self.class.open(@vm, 'append', @path, Base64.encode64(data))
- end
- end
-end
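Illustrative calls against the remote shell wrappers above; $vm is the VM object used throughout the suite and the paths are placeholders:

    c = RemoteShell::ShellCommand.new($vm, 'ls /home', user: LIVE_USER)
    puts c.stdout if c.success?

    RemoteShell::File.new($vm, '/tmp/greeting').write("hello\n")
    puts RemoteShell::File.new($vm, '/tmp/greeting').read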
diff --git a/cucumber/features/support/helpers/sikuli_helper.rb b/cucumber/features/support/helpers/sikuli_helper.rb
deleted file mode 100644
index 264a3ece..00000000
--- a/cucumber/features/support/helpers/sikuli_helper.rb
+++ /dev/null
@@ -1,238 +0,0 @@
-require 'rjb'
-require 'rjbextension'
-$LOAD_PATH << ENV['SIKULI_HOME']
-begin
- require 'sikulixapi.jar'
- USING_SIKULIX = true
-rescue LoadError
- require 'sikuli-script.jar'
- USING_SIKULIX = false
-end
-Rjb::load
-
-def using_sikulix?
- USING_SIKULIX
-end
-
-package_members = [
- "java.io.FileOutputStream",
- "java.io.PrintStream",
- "java.lang.System",
- "org.sikuli.script.Finder",
- "org.sikuli.script.Key",
- "org.sikuli.script.KeyModifier",
- "org.sikuli.script.Location",
- "org.sikuli.script.Match",
- "org.sikuli.script.Pattern",
- "org.sikuli.script.Region",
- "org.sikuli.script.Screen",
- ]
-
-if using_sikulix?
- package_members << "org.sikuli.basics.Settings"
- package_members << "org.sikuli.script.ImagePath"
-else
- package_members << "org.sikuli.script.Settings"
-end
-
-translations = Hash[
- "org.sikuli.script", "Sikuli",
- "org.sikuli.basics", "Sikuli",
- "java.lang", "Java::Lang",
- "java.io", "Java::Io",
- ]
-
-for p in package_members
- imported_class = Rjb::import(p)
- package, ignore, class_name = p.rpartition(".")
- next if ! translations.include? package
- mod_name = translations[package]
- mod = mod_name.split("::").inject(Object) do |parent_obj, child_name|
- if parent_obj.const_defined?(child_name, false)
- parent_obj.const_get(child_name, false)
- else
- child_obj = Module.new
- parent_obj.const_set(child_name, child_obj)
- end
- end
- mod.const_set(class_name, imported_class)
-end
-
-# Bind Java's stdout to debug_log() via our magical pseudo fifo
-# logger.
-def bind_java_to_pseudo_fifo_logger
- file_output_stream = Java::Io::FileOutputStream.new(DEBUG_LOG_PSEUDO_FIFO)
- print_stream = Java::Io::PrintStream.new(file_output_stream)
- Java::Lang::System.setOut(print_stream)
-end
-
-def findfailed_hook(pic)
- pause("FindFailed for: '#{pic}'")
-end
-
-# Since rjb imports Java classes without creating a corresponding
-# Ruby class (it's just an instance of Rjb_JavaProxy) we can't
-# monkey patch any class, so additional methods must be added
-# to each Screen object.
-#
-# All Java classes' methods are immediately available in the proxied
-# Ruby classes, but care has to be given to match their type. For a
-# list of methods, see: <http://doc.sikuli.org/javadoc/index.html>.
-# The type "PRSML" is a union of Pattern, Region, Screen, Match and
-# Location.
-#
-# Also, due to limitations in Ruby's syntax we can't do:
-# def Sikuli::Screen.new
-# so we work around it with the following variable.
-sikuli_script_proxy = Sikuli::Screen
-$_original_sikuli_screen_new ||= Sikuli::Screen.method :new
-
-# For waitAny()/findAny() we are forced to throw this exception since
-# Rjb::throw doesn't block until the Java exception has been received
-# by Ruby, so strange things can happen.
-class FindAnyFailed < StandardError
-end
-
-def sikuli_script_proxy.new(*args)
- s = $_original_sikuli_screen_new.call(*args)
-
- if $config["SIKULI_RETRY_FINDFAILED"]
- # The usage of `_invoke()` below exemplifies how one can wrap
- # around Java objects' methods when they're imported using RJB. It
-    # isn't pretty. The second argument is the parameter signature,
- # which can be obtained by creating the intended Java object using
- # RJB, and then calling its `java_methods` method.
-
- def s.wait(pic, time)
- self._invoke('wait', 'Ljava.lang.Object;D', pic, time)
- rescue FindFailed => e
- findfailed_hook(pic)
- self._invoke('wait', 'Ljava.lang.Object;D', pic, time)
- end
-
- def s.find(pic)
- self._invoke('find', 'Ljava.lang.Object;', pic)
- rescue FindFailed => e
- findfailed_hook(pic)
- self._invoke('find', 'Ljava.lang.Object;', pic)
- end
-
- def s.waitVanish(pic, time)
- self._invoke('waitVanish', 'Ljava.lang.Object;D', pic, time)
- rescue FindFailed => e
- findfailed_hook(pic)
- self._invoke('waitVanish', 'Ljava.lang.Object;D', pic, time)
- end
-
- def s.click(pic)
- self._invoke('click', 'Ljava.lang.Object;', pic)
- rescue FindFailed => e
- findfailed_hook(pic)
- self._invoke('click', 'Ljava.lang.Object;', pic)
- end
- end
-
- def s.click_point(x, y)
- self.click(Sikuli::Location.new(x, y))
- end
-
- def s.doubleClick_point(x, y)
- self.doubleClick(Sikuli::Location.new(x, y))
- end
-
- def s.click_mid_right_edge(pic)
- r = self.find(pic)
- top_right = r.getTopRight()
- x = top_right.getX
- y = top_right.getY + r.getH/2
- self.click_point(x, y)
- end
-
- def s.wait_and_click(pic, time)
- self.click(self.wait(pic, time))
- end
-
- def s.wait_and_double_click(pic, time)
- self.doubleClick(self.wait(pic, time))
- end
-
- def s.wait_and_right_click(pic, time)
- self.rightClick(self.wait(pic, time))
- end
-
- def s.wait_and_hover(pic, time)
- self.hover(self.wait(pic, time))
- end
-
- def s.existsAny(images)
- images.each do |image|
- region = self.exists(image)
- return [image, region] if region
- end
- return nil
- end
-
- def s.findAny(images)
- images.each do |image|
- begin
- return [image, self.find(image)]
- rescue FindFailed
-        # Ignore. We'll throw an appropriate exception after
- # having looped through all images and found none of them.
- end
- end
- # If we've reached this point, none of the images could be found.
- raise FindAnyFailed.new("can not find any of the images #{images} on the " +
- "screen")
- end
-
- def s.waitAny(images, time)
- Timeout::timeout(time) do
- loop do
- result = self.existsAny(images)
- return result if result
- end
- end
- rescue Timeout::Error
- raise FindAnyFailed.new("can not find any of the images #{images} on the " +
- "screen")
- end
-
- def s.hover_point(x, y)
- self.hover(Sikuli::Location.new(x, y))
- end
-
- def s.hide_cursor
- self.hover_point(self.w - 1, self.h/2)
- end
-
- s
-end
-
-# Configure sikuli
-if using_sikulix?
- Sikuli::ImagePath.add("#{Dir.pwd}/features/images/")
-else
- java.lang.System.setProperty("SIKULI_IMAGE_PATH",
- "#{Dir.pwd}/features/images/")
- ENV["SIKULI_IMAGE_PATH"] = "#{Dir.pwd}/features/images/"
-end
-
-# Ruby and RJB don't play well together when it comes to static
-# fields (and possibly methods) so we instantiate and access the field
-# via objects instead. It actually works inside this file, but when
-# it's required from "outside", and the file has been completely
-# required, ruby's require method complains that the method for the
-# field accessor is missing.
-sikuli_settings = Sikuli::Settings.new
-sikuli_settings.OcrDataPath = $config["TMPDIR"]
-# sikuli_ruby, which we used before, defaulted to 0.9 minimum
-# similarity, so all our current images are adapted to that value.
-# Also, Sikuli's default of 0.7 is simply too low (many false
-# positives).
-sikuli_settings.MinSimilarity = 0.9
-sikuli_settings.ActionLogs = true
-sikuli_settings.DebugLogs = true
-sikuli_settings.InfoLogs = true
-sikuli_settings.ProfileLogs = false
-sikuli_settings.WaitScanRate = 0.25
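For illustration, the patched Screen object above is used like this; the image names are placeholders resolved against features/images/:

    screen = Sikuli::Screen.new
    screen.wait('SomeDialog.png', 10)           # wait up to 10 s for the image
    screen.wait_and_click('SomeButton.png', 10)
    screen.hide_cursor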
diff --git a/cucumber/features/support/helpers/sniffing_helper.rb b/cucumber/features/support/helpers/sniffing_helper.rb
deleted file mode 100644
index 38b13820..00000000
--- a/cucumber/features/support/helpers/sniffing_helper.rb
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Sniffer is a very dumb wrapper to start and stop tcpdump instances, possibly
-# with customized filters. Captured traffic is stored in files whose name
-# depends on the sniffer name. The resulting captured packets for each sniffer
-# can be accessed as an array through its `packets` method.
-#
-# Using more Ruby-ish internal ways to sniff a network, e.g. with pcap-capable
-# gems, is way too resource-consuming, not very reliable and very slow. Let's
-# not bother too much with that. :)
-#
-# Should put all that in a Module.
-
-class Sniffer
-
- attr_reader :name, :pcap_file, :pid
-
- def initialize(name, vmnet)
- @name = name
- @vmnet = vmnet
- pcap_name = sanitize_filename("#{name}.pcap")
- @pcap_file = "#{$config["TMPDIR"]}/#{pcap_name}"
- end
-
- def capture(filter="not ether src host #{@vmnet.bridge_mac} and not ether proto \\arp and not ether proto \\rarp")
- job = IO.popen(
- [
- "/usr/sbin/tcpdump",
- "-n",
- "-U",
- "--immediate-mode",
- "-i", @vmnet.bridge_name,
- "-w", @pcap_file,
- filter,
- :err => ["/dev/null", "w"]
- ]
- )
- @pid = job.pid
- end
-
- def stop
- begin
- Process.kill("TERM", @pid)
- rescue
- # noop
- end
- end
-
- def clear
- if File.exist?(@pcap_file)
- File.delete(@pcap_file)
- end
- end
-end
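Typical driving of the Sniffer above, combined with the pcap helpers from firewall_helper.rb; $vmnet is the virtual network helper assumed elsewhere in the suite:

    sniffer = Sniffer.new('my_scenario', $vmnet)
    sniffer.capture          # default filter ignores the host's own/ARP traffic
    # ... generate traffic in the VM ...
    sniffer.stop
    connections = pcap_connections_helper(sniffer.pcap_file)
    sniffer.clear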
diff --git a/cucumber/features/support/helpers/sshd_helper.rb b/cucumber/features/support/helpers/sshd_helper.rb
deleted file mode 100644
index 2e0069c0..00000000
--- a/cucumber/features/support/helpers/sshd_helper.rb
+++ /dev/null
@@ -1,67 +0,0 @@
-require 'tempfile'
-
-class SSHServer
- def initialize(sshd_host, sshd_port, authorized_keys = nil)
- @sshd_host = sshd_host
- @sshd_port = sshd_port
- @authorized_keys = authorized_keys
- @pid = nil
- end
-
- def start
- @sshd_key_file = Tempfile.new("ssh_host_rsa_key", $config["TMPDIR"])
- # 'hack' to prevent ssh-keygen from prompting to overwrite the file
- File.delete(@sshd_key_file.path)
- cmd_helper(['ssh-keygen', '-t', 'rsa', '-N', "", '-f', "#{@sshd_key_file.path}"])
- @sshd_key_file.close
-
- sshd_config =<<EOF
-Port #{@sshd_port}
-ListenAddress #{@sshd_host}
-UsePrivilegeSeparation no
-HostKey #{@sshd_key_file.path}
-Pidfile #{$config['TMPDIR']}/ssh.pid
-EOF
-
- @sshd_config_file = Tempfile.new("sshd_config", $config["TMPDIR"])
- @sshd_config_file.write(sshd_config)
-
- if @authorized_keys
- @authorized_keys_file = Tempfile.new("authorized_keys", $config['TMPDIR'])
- @authorized_keys_file.write(@authorized_keys)
- @authorized_keys_file.close
- @sshd_config_file.write("AuthorizedKeysFile #{@authorized_keys_file.path}")
- end
-
- @sshd_config_file.close
-
- cmd = ["/usr/sbin/sshd", "-4", "-f", @sshd_config_file.path, "-D"]
-
- job = IO.popen(cmd)
- @pid = job.pid
- end
-
- def stop
- File.delete("#{@sshd_key_file.path}.pub")
- File.delete("#{$config['TMPDIR']}/ssh.pid")
- begin
- Process.kill("TERM", @pid)
- rescue
- # noop
- end
- end
-
- def active?
- begin
- ret = Process.kill(0, @pid)
- rescue Errno::ESRCH => e
- if e.message == "No such process"
- return false
- else
- raise e
- end
- end
- assert_equal(1, ret, "This shouldn't happen")
- return true
- end
-end
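Hypothetical usage of the SSHServer helper above; address, port and key are placeholders:

    sshd = SSHServer.new('10.2.1.1', 2222, 'ssh-rsa AAAA... test@example.org')
    sshd.start
    try_for(10) { sshd.active? }
    # ... run the SSH scenario against 10.2.1.1:2222 ...
    sshd.stop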
diff --git a/cucumber/features/support/helpers/storage_helper.rb b/cucumber/features/support/helpers/storage_helper.rb
deleted file mode 100644
index 3bbdb69c..00000000
--- a/cucumber/features/support/helpers/storage_helper.rb
+++ /dev/null
@@ -1,210 +0,0 @@
-# Helper class for manipulating VM storage *volumes*, i.e. it deals
-# only with creation of images and keeps a name => volume path lookup
-# table (plugging drives or getting info of plugged devices is done in
-# the VM class). We'd like better coupling, but given the ridiculous
-# disconnect between Libvirt::StoragePool and Libvirt::Domain (hint:
-# they have nothing to do with each other whatsoever) it's what makes
-# sense.
-
-require 'libvirt'
-require 'guestfs'
-require 'rexml/document'
-require 'etc'
-
-class VMStorage
-
- def initialize(virt, xml_path)
- @virt = virt
- @xml_path = xml_path
- pool_xml = REXML::Document.new(File.read("#{@xml_path}/storage_pool.xml"))
- pool_name = LIBVIRT_DOMAIN_NAME
- pool_xml.elements['pool/name'].text = pool_name
- @pool_path = "/srv/lvc/vm-pools/#{pool_name}" # FIXME -- hardwiring the .../vm-pools path seems like a poor approach
- begin
- @pool = @virt.lookup_storage_pool_by_name(pool_name)
- rescue Libvirt::RetrieveError
- @pool = nil
- end
- if @pool and (not(KEEP_SNAPSHOTS) or
- (KEEP_SNAPSHOTS and not(Dir.exists?(@pool_path))))
- VMStorage.clear_storage_pool(@pool)
- @pool = nil
- end
- if not(Dir.exists?(@pool_path))
- # We'd like to use @pool.build, which will just create the
- # @pool_path directory, but it does so with root:root as owner
- # (at least with libvirt 1.2.21-2). libvirt itself can handle
- # that situation, but guestfs (at least with <=
- # 1:1.28.12-1+b3) cannot when invoked by a non-root user,
- # which we want to support.
- FileUtils.mkdir(@pool_path)
- FileUtils.chown(nil, 'libvirt-qemu', @pool_path)
- FileUtils.chmod("ug+wrx", @pool_path)
- end
- unless @pool
- pool_xml.elements['pool/target/path'].text = @pool_path
- @pool = @virt.define_storage_pool_xml(pool_xml.to_s)
- end
- @pool.create unless @pool.active?
- @pool.refresh
- end
-
- def VMStorage.clear_storage_pool_volumes(pool)
- was_not_active = !pool.active?
- if was_not_active
- pool.create
- end
- pool.list_volumes.each do |vol_name|
- vol = pool.lookup_volume_by_name(vol_name)
- vol.delete
- end
- if was_not_active
- pool.destroy
- end
- rescue
- # Some of the above operations can fail if the pool's path was
- # deleted by external means; let's ignore that.
- end
-
- def VMStorage.clear_storage_pool(pool)
- VMStorage.clear_storage_pool_volumes(pool)
- pool.destroy if pool.active?
- pool.undefine
- end
-
- def clear_pool
- VMStorage.clear_storage_pool(@pool)
- end
-
- def clear_volumes
- VMStorage.clear_storage_pool_volumes(@pool)
- end
-
- def list_volumes
- @pool.list_volumes
- end
-
- def delete_volume(name)
- @pool.lookup_volume_by_name(name).delete
- end
-
- def create_new_disk(name, options = {})
- options[:size] ||= 2
- options[:unit] ||= "GiB"
- options[:type] ||= "qcow2"
- # Require 'slightly' more space to be available to give a bit more leeway
- # with rounding, temp file creation, etc.
- reserved = 500
- needed = convert_to_MiB(options[:size].to_i, options[:unit])
- avail = convert_to_MiB(get_free_space('host', @pool_path), "KiB")
- assert(avail - reserved >= needed,
- "Error creating disk \"#{name}\" in \"#{@pool_path}\". " \
- "Need #{needed} MiB but only #{avail} MiB is available of " \
- "which #{reserved} MiB is reserved for other temporary files.")
- begin
- old_vol = @pool.lookup_volume_by_name(name)
- rescue Libvirt::RetrieveError
- # noop
- else
- old_vol.delete
- end
- uid = Etc::getpwnam("libvirt-qemu").uid
- gid = Etc::getgrnam("libvirt-qemu").gid
- vol_xml = REXML::Document.new(File.read("#{@xml_path}/volume.xml"))
- vol_xml.elements['volume/name'].text = name
- size_b = convert_to_bytes(options[:size].to_f, options[:unit])
- vol_xml.elements['volume/capacity'].text = size_b.to_s
- vol_xml.elements['volume/target/format'].attributes["type"] = options[:type]
- vol_xml.elements['volume/target/path'].text = "#{@pool_path}/#{name}"
- vol_xml.elements['volume/target/permissions/owner'].text = uid.to_s
- vol_xml.elements['volume/target/permissions/group'].text = gid.to_s
- vol = @pool.create_volume_xml(vol_xml.to_s)
- @pool.refresh
- end
-
- def clone_to_new_disk(from, to)
- begin
- old_to_vol = @pool.lookup_volume_by_name(to)
- rescue Libvirt::RetrieveError
- # noop
- else
- old_to_vol.delete
- end
- from_vol = @pool.lookup_volume_by_name(from)
- xml = REXML::Document.new(from_vol.xml_desc)
- pool_path = REXML::Document.new(@pool.xml_desc).elements['pool/target/path'].text
- xml.elements['volume/name'].text = to
- xml.elements['volume/target/path'].text = "#{pool_path}/#{to}"
- @pool.create_volume_xml_from(xml.to_s, from_vol)
- end
-
- def disk_format(name)
- vol = @pool.lookup_volume_by_name(name)
- vol_xml = REXML::Document.new(vol.xml_desc)
- return vol_xml.elements['volume/target/format'].attributes["type"]
- end
-
- def disk_path(name)
- @pool.lookup_volume_by_name(name).path
- end
-
- def disk_mklabel(name, parttype)
- guestfs_disk_helper(name) do |g, disk_handle|
- g.part_init(disk_handle, parttype)
- end
- end
-
- def disk_mkpartfs(name, parttype, fstype, opts = {})
- opts[:label] ||= nil
- opts[:luks_password] ||= nil
- guestfs_disk_helper(name) do |g, disk_handle|
- g.part_disk(disk_handle, parttype)
- g.part_set_name(disk_handle, 1, opts[:label]) if opts[:label]
- primary_partition = g.list_partitions()[0]
- if opts[:luks_password]
- g.luks_format(primary_partition, opts[:luks_password], 0)
- luks_mapping = File.basename(primary_partition) + "_unlocked"
- g.luks_open(primary_partition, opts[:luks_password], luks_mapping)
- luks_dev = "/dev/mapper/#{luks_mapping}"
- g.mkfs(fstype, luks_dev)
- g.luks_close(luks_dev)
- else
- g.mkfs(fstype, primary_partition)
- end
- end
- end
-
- def disk_mkswap(name, parttype)
- guestfs_disk_helper(name) do |g, disk_handle|
- g.part_disk(disk_handle, parttype)
- primary_partition = g.list_partitions()[0]
- g.mkswap(primary_partition)
- end
- end
-
- def guestfs_disk_helper(*disks)
- assert(block_given?)
- g = Guestfs::Guestfs.new()
- g.set_trace(1)
- message_callback = Proc.new do |event, _, message, _|
- debug_log("libguestfs: #{Guestfs.event_to_string(event)}: #{message}")
- end
- g.set_event_callback(message_callback,
- Guestfs::EVENT_TRACE)
- g.set_autosync(1)
- disks.each do |disk|
- if disk.class == String
- g.add_drive_opts(disk_path(disk), format: disk_format(disk))
- elsif disk.class == Hash
- g.add_drive_opts(disk[:path], disk[:opts])
- else
- raise "cannot handle type '#{disk.class}'"
- end
- end
- g.launch()
- yield(g, *g.list_devices())
- ensure
- g.close
- end
-
-end
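-
-# A minimal usage sketch (not part of the original helper), assuming the
-# $virt libvirt connection and VM_XML_PATH set up elsewhere by the suite:
-#
-#   storage = VMStorage.new($virt, VM_XML_PATH)
-#   storage.create_new_disk("scratch", size: 4, unit: "GiB", type: "qcow2")
-#   storage.disk_mkpartfs("scratch", "gpt", "ext4", label: "ScratchData")
-#   # ... plug it into a VM via VM#plug_drive, run the scenario ...
-#   storage.delete_volume("scratch")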
diff --git a/cucumber/features/support/helpers/vm_helper.rb b/cucumber/features/support/helpers/vm_helper.rb
deleted file mode 100644
index 57227721..00000000
--- a/cucumber/features/support/helpers/vm_helper.rb
+++ /dev/null
@@ -1,703 +0,0 @@
-require 'ipaddr'
-require 'libvirt'
-require 'rexml/document'
-
-class ExecutionFailedInVM < StandardError
-end
-
-class VMNet
-
- attr_reader :net_name, :net
-
- def initialize(virt, xml_path)
- @virt = virt
- @net_name = LIBVIRT_NETWORK_NAME
- net_xml = File.read("#{xml_path}/default_net.xml")
- rexml = REXML::Document.new(net_xml)
- rexml.elements['network'].add_element('name')
- rexml.elements['network/name'].text = @net_name
- begin
- old_net = @virt.lookup_network_by_name(@net_name)
- rexml.elements['network'].add_element('uuid')
- rexml.elements['network/uuid'].text = old_net.uuid
- old_net.undefine
- rescue
- end
- update(rexml.to_s)
- rescue Exception => e
- destroy_and_undefine
- raise e
- end
-
- # We look up by name so we also catch networks from previous test
- # suite runs that weren't properly cleaned up (e.g. aborted).
- def destroy_and_undefine
- begin
- old_net = @virt.lookup_network_by_name(@net_name)
- old_net.destroy if old_net.active?
- old_net.undefine
- rescue
- end
- end
-
- def update(xml)
- destroy_and_undefine
- @net = @virt.define_network_xml(xml)
- @net.create
- end
-
- def bridge_name
- @net.bridge_name
- end
-
- def bridge_ip_addr
- net_xml = REXML::Document.new(@net.xml_desc)
- IPAddr.new(net_xml.elements['network/ip'].attributes['address']).to_s
- end
-
- def bridge_mac
- File.read("/sys/class/net/#{bridge_name}/address").chomp
- end
-end
-
-
-class VM
-
- attr_reader :domain, :domain_name, :display, :vmnet, :storage
-
- def initialize(virt, xml_path, vmnet, storage, x_display)
- @virt = virt
- @xml_path = xml_path
- @vmnet = vmnet
- @storage = storage
- @domain_name = LIBVIRT_DOMAIN_NAME
- default_domain_xml = File.read("#{@xml_path}/default.xml")
- rexml = REXML::Document.new(default_domain_xml)
- rexml.elements['domain'].add_element('name')
- rexml.elements['domain/name'].text = @domain_name
- begin
- old_domain = @virt.lookup_domain_by_name(@domain_name)
- rexml.elements['domain'].add_element('uuid')
- rexml.elements['domain/uuid'].text = old_domain.uuid
- old_domain.undefine
- rescue
- end
- rexml.elements['domain/devices/serial/source'].attributes['service'] = LIBVIRT_REMOTE_SHELL_PORT
- update(rexml.to_s)
- @display = Display.new(@domain_name, x_display)
- set_cdrom_boot(TAILS_ISO)
- plug_network
- rescue Exception => e
- destroy_and_undefine
- raise e
- end
-
- def update(xml)
- destroy_and_undefine
- @domain = @virt.define_domain_xml(xml)
- end
-
- # We look up by name so we also catch domains from previous test
- # suite runs that weren't properly cleaned up (e.g. aborted).
- def destroy_and_undefine
- @display.stop if @display && @display.active?
- begin
- old_domain = @virt.lookup_domain_by_name(@domain_name)
- old_domain.destroy if old_domain.active?
- old_domain.undefine
- rescue
- end
- end
-
- def real_mac(alias_name)
- REXML::Document.new(@domain.xml_desc)
- .elements["domain/devices/interface[@type='network']/" +
- "alias[@name='#{alias_name}']"]
- .parent.elements['mac'].attributes['address'].to_s
- end
-
- def all_real_macs
- macs = []
- REXML::Document.new(@domain.xml_desc)
- .elements.each("domain/devices/interface[@type='network']") do |nic|
- macs << nic.elements['mac'].attributes['address'].to_s
- end
- macs
- end
-
- def set_hardware_clock(time)
- assert(not(is_running?), 'The hardware clock cannot be set when the ' +
- 'VM is running')
- assert(time.instance_of?(Time), "Argument must be of type 'Time'")
- adjustment = (time - Time.now).to_i
- domain_rexml = REXML::Document.new(@domain.xml_desc)
- clock_rexml_element = domain_rexml.elements['domain'].add_element('clock')
- clock_rexml_element.add_attributes('offset' => 'variable',
- 'basis' => 'utc',
- 'adjustment' => adjustment.to_s)
- update(domain_rexml.to_s)
- end
-
- def network_link_state
- REXML::Document.new(@domain.xml_desc)
- .elements['domain/devices/interface/link'].attributes['state']
- end
-
- def set_network_link_state(state)
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements['domain/devices/interface/link'].attributes['state'] = state
- if is_running?
- @domain.update_device(domain_xml.elements['domain/devices/interface'].to_s)
- else
- update(domain_xml.to_s)
- end
- end
-
- def plug_network
- set_network_link_state('up')
- end
-
- def unplug_network
- set_network_link_state('down')
- end
-
- def set_boot_device(dev)
- if is_running?
- raise "boot settings can only be set for inactive vms"
- end
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements['domain/os/boot'].attributes['dev'] = dev
- update(domain_xml.to_s)
- end
-
- def add_cdrom_device
- if is_running?
- raise "Can't attach a CDROM device to a running domain"
- end
- domain_rexml = REXML::Document.new(@domain.xml_desc)
- if domain_rexml.elements["domain/devices/disk[@device='cdrom']"]
- raise "A CDROM device already exists"
- end
- cdrom_rexml = REXML::Document.new(File.read("#{@xml_path}/cdrom.xml")).root
- domain_rexml.elements['domain/devices'].add_element(cdrom_rexml)
- update(domain_rexml.to_s)
- end
-
- def remove_cdrom_device
- if is_running?
- raise "Can't detach a CDROM device to a running domain"
- end
- domain_rexml = REXML::Document.new(@domain.xml_desc)
- cdrom_el = domain_rexml.elements["domain/devices/disk[@device='cdrom']"]
- if cdrom_el.nil?
- raise "No CDROM device is present"
- end
- domain_rexml.elements["domain/devices"].delete_element(cdrom_el)
- update(domain_rexml.to_s)
- end
-
- def eject_cdrom
- execute_successfully('/usr/bin/eject -m')
- end
-
- def remove_cdrom_image
- domain_rexml = REXML::Document.new(@domain.xml_desc)
- cdrom_el = domain_rexml.elements["domain/devices/disk[@device='cdrom']"]
- if cdrom_el.nil?
- raise "No CDROM device is present"
- end
- cdrom_el.delete_element('source')
- update(domain_rexml.to_s)
- rescue Libvirt::Error => e
- # Even though the CD-ROM is removed successfully we still get this
- # error, so let's ignore it.
- acceptable_error =
- "Call to virDomainUpdateDeviceFlags failed: internal error: unable to " +
- "execute QEMU command 'eject': (Tray of device '.*' is not open|" +
- "Device '.*' is locked)"
- raise e if not(Regexp.new(acceptable_error).match(e.to_s))
- end
-
- def set_cdrom_image(image)
- if image.nil? or image == ''
- raise "Can't set cdrom image to an empty string"
- end
- remove_cdrom_image
- domain_rexml = REXML::Document.new(@domain.xml_desc)
- cdrom_el = domain_rexml.elements["domain/devices/disk[@device='cdrom']"]
- cdrom_el.add_element('source', { 'file' => image })
- update(domain_rexml.to_s)
- end
-
- def set_cdrom_boot(image)
- if is_running?
- raise "boot settings can only be set for inactive vms"
- end
- domain_rexml = REXML::Document.new(@domain.xml_desc)
- if not domain_rexml.elements["domain/devices/disk[@device='cdrom']"]
- add_cdrom_device
- end
- set_cdrom_image(image)
- set_boot_device('cdrom')
- end
-
- def list_disk_devs
- ret = []
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements.each('domain/devices/disk') do |e|
- ret << e.elements['target'].attribute('dev').to_s
- end
- return ret
- end
-
- def plug_device(xml)
- if is_running?
- @domain.attach_device(xml.to_s)
- else
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements['domain/devices'].add_element(xml)
- update(domain_xml.to_s)
- end
- end
-
- def plug_drive(name, type)
- if disk_plugged?(name)
- raise "disk '#{name}' already plugged"
- end
- removable_usb = nil
- case type
- when "removable usb", "usb"
- type = "usb"
- removable_usb = "on"
- when "non-removable usb"
- type = "usb"
- removable_usb = "off"
- end
- # Get the next free /dev/sdX on guest
- letter = 'a'
- dev = "sd" + letter
- while list_disk_devs.include?(dev)
- letter = (letter[0].ord + 1).chr
- dev = "sd" + letter
- end
- assert letter <= 'z'
-
- xml = REXML::Document.new(File.read("#{@xml_path}/disk.xml"))
- xml.elements['disk/source'].attributes['file'] = @storage.disk_path(name)
- xml.elements['disk/driver'].attributes['type'] = @storage.disk_format(name)
- xml.elements['disk/target'].attributes['dev'] = dev
- xml.elements['disk/target'].attributes['bus'] = type
- xml.elements['disk/target'].attributes['removable'] = removable_usb if removable_usb
-
- plug_device(xml)
- end
-
- def disk_xml_desc(name)
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements.each('domain/devices/disk') do |e|
- begin
- if e.elements['source'].attribute('file').to_s == @storage.disk_path(name)
- return e.to_s
- end
- rescue
- next
- end
- end
- return nil
- end
-
- def disk_rexml_desc(name)
- xml = disk_xml_desc(name)
- if xml
- return REXML::Document.new(xml)
- else
- return nil
- end
- end
-
- def unplug_drive(name)
- xml = disk_xml_desc(name)
- @domain.detach_device(xml)
- end
-
- def disk_type(dev)
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements.each('domain/devices/disk') do |e|
- if e.elements['target'].attribute('dev').to_s == dev
- return e.elements['driver'].attribute('type').to_s
- end
- end
- raise "No such disk device '#{dev}'"
- end
-
- def disk_dev(name)
- rexml = disk_rexml_desc(name) or return nil
- return "/dev/" + rexml.elements['disk/target'].attribute('dev').to_s
- end
-
- def disk_name(dev)
- dev = File.basename(dev)
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements.each('domain/devices/disk') do |e|
- if /^#{e.elements['target'].attribute('dev').to_s}/.match(dev)
- return File.basename(e.elements['source'].attribute('file').to_s)
- end
- end
- raise "No such disk device '#{dev}'"
- end
-
- def udisks_disk_dev(name)
- return disk_dev(name).gsub('/dev/', '/org/freedesktop/UDisks/devices/')
- end
-
- def disk_detected?(name)
- dev = disk_dev(name) or return false
- return execute("test -b #{dev}").success?
- end
-
- def disk_plugged?(name)
- return not(disk_xml_desc(name).nil?)
- end
-
- def set_disk_boot(name, type)
- if is_running?
- raise "boot settings can only be set for inactive vms"
- end
- plug_drive(name, type) if not(disk_plugged?(name))
- set_boot_device('hd')
- # XXX:Stretch: since our isotesters upgraded QEMU from
- # 2.5+dfsg-4~bpo8+1 to 2.6+dfsg-3.1~bpo8+1 it seems we must remove
- # the CDROM device to allow disk boot. This is not the case with the same
- # version on Debian Sid. Let's hope we can remove this ugly
- # workaround when we only support running the automated test suite
- # on Stretch.
- domain_rexml = REXML::Document.new(@domain.xml_desc)
- if domain_rexml.elements["domain/devices/disk[@device='cdrom']"]
- remove_cdrom_device
- end
- end
-
- # XXX-9p: Shares don't work together with snapshot save+restore. See
- # XXX-9p in common_steps.rb for more information.
- def add_share(source, tag)
- if is_running?
- raise "shares can only be added to inactive vms"
- end
- # The complete source directory must be group readable by the user
- # running the virtual machine, and world readable so the user inside
- # the VM can access it (since we use the passthrough security model).
- FileUtils.chown_R(nil, "libvirt-qemu", source)
- FileUtils.chmod_R("go+rX", source)
- xml = REXML::Document.new(File.read("#{@xml_path}/fs_share.xml"))
- xml.elements['filesystem/source'].attributes['dir'] = source
- xml.elements['filesystem/target'].attributes['dir'] = tag
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements['domain/devices'].add_element(xml)
- update(domain_xml.to_s)
- end
-
- def list_shares
- list = []
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements.each('domain/devices/filesystem') do |e|
- list << e.elements['target'].attribute('dir').to_s
- end
- return list
- end
-
- def set_os_loader(type)
- if is_running?
- raise "boot settings can only be set for inactive vms"
- end
- if type == 'UEFI'
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements['domain/os'].add_element(REXML::Document.new(
- '<loader>/usr/share/ovmf/OVMF.fd</loader>'
- ))
- update(domain_xml.to_s)
- else
- raise "unsupported OS loader type"
- end
- end
-
- def is_running?
- begin
- return @domain.active?
- rescue
- return false
- end
- end
-
- def execute(cmd, options = {})
- options[:user] ||= "root"
- options[:spawn] = false unless options.has_key?(:spawn)
- if options[:libs]
- libs = options[:libs]
- options.delete(:libs)
- libs = [libs] unless libs.respond_to?(:map)
- cmds = libs.map do |lib_name|
- ". /usr/local/lib/tails-shell-library/#{lib_name}.sh"
- end
- cmds << cmd
- cmd = cmds.join(" && ")
- end
- return RemoteShell::ShellCommand.new(self, cmd, options)
- end
-
- def execute_successfully(*args)
- p = execute(*args)
- begin
- assert_vmcommand_success(p)
- rescue Test::Unit::AssertionFailedError => e
- raise ExecutionFailedInVM.new(e)
- end
- return p
- end
-
- def spawn(cmd, options = {})
- options[:spawn] = true
- return execute(cmd, options)
- end
-
- def wait_until_remote_shell_is_up(timeout = 90)
- msg = 'hello?'
- try_for(timeout, :msg => "Remote shell seems to be down") do
- Timeout::timeout(3) do
- execute_successfully("echo '#{msg}'").stdout.chomp == msg
- end
- end
- end
-
- def host_to_guest_time_sync
- host_time = DateTime.now.strftime("%s")
- execute("date -s '@#{host_time}'").success?
- end
-
- def has_network?
- nmcli_info = execute('nmcli device show eth0').stdout
- has_ipv4_addr = /^IP4.ADDRESS(\[\d+\])?:\s*([0-9.\/]+)$/.match(nmcli_info)
- network_link_state == 'up' && has_ipv4_addr
- end
-
- def has_process?(process)
- return execute("pidof -x -o '%PPID' " + process).success?
- end
-
- def pidof(process)
- return execute("pidof -x -o '%PPID' " + process).stdout.chomp.split
- end
-
- def select_virtual_desktop(desktop_number, user = LIVE_USER)
- assert(desktop_number >= 0 && desktop_number <= 3,
- "Only values between 0 and 3 are valid virtual desktop numbers")
- execute_successfully(
- "xdotool set_desktop '#{desktop_number}'",
- :user => user
- )
- end
-
- def focus_window(window_title, user = LIVE_USER)
- def do_focus(window_title, user)
- execute_successfully(
- "xdotool search --name '#{window_title}' windowactivate --sync",
- :user => user
- )
- end
-
- begin
- do_focus(window_title, user)
- rescue ExecutionFailedInVM
- # Often when xdotool fails to focus a window it'll work when retried
- # after redrawing the screen. Switching to a new virtual desktop then
- # back seems to be a reliable way to handle this.
- # Sadly we have to rely on a lot of sleep() here since there's
- # little on the screen etc that we truly can rely on.
- sleep 5
- select_virtual_desktop(1)
- sleep 5
- select_virtual_desktop(0)
- sleep 5
- do_focus(window_title, user)
- end
- rescue
- # noop
- end
-
- def file_exist?(file)
- execute("test -e '#{file}'").success?
- end
-
- def directory_exist?(directory)
- execute("test -d '#{directory}'").success?
- end
-
- def file_open(path)
- f = RemoteShell::File.new(self, path)
- yield f if block_given?
- return f
- end
-
- def file_content(path)
- file_open(path) { |f| return f.read() }
- end
-
- def file_overwrite(path, lines)
- lines = lines.join("\n") if lines.class == Array
- file_open(path) { |f| return f.write(lines) }
- end
-
- def file_append(path, lines)
- lines = lines.join("\n") if lines.class == Array
- file_open(path) { |f| return f.append(lines) }
- end
-
- def set_clipboard(text)
- execute_successfully("echo -n '#{text}' | xsel --input --clipboard",
- :user => LIVE_USER)
- end
-
- def get_clipboard
- execute_successfully("xsel --output --clipboard", :user => LIVE_USER).stdout
- end
-
- def internal_snapshot_xml(name)
- disk_devs = list_disk_devs
- disks_xml = " <disks>\n"
- for dev in disk_devs
- snapshot_type = disk_type(dev) == "qcow2" ? 'internal' : 'no'
- disks_xml +=
- " <disk name='#{dev}' snapshot='#{snapshot_type}'></disk>\n"
- end
- disks_xml += " </disks>"
- return <<-EOF
-<domainsnapshot>
- <name>#{name}</name>
- <description>Snapshot for #{name}</description>
-#{disks_xml}
- </domainsnapshot>
-EOF
- end
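-
- # For reference (not part of the original code): with a single qcow2
- # disk attached as "sda" and the name "some-snapshot", the method above
- # returns XML along these lines:
- #
- #   <domainsnapshot>
- #     <name>some-snapshot</name>
- #     <description>Snapshot for some-snapshot</description>
- #     <disks>
- #       <disk name='sda' snapshot='internal'></disk>
- #     </disks>
- #   </domainsnapshot>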
-
- def VM.ram_only_snapshot_path(name)
- return "#{$config["TMPDIR"]}/#{name}-snapshot.memstate"
- end
-
- def save_snapshot(name)
- # If we have no qcow2 disk device, we'll use "memory state"
- # snapshots, and if we have at least one qcow2 disk device, we'll
- # use internal "system checkpoint" (memory + disks) snapshots. We
- # have to do this since internal snapshots don't work when no
- # such disk is available. We could do this with external snapshots,
- # which are better in many ways, but libvirt doesn't know how to
- # restore (revert back to) them yet.
- # WARNING: If only transient disks, i.e. disks that were plugged
- # after starting the domain, are used then the memory state will
- # be dropped. External snapshots would also fix this.
- internal_snapshot = false
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements.each('domain/devices/disk') do |e|
- if e.elements['driver'].attribute('type').to_s == "qcow2"
- internal_snapshot = true
- break
- end
- end
-
- # Note: In this case the "opposite" of `internal_snapshot` is not
- # anything relating to external snapshots, but actually "memory
- # state"(-only) snapshots.
- if internal_snapshot
- xml = internal_snapshot_xml(name)
- @domain.snapshot_create_xml(xml)
- else
- snapshot_path = VM.ram_only_snapshot_path(name)
- @domain.save(snapshot_path)
- # For consistency with the internal snapshot case (which is
- # "live", so the domain doesn't go down) we immediately restore
- # the snapshot.
- # Assumption: that *immediate* save + restore doesn't mess up
- # with network state and similar, and is fast enough to not make
- # the clock drift too much.
- restore_snapshot(name)
- end
- end
-
- def restore_snapshot(name)
- @domain.destroy if is_running?
- @display.stop if @display and @display.active?
- # See comment in save_snapshot() for details on why we use two
- # different type of snapshots.
- potential_ram_only_snapshot_path = VM.ram_only_snapshot_path(name)
- if File.exist?(potential_ram_only_snapshot_path)
- Libvirt::Domain::restore(@virt, potential_ram_only_snapshot_path)
- @domain = @virt.lookup_domain_by_name(@domain_name)
- else
- begin
- potential_internal_snapshot = @domain.lookup_snapshot_by_name(name)
- @domain.revert_to_snapshot(potential_internal_snapshot)
- rescue Libvirt::RetrieveError
- raise "No such (internal nor external) snapshot #{name}"
- end
- end
- @display.start
- end
-
- def VM.remove_snapshot(name)
- old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME)
- potential_ram_only_snapshot_path = VM.ram_only_snapshot_path(name)
- if File.exist?(potential_ram_only_snapshot_path)
- File.delete(potential_ram_only_snapshot_path)
- else
- snapshot = old_domain.lookup_snapshot_by_name(name)
- snapshot.delete
- end
- end
-
- def VM.snapshot_exists?(name)
- return true if File.exist?(VM.ram_only_snapshot_path(name))
- old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME)
- snapshot = old_domain.lookup_snapshot_by_name(name)
- return snapshot != nil
- rescue Libvirt::RetrieveError
- return false
- end
-
- def VM.remove_all_snapshots
- Dir.glob("#{$config["TMPDIR"]}/*-snapshot.memstate").each do |file|
- File.delete(file)
- end
- old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME)
- old_domain.list_all_snapshots.each { |snapshot| snapshot.delete }
- rescue Libvirt::RetrieveError
- # No such domain, so no snapshots either.
- end
-
- def start
- return if is_running?
- @domain.create
- @display.start
- end
-
- def reset
- @domain.reset if is_running?
- end
-
- def power_off
- @domain.destroy if is_running?
- @display.stop
- end
-
- def take_screenshot(description)
- @display.take_screenshot(description)
- end
-
- def get_remote_shell_port
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements.each('domain/devices/serial') do |e|
- if e.attribute('type').to_s == "tcp"
- return e.elements['source'].attribute('service').to_s.to_i
- end
- end
- end
-
-end
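-
-# A minimal usage sketch (not part of the original helper), assuming the
-# $virt connection, the XML templates under VM_XML_PATH, and the $vmnet,
-# $vmstorage and DISPLAY values that the suite sets up elsewhere:
-#
-#   vm = VM.new($virt, VM_XML_PATH, $vmnet, $vmstorage, $config["DISPLAY"])
-#   vm.start
-#   vm.wait_until_remote_shell_is_up
-#   vm.save_snapshot("freshly-booted")   # RAM-only unless a qcow2 disk is attached
-#   vm.restore_snapshot("freshly-booted")
-#   vm.power_off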
diff --git a/cucumber/features/support/hooks.rb b/cucumber/features/support/hooks.rb
deleted file mode 100644
index 9c3abb56..00000000
--- a/cucumber/features/support/hooks.rb
+++ /dev/null
@@ -1,313 +0,0 @@
-require 'fileutils'
-require 'rb-inotify'
-require 'time'
-require 'tmpdir'
-
-# Run once, before any feature
-AfterConfiguration do |config|
- # Reorder the execution of some features. As we progress through a
- # run we accumulate more and more snapshots and hence use more and
- # more disk space, but some features will leave nothing behind
- # and/or possibly use large amounts of disk space temporarily for
- # various reasons. By running these first we minimize the amount of
- # disk space needed.
- prioritized_features = [
- # Features not using snapshots but using large amounts of scratch
- # space for other reasons:
- 'features/untrusted_partitions.feature',
- # Features using temporary snapshots:
- 'features/apt.feature',
- 'features/root_access_control.feature',
- 'features/time_syncing.feature',
- 'features/tor_bridges.feature',
- # Features using large amounts of scratch space for other reasons:
- 'features/erase_memory.feature',
- # This feature needs almost the biggest snapshot (USB install,
- # excluding persistence) and will create yet another disk and
- # install Tails on it. This should be the peak of disk usage.
- 'features/usb_install.feature',
- # This feature needs a copy of the ISO and creates a new disk.
- 'features/usb_upgrade.feature',
- # This feature needs a very big snapshot (USB install with persistence)
- # and another, network-enabled snapshot.
- 'features/emergency_shutdown.feature',
- ]
- feature_files = config.feature_files
- # The &-intersection is specified to keep the element ordering of
- # the *left* operand.
- intersection = prioritized_features & feature_files
- if not intersection.empty?
- feature_files -= intersection
- feature_files = intersection + feature_files
- config.define_singleton_method(:feature_files) { feature_files }
- end
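-
- # Illustration (not from the original hook): Array#& keeps the ordering
- # of its *left* operand and drops anything not present in both, e.g.
- #
- #   ["b.feature", "a.feature"] & ["a.feature", "b.feature", "c.feature"]
- #   # => ["b.feature", "a.feature"]
- #
- # so prepending the intersection makes the prioritized features run first.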
-
- # Used to keep track of when we start our first @product feature, when
- # we'll do some special things.
- $started_first_product_feature = false
-
- if File.exist?($config["TMPDIR"])
- if !File.directory?($config["TMPDIR"])
- raise "Temporary directory '#{$config["TMPDIR"]}' exists but is not a " +
- "directory"
- end
- if !File.owned?($config["TMPDIR"])
- raise "Temporary directory '#{$config["TMPDIR"]}' must be owned by the " +
- "current user"
- end
- FileUtils.chmod(0755, $config["TMPDIR"])
- else
- begin
- FileUtils.mkdir_p($config["TMPDIR"])
- rescue Errno::EACCES => e
- raise "Cannot create temporary directory: #{e.to_s}"
- end
- end
-
- # Start a thread that monitors a pseudo fifo file and debug_log():s
- # anything written to it "immediately" (well, as fast as inotify
- # detects it). We're forced to a convoluted solution like this
- # because CRuby's thread support is horrible as soon as IO is mixed
- # in (other threads get blocked).
- FileUtils.rm(DEBUG_LOG_PSEUDO_FIFO) if File.exist?(DEBUG_LOG_PSEUDO_FIFO)
- FileUtils.touch(DEBUG_LOG_PSEUDO_FIFO)
- at_exit do
- FileUtils.rm(DEBUG_LOG_PSEUDO_FIFO) if File.exist?(DEBUG_LOG_PSEUDO_FIFO)
- end
- Thread.new do
- File.open(DEBUG_LOG_PSEUDO_FIFO) do |fd|
- watcher = INotify::Notifier.new
- watcher.watch(DEBUG_LOG_PSEUDO_FIFO, :modify) do
- line = fd.read.chomp
- debug_log(line) if line and line.length > 0
- end
- watcher.run
- end
- end
- # Fix Sikuli's debug_log():ing.
- bind_java_to_pseudo_fifo_logger
-end
-
-# Common
-########
-
-After do
- if @after_scenario_hooks
- @after_scenario_hooks.each { |block| block.call }
- end
- @after_scenario_hooks = Array.new
-end
-
-BeforeFeature('@product', '@source') do |feature|
- raise "Feature #{feature.file} is tagged both @product and @source, " +
- "which is an impossible combination"
-end
-
-at_exit do
- $vm.destroy_and_undefine if $vm
- if $virt
- unless KEEP_SNAPSHOTS
- VM.remove_all_snapshots
- $vmstorage.clear_pool
- end
- #$vmnet.destroy_and_undefine
- $virt.close
- end
- # The artifacts directory is empty (and useless) if it contains
- # nothing but the mandatory . and ..
- if Dir.entries(ARTIFACTS_DIR).size <= 2
- FileUtils.rmdir(ARTIFACTS_DIR)
- end
-end
-
-# For @product tests
-####################
-
-def add_after_scenario_hook(&block)
- @after_scenario_hooks ||= Array.new
- @after_scenario_hooks << block
-end
-
-def save_failure_artifact(type, path)
- $failure_artifacts << [type, path]
-end
-
-# Due to Tails' Tor enforcement, we only allow contacting hosts that
-# are Tor nodes or located on the LAN. However, when we try
-# to verify that only such hosts are contacted we have a problem --
-# we run all Tor nodes (via Chutney) *and* LAN hosts (used in some
-# tests) on the same host, the one running the test suite. Hence we
-# need to always explicitly track which nodes are LAN or not.
-#
-# Warning: when a host is added via this function, it is only added
-# for the current scenario. As such, if this is done before saving a
-# snapshot, it will not remain after the snapshot is loaded.
-def add_lan_host(ipaddr, port)
- @lan_hosts ||= []
- @lan_hosts << { address: ipaddr, port: port }
-end
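-
-# For example (hypothetical values): a scenario that starts a web server
-# on the LAN side would register it up front so that the firewall leak
-# checks can treat the connection as allowed:
-#
-#   add_lan_host("10.2.1.1", 8080)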
-
-BeforeFeature('@product') do |feature|
- if TAILS_ISO.nil?
- raise "No ISO image specified, and none could be found in the " +
- "current directory"
- end
- if File.exist?(TAILS_ISO)
- # Workaround: when libvirt takes ownership of the ISO image it may
- # become unreadable for the live user inside the guest in the
- # host-to-guest share used for some tests.
-
- if !File.world_readable?(TAILS_ISO)
- if File.owned?(TAILS_ISO)
- File.chmod(0644, TAILS_ISO)
- else
- raise "warning: the Tails ISO image must be world readable or be " +
- "owned by the current user to be available inside the guest " +
- "VM via host-to-guest shares, which is required by some tests"
- end
- end
- else
- raise "The specified Tails ISO image '#{TAILS_ISO}' does not exist"
- end
- if !File.exist?(OLD_TAILS_ISO)
- raise "The specified old Tails ISO image '#{OLD_TAILS_ISO}' does not exist"
- end
- if not($started_first_product_feature)
- $virt = Libvirt::open("qemu:///system")
- VM.remove_all_snapshots if !KEEP_SNAPSHOTS
- $vmnet = VMNet.new($virt, VM_XML_PATH)
- $vmstorage = VMStorage.new($virt, VM_XML_PATH)
- $started_first_product_feature = true
- end
- #ensure_chutney_is_running
-end
-
-AfterFeature('@product') do
- unless KEEP_SNAPSHOTS
- checkpoints.each do |name, vals|
- if vals[:temporary] and VM.snapshot_exists?(name)
- VM.remove_snapshot(name)
- end
- end
- end
- $vmstorage.list_volumes.each do |vol_name|
- next if vol_name == '__internal'
- $vmstorage.delete_volume(vol_name)
- end
-end
-
-# Cucumber Before hooks are executed in the order they are listed, and
-# we want this hook to always run first, so it must always be the
-# *first* Before hook matching @product listed in this file.
-Before('@product') do |scenario|
- $failure_artifacts = Array.new
- if $config["CAPTURE"]
- video_name = sanitize_filename("#{scenario.name}.mpg")
- @video_path = "#{ARTIFACTS_DIR}/#{video_name}"
- capture = IO.popen(['avconv',
- '-f', 'x11grab',
- '-s', '1024x768',
- '-r', '5',
- '-i', "#{$config['DISPLAY']}.0",
- '-an',
- '-c:v', 'libx264',
- '-y',
- @video_path,
- :err => ['/dev/null', 'w'],
- ])
- @video_capture_pid = capture.pid
- end
- @screen = Sikuli::Screen.new
- # English will be assumed if this is not overridden
- @language = ""
- @os_loader = "MBR"
- @sudo_password = "asdf"
- @persistence_password = "asdf"
- # See comment for add_lan_host() above.
- @lan_hosts ||= []
-end
-
-# Cucumber After hooks are executed in the *reverse* order they are
-# listed, and we want this hook to always run second last, so it must always
-# be the *second* After hook matching @product listed in this file --
-# hooks added dynamically via add_after_scenario_hook() are supposed to
-# truly be last.
-After('@product') do |scenario|
- if @video_capture_pid
- # We can be incredibly fast at detecting errors sometimes, so the
- # screen has barely "settled" by the time we end up here and kill the
- # video capture. Let's wait a few seconds more to make it easier to see
- # what the error was.
- sleep 3 if scenario.failed?
- Process.kill("INT", @video_capture_pid)
- save_failure_artifact("Video", @video_path)
- end
- if scenario.failed?
- time_of_fail = Time.now - TIME_AT_START
- secs = "%02d" % (time_of_fail % 60)
- mins = "%02d" % ((time_of_fail / 60) % 60)
- hrs = "%02d" % (time_of_fail / (60*60))
- elapsed = "#{hrs}:#{mins}:#{secs}"
- info_log("Scenario failed at time #{elapsed}")
- screen_capture = @screen.capture
- save_failure_artifact("Screenshot", screen_capture.getFilename)
- if scenario.exception.kind_of?(FirewallAssertionFailedError)
- Dir.glob("#{$config["TMPDIR"]}/*.pcap").each do |pcap_file|
- save_failure_artifact("Network capture", pcap_file)
- end
- end
- $failure_artifacts.sort!
- $failure_artifacts.each do |type, file|
- artifact_name = sanitize_filename("#{elapsed}_#{scenario.name}#{File.extname(file)}")
- artifact_path = "#{ARTIFACTS_DIR}/#{artifact_name}"
- assert(File.exist?(file))
- FileUtils.mv(file, artifact_path)
- info_log
- info_log_artifact_location(type, artifact_path)
- end
- if $config["INTERACTIVE_DEBUGGING"]
- pause(
- "Scenario failed: #{scenario.name}. " +
- "The error was: #{scenario.exception.class.name}: #{scenario.exception}"
- )
- end
- else
- if @video_path && File.exist?(@video_path) && not($config['CAPTURE_ALL'])
- FileUtils.rm(@video_path)
- end
- end
-end
-
-Before('@product', '@check_tor_leaks') do |scenario|
- @tor_leaks_sniffer = Sniffer.new(sanitize_filename(scenario.name), $vmnet)
- @tor_leaks_sniffer.capture
- add_after_scenario_hook do
- @tor_leaks_sniffer.clear
- end
-end
-
-After('@product', '@check_tor_leaks') do |scenario|
- @tor_leaks_sniffer.stop
- if scenario.passed?
- allowed_nodes = @bridge_hosts ? @bridge_hosts : allowed_hosts_under_tor_enforcement
- assert_all_connections(@tor_leaks_sniffer.pcap_file) do |c|
- allowed_nodes.include?({ address: c.daddr, port: c.dport })
- end
- end
-end
-
-# For @source tests
-###################
-
-# BeforeScenario
-Before('@source') do
- @orig_pwd = Dir.pwd
- @git_clone = Dir.mktmpdir 'tails-apt-tests'
- Dir.chdir @git_clone
-end
-
-# AfterScenario
-After('@source') do
- Dir.chdir @orig_pwd
- FileUtils.remove_entry_secure @git_clone
-end