author     Philip Hands <phil@hands.com>          2016-03-14 15:36:16 +0100
committer  Holger Levsen <holger@layer-acht.org>  2016-04-28 21:52:10 +0200
commit     da080c472fc415b0ce918f4dd4a1ab143bb1bca4 (patch)
tree       bf63179f32f0eda0c2d5796e3e31c18c3c1185cf /features/support/helpers
parent     26a9e8ec2bcae03db4d663d87b44d8708d64fdc2 (diff)
download   jenkins.debian.net-da080c472fc415b0ce918f4dd4a1ab143bb1bca4.tar.xz
rough attempt to grab the good cucumber bits from recent tails
Diffstat (limited to 'features/support/helpers')
-rw-r--r--  features/support/helpers/chatbot_helper.rb    |  59
-rw-r--r--  features/support/helpers/ctcp_helper.rb       | 126
-rw-r--r--  features/support/helpers/display_helper.rb    |  51
-rw-r--r--  features/support/helpers/exec_helper.rb       |  30
-rw-r--r--  features/support/helpers/firewall_helper.rb   |  87
-rw-r--r--  features/support/helpers/misc_helpers.rb      | 228
-rw-r--r--  features/support/helpers/sikuli_helper.rb     |  91
-rw-r--r--  features/support/helpers/sniffing_helper.rb (renamed from features/support/helpers/net_helper.rb) |  13
-rw-r--r--  features/support/helpers/sshd_helper.rb       |  67
-rw-r--r--  features/support/helpers/storage_helper.rb    | 135
-rw-r--r--  features/support/helpers/vm_helper.rb         | 532
11 files changed, 1112 insertions(+), 307 deletions(-)
diff --git a/features/support/helpers/chatbot_helper.rb b/features/support/helpers/chatbot_helper.rb
new file mode 100644
index 00000000..23ce3e1a
--- /dev/null
+++ b/features/support/helpers/chatbot_helper.rb
@@ -0,0 +1,59 @@
+require 'tempfile'
+
+class ChatBot
+
+ def initialize(account, password, otr_key, opts = Hash.new)
+ @account = account
+ @password = password
+ @otr_key = otr_key
+ @opts = opts
+ @pid = nil
+ @otr_key_file = nil
+ end
+
+ def start
+ @otr_key_file = Tempfile.new("otr_key.", $config["TMPDIR"])
+ @otr_key_file << @otr_key
+ @otr_key_file.close
+
+ cmd_helper(['/usr/bin/convertkey', @otr_key_file.path])
+ cmd_helper(["mv", "#{@otr_key_file.path}3", @otr_key_file.path])
+
+ cmd = [
+ "#{GIT_DIR}/features/scripts/otr-bot.py",
+ @account,
+ @password,
+ @otr_key_file.path
+ ]
+ cmd += ["--connect-server", @opts["connect_server"]] if @opts["connect_server"]
+ cmd += ["--auto-join"] + @opts["auto_join"] if @opts["auto_join"]
+ cmd += ["--log-file", DEBUG_LOG_PSEUDO_FIFO]
+
+ job = IO.popen(cmd)
+ @pid = job.pid
+ end
+
+ def stop
+ @otr_key_file.delete
+ begin
+ Process.kill("TERM", @pid)
+ rescue
+ # noop
+ end
+ end
+
+ def active?
+ begin
+ ret = Process.kill(0, @pid)
+ rescue Errno::ESRCH => e
+ if e.message == "No such process"
+ return false
+ else
+ raise e
+ end
+ end
+ assert_equal(1, ret, "This shouldn't happen")
+ return true
+ end
+
+end
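
A rough usage sketch of the new ChatBot helper as a step definition might drive it; the account, password, server and room below are placeholders, and the OTR key path is hypothetical (GIT_DIR, $config and DEBUG_LOG_PSEUDO_FIFO are assumed to come from the surrounding suite):

    # hypothetical credentials -- real values come from the local configuration
    otr_key = File.read("#{GIT_DIR}/features/config/otr_key")
    chatbot = ChatBot.new("testbot@example.com", "secret", otr_key,
                          "connect_server" => "chat.example.com",
                          "auto_join"      => ["testroom@conference.example.com"])
    chatbot.start
    # ... exercise the Pidgin/OTR scenario against the bot ...
    chatbot.stop if chatbot.active?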
diff --git a/features/support/helpers/ctcp_helper.rb b/features/support/helpers/ctcp_helper.rb
new file mode 100644
index 00000000..ee5180ab
--- /dev/null
+++ b/features/support/helpers/ctcp_helper.rb
@@ -0,0 +1,126 @@
+require 'net/irc'
+require 'timeout'
+
+class CtcpChecker < Net::IRC::Client
+
+ CTCP_SPAM_DELAY = 5
+
+ # `spam_target`: the nickname of the IRC user to CTCP spam.
+ # `ctcp_cmds`: the Array of CTCP commands to send.
+ # `expected_ctcp_replies`: Hash where the keys are the exact set of replies
+ # we expect, and their values a regex the reply data must match.
+ def initialize(host, port, spam_target, ctcp_cmds, expected_ctcp_replies)
+ @spam_target = spam_target
+ @ctcp_cmds = ctcp_cmds
+ @expected_ctcp_replies = expected_ctcp_replies
+ nickname = self.class.random_irc_nickname
+ opts = {
+ :nick => nickname,
+ :user => nickname,
+ :real => nickname,
+ }
+ opts[:logger] = Logger.new(DEBUG_LOG_PSEUDO_FIFO)
+ super(host, port, opts)
+ end
+
+ # Makes sure that only the expected CTCP replies are received.
+ def verify_ctcp_responses
+ @sent_ctcp_cmds = Set.new
+ @received_ctcp_replies = Set.new
+
+ # Give 60 seconds for connecting to the server and other overhead
+ # beyond the expected time to spam all CTCP commands.
+ expected_ctcp_spam_time = @ctcp_cmds.length * CTCP_SPAM_DELAY
+ timeout = expected_ctcp_spam_time + 60
+
+ begin
+ Timeout::timeout(timeout) do
+ start
+ end
+ rescue Timeout::Error
+ # Do nothing as we'll check for errors below.
+ ensure
+ finish
+ end
+
+ ctcp_cmds_not_sent = @ctcp_cmds - @sent_ctcp_cmds.to_a
+ expected_ctcp_replies_not_received =
+ @expected_ctcp_replies.keys - @received_ctcp_replies.to_a
+
+ if !ctcp_cmds_not_sent.empty? || !expected_ctcp_replies_not_received.empty?
+ raise "Failed to spam all CTCP commands and receive the expected " +
+ "replies within #{timeout} seconds.\n" +
+ (ctcp_cmds_not_sent.empty? ? "" :
+ "CTCP commands not sent: #{ctcp_cmds_not_sent}\n") +
+ (expected_ctcp_replies_not_received.empty? ? "" :
+ "Expected CTCP replies not received: " +
+ expected_ctcp_replies_not_received.to_s)
+ end
+
+ end
+
+ # Generate a random IRC nickname, in this case an alpha-numeric
+ # string with length 10 to 15. To make it legal, the first character
+ # is forced to be alpha.
+ def self.random_irc_nickname
+ random_alpha_string(1) + random_alnum_string(9, 14)
+ end
+
+ def spam(spam_target)
+ post(NOTICE, spam_target, "Hi! I'm gonna test your CTCP capabilities now.")
+ @ctcp_cmds.each do |cmd|
+ sleep CTCP_SPAM_DELAY
+ full_cmd = cmd
+ case cmd
+ when "PING"
+ full_cmd += " #{Time.now.to_i}"
+ when "ACTION"
+ full_cmd += " barfs on the floor."
+ when "ERRMSG"
+ full_cmd += " Pidgin should not respond to this."
+ end
+ post(PRIVMSG, spam_target, ctcp_encode(full_cmd))
+ @sent_ctcp_cmds << cmd
+ end
+ end
+
+ def on_rpl_welcome(m)
+ super
+ Thread.new { spam(@spam_target) }
+ end
+
+ def on_message(m)
+ if m.command == ERR_NICKNAMEINUSE
+ finish
+ new_nick = self.class.random_irc_nickname
+ @opts.marshal_load({
+ :nick => new_nick,
+ :user => new_nick,
+ :real => new_nick,
+ })
+ start
+ return
+ end
+
+ if m.ctcp? and /^:#{Regexp.escape(@spam_target)}!/.match(m)
+ m.ctcps.each do |ctcp_reply|
+ reply_type, _, reply_data = ctcp_reply.partition(" ")
+ if @expected_ctcp_replies.has_key?(reply_type)
+ if @expected_ctcp_replies[reply_type].match(reply_data)
+ @received_ctcp_replies << reply_type
+ else
+ raise "Received expected CTCP reply '#{reply_type}' but with " +
+ "unexpected data '#{reply_data}' "
+ end
+ else
+ raise "Received unexpected CTCP reply '#{reply_type}' with " +
+ "data '#{reply_data}'"
+ end
+ end
+ end
+ if Set.new(@ctcp_cmds) == @sent_ctcp_cmds && \
+ Set.new(@expected_ctcp_replies.keys) == @received_ctcp_replies
+ finish
+ end
+ end
+end
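
A sketch of how the checker might be invoked; the host, port, nick and reply patterns are made-up illustrations, not values taken from the feature files:

    expected_replies = {
      "PING"    => /^\d+$/,        # our own timestamp echoed back
      "VERSION" => /^Pidgin \d+/,  # hypothetical client banner
      "TIME"    => /.+/,
    }
    checker = CtcpChecker.new("irc.example.com", 6667, "target_nick",
                              expected_replies.keys, expected_replies)
    # Raises if a command cannot be sent, an expected reply is missing,
    # or an unexpected CTCP reply shows up.
    checker.verify_ctcp_responses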
diff --git a/features/support/helpers/display_helper.rb b/features/support/helpers/display_helper.rb
index 354935f0..b4dce733 100644
--- a/features/support/helpers/display_helper.rb
+++ b/features/support/helpers/display_helper.rb
@@ -6,8 +6,22 @@ class Display
@x_display = x_display
end
+ def active?
+ p = IO.popen(["xprop", "-display", @x_display,
+ "-name", "#{@domain} (1) - Virt Viewer",
+ :err => ["/dev/null", "w"]])
+ Process.wait(p.pid)
+ $?.success?
+ end
+
def start
- start_virtviewer(@domain)
+ @virtviewer = IO.popen(["virt-viewer", "--direct",
+ "--kiosk",
+ "--reconnect",
+ "--connect", "qemu:///system",
+ "--display", @x_display,
+ @domain,
+ :err => ["/dev/null", "w"]])
# We wait for the display to be active to not lose actions
# (e.g. key presses via sikuli) that come immediately after
# starting (or restoring) a vm
@@ -17,35 +31,18 @@ class Display
end
def stop
- stop_virtviewer
+ return if @virtviewer.nil?
+ Process.kill("TERM", @virtviewer.pid)
+ @virtviewer.close
+ rescue IOError
+ # IO.pid throws this if the process wasn't started yet. Possibly
+ # there's a race when doing a start() and then quickly running
+ # stop().
end
def restart
- stop_virtviewer
- start_virtviewer(@domain)
- end
-
- def start_virtviewer(domain)
- # virt-viewer forks, so we cannot (easily) get the child pid
- # and use it in active? and stop_virtviewer below...
- IO.popen(["virt-viewer", "-d",
- "-f",
- "-r",
- "-c", "qemu:///system",
- ["--display=", @x_display].join(''),
- domain,
- "&"].join(' '))
+ stop
+ start
end
- def active?
- p = IO.popen("xprop -display #{@x_display} " +
- "-name '#{@domain} (1) - Virt Viewer' 2>/dev/null")
- Process.wait(p.pid)
- p.close
- $? == 0
- end
-
- def stop_virtviewer
- system("killall virt-viewer")
- end
end
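
The reworked Display is now tied to its own virt-viewer child process instead of killall; a rough usage sketch, where ':0' is an assumed X display and LIBVIRT_DOMAIN_NAME is the suite's constant:

    display = Display.new(LIBVIRT_DOMAIN_NAME, ':0')
    display.start    # returns once active? reports the viewer window
    # ... drive the VM via sikuli against this X display ...
    display.stop     # terminates only our own virt-viewer child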
diff --git a/features/support/helpers/exec_helper.rb b/features/support/helpers/exec_helper.rb
index b0d3a9cd..42f6532a 100644
--- a/features/support/helpers/exec_helper.rb
+++ b/features/support/helpers/exec_helper.rb
@@ -10,13 +10,11 @@ class VMCommand
@returncode, @stdout, @stderr = VMCommand.execute(vm, cmd, options)
end
- def VMCommand.wait_until_remote_shell_is_up(vm, timeout = 30)
- begin
- Timeout::timeout(timeout) do
- VMCommand.execute(vm, "true", { :user => "root", :spawn => false })
+ def VMCommand.wait_until_remote_shell_is_up(vm, timeout = 90)
+ try_for(timeout, :msg => "Remote shell seems to be down") do
+ Timeout::timeout(3) do
+ VMCommand.execute(vm, "echo 'hello?'")
end
- rescue Timeout::Error
- raise "Remote shell seems to be down"
end
end
@@ -27,21 +25,21 @@ class VMCommand
# response will always be [0, "", ""] (only used as an
# ACK). execute() will always block until a response is received,
# though. Spawning is useful when starting processes in the
- # background (or running scripts that does the same) like the
- # vidalia-wrapper, or any application we want to interact with.
+ # background (or running scripts that does the same) like our
+ # onioncircuits wrapper, or any application we want to interact with.
def VMCommand.execute(vm, cmd, options = {})
options[:user] ||= "root"
options[:spawn] ||= false
type = options[:spawn] ? "spawn" : "call"
socket = TCPSocket.new("127.0.0.1", vm.get_remote_shell_port)
- STDERR.puts "#{type}ing as #{options[:user]}: #{cmd}" if $debug
+ debug_log("#{type}ing as #{options[:user]}: #{cmd}")
begin
socket.puts(JSON.dump([type, options[:user], cmd]))
s = socket.readline(sep = "\0").chomp("\0")
ensure
socket.close
end
- STDERR.puts "#{type} returned: #{s}" if $debug
+ debug_log("#{type} returned: #{s}") if not(options[:spawn])
begin
return JSON.load(s)
rescue JSON::ParserError
@@ -58,4 +56,16 @@ class VMCommand
return @returncode == 0
end
+ def failure?
+ return not(success?)
+ end
+
+ def to_s
+ "Return status: #{@returncode}\n" +
+ "STDOUT:\n" +
+ @stdout +
+ "STDERR:\n" +
+ @stderr
+ end
+
end
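
A sketch of the helper in isolation, assuming the global $vm created by the environment hooks and the LIVE_USER constant used elsewhere in the suite; the command itself is arbitrary:

    c = VMCommand.new($vm, 'ls /tmp', :user => LIVE_USER)
    if c.failure?
      # to_s now bundles the return status, stdout and stderr
      debug_log("listing failed:\n#{c}")
    end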
diff --git a/features/support/helpers/firewall_helper.rb b/features/support/helpers/firewall_helper.rb
index 400965a5..fce363c5 100644
--- a/features/support/helpers/firewall_helper.rb
+++ b/features/support/helpers/firewall_helper.rb
@@ -11,21 +11,12 @@ class IPAddr
]
PrivateIPv6Ranges = [
- IPAddr.new("fc00::/7"), # private
+ IPAddr.new("fc00::/7")
]
def private?
- if self.ipv4?
- PrivateIPv4Ranges.each do |ipr|
- return true if ipr.include?(self)
- end
- return false
- else
- PrivateIPv6Ranges.each do |ipr|
- return true if ipr.include?(self)
- end
- return false
- end
+ private_ranges = self.ipv4? ? PrivateIPv4Ranges : PrivateIPv6Ranges
+ private_ranges.any? { |range| range.include?(self) }
end
def public?
@@ -34,16 +25,25 @@ class IPAddr
end
class FirewallLeakCheck
- attr_reader :ipv4_tcp_leaks, :ipv4_nontcp_leaks, :ipv6_leaks, :nonip_leaks
+ attr_reader :ipv4_tcp_leaks, :ipv4_nontcp_leaks, :ipv6_leaks, :nonip_leaks, :mac_leaks
- def initialize(pcap_file, tor_relays)
- packets = PacketFu::PcapFile.new.file_to_array(:filename => pcap_file)
- @tor_relays = tor_relays
+ def initialize(pcap_file, options = {})
+ options[:accepted_hosts] ||= []
+ options[:ignore_lan] ||= true
+ @pcap_file = pcap_file
+ packets = PacketFu::PcapFile.new.file_to_array(:filename => @pcap_file)
+ mac_leaks = Set.new
ipv4_tcp_packets = []
ipv4_nontcp_packets = []
ipv6_packets = []
nonip_packets = []
packets.each do |p|
+ if PacketFu::EthPacket.can_parse?(p)
+ packet = PacketFu::EthPacket.parse(p)
+ mac_leaks << packet.eth_saddr
+ mac_leaks << packet.eth_daddr
+ end
+
if PacketFu::TCPPacket.can_parse?(p)
ipv4_tcp_packets << PacketFu::TCPPacket.parse(p)
elsif PacketFu::IPPacket.can_parse?(p)
@@ -57,17 +57,25 @@ class FirewallLeakCheck
raise "Found something in the pcap file that cannot be parsed"
end
end
- ipv4_tcp_hosts = get_public_hosts_from_ippackets ipv4_tcp_packets
- tor_nodes = Set.new(get_all_tor_contacts)
- @ipv4_tcp_leaks = ipv4_tcp_hosts.select{|host| !tor_nodes.member?(host)}
- @ipv4_nontcp_leaks = get_public_hosts_from_ippackets ipv4_nontcp_packets
- @ipv6_leaks = get_public_hosts_from_ippackets ipv6_packets
+ ipv4_tcp_hosts = filter_hosts_from_ippackets(ipv4_tcp_packets,
+ options[:ignore_lan])
+ accepted = Set.new(options[:accepted_hosts])
+ @mac_leaks = mac_leaks
+ @ipv4_tcp_leaks = ipv4_tcp_hosts.select { |host| !accepted.member?(host) }
+ @ipv4_nontcp_leaks = filter_hosts_from_ippackets(ipv4_nontcp_packets,
+ options[:ignore_lan])
+ @ipv6_leaks = filter_hosts_from_ippackets(ipv6_packets,
+ options[:ignore_lan])
@nonip_leaks = nonip_packets
end
- # Returns a list of all unique non-LAN destination IP addresses
- # found in `packets`.
- def get_public_hosts_from_ippackets(packets)
+ def save_pcap_file
+ save_failure_artifact("Network capture", @pcap_file)
+ end
+
+ # Returns a list of all unique destination IP addresses found in
+ # `packets`. Exclude LAN hosts if ignore_lan is set.
+ def filter_hosts_from_ippackets(packets, ignore_lan)
hosts = []
packets.each do |p|
candidate = nil
@@ -80,21 +88,34 @@ class FirewallLeakCheck
raise "Expected an IP{v4,v6} packet, but got something else:\n" +
p.peek_format
end
- if candidate != nil and IPAddr.new(candidate).public?
+ if candidate != nil and (not(ignore_lan) or IPAddr.new(candidate).public?)
hosts << candidate
end
end
hosts.uniq
end
- # Returns an array of all Tor relays and authorities, i.e. all
- # Internet hosts Tails ever should contact.
- def get_all_tor_contacts
- @tor_relays + $tor_authorities
- end
-
- def empty?
- @ipv4_tcp_leaks.empty? and @ipv4_nontcp_leaks.empty? and @ipv6_leaks.empty? and @nonip_leaks.empty?
+ def assert_no_leaks
+ err = ""
+ if !@ipv4_tcp_leaks.empty?
+ err += "The following IPv4 TCP non-Tor Internet hosts were " +
+ "contacted:\n" + ipv4_tcp_leaks.join("\n")
+ end
+ if !@ipv4_nontcp_leaks.empty?
+ err += "The following IPv4 non-TCP Internet hosts were contacted:\n" +
+ ipv4_nontcp_leaks.join("\n")
+ end
+ if !@ipv6_leaks.empty?
+ err += "The following IPv6 Internet hosts were contacted:\n" +
+ ipv6_leaks.join("\n")
+ end
+ if !@nonip_leaks.empty?
+ err += "Some non-IP packets were sent\n"
+ end
+ if !err.empty?
+ save_pcap_file
+ raise err
+ end
end
end
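
The class now takes a generic accepted-hosts list instead of a Tor relay list and raises on its own via assert_no_leaks; a usage sketch, assuming a Sniffer has already produced the pcap and that get_all_tor_nodes from misc_helpers.rb supplies the whitelist:

    leak_check = FirewallLeakCheck.new(sniffer.pcap_file,
                                       :accepted_hosts => get_all_tor_nodes,
                                       :ignore_lan     => true)
    # On any leak this saves the pcap as a failure artifact and raises
    # with a per-category summary of the offending hosts.
    leak_check.assert_no_leaks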
diff --git a/features/support/helpers/misc_helpers.rb b/features/support/helpers/misc_helpers.rb
index caf64b80..7e09411f 100644
--- a/features/support/helpers/misc_helpers.rb
+++ b/features/support/helpers/misc_helpers.rb
@@ -2,6 +2,15 @@ require 'date'
require 'timeout'
require 'test/unit'
+# Test::Unit adds an at_exit hook which, among other things, consumes
+# the command-line arguments that were intended for cucumber. If
+# e.g. `--format` was passed it will throw an error since it's not a
+# valid option for Test::Unit, and it throwing an error at this time
+# (at_exit) will make Cucumber think it failed and consequently exit
+# with an error. Fooling Test::Unit that this hook has already run
+# works around this craziness.
+Test::Unit.run = true
+
# Make all the assert_* methods easily accessible in any context.
include Test::Unit::Assertions
@@ -12,41 +21,131 @@ def assert_vmcommand_success(p, msg = nil)
msg)
end
-# Call block (ignoring any exceptions it may throw) repeatedly with one
-# second breaks until it returns true, or until `t` seconds have
-# passed when we throw Timeout::Error. As a precondition, the code
-# block cannot throw Timeout::Error.
-def try_for(t, options = {})
+# It's forbidden to throw this exception (or subclasses) in anything
+# but try_for() below. Just don't use it anywhere else!
+class UniqueTryForTimeoutError < Exception
+end
+
+# Call block (ignoring any exceptions it may throw) repeatedly with
+# one second breaks until it returns true, or until `timeout` seconds have
+# passed when we throw a Timeout::Error exception.
+def try_for(timeout, options = {})
options[:delay] ||= 1
- begin
- Timeout::timeout(t) do
- loop do
- begin
- return true if yield
- rescue Timeout::Error => e
- if options[:msg]
- raise RuntimeError, options[:msg], caller
- else
- raise e
- end
- rescue Exception
- # noop
- end
- sleep options[:delay]
+ last_exception = nil
+ # Create a unique exception used only for this particular try_for
+ # call's Timeout to allow nested try_for:s. If we used the same one,
+ # the innermost try_for would catch all outer ones', creating a
+ # really strange situation.
+ unique_timeout_exception = Class.new(UniqueTryForTimeoutError)
+ Timeout::timeout(timeout, unique_timeout_exception) do
+ loop do
+ begin
+ return if yield
+ rescue NameError, UniqueTryForTimeoutError => e
+ # NameError most likely means typos, and hiding that is rarely
+ # (never?) a good idea, so we rethrow them. See below why we
+ # also rethrow *all* the unique exceptions.
+ raise e
+ rescue Exception => e
+ # All other exceptions are ignored while trying the
+ # block. Well we save the last exception so we can print it in
+ # case of a timeout.
+ last_exception = e
end
+ sleep options[:delay]
end
- rescue Timeout::Error => e
- if options[:msg]
- raise RuntimeError, options[:msg], caller
- else
- raise e
+ end
+ # At this point the block above either succeeded and we'll return,
+ # or we are throwing an exception. If the latter, we either have a
+ # NameError that we'll not catch (and will any try_for below us in
+ # the stack), or we have a unique exception. That can mean one of
+ # two things:
+ # 1. it's the one unique to this try_for, and in that case we'll
+ # catch it, rethrowing it as something that will be ignored by
+ # inside the blocks of all try_for:s below us in the stack.
+ # 2. it's an exception unique to another try_for. Assuming that we
+ # do not throw the unique exceptions in any other place or way
+ # than we do it in this function, this means that there is a
+ # try_for below us in the stack to which this exception must be
+ # unique to.
+ # Let 1 be the base step, and 2 the inductive step, and we sort of
+ # have an inductive proof for the correctness of try_for when it's
+ # nested. It shows that for an infinite stack of try_for:s, any of
+ # the unique exceptions will be caught only by the try_for instance
+ # it is unique to, and all try_for:s in between will ignore it so it
+ # ends up there immediately.
+rescue unique_timeout_exception => e
+ msg = options[:msg] || 'try_for() timeout expired'
+ if last_exception
+ msg += "\nLast ignored exception was: " +
+ "#{last_exception.class}: #{last_exception}"
+ end
+ raise Timeout::Error.new(msg)
+end
+
+class TorFailure < StandardError
+end
+
+class MaxRetriesFailure < StandardError
+end
+
+# This will retry the block up to MAX_NEW_TOR_CIRCUIT_RETRIES
+# times. The block must raise an exception for a run to be considered
+# as a failure. After a failure recovery_proc will be called (if
+# given) and the intention with it is to bring us back to the state
+# expected by the block, so it can be retried.
+def retry_tor(recovery_proc = nil, &block)
+ tor_recovery_proc = Proc.new do
+ force_new_tor_circuit
+ recovery_proc.call if recovery_proc
+ end
+
+ retry_action($config['MAX_NEW_TOR_CIRCUIT_RETRIES'],
+ :recovery_proc => tor_recovery_proc,
+ :operation_name => 'Tor operation', &block)
+end
+
+def retry_i2p(recovery_proc = nil, &block)
+ retry_action(15, :recovery_proc => recovery_proc,
+ :operation_name => 'I2P operation', &block)
+end
+
+def retry_action(max_retries, options = {}, &block)
+ assert(max_retries.is_a?(Integer), "max_retries must be an integer")
+ options[:recovery_proc] ||= nil
+ options[:operation_name] ||= 'Operation'
+
+ retries = 1
+ loop do
+ begin
+ block.call
+ return
+ rescue Exception => e
+ if retries <= max_retries
+ debug_log("#{options[:operation_name]} failed (Try #{retries} of " +
+ "#{max_retries}) with:\n" +
+ "#{e.class}: #{e.message}")
+ options[:recovery_proc].call if options[:recovery_proc]
+ retries += 1
+ else
+ raise MaxRetriesFailure.new("#{options[:operation_name]} failed (despite retrying " +
+ "#{max_retries} times) with\n" +
+ "#{e.class}: #{e.message}")
+ end
end
end
end
def wait_until_tor_is_working
- try_for(240) { @vm.execute(
- '. /usr/local/lib/tails-shell-library/tor.sh; tor_is_working').success? }
+ try_for(270) { $vm.execute('/usr/local/sbin/tor-has-bootstrapped').success? }
+rescue Timeout::Error => e
+ c = $vm.execute("journalctl SYSLOG_IDENTIFIER=restart-tor")
+ if c.success?
+ debug_log("From the journal:\n" + c.stdout.sub(/^/, " "))
+ else
+ debug_log("Nothing was in the journal about 'restart-tor'")
+ end
+ raise e
end
def convert_bytes_mod(unit)
@@ -79,7 +178,12 @@ def convert_from_bytes(size, unit)
end
def cmd_helper(cmd)
- IO.popen(cmd + " 2>&1") do |p|
+ if cmd.instance_of?(Array)
+ cmd << {:err => [:child, :out]}
+ elsif cmd.instance_of?(String)
+ cmd += " 2>&1"
+ end
+ IO.popen(cmd) do |p|
out = p.readlines.join("\n")
p.close
ret = $?
@@ -88,34 +192,62 @@ def cmd_helper(cmd)
end
end
-def tails_iso_creation_date(path)
- label = cmd_helper("/sbin/blkid -p -s LABEL -o value #{path}")
- assert(label[/^TAILS \d+(\.\d+)+(~rc\d+)? - \d+$/],
- "Got invalid label '#{label}' from Tails image '#{path}'")
- return label[/\d+$/]
+# This command will grab all router IP addresses from the Tor
+# consensus in the VM + the hardcoded TOR_AUTHORITIES.
+def get_all_tor_nodes
+ cmd = 'awk "/^r/ { print \$6 }" /var/lib/tor/cached-microdesc-consensus'
+ $vm.execute(cmd).stdout.chomp.split("\n") + TOR_AUTHORITIES
+end
+
+def get_free_space(machine, path)
+ case machine
+ when 'host'
+ assert(File.exists?(path), "Path '#{path}' not found on #{machine}.")
+ free = cmd_helper(["df", path])
+ when 'guest'
+ assert($vm.file_exist?(path), "Path '#{path}' not found on #{machine}.")
+ free = $vm.execute_successfully("df '#{path}'")
+ else
+ raise "Unsupported machine type #{machine} passed."
+ end
+ output = free.split("\n").last
+ return output.match(/[^\s]\s+[0-9]+\s+[0-9]+\s+([0-9]+)\s+.*/)[1].chomp.to_i
+end
+
+def random_string_from_set(set, min_len, max_len)
+ len = (min_len..max_len).to_a.sample
+ len ||= min_len
+ (0..len-1).map { |n| set.sample }.join
end
-def sort_isos_by_creation_date
- Dir.glob("#{Dir.pwd}/*.iso").sort_by {|f| tails_iso_creation_date(f)}
+def random_alpha_string(min_len, max_len = 0)
+ alpha_set = ('A'..'Z').to_a + ('a'..'z').to_a
+ random_string_from_set(alpha_set, min_len, max_len)
end
-def get_newest_iso
- return sort_isos_by_creation_date.last
+def random_alnum_string(min_len, max_len = 0)
+ alnum_set = ('A'..'Z').to_a + ('a'..'z').to_a + (0..9).to_a.map { |n| n.to_s }
+ random_string_from_set(alnum_set, min_len, max_len)
end
-def get_oldest_iso
- return sort_isos_by_creation_date.first
+# Sanitize the filename from unix-hostile filename characters
+def sanitize_filename(filename, options = {})
+ options[:replacement] ||= '_'
+ bad_unix_filename_chars = Regexp.new("[^A-Za-z0-9_\\-.,+:]")
+ filename.gsub(bad_unix_filename_chars, options[:replacement])
end
-# This command will grab all router IP addresses from the Tor
-# consensus in the VM.
-def get_tor_relays
- cmd = 'awk "/^r/ { print \$6 }" /var/lib/tor/cached-microdesc-consensus'
- @vm.execute(cmd).stdout.chomp.split("\n")
+def info_log_artifact_location(type, path)
+ if $config['ARTIFACTS_BASE_URI']
+ # Remove any trailing slashes, we'll add one ourselves
+ base_url = $config['ARTIFACTS_BASE_URI'].gsub(/\/*$/, "")
+ path = "#{base_url}/#{File.basename(path)}"
+ end
+ info_log("#{type.capitalize}: #{path}")
end
-def save_pcap_file
- pcap_copy = "#{$tmp_dir}/pcap_with_leaks-#{DateTime.now}"
- FileUtils.cp(@sniffer.pcap_file, pcap_copy)
- puts "Full network capture available at: #{pcap_copy}"
+def pause(message = "Paused")
+ STDERR.puts
+ STDERR.puts "#{message} (Press ENTER to continue!)"
+ STDIN.gets
end
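
Two sketches of how the reworked helpers compose, with made-up commands and service names; $vm is the suite's global VM handle. An inner try_for timing out is treated as just another failed attempt of the outer one thanks to the per-call unique exception classes, and retry_action turns any exception raised by its block into another attempt until the retry budget is exhausted:

    # nested try_for: outer deadline for the whole service, inner poll
    try_for(120, :msg => 'example.service never became ready') do
      try_for(10) { $vm.execute('systemctl is-active example.service').success? }
      $vm.execute('curl --silent http://127.0.0.1:8080/').success?
    end

    # retry_action: execute_successfully raises on failure, which triggers
    # the recovery proc and a further attempt
    retry_action(3, :operation_name => 'APT update',
                    :recovery_proc  => Proc.new { $vm.execute('apt-get clean') }) do
      $vm.execute_successfully('apt-get update')
    end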
diff --git a/features/support/helpers/sikuli_helper.rb b/features/support/helpers/sikuli_helper.rb
index 503e08b3..938f4851 100644
--- a/features/support/helpers/sikuli_helper.rb
+++ b/features/support/helpers/sikuli_helper.rb
@@ -5,6 +5,9 @@ require 'sikuli-script.jar'
Rjb::load
package_members = [
+ "java.io.FileOutputStream",
+ "java.io.PrintStream",
+ "java.lang.System",
"org.sikuli.script.Finder",
"org.sikuli.script.Key",
"org.sikuli.script.KeyModifier",
@@ -18,6 +21,8 @@ package_members = [
translations = Hash[
"org.sikuli.script", "Sikuli",
+ "java.lang", "Java::Lang",
+ "java.io", "Java::Io",
]
for p in package_members
@@ -36,12 +41,16 @@ for p in package_members
mod.const_set(class_name, imported_class)
end
+# Bind Java's stdout to debug_log() via our magical pseudo fifo
+# logger.
+def bind_java_to_pseudo_fifo_logger
+ file_output_stream = Java::Io::FileOutputStream.new(DEBUG_LOG_PSEUDO_FIFO)
+ print_stream = Java::Io::PrintStream.new(file_output_stream)
+ Java::Lang::System.setOut(print_stream)
+end
+
def findfailed_hook(pic)
- STDERR.puts ""
- STDERR.puts "FindFailed for: #{pic}"
- STDERR.puts ""
- STDERR.puts "Update the image and press RETURN to retry"
- STDIN.gets
+ pause("FindFailed for: '#{pic}'")
end
# Since rjb imports Java classes without creating a corresponding
@@ -61,10 +70,16 @@ end
sikuli_script_proxy = Sikuli::Screen
$_original_sikuli_screen_new ||= Sikuli::Screen.method :new
+# For waitAny()/findAny() we are forced to throw this exception since
+# Rjb::throw doesn't block until the Java exception has been received
+# by Ruby, so strange things can happen.
+class FindAnyFailed < StandardError
+end
+
def sikuli_script_proxy.new(*args)
s = $_original_sikuli_screen_new.call(*args)
- if $sikuli_retry_findfailed
+ if $config["SIKULI_RETRY_FINDFAILED"]
# The usage of `_invoke()` below exemplifies how one can wrap
# around Java objects' methods when they're imported using RJB. It
# isn't pretty. The second argument is the parameter signature,
@@ -104,6 +119,18 @@ def sikuli_script_proxy.new(*args)
self.click(Sikuli::Location.new(x, y))
end
+ def s.doubleClick_point(x, y)
+ self.doubleClick(Sikuli::Location.new(x, y))
+ end
+
+ def s.click_mid_right_edge(pic)
+ r = self.find(pic)
+ top_right = r.getTopRight()
+ x = top_right.getX
+ y = top_right.getY + r.getH/2
+ self.click_point(x, y)
+ end
+
def s.wait_and_click(pic, time)
self.click(self.wait(pic, time))
end
@@ -112,6 +139,48 @@ def sikuli_script_proxy.new(*args)
self.doubleClick(self.wait(pic, time))
end
+ def s.wait_and_right_click(pic, time)
+ self.rightClick(self.wait(pic, time))
+ end
+
+ def s.wait_and_hover(pic, time)
+ self.hover(self.wait(pic, time))
+ end
+
+ def s.existsAny(images)
+ images.each do |image|
+ region = self.exists(image)
+ return [image, region] if region
+ end
+ return nil
+ end
+
+ def s.findAny(images)
+ images.each do |image|
+ begin
+ return [image, self.find(image)]
+ rescue FindFailed
+ # Ignore. We'll throw an appropriate exception after
+ # having looped through all images and found none of them.
+ end
+ end
+ # If we've reached this point, none of the images could be found.
+ raise FindAnyFailed.new("can not find any of the images #{images} on the " +
+ "screen")
+ end
+
+ def s.waitAny(images, time)
+ Timeout::timeout(time) do
+ loop do
+ result = self.existsAny(images)
+ return result if result
+ end
+ end
+ rescue Timeout::Error
+ raise FindAnyFailed.new("can not find any of the images #{images} on the " +
+ "screen")
+ end
+
def s.hover_point(x, y)
self.hover(Sikuli::Location.new(x, y))
end
@@ -132,13 +201,13 @@ end
# required, ruby's require method complains that the method for the
# field accessor is missing.
sikuli_settings = Sikuli::Settings.new
-sikuli_settings.OcrDataPath = $tmp_dir
+sikuli_settings.OcrDataPath = $config["TMPDIR"]
# sikuli_ruby, which we used before, defaulted to 0.9 minimum
# similarity, so all our current images are adapted to that value.
# Also, Sikuli's default of 0.7 is simply too low (many false
# positives).
sikuli_settings.MinSimilarity = 0.9
-sikuli_settings.ActionLogs = $debug
-sikuli_settings.DebugLogs = $debug
-sikuli_settings.InfoLogs = $debug
-sikuli_settings.ProfileLogs = $debug
+sikuli_settings.ActionLogs = true
+sikuli_settings.DebugLogs = true
+sikuli_settings.InfoLogs = true
+sikuli_settings.ProfileLogs = true
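
With the new findAny/waitAny helpers a step can race several candidate images; a sketch with placeholder image names, assuming a screen object created the usual way:

    screen = Sikuli::Screen.new
    # Wait up to 20 seconds for whichever of the two screens shows up first;
    # raises FindAnyFailed if neither is found in time.
    image, match = screen.waitAny(['GreeterLoginButton.png',
                                   'DesktopActivities.png'], 20)
    if image == 'GreeterLoginButton.png'
      screen.wait_and_click('GreeterLoginButton.png', 10)
    end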
diff --git a/features/support/helpers/net_helper.rb b/features/support/helpers/sniffing_helper.rb
index 29119195..213411eb 100644
--- a/features/support/helpers/net_helper.rb
+++ b/features/support/helpers/sniffing_helper.rb
@@ -14,15 +14,16 @@ class Sniffer
attr_reader :name, :pcap_file, :pid
- def initialize(name, bridge_name)
+ def initialize(name, vmnet)
@name = name
- @bridge_name = bridge_name
- @bridge_mac = File.open("/sys/class/net/#{@bridge_name}/address", "rb").read.chomp
- @pcap_file = "#{$tmp_dir}/#{name}.pcap"
+ @vmnet = vmnet
+ pcap_name = sanitize_filename("#{name}.pcap")
+ @pcap_file = "#{$config["TMPDIR"]}/#{pcap_name}"
end
- def capture(filter="not ether src host #{@bridge_mac} and not ether proto \\arp and not ether proto \\rarp")
- job = IO.popen("/usr/sbin/tcpdump -n -i #{@bridge_name} -w #{@pcap_file} -U '#{filter}' >/dev/null 2>&1")
+ def capture(filter="not ether src host #{@vmnet.bridge_mac} and not ether proto \\arp and not ether proto \\rarp")
+ job = IO.popen(["/usr/sbin/tcpdump", "-n", "-i", @vmnet.bridge_name, "-w",
+ @pcap_file, "-U", filter, :err => ["/dev/null", "w"]])
@pid = job.pid
end
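
The sniffer now captures on the VMNet bridge and writes into TMPDIR; a usage sketch where $vmnet is assumed to be the suite's VMNet instance and the capture name is a placeholder:

    sniffer = Sniffer.new(sanitize_filename('my scenario'), $vmnet)
    sniffer.capture    # default filter drops ARP/RARP and bridge-originated frames
    # ... run the network-facing part of the scenario ...
    Process.kill('TERM', sniffer.pid)
    debug_log("pcap written to #{sniffer.pcap_file}")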
diff --git a/features/support/helpers/sshd_helper.rb b/features/support/helpers/sshd_helper.rb
new file mode 100644
index 00000000..2e0069c0
--- /dev/null
+++ b/features/support/helpers/sshd_helper.rb
@@ -0,0 +1,67 @@
+require 'tempfile'
+
+class SSHServer
+ def initialize(sshd_host, sshd_port, authorized_keys = nil)
+ @sshd_host = sshd_host
+ @sshd_port = sshd_port
+ @authorized_keys = authorized_keys
+ @pid = nil
+ end
+
+ def start
+ @sshd_key_file = Tempfile.new("ssh_host_rsa_key", $config["TMPDIR"])
+ # 'hack' to prevent ssh-keygen from prompting to overwrite the file
+ File.delete(@sshd_key_file.path)
+ cmd_helper(['ssh-keygen', '-t', 'rsa', '-N', "", '-f', "#{@sshd_key_file.path}"])
+ @sshd_key_file.close
+
+ sshd_config =<<EOF
+Port #{@sshd_port}
+ListenAddress #{@sshd_host}
+UsePrivilegeSeparation no
+HostKey #{@sshd_key_file.path}
+Pidfile #{$config['TMPDIR']}/ssh.pid
+EOF
+
+ @sshd_config_file = Tempfile.new("sshd_config", $config["TMPDIR"])
+ @sshd_config_file.write(sshd_config)
+
+ if @authorized_keys
+ @authorized_keys_file = Tempfile.new("authorized_keys", $config['TMPDIR'])
+ @authorized_keys_file.write(@authorized_keys)
+ @authorized_keys_file.close
+ @sshd_config_file.write("AuthorizedKeysFile #{@authorized_keys_file.path}")
+ end
+
+ @sshd_config_file.close
+
+ cmd = ["/usr/sbin/sshd", "-4", "-f", @sshd_config_file.path, "-D"]
+
+ job = IO.popen(cmd)
+ @pid = job.pid
+ end
+
+ def stop
+ File.delete("#{@sshd_key_file.path}.pub")
+ File.delete("#{$config['TMPDIR']}/ssh.pid")
+ begin
+ Process.kill("TERM", @pid)
+ rescue
+ # noop
+ end
+ end
+
+ def active?
+ begin
+ ret = Process.kill(0, @pid)
+ rescue Errno::ESRCH => e
+ if e.message == "No such process"
+ return false
+ else
+ raise e
+ end
+ end
+ assert_equal(1, ret, "This shouldn't happen")
+ return true
+ end
+end
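
A sketch of driving the throw-away sshd; the address, port and key path are placeholders chosen for illustration:

    pub_key = File.read("#{$config['TMPDIR']}/ssh_client_key.pub")  # hypothetical path
    server = SSHServer.new('10.2.1.1', 2222, pub_key)
    server.start
    try_for(30, :msg => 'sshd did not come up') { server.active? }
    # ... let the system under test connect to 10.2.1.1:2222 ...
    server.stop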
diff --git a/features/support/helpers/storage_helper.rb b/features/support/helpers/storage_helper.rb
index 80a1e1e0..21537a92 100644
--- a/features/support/helpers/storage_helper.rb
+++ b/features/support/helpers/storage_helper.rb
@@ -7,30 +7,43 @@
# sense.
require 'libvirt'
+require 'guestfs'
require 'rexml/document'
require 'etc'
class VMStorage
- @@virt = nil
-
def initialize(virt, xml_path)
- @@virt ||= virt
+ @virt = virt
@xml_path = xml_path
pool_xml = REXML::Document.new(File.read("#{@xml_path}/storage_pool.xml"))
pool_name = pool_xml.elements['pool/name'].text
+ @pool_path = "#{$config["TMPDIR"]}/#{pool_name}"
begin
- @pool = @@virt.lookup_storage_pool_by_name(pool_name)
+ @pool = @virt.lookup_storage_pool_by_name(pool_name)
rescue Libvirt::RetrieveError
- # There's no pool with that name, so we don't have to clear it
- else
+ @pool = nil
+ end
+ if @pool and not(KEEP_SNAPSHOTS)
VMStorage.clear_storage_pool(@pool)
+ @pool = nil
+ end
+ unless @pool
+ pool_xml.elements['pool/target/path'].text = @pool_path
+ @pool = @virt.define_storage_pool_xml(pool_xml.to_s)
+ if not(Dir.exists?(@pool_path))
+ # We'd like to use @pool.build, which will just create the
+ # @pool_path directory, but it does so with root:root as owner
+ # (at least with libvirt 1.2.21-2). libvirt itself can handle
+ # that situation, but guestfs (at least with <=
+ # 1:1.28.12-1+b3) cannot when invoked by a non-root user,
+ # which we want to support.
+ FileUtils.mkdir(@pool_path)
+ FileUtils.chown(nil, 'libvirt-qemu', @pool_path)
+ FileUtils.chmod("ug+wrx", @pool_path)
+ end
end
- @pool_path = "#{$tmp_dir}/#{pool_name}"
- pool_xml.elements['pool/target/path'].text = @pool_path
- @pool = @@virt.define_storage_pool_xml(pool_xml.to_s)
- @pool.build
- @pool.create
+ @pool.create unless @pool.active?
@pool.refresh
end
@@ -65,10 +78,23 @@ class VMStorage
VMStorage.clear_storage_pool_volumes(@pool)
end
+ def delete_volume(name)
+ @pool.lookup_volume_by_name(name).delete
+ end
+
def create_new_disk(name, options = {})
options[:size] ||= 2
options[:unit] ||= "GiB"
options[:type] ||= "qcow2"
+ # Require 'slightly' more space to be available to give a bit more leeway
+ # with rounding, temp file creation, etc.
+ reserved = 500
+ needed = convert_to_MiB(options[:size].to_i, options[:unit])
+ avail = convert_to_MiB(get_free_space('host', @pool_path), "KiB")
+ assert(avail - reserved >= needed,
+ "Error creating disk \"#{name}\" in \"#{@pool_path}\". " \
+ "Need #{needed} MiB but only #{avail} MiB is available of " \
+ "which #{reserved} MiB is reserved for other temporary files.")
begin
old_vol = @pool.lookup_volume_by_name(name)
rescue Libvirt::RetrieveError
@@ -116,28 +142,75 @@ class VMStorage
@pool.lookup_volume_by_name(name).path
end
- # We use parted for the disk_mk* functions since it can format
- # partitions "inside" the super block device; mkfs.* need a
- # partition device (think /dev/sdaX), so we'd have to use something
- # like losetup or kpartx, which would require administrative
- # privileges. These functions only work for raw disk images.
-
- # TODO: We should switch to guestfish/libguestfs (which has
- # ruby-bindings) so we could use qcow2 instead of raw, and more
- # easily use LVM volumes.
-
- # For type, see label-type for mklabel in parted(8)
- def disk_mklabel(name, type)
- assert_equal("raw", disk_format(name))
- path = disk_path(name)
- cmd_helper("/sbin/parted -s '#{path}' mklabel #{type}")
+ def disk_mklabel(name, parttype)
+ disk = {
+ :path => disk_path(name),
+ :opts => {
+ :format => disk_format(name)
+ }
+ }
+ guestfs_disk_helper(disk) do |g, disk_handle|
+ g.part_init(disk_handle, parttype)
+ end
end
- # For fstype, see fs-type for mkfs in parted(8)
- def disk_mkpartfs(name, fstype)
- assert(disk_format(name), "raw")
- path = disk_path(name)
- cmd_helper("/sbin/parted -s '#{path}' mkpartfs primary '#{fstype}' 0% 100%")
+ def disk_mkpartfs(name, parttype, fstype, opts = {})
+ opts[:label] ||= nil
+ opts[:luks_password] ||= nil
+ disk = {
+ :path => disk_path(name),
+ :opts => {
+ :format => disk_format(name)
+ }
+ }
+ guestfs_disk_helper(disk) do |g, disk_handle|
+ g.part_disk(disk_handle, parttype)
+ g.part_set_name(disk_handle, 1, opts[:label]) if opts[:label]
+ primary_partition = g.list_partitions()[0]
+ if opts[:luks_password]
+ g.luks_format(primary_partition, opts[:luks_password], 0)
+ luks_mapping = File.basename(primary_partition) + "_unlocked"
+ g.luks_open(primary_partition, opts[:luks_password], luks_mapping)
+ luks_dev = "/dev/mapper/#{luks_mapping}"
+ g.mkfs(fstype, luks_dev)
+ g.luks_close(luks_dev)
+ else
+ g.mkfs(fstype, primary_partition)
+ end
+ end
+ end
+
+ def disk_mkswap(name, parttype)
+ disk = {
+ :path => disk_path(name),
+ :opts => {
+ :format => disk_format(name)
+ }
+ }
+ guestfs_disk_helper(disk) do |g, disk_handle|
+ g.part_disk(disk_handle, parttype)
+ primary_partition = g.list_partitions()[0]
+ g.mkswap(primary_partition)
+ end
+ end
+
+ def guestfs_disk_helper(*disks)
+ assert(block_given?)
+ g = Guestfs::Guestfs.new()
+ g.set_trace(1)
+ message_callback = Proc.new do |event, _, message, _|
+ debug_log("libguestfs: #{Guestfs.event_to_string(event)}: #{message}")
+ end
+ g.set_event_callback(message_callback,
+ Guestfs::EVENT_TRACE)
+ g.set_autosync(1)
+ disks.each do |disk|
+ g.add_drive_opts(disk[:path], disk[:opts])
+ end
+ g.launch()
+ yield(g, *g.list_devices())
+ ensure
+ g.close
end
end
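
The parted-based disk_mk* helpers are replaced by libguestfs, which also works on qcow2 volumes; a sketch of preparing a small LUKS-encrypted disk, assuming $vm.storage is the suite's VMStorage instance and the disk name, label and passphrase are placeholders:

    storage = $vm.storage
    storage.create_new_disk('test-luks', :size => 2, :unit => 'GiB', :type => 'qcow2')
    # Partitions the disk, creates a LUKS container on the first partition
    # and formats it with ext4 inside the container.
    storage.disk_mkpartfs('test-luks', 'gpt', 'ext4',
                          :label => 'TestData', :luks_password => 'asdf')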
diff --git a/features/support/helpers/vm_helper.rb b/features/support/helpers/vm_helper.rb
index 2b5ad291..6d7204d4 100644
--- a/features/support/helpers/vm_helper.rb
+++ b/features/support/helpers/vm_helper.rb
@@ -1,79 +1,122 @@
require 'libvirt'
require 'rexml/document'
-class VM
+class ExecutionFailedInVM < StandardError
+end
+
+class VMNet
- # These class attributes will be lazily initialized during the first
- # instantiation:
- # This is the libvirt connection, of which we only want one and
- # which can persist for different VM instances (even in parallel)
- @@virt = nil
- # This is a storage helper that deals with volume manipulation. The
- # storage it deals with persists across VMs, by necessity.
- @@storage = nil
+ attr_reader :net_name, :net
- def VM.storage
- return @@storage
+ def initialize(virt, xml_path)
+ @virt = virt
+ @net_name = LIBVIRT_NETWORK_NAME
+ net_xml = File.read("#{xml_path}/default_net.xml")
+ rexml = REXML::Document.new(net_xml)
+ rexml.elements['network'].add_element('name')
+ rexml.elements['network/name'].text = @net_name
+ rexml.elements['network'].add_element('uuid')
+ rexml.elements['network/uuid'].text = LIBVIRT_NETWORK_UUID
+ update(rexml.to_s)
+ rescue Exception => e
+ destroy_and_undefine
+ raise e
end
- def storage
- return @@storage
+ # We lookup by name so we also catch networks from previous test
+ # suite runs that weren't properly cleaned up (e.g. aborted).
+ def destroy_and_undefine
+ begin
+ old_net = @virt.lookup_network_by_name(@net_name)
+ old_net.destroy if old_net.active?
+ old_net.undefine
+ rescue
+ end
end
- attr_reader :domain, :display, :ip, :net
+ def update(xml)
+ destroy_and_undefine
+ @net = @virt.define_network_xml(xml)
+ @net.create
+ end
+
+ def bridge_name
+ @net.bridge_name
+ end
+
+ def bridge_ip_addr
+ net_xml = REXML::Document.new(@net.xml_desc)
+ IPAddr.new(net_xml.elements['network/ip'].attributes['address']).to_s
+ end
+
+ def guest_real_mac
+ net_xml = REXML::Document.new(@net.xml_desc)
+ net_xml.elements['network/ip/dhcp/host/'].attributes['mac']
+ end
- def initialize(xml_path, x_display)
- @@virt ||= Libvirt::open("qemu:///system")
+ def bridge_mac
+ File.open("/sys/class/net/#{bridge_name}/address", "rb").read.chomp
+ end
+end
+
+
+class VM
+
+ attr_reader :domain, :display, :vmnet, :storage
+
+ def initialize(virt, xml_path, vmnet, storage, x_display)
+ @virt = virt
@xml_path = xml_path
+ @vmnet = vmnet
+ @storage = storage
+ @domain_name = LIBVIRT_DOMAIN_NAME
default_domain_xml = File.read("#{@xml_path}/default.xml")
- update_domain(default_domain_xml)
- default_net_xml = File.read("#{@xml_path}/default_net.xml")
- update_net(default_net_xml)
+ rexml = REXML::Document.new(default_domain_xml)
+ rexml.elements['domain'].add_element('name')
+ rexml.elements['domain/name'].text = @domain_name
+ rexml.elements['domain'].add_element('uuid')
+ rexml.elements['domain/uuid'].text = LIBVIRT_DOMAIN_UUID
+ update(rexml.to_s)
@display = Display.new(@domain_name, x_display)
- set_cdrom_boot($tails_iso)
+ set_cdrom_boot(TAILS_ISO)
plug_network
- # unlike the domain and net the storage pool should survive VM
- # teardown (so a new instance can use e.g. a previously created
- # USB drive), so we only create a new one if there is none.
- @@storage ||= VMStorage.new(@@virt, xml_path)
rescue Exception => e
- clean_up_net
- clean_up_domain
+ destroy_and_undefine
raise e
end
- def update_domain(xml)
- domain_xml = REXML::Document.new(xml)
- @domain_name = domain_xml.elements['domain/name'].text
- clean_up_domain
- @domain = @@virt.define_domain_xml(xml)
- end
-
- def update_net(xml)
- net_xml = REXML::Document.new(xml)
- @net_name = net_xml.elements['network/name'].text
- @ip = net_xml.elements['network/ip/dhcp/host/'].attributes['ip']
- clean_up_net
- @net = @@virt.define_network_xml(xml)
- @net.create
+ def update(xml)
+ destroy_and_undefine
+ @domain = @virt.define_domain_xml(xml)
end
- def clean_up_domain
+ # We lookup by name so we also catch domains from previous test
+ # suite runs that weren't properly cleaned up (e.g. aborted).
+ def destroy_and_undefine
+ @display.stop if @display && @display.active?
begin
- domain = @@virt.lookup_domain_by_name(@domain_name)
- domain.destroy if domain.active?
- domain.undefine
+ old_domain = @virt.lookup_domain_by_name(@domain_name)
+ old_domain.destroy if old_domain.active?
+ old_domain.undefine
rescue
end
end
- def clean_up_net
- begin
- net = @@virt.lookup_network_by_name(@net_name)
- net.destroy if net.active?
- net.undefine
- rescue
- end
+ def real_mac
+ @vmnet.guest_real_mac
+ end
+
+ def set_hardware_clock(time)
+ assert(not(is_running?), 'The hardware clock cannot be set when the ' +
+ 'VM is running')
+ assert(time.instance_of?(Time), "Argument must be of type 'Time'")
+ adjustment = (time - Time.now).to_i
+ domain_rexml = REXML::Document.new(@domain.xml_desc)
+ clock_rexml_element = domain_rexml.elements['domain'].add_element('clock')
+ clock_rexml_element.add_attributes('offset' => 'variable',
+ 'basis' => 'utc',
+ 'adjustment' => adjustment.to_s)
+ update(domain_rexml.to_s)
end
def set_network_link_state(state)
@@ -82,7 +125,7 @@ class VM
if is_running?
@domain.update_device(domain_xml.elements['domain/devices/interface'].to_s)
else
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
end
end
@@ -94,97 +137,101 @@ class VM
set_network_link_state('down')
end
- def set_cdrom_tray_state(state)
- domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements.each('domain/devices/disk') do |e|
- if e.attribute('device').to_s == "cdrom"
- e.elements['target'].attributes['tray'] = state
- if is_running?
- @domain.update_device(e.to_s)
- else
- update_domain(domain_xml.to_s)
- end
- end
- end
- end
-
- def eject_cdrom
- set_cdrom_tray_state('open')
- end
-
- def close_cdrom
- set_cdrom_tray_state('closed')
- end
-
def set_boot_device(dev)
if is_running?
raise "boot settings can only be set for inactive vms"
end
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements['domain/os/boot'].attributes['dev'] = dev
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
end
def set_cdrom_image(image)
+ image = nil if image == ''
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements.each('domain/devices/disk') do |e|
if e.attribute('device').to_s == "cdrom"
- if ! e.elements['source']
- e.add_element('source')
+ if image.nil?
+ e.elements.delete('source')
+ else
+ if ! e.elements['source']
+ e.add_element('source')
+ end
+ e.elements['source'].attributes['file'] = image
end
- e.elements['source'].attributes['file'] = image
if is_running?
- @domain.update_device(e.to_s, Libvirt::Domain::DEVICE_MODIFY_FORCE)
+ @domain.update_device(e.to_s)
else
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
end
end
end
end
def remove_cdrom
- set_cdrom_image('')
+ set_cdrom_image(nil)
+ rescue Libvirt::Error => e
+ # While the CD-ROM is removed successfully we still get this
+ # error, so let's ignore it.
+ acceptable_error =
+ "Call to virDomainUpdateDeviceFlags failed: internal error: unable to " +
+ "execute QEMU command 'eject': (Tray of device '.*' is not open|" +
+ "Device '.*' is locked)"
+ raise e if not(Regexp.new(acceptable_error).match(e.to_s))
end
def set_cdrom_boot(image)
if is_running?
- raise "boot settings can only be set for inactice vms"
+ raise "boot settings can only be set for inactive vms"
end
set_boot_device('cdrom')
set_cdrom_image(image)
- close_cdrom
end
- def plug_drive(name, type)
- # Get the next free /dev/sdX on guest
- used_devs = []
+ def list_disk_devs
+ ret = []
domain_xml = REXML::Document.new(@domain.xml_desc)
- domain_xml.elements.each('domain/devices/disk/target') do |e|
- used_devs <<= e.attribute('dev').to_s
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ ret << e.elements['target'].attribute('dev').to_s
+ end
+ return ret
+ end
+
+ def plug_drive(name, type)
+ if disk_plugged?(name)
+ raise "disk '#{name}' already plugged"
end
+ removable_usb = nil
+ case type
+ when "removable usb", "usb"
+ type = "usb"
+ removable_usb = "on"
+ when "non-removable usb"
+ type = "usb"
+ removable_usb = "off"
+ end
+ # Get the next free /dev/sdX on guest
letter = 'a'
dev = "sd" + letter
- while used_devs.include? dev
+ while list_disk_devs.include?(dev)
letter = (letter[0].ord + 1).chr
dev = "sd" + letter
end
assert letter <= 'z'
xml = REXML::Document.new(File.read("#{@xml_path}/disk.xml"))
- xml.elements['disk/source'].attributes['file'] = @@storage.disk_path(name)
- xml.elements['disk/driver'].attributes['type'] = @@storage.disk_format(name)
+ xml.elements['disk/source'].attributes['file'] = @storage.disk_path(name)
+ xml.elements['disk/driver'].attributes['type'] = @storage.disk_format(name)
xml.elements['disk/target'].attributes['dev'] = dev
xml.elements['disk/target'].attributes['bus'] = type
- if type == "usb"
- xml.elements['disk/target'].attributes['removable'] = 'on'
- end
+ xml.elements['disk/target'].attributes['removable'] = removable_usb if removable_usb
if is_running?
@domain.attach_device(xml.to_s)
else
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements['domain/devices'].add_element(xml)
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
end
end
@@ -192,7 +239,7 @@ class VM
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements.each('domain/devices/disk') do |e|
begin
- if e.elements['source'].attribute('file').to_s == @@storage.disk_path(name)
+ if e.elements['source'].attribute('file').to_s == @storage.disk_path(name)
return e.to_s
end
rescue
@@ -202,25 +249,64 @@ class VM
return nil
end
+ def disk_rexml_desc(name)
+ xml = disk_xml_desc(name)
+ if xml
+ return REXML::Document.new(xml)
+ else
+ return nil
+ end
+ end
+
def unplug_drive(name)
xml = disk_xml_desc(name)
@domain.detach_device(xml)
end
+ def disk_type(dev)
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ if e.elements['target'].attribute('dev').to_s == dev
+ return e.elements['driver'].attribute('type').to_s
+ end
+ end
+ raise "No such disk device '#{dev}'"
+ end
+
def disk_dev(name)
- xml = REXML::Document.new(disk_xml_desc(name))
- return "/dev/" + xml.elements['disk/target'].attribute('dev').to_s
+ rexml = disk_rexml_desc(name) or return nil
+ return "/dev/" + rexml.elements['disk/target'].attribute('dev').to_s
+ end
+
+ def disk_name(dev)
+ dev = File.basename(dev)
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ if /^#{e.elements['target'].attribute('dev').to_s}/.match(dev)
+ return File.basename(e.elements['source'].attribute('file').to_s)
+ end
+ end
+ raise "No such disk device '#{dev}'"
+ end
+
+ def udisks_disk_dev(name)
+ return disk_dev(name).gsub('/dev/', '/org/freedesktop/UDisks/devices/')
end
def disk_detected?(name)
- return execute("test -b #{disk_dev(name)}").success?
+ dev = disk_dev(name) or return false
+ return execute("test -b #{dev}").success?
+ end
+
+ def disk_plugged?(name)
+ return not(disk_xml_desc(name).nil?)
end
def set_disk_boot(name, type)
if is_running?
raise "boot settings can only be set for inactive vms"
end
- plug_drive(name, type)
+ plug_drive(name, type) if not(disk_plugged?(name))
set_boot_device('hd')
# For some reason setting the boot device doesn't prevent cdrom
# boot unless it's empty
@@ -231,14 +317,19 @@ class VM
# XXX-9p in common_steps.rb for more information.
def add_share(source, tag)
if is_running?
- raise "shares can only be added to inactice vms"
+ raise "shares can only be added to inactive vms"
end
+ # The complete source directory must be group readable by the user
+ # running the virtual machine, and world readable so the user inside
+ # the VM can access it (since we use the passthrough security model).
+ FileUtils.chown_R(nil, "libvirt-qemu", source)
+ FileUtils.chmod_R("go+rX", source)
xml = REXML::Document.new(File.read("#{@xml_path}/fs_share.xml"))
xml.elements['filesystem/source'].attributes['dir'] = source
xml.elements['filesystem/target'].attributes['dir'] = tag
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements['domain/devices'].add_element(xml)
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
end
def list_shares
@@ -251,13 +342,13 @@ class VM
end
def set_ram_size(size, unit = "KiB")
- raise "System memory can only be added to inactice vms" if is_running?
+ raise "System memory can only be added to inactive vms" if is_running?
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements['domain/memory'].text = size
domain_xml.elements['domain/memory'].attributes['unit'] = unit
domain_xml.elements['domain/currentMemory'].text = size
domain_xml.elements['domain/currentMemory'].attributes['unit'] = unit
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
end
def get_ram_size_in_bytes
@@ -268,24 +359,24 @@ class VM
end
def set_arch(arch)
- raise "System architecture can only be set to inactice vms" if is_running?
+ raise "System architecture can only be set to inactive vms" if is_running?
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements['domain/os/type'].attributes['arch'] = arch
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
end
def add_hypervisor_feature(feature)
- raise "Hypervisor features can only be added to inactice vms" if is_running?
+ raise "Hypervisor features can only be added to inactive vms" if is_running?
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements['domain/features'].add_element(feature)
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
end
def drop_hypervisor_feature(feature)
- raise "Hypervisor features can only be fropped from inactice vms" if is_running?
+ raise "Hypervisor features can only be fropped from inactive vms" if is_running?
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements['domain/features'].delete_element(feature)
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
end
def disable_pae_workaround
@@ -295,24 +386,24 @@ class VM
xml = <<EOF
<qemu:commandline xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
<qemu:arg value='-cpu'/>
- <qemu:arg value='pentium,-pae'/>
+ <qemu:arg value='qemu32,-pae'/>
</qemu:commandline>
EOF
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements['domain'].add_element(REXML::Document.new(xml))
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
end
def set_os_loader(type)
if is_running?
- raise "boot settings can only be set for inactice vms"
+ raise "boot settings can only be set for inactive vms"
end
if type == 'UEFI'
domain_xml = REXML::Document.new(@domain.xml_desc)
domain_xml.elements['domain/os'].add_element(REXML::Document.new(
'<loader>/usr/share/ovmf/OVMF.fd</loader>'
))
- update_domain(domain_xml.to_s)
+ update(domain_xml.to_s)
else
raise "unsupported OS loader type"
end
@@ -326,21 +417,38 @@ EOF
end
end
- def execute(cmd, user = "root")
- return VMCommand.new(self, cmd, { :user => user, :spawn => false })
+ def execute(cmd, options = {})
+ options[:user] ||= "root"
+ options[:spawn] ||= false
+ if options[:libs]
+ libs = options[:libs]
+ options.delete(:libs)
+ libs = [libs] if not(libs.methods.include? :map)
+ cmds = libs.map do |lib_name|
+ ". /usr/local/lib/tails-shell-library/#{lib_name}.sh"
+ end
+ cmds << cmd
+ cmd = cmds.join(" && ")
+ end
+ return VMCommand.new(self, cmd, options)
end
- def execute_successfully(cmd, user = "root")
- p = execute(cmd, user)
- assert_vmcommand_success(p)
+ def execute_successfully(*args)
+ p = execute(*args)
+ begin
+ assert_vmcommand_success(p)
+ rescue Test::Unit::AssertionFailedError => e
+ raise ExecutionFailedInVM.new(e)
+ end
return p
end
- def spawn(cmd, user = "root")
- return VMCommand.new(self, cmd, { :user => user, :spawn => true })
+ def spawn(cmd, options = {})
+ options[:spawn] = true
+ return execute(cmd, options)
end
- def wait_until_remote_shell_is_up(timeout = 30)
+ def wait_until_remote_shell_is_up(timeout = 90)
VMCommand.wait_until_remote_shell_is_up(self, timeout)
end
@@ -361,32 +469,182 @@ EOF
return execute("pidof -x -o '%PPID' " + process).stdout.chomp.split
end
+ def select_virtual_desktop(desktop_number, user = LIVE_USER)
+ assert(desktop_number >= 0 && desktop_number <=3,
+ "Only values between 0 and 3 are valid virtual desktop numbers")
+ execute_successfully(
+ "xdotool set_desktop '#{desktop_number}'",
+ :user => user
+ )
+ end
+
+ def focus_window(window_title, user = LIVE_USER)
+ def do_focus(window_title, user)
+ execute_successfully(
+ "xdotool search --name '#{window_title}' windowactivate --sync",
+ :user => user
+ )
+ end
+
+ begin
+ do_focus(window_title, user)
+ rescue ExecutionFailedInVM
+ # Often when xdotool fails to focus a window it'll work when retried
+ # after redrawing the screen. Switching to a new virtual desktop then
+ # back seems to be a reliable way to handle this.
+ select_virtual_desktop(3)
+ select_virtual_desktop(0)
+ sleep 5 # there aren't any visual indicators which can be used here
+ do_focus(window_title, user)
+ end
+ end
+
def file_exist?(file)
- execute("test -e #{file}").success?
+ execute("test -e '#{file}'").success?
+ end
+
+ def directory_exist?(directory)
+ execute("test -d '#{directory}'").success?
end
def file_content(file, user = 'root')
# We don't quote #{file} on purpose: we sometimes pass environment variables
# or globs that we want to be interpreted by the shell.
- cmd = execute("cat #{file}", user)
+ cmd = execute("cat #{file}", :user => user)
assert(cmd.success?,
"Could not cat '#{file}':\n#{cmd.stdout}\n#{cmd.stderr}")
return cmd.stdout
end
- def save_snapshot(path)
- @domain.save(path)
- @display.stop
+ def file_append(file, lines, user = 'root')
+ lines = lines.split("\n") if lines.class == String
+ lines.each do |line|
+ cmd = execute("echo '#{line}' >> '#{file}'", :user => user)
+ assert(cmd.success?,
+ "Could not append to '#{file}':\n#{cmd.stdout}\n#{cmd.stderr}")
+ end
+ end
+
+ def set_clipboard(text)
+ execute_successfully("echo -n '#{text}' | xsel --input --clipboard",
+ :user => LIVE_USER)
end
- def restore_snapshot(path)
- # Clean up current domain so its snapshot can be restored
- clean_up_domain
- Libvirt::Domain::restore(@@virt, path)
- @domain = @@virt.lookup_domain_by_name(@domain_name)
+ def get_clipboard
+ execute_successfully("xsel --output --clipboard", :user => LIVE_USER).stdout
+ end
+
+ def internal_snapshot_xml(name)
+ disk_devs = list_disk_devs
+ disks_xml = " <disks>\n"
+ for dev in disk_devs
+ snapshot_type = disk_type(dev) == "qcow2" ? 'internal' : 'no'
+ disks_xml +=
+ " <disk name='#{dev}' snapshot='#{snapshot_type}'></disk>\n"
+ end
+ disks_xml += " </disks>"
+ return <<-EOF
+<domainsnapshot>
+ <name>#{name}</name>
+ <description>Snapshot for #{name}</description>
+#{disks_xml}
+ </domainsnapshot>
+EOF
+ end
+
+ def VM.ram_only_snapshot_path(name)
+ return "#{$config["TMPDIR"]}/#{name}-snapshot.memstate"
+ end
+
+ def save_snapshot(name)
+ # If we have no qcow2 disk device, we'll use "memory state"
+ # snapshots, and if we have at least one qcow2 disk device, we'll
+ # use internal "system checkpoint" (memory + disks) snapshots. We
+ # have to do this since internal snapshots don't work when no
+ # such disk is available. We can do this with external snapshots,
+ # which are better in many ways, but libvirt doesn't know how to
+ # restore (revert back to) them yet.
+ # WARNING: If only transient disks, i.e. disks that were plugged
+ # after starting the domain, are used then the memory state will
+ # be dropped. External snapshots would also fix this.
+ internal_snapshot = false
+ domain_xml = REXML::Document.new(@domain.xml_desc)
+ domain_xml.elements.each('domain/devices/disk') do |e|
+ if e.elements['driver'].attribute('type').to_s == "qcow2"
+ internal_snapshot = true
+ break
+ end
+ end
+
+ # Note: In this case the "opposite" of `internal_snapshot` is not
+ # anything relating to external snapshots, but actually "memory
+ # state"(-only) snapshots.
+ if internal_snapshot
+ xml = internal_snapshot_xml(name)
+ @domain.snapshot_create_xml(xml)
+ else
+ snapshot_path = VM.ram_only_snapshot_path(name)
+ @domain.save(snapshot_path)
+ # For consistency with the internal snapshot case (which is
+ # "live", so the domain doesn't go down) we immediately restore
+ # the snapshot.
+ # Assumption: that *immediate* save + restore doesn't mess up
+ # with network state and similar, and is fast enough to not make
+ # the clock drift too much.
+ restore_snapshot(name)
+ end
+ end
+
+ def restore_snapshot(name)
+ @domain.destroy if is_running?
+ @display.stop if @display and @display.active?
+ # See comment in save_snapshot() for details on why we use two
+ # different type of snapshots.
+ potential_ram_only_snapshot_path = VM.ram_only_snapshot_path(name)
+ if File.exist?(potential_ram_only_snapshot_path)
+ Libvirt::Domain::restore(@virt, potential_ram_only_snapshot_path)
+ @domain = @virt.lookup_domain_by_name(@domain_name)
+ else
+ begin
+ potential_internal_snapshot = @domain.lookup_snapshot_by_name(name)
+ @domain.revert_to_snapshot(potential_internal_snapshot)
+ rescue Libvirt::RetrieveError
+ raise "No such (internal nor external) snapshot #{name}"
+ end
+ end
@display.start
end
+ def VM.remove_snapshot(name)
+ old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME)
+ potential_ram_only_snapshot_path = VM.ram_only_snapshot_path(name)
+ if File.exist?(potential_ram_only_snapshot_path)
+ File.delete(potential_ram_only_snapshot_path)
+ else
+ snapshot = old_domain.lookup_snapshot_by_name(name)
+ snapshot.delete
+ end
+ end
+
+ def VM.snapshot_exists?(name)
+ return true if File.exist?(VM.ram_only_snapshot_path(name))
+ old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME)
+ snapshot = old_domain.lookup_snapshot_by_name(name)
+ return snapshot != nil
+ rescue Libvirt::RetrieveError
+ return false
+ end
+
+ def VM.remove_all_snapshots
+ Dir.glob("#{$config["TMPDIR"]}/*-snapshot.memstate").each do |file|
+ File.delete(file)
+ end
+ old_domain = $virt.lookup_domain_by_name(LIBVIRT_DOMAIN_NAME)
+ old_domain.list_all_snapshots.each { |snapshot| snapshot.delete }
+ rescue Libvirt::RetrieveError
+ # No such domain, so no snapshots either.
+ end
+
def start
return if is_running?
@domain.create
@@ -394,9 +652,7 @@ EOF
end
def reset
- # ruby-libvirt 0.4 does not support the reset method.
- # XXX: Once we use Jessie, use @domain.reset instead.
- system("virsh -c qemu:///system reset " + @domain_name) if is_running?
+ @domain.reset if is_running?
end
def power_off
@@ -404,12 +660,6 @@ EOF
@display.stop
end
- def destroy
- clean_up_domain
- clean_up_net
- power_off
- end
-
def take_screenshot(description)
@display.take_screenshot(description)
end
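
Finally, a sketch of the boot-and-snapshot cycle the rewritten VM class is meant to support, assuming the globals ($vm, $config) and constants set up by the environment hooks; the snapshot name and window title are illustrative:

    if VM.snapshot_exists?('tor-bootstrapped')
      $vm.restore_snapshot('tor-bootstrapped')
    else
      $vm.start
      $vm.wait_until_remote_shell_is_up
      wait_until_tor_is_working                 # helper from misc_helpers.rb above
      $vm.save_snapshot('tor-bootstrapped')
    end
    $vm.focus_window('Tor Browser', LIVE_USER)  # window title is a placeholder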