author     Steve Kemp <steve@steve.org.uk>  2015-06-03 14:59:43 +0100
committer  Steve Kemp <steve@steve.org.uk>  2015-06-03 14:59:43 +0100
commit     96ca2e6f659c8d15c6d080c58226e4ebd9162f25 (patch)
tree       7cc251e2ae33301599b55b7eda5bc4f9747ec917
parent     c9c52d21442b763d63cc1cda7e15e59f8727f52a (diff)
Reformatted the code to lose literal TABs, etc.
-rw-r--r--  lib/byteback/backup_directory.rb   271
-rw-r--r--  lib/byteback/disk_free_history.rb  212
-rw-r--r--  lib/byteback/log.rb                122
-rw-r--r--  lib/byteback/util.rb               116
4 files changed, 369 insertions, 352 deletions
diff --git a/lib/byteback/backup_directory.rb b/lib/byteback/backup_directory.rb
index f485297..ceb2490 100644
--- a/lib/byteback/backup_directory.rb
+++ b/lib/byteback/backup_directory.rb
@@ -1,28 +1,27 @@
module Byteback
+ # Represents a particular timestamped backup directory
+ class Snapshot
+ class << self
+ # What order to remove snapshots in to regain disk space?
+ #
+ # Order backups by their closeness to defined backup times, which are
+ # listed in a set order (i.e. today's backup is more important than yesterday's).
+ #
+ BACKUP_IMPORTANCE = [1, 2, 7, 14, 21, 28, 56, 112]
- # Represents a particular timestamped backup directory
- class Snapshot
- class << self
- # What order to remove snapshots in to regain disk space?
- #
- # Order backups by their closeness to defined backup times, which are
- # listed in a set order (i.e. today's backup is more important than yesterday's).
- #
- BACKUP_IMPORTANCE = [1, 2, 7, 14, 21, 28, 56, 112]
+ def sort_by_importance(snapshots_unsorted, now = Time.now)
+ return snapshots_unsorted if snapshots_unsorted.size < 1
- def sort_by_importance(snapshots_unsorted, now=Time.now)
- return snapshots_unsorted if ( snapshots_unsorted.size < 1 )
-
- #
+ #
# Keep the last 7 days backups
#
snapshots_sorted = []
snapshots_unsorted = snapshots_unsorted.sort_by(&:time).reverse
-
+
#
# Group snapshots by host
#
- snapshots_by_host = Hash.new{|h,k| h[k] = []}
+ snapshots_by_host = Hash.new { |h, k| h[k] = [] }
snapshots_unsorted.each do |snapshot|
snapshots_by_host[snapshot.host] << snapshot
@@ -31,31 +30,31 @@ module Byteback
#
# We want the snapshot nearest to the middle of the day each day.
#
- today_midday = Time.mktime(*([0,0,12]+now.utc.to_a.last(7)))
+ today_midday = Time.mktime(*([0, 0, 12] + now.utc.to_a.last(7)))
#
# We want today, and the previous seven days
#
targets = [today_midday]
- targets += 6.times.map{ today_midday -= 86400 }
+ targets += 6.times.map { today_midday -= 86_400 }
#
# Now the previous four Sundays (we should bump on a week if today is a Sunday!)
#
- today_midday -= (today_midday.wday == 0 ? 7 : today_midday.wday )*86400
+ today_midday -= (today_midday.wday == 0 ? 7 : today_midday.wday) * 86_400
targets << today_midday
- targets += 3.times.map{ today_midday -= 7*86400 }
+ targets += 3.times.map { today_midday -= 7 * 86_400 }
#
# Our 28 day periods are anchored on Time.at(0). However this was a
# Thursday, so we have to add 3 days to get it to Sunday.
#
- targets << (today_midday -= ((today_midday.to_i / 86400.0).floor % 28 - 3)*86400)
+ targets << (today_midday -= ((today_midday.to_i / 86_400.0).floor % 28 - 3) * 86_400)
#
# Continue removing 28 day periods until we get beyond the oldest backup time.
#
- targets << (today_midday -= 28*86400) while today_midday > snapshots_unsorted.last.time
+ targets << (today_midday -= 28 * 86_400) while today_midday > snapshots_unsorted.last.time
#
# This hash records the last nearest snapshot for each host
@@ -68,22 +67,18 @@ module Byteback
targets.each do |target|
snapshots_by_host.each do |host, snapshots|
next if snapshots.empty?
-
- nearest = snapshots.sort{|a,b| (a.time - target).abs <=> (b.time - target).abs }.first
-
+ nearest = snapshots.sort { |a, b| (a.time - target).abs <=> (b.time - target).abs }.first
#
- # Don't process any more if the last snapshot for this
# host was more recent, i.e. we've reached the oldest, and are
# bouncing back again.
#
- if last_nearest[host].nil? or last_nearest[host].time > nearest.time
+ if last_nearest[host].nil? || last_nearest[host].time > nearest.time
last_nearest[host] = nearest
snapshots_by_host[host] -= [nearest]
snapshots_sorted << nearest
end
-
end
-
end
#
@@ -92,124 +87,122 @@ module Byteback
snapshots_unsorted -= snapshots_sorted
snapshots_sorted += snapshots_unsorted
- snapshots_sorted
- end
- end
+ snapshots_sorted
+ end
+ end
- attr_reader :backup_directory, :path
+ attr_reader :backup_directory, :path
- def initialize(backup_directory, snapshot_path)
- @backup_directory = backup_directory
- @path = snapshot_path
- @time = Time.parse(File.basename(path)) # throws ArgumentError if it can't parse
- nil
- end
+ def initialize(backup_directory, snapshot_path)
+ @backup_directory = backup_directory
+ @path = snapshot_path
+ @time = Time.parse(File.basename(path)) # throws ArgumentError if it can't parse
+ nil
+ end
- def time
- @time
- end
+ attr_reader :time
def host
File.basename(File.dirname(path))
end
- def <=>(b)
- time <=> b.time
- end
-
- def create!(from)
- system_no_error("/sbin/btrfs subvolume snapshot #{from} #{path}")
- end
-
- def delete!
- system_no_error("/sbin/btrfs subvolume delete #{path}")
- end
-
- # Returns the size of the given snapshot (runs du, may be slow)
- #
- # Would much prefer to take advantage of this feature:
- # http://dustymabe.com/2013/09/22/btrfs-how-big-are-my-snapshots/
- # but it's not currently in Debian/wheezy.
- #
- def du
- `du -s -b #{path}`.to_i
- end
-
- protected
-
- def system_no_error(*args)
- args[-1] += " > /dev/null" unless @verbose
- raise RuntimeError.new("Command failed: "+args.join(" ")) unless
- system(*args)
- end
- end
-
- # Represent a directory full of backups where "current" is a subvolume
- # which is snapshotted to frozen backup directories called e.g.
- # "yyyy-mm-ddThh:mm+zzzz".
- #
- class BackupDirectory
- class << self
- # Return all backup directories
- #
- def all
- Dir.new(ENV['HOME']).entries.map do |entry|
- next if entry[0] == '.'
- name = File.expand_path(ENV['HOME'] + "/" + entry)
- File.directory?(name + "/current") ? BackupDirectory.new(name) : nil
- end.
- compact
- end
-
- # Returns every snapshot in every backup directory
- #
- def all_snapshots
- all.map { |dir| dir.snapshots }.flatten
- end
- end
-
- attr_reader :dir
-
- def initialize(dir)
- @dir = Dir.new(dir)
- raise Errno::ENOENT unless File.directory?(dir)
- current
- end
-
- # Return total amount of free space in backup directory (bytes)
- #
- def free
- df = DiskFree.new(@dir.path)
- df.total - df.used
- end
-
- # Return an array of Times representing the current list of
- # snapshots.
- #
- def snapshots
- @dir.entries.map do |entry|
- next if entry[0] == '.' || entry == 'current'
- snapshot_path = File.expand_path(@dir.path + "/" + entry)
- next unless File.directory?(snapshot_path)
- begin
- Snapshot.new(self, snapshot_path)
- rescue ArgumentError => ae
- # directory name must represent a parseable Time
- nil
- end
- end.
- compact
- end
-
- # Create a new snapshot of 'current'
- #
- def new_snapshot!(time = Time.now)
- snapshot_path = time.strftime(dir.path + "/%Y-%m-%dT%H:%M%z")
- Snapshot.new(self, snapshot_path).create!(current.path)
- end
-
- def current
- Dir.new("#{dir.path}/current")
- end
- end
+ def <=>(b)
+ time <=> b.time
+ end
+
+ def create!(from)
+ system_no_error("/sbin/btrfs subvolume snapshot #{from} #{path}")
+ end
+
+ def delete!
+ system_no_error("/sbin/btrfs subvolume delete #{path}")
+ end
+
+ # Returns the size of the given snapshot (runs du, may be slow)
+ #
+ # Would much prefer to take advantage of this feature:
+ # http://dustymabe.com/2013/09/22/btrfs-how-big-are-my-snapshots/
+ # but it's not currently in Debian/wheezy.
+ #
+ def du
+ `du -s -b #{path}`.to_i
+ end
+
+ protected
+
+ def system_no_error(*args)
+ args[-1] += ' > /dev/null' unless @verbose
+ fail RuntimeError.new('Command failed: ' + args.join(' ')) unless
+ system(*args)
+ end
+ end
+
+ # Represent a directory full of backups where "current" is a subvolume
+ # which is snapshotted to frozen backup directories called e.g.
+ # "yyyy-mm-ddThh:mm+zzzz".
+ #
+ class BackupDirectory
+ class << self
+ # Return all backup directories
+ #
+ def all
+ Dir.new(ENV['HOME']).entries.map do |entry|
+ next if entry[0] == '.'
+ name = File.expand_path(ENV['HOME'] + '/' + entry)
+ File.directory?(name + '/current') ? BackupDirectory.new(name) : nil
+ end
+ .compact
+ end
+
+ # Returns every snapshot in every backup directory
+ #
+ def all_snapshots
+ all.map(&:snapshots).flatten
+ end
+ end
+
+ attr_reader :dir
+
+ def initialize(dir)
+ @dir = Dir.new(dir)
+ fail Errno::ENOENT unless File.directory?(dir)
+ current
+ end
+
+ # Return total amount of free space in backup directory (bytes)
+ #
+ def free
+ df = DiskFree.new(@dir.path)
+ df.total - df.used
+ end
+
+ # Return an array of Times representing the current list of
+ # snapshots.
+ #
+ def snapshots
+ @dir.entries.map do |entry|
+ next if entry[0] == '.' || entry == 'current'
+ snapshot_path = File.expand_path(@dir.path + '/' + entry)
+ next unless File.directory?(snapshot_path)
+ begin
+ Snapshot.new(self, snapshot_path)
+ rescue ArgumentError => ae
+ # directory name must represent a parseable Time
+ nil
+ end
+ end
+ .compact
+ end
+
+ # Create a new snapshot of 'current'
+ #
+ def new_snapshot!(time = Time.now)
+ snapshot_path = time.strftime(dir.path + '/%Y-%m-%dT%H:%M%z')
+ Snapshot.new(self, snapshot_path).create!(current.path)
+ end
+
+ def current
+ Dir.new("#{dir.path}/current")
+ end
+ end
end
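
Not part of this commit, but a minimal usage sketch of the classes above: it assumes the library is loaded as byteback/backup_directory and that the tail of the list returned by sort_by_importance holds the least-important snapshots (they are appended last), so pruning works from the end.

require 'byteback/backup_directory'

# Gather every snapshot under $HOME and order them, most important first.
snapshots = Byteback::BackupDirectory.all_snapshots
ordered   = Byteback::Snapshot.sort_by_importance(snapshots)

# To regain space, delete from the end of the list (least important first).
# The "have we freed enough yet?" check is deliberately left out of this sketch.
victim = ordered.last
victim.delete! unless victim.nil?
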
diff --git a/lib/byteback/disk_free_history.rb b/lib/byteback/disk_free_history.rb
index 6be8143..0c67370 100644
--- a/lib/byteback/disk_free_history.rb
+++ b/lib/byteback/disk_free_history.rb
@@ -3,111 +3,109 @@
require 'sys/filesystem'
module Byteback
- class DiskFreeReading < Struct.new(:fsstat, :time)
- def initialize(fsstat,time=Time.now)
- self.fsstat = fsstat
- self.time = time
- end
-
- # helper method to return %age of disc space free
- #
- def percent_free
- fsstat.blocks_available * 100 / fsstat.blocks
- end
- end
-
- # A simple round-robin list to store a short history of a given mount
- # point's disk space history.
- #
- class DiskFreeHistory
- attr_reader :mountpoint, :history_file
-
- MINIMUM_INTERVAL = 5*60 # don't take readings more than 5 mins apart
- MAXIMUM_AGE = 7*24*60*60 # delete readings after a week
-
- # Initialize a new list storing the disc space history for the given
- # mount point.
- #
- def initialize(mountpoint, history_file=nil)
- history_file = "#{mountpoint}/.disk_free_history" unless
- history_file
- @history_file = history_file
- @mountpoint = mountpoint
- load!
- end
-
- # Take a new reading
- #
- def new_reading!
- reading = DiskFreeReading.new(Sys::Filesystem.stat(@mountpoint))
-
- # Don't record a new reading if it's exactly the same as last time,
- # and less than the minimum interval.
- #
- return nil if @list.last &&
- @list.last.fsstat.blocks_available == reading.fsstat.blocks_available &&
- Time.now - @list.last.time < MINIMUM_INTERVAL
-
- @list << reading
-
- save!
- end
-
- def list
- load! unless @list
- @list
- end
-
- def gradient(last_n_seconds, &value_from_reading)
- value_from_reading ||= proc { |r| r.fsstat.blocks_available }
- earliest = Time.now - last_n_seconds
-
- total = 0
- readings = 0
- later_reading = nil
-
- list.reverse.each do |reading|
- if later_reading
- difference =
- value_from_reading.call(reading) -
- value_from_reading.call(later_reading)
- total += difference
- end
- readings += 1
- break if reading.time < earliest
- later_reading = reading
- end
-
- return 0 if readings == 0
-
- total / readings
- end
-
- protected
-
- def load!
- begin
- File.open(@history_file) do |fh|
- @list = Marshal.restore(fh.read(1000000))
- end
- rescue Errno::ENOENT, TypeError => err
- @list = []
- new_reading!
- end
- end
-
- def save!
- list.shift while Time.now - list.first.time > MAXIMUM_AGE
-
- tmp = "#{@history_file}.#{$$}.#{rand(9999999999)}"
- begin
- File.open(tmp, "w") do |fh|
- fh.write(Marshal.dump(list))
- File.rename(tmp, @history_file)
- end
- ensure
- File.unlink(tmp) if File.exists?(tmp)
- end
- end
- end
+ class DiskFreeReading < Struct.new(:fsstat, :time)
+ def initialize(fsstat, time = Time.now)
+ self.fsstat = fsstat
+ self.time = time
+ end
+
+ # helper method to return %age of disc space free
+ #
+ def percent_free
+ fsstat.blocks_available * 100 / fsstat.blocks
+ end
+ end
+
+ # A simple round-robin list to store a short history of a given mount
+ # point's disk space history.
+ #
+ class DiskFreeHistory
+ attr_reader :mountpoint, :history_file
+
+ MINIMUM_INTERVAL = 5 * 60 # don't take readings more than 5 mins apart
+ MAXIMUM_AGE = 7 * 24 * 60 * 60 # delete readings after a week
+
+ # Initialize a new list storing the disc space history for the given
+ # mount point.
+ #
+ def initialize(mountpoint, history_file = nil)
+ history_file = "#{mountpoint}/.disk_free_history" unless
+ history_file
+ @history_file = history_file
+ @mountpoint = mountpoint
+ load!
+ end
+
+ # Take a new reading
+ #
+ def new_reading!
+ reading = DiskFreeReading.new(Sys::Filesystem.stat(@mountpoint))
+
+ # Don't record a new reading if it's exactly the same as last time,
+ # and less than the minimum interval.
+ #
+ return nil if @list.last &&
+ @list.last.fsstat.blocks_available == reading.fsstat.blocks_available &&
+ Time.now - @list.last.time < MINIMUM_INTERVAL
+
+ @list << reading
+
+ save!
+ end
+
+ def list
+ load! unless @list
+ @list
+ end
+
+ def gradient(last_n_seconds, &value_from_reading)
+ value_from_reading ||= proc { |r| r.fsstat.blocks_available }
+ earliest = Time.now - last_n_seconds
+
+ total = 0
+ readings = 0
+ later_reading = nil
+
+ list.reverse.each do |reading|
+ if later_reading
+ difference =
+ value_from_reading.call(reading) -
+ value_from_reading.call(later_reading)
+ total += difference
+ end
+ readings += 1
+ break if reading.time < earliest
+ later_reading = reading
+ end
+
+ return 0 if readings == 0
+
+ total / readings
+ end
+
+ protected
+
+ def load!
+ File.open(@history_file) do |fh|
+ @list = Marshal.restore(fh.read(1_000_000))
+ end
+ rescue Errno::ENOENT, TypeError => err
+ @list = []
+ new_reading!
+ end
+
+ def save!
+ list.shift while Time.now - list.first.time > MAXIMUM_AGE
+
+ tmp = "#{@history_file}.#{$PROCESS_ID}.#{rand(9_999_999_999)}"
+ begin
+ File.open(tmp, 'w') do |fh|
+ fh.write(Marshal.dump(list))
+ File.rename(tmp, @history_file)
+ end
+ ensure
+ File.unlink(tmp) if File.exist?(tmp)
+ end
+ end
+ end
end
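
A small usage sketch, again not from the commit: it assumes a backup volume mounted at /byteback (a hypothetical path) that the process can write to, and shows a reading being recorded and the recent trend read back.

require 'byteback/disk_free_history'

history = Byteback::DiskFreeHistory.new('/byteback')
history.new_reading!   # no-op if the last reading is identical and recent

# gradient averages the (older - newer) differences in available blocks over
# the window, so a positive value here means free space has been shrinking.
trend = history.gradient(60 * 60)
puts "free-space trend over the last hour: #{trend} blocks per reading"
puts "currently #{history.list.last.percent_free}% free"
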
diff --git a/lib/byteback/log.rb b/lib/byteback/log.rb
index cfd3feb..79f759b 100644
--- a/lib/byteback/log.rb
+++ b/lib/byteback/log.rb
@@ -2,17 +2,28 @@ require 'logger'
require 'syslog'
module Byteback
- # Translates Ruby's Logger calls to similar calls to Syslog
- # (implemented in Ruby 2.0 as Syslog::Logger).
- #
- # We need to neuter % signs which are taken as format strings.
- #
- class SyslogProxy
- class << self
- def debug(m); log_nopc(Syslog::LOG_DEBUG, m); end
- def info(m); log_nopc(Syslog::LOG_INFO, m); end
- def warn(m); log_nopc(Syslog::LOG_WARNING, m); end
- def error(m); log_nopc(Syslog::LOG_ERR, m); end
+ # Translates Ruby's Logger calls to similar calls to Syslog
+ # (implemented in Ruby 2.0 as Syslog::Logger).
+ #
+ # We need to neuter % signs which are taken as format strings.
+ #
+ class SyslogProxy
+ class << self
+ def debug(m)
+ log_nopc(Syslog::LOG_DEBUG, m)
+ end
+
+ def info(m)
+ log_nopc(Syslog::LOG_INFO, m)
+ end
+
+ def warn(m)
+ log_nopc(Syslog::LOG_WARNING, m)
+ end
+
+ def error(m)
+ log_nopc(Syslog::LOG_ERR, m)
+ end
#
# syslog(3) says:
#
@@ -22,40 +33,57 @@ module Byteback
# Errors might be fatal to Byteback, but they're unlikely to make the
# whole server unusable. So lets dial this down to ERR from EMERG.
#
- def fatal(m); log_nopc(Syslog::LOG_ERR, m); end
-
- def log_nopc(level, m)
- Syslog.log(level, m.gsub("%","%%"))
- end
- end
- end
-
- # Log proxy class that we can include in our scripts for some simple
- # logging defaults.
- #
- module Log
- @@me = File.expand_path($0).split("/").last
-
- @@logger = if STDIN.tty? && !ENV['BYTEBACK_TO_SYSLOG']
- logger = Logger.new(STDERR)
- logger.level = Logger::DEBUG
- logger.formatter = proc { |severity, datetime, progname, msg|
- if severity == "FATAL" || severity == "ERROR"
- "*** #{msg}\n"
- else
- "#{msg}\n"
- end
- }
- logger
- else
- Syslog.open(@@me)
- SyslogProxy
- end
-
- def debug(*a); @@logger.__send__(:debug, *a); end
- def info(*a); @@logger.__send__(:info, *a); end
- def warn(*a); @@logger.__send__(:warn, *a); end
- def fatal(*a); @@logger.__send__(:fatal, *a); exit 1; end
- def error(*a); @@logger.__send__(:error, *a); end
- end
+ def fatal(m)
+ log_nopc(Syslog::LOG_ERR, m)
+ end
+
+ def log_nopc(level, m)
+ Syslog.log(level, m.gsub('%', '%%'))
+ end
+ end
+ end
+
+ # Log proxy class that we can include in our scripts for some simple
+ # logging defaults.
+ #
+ module Log
+ @@me = File.expand_path($PROGRAM_NAME).split('/').last
+
+ @@logger = if STDIN.tty? && !ENV['BYTEBACK_TO_SYSLOG']
+ logger = Logger.new(STDERR)
+ logger.level = Logger::DEBUG
+ logger.formatter = proc { |severity, _datetime, _progname, msg|
+ if severity == 'FATAL' || severity == 'ERROR'
+ "*** #{msg}\n"
+ else
+ "#{msg}\n"
+ end
+ }
+ logger
+ else
+ Syslog.open(@@me)
+ SyslogProxy
+ end
+
+ def debug(*a)
+ @@logger.__send__(:debug, *a)
+ end
+
+ def info(*a)
+ @@logger.__send__(:info, *a)
+ end
+
+ def warn(*a)
+ @@logger.__send__(:warn, *a)
+ end
+
+ def fatal(*a)
+ @@logger.__send__(:fatal, *a)
+ exit 1
+ end
+
+ def error(*a)
+ @@logger.__send__(:error, *a)
+ end
+ end
end
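
A minimal sketch of how the Log module might be mixed in (ExampleJob is a hypothetical class, not from the source): messages go to STDERR when the script runs on a terminal, or to syslog otherwise, per the STDIN.tty? check above.

require 'byteback/log'

class ExampleJob           # hypothetical class, for illustration only
  include Byteback::Log

  def run
    info('starting backup run')
    warn('disk space is getting low')
    fatal('cannot continue')   # logs the message, then exits with status 1
  end
end

ExampleJob.new.run
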
diff --git a/lib/byteback/util.rb b/lib/byteback/util.rb
index 391e051..99a5b23 100644
--- a/lib/byteback/util.rb
+++ b/lib/byteback/util.rb
@@ -1,68 +1,66 @@
require 'tempfile'
module Byteback
- module Util
- @@lockfile = "/var/lock/byteback/byteback.lock"
+ module Util
+ @@lockfile = '/var/lock/byteback/byteback.lock'
- def remove_lockfile!
- begin
- File.unlink(@@lockfile)
- rescue Errno::ENOENT
- end
- end
+ def remove_lockfile!
+ File.unlink(@@lockfile)
+ rescue Errno::ENOENT
+ end
- def claim_lockfile!
- # Check the lockfile first
- if File.directory?(File.dirname(@@lockfile))
- if File.exists? @@lockfile
- # check the lockfile is sane
- exist_pid = File.read(@@lockfile).to_i
- if exist_pid > 1 and exist_pid < (File.read("/proc/sys/kernel/pid_max").to_i)
- begin
- Process.getpgid(exist_pid)
- # if no exception, process is running, abort
- fatal("Process is running (#{exist_pid} from #{@@lockfile})")
- rescue Errno::ESRCH
- # no process running with that pid, pidfile is stale
- remove_lockfile!
- end
- else
- # lockfile isn't sane, remove it and continue
- remove_lockfile!
- end
- end
- else
- Dir.mkdir(File.dirname(@@lockfile))
- # lockfile didn't exist so just carry on
- end
+ def claim_lockfile!
+ # Check the lockfile first
+ if File.directory?(File.dirname(@@lockfile))
+ if File.exist? @@lockfile
+ # check the lockfile is sane
+ exist_pid = File.read(@@lockfile).to_i
+ if exist_pid > 1 && exist_pid < (File.read('/proc/sys/kernel/pid_max').to_i)
+ begin
+ Process.getpgid(exist_pid)
+ # if no exception, process is running, abort
+ fatal("Process is running (#{exist_pid} from #{@@lockfile})")
+ rescue Errno::ESRCH
+ # no process running with that pid, pidfile is stale
+ remove_lockfile!
+ end
+ else
+ # lockfile isn't sane, remove it and continue
+ remove_lockfile!
+ end
+ end
+ else
+ Dir.mkdir(File.dirname(@@lockfile))
+ # lockfile didn't exist so just carry on
+ end
- # Own the pidfile ourselves
- File.open(@@lockfile, "w") do |lockfile|
- lockfile.puts Process::pid
- end
- end
+ # Own the pidfile ourselves
+ File.open(@@lockfile, 'w') do |lockfile|
+ lockfile.puts Process.pid
+ end
+ end
- def lock_out_other_processes(name)
- @@lockfile = "/var/lock/byteback/#{name}.lock"
- claim_lockfile!
- at_exit { remove_lockfile! }
- end
+ def lock_out_other_processes(name)
+ @@lockfile = "/var/lock/byteback/#{name}.lock"
+ claim_lockfile!
+ at_exit { remove_lockfile! }
+ end
- def log_system(*args)
- debug("system: " + args.map { |a| / /.match(a) ? "\"#{a}\"" : a }.join(" "))
- rd, wr = IO.pipe
- pid = fork
- if pid.nil? # child
- rd.close
- STDOUT.reopen(wr)
- STDERR.reopen(wr)
- # any cleanup actually necessary here?
- exec(*args)
- end
- wr.close
- rd.each_line { |line| debug(line.chomp) }
- pid2, status = Process.waitpid2(pid, 0)
- status.exitstatus
- end
- end
+ def log_system(*args)
+ debug('system: ' + args.map { |a| / /.match(a) ? "\"#{a}\"" : a }.join(' '))
+ rd, wr = IO.pipe
+ pid = fork
+ if pid.nil? # child
+ rd.close
+ STDOUT.reopen(wr)
+ STDERR.reopen(wr)
+ # any cleanup actually necessary here?
+ exec(*args)
+ end
+ wr.close
+ rd.each_line { |line| debug(line.chomp) }
+ pid2, status = Process.waitpid2(pid, 0)
+ status.exitstatus
+ end
+ end
end
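
Finally, a hedged sketch of the locking and command helpers, assuming a script that mixes in both Util and Log (claim_lockfile! reports conflicts via fatal, and log_system logs via debug, so both modules are needed); the name passed to lock_out_other_processes is arbitrary and only determines the lockfile path.

require 'byteback/util'
require 'byteback/log'

include Byteback::Util
include Byteback::Log

# Takes /var/lock/byteback/example-backup.lock, aborting via fatal() if a
# live process already holds it; the lockfile is removed again at exit.
lock_out_other_processes('example-backup')

# Runs the command, logging each line of its output via debug(), and
# returns the exit status.
status = log_system('/bin/echo', 'hello from byteback')
fatal('command failed') unless status == 0
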