From 6e2b99a0870e467e35c8b4b95aeb1e665dded413 Mon Sep 17 00:00:00 2001
From: Jonathan Herman
Date: Thu, 21 Feb 2013 18:32:24 -0500
Subject: Many bugfixes motivated by some end-to-end testing.

---
 common.py                    |  2 +-
 gen/generators.py            | 22 ++++++++++-------
 parse/dir_map.py             |  2 +-
 parse/ft.py                  | 14 ++++++++---
 parse/sched.py               |  8 ++++++
 parse/tuple_table.py         |  3 ---
 parse_exps.py                | 58 +++++++++++++++++++++++++++++---------------
 plot/style.py                |  3 +++
 plot_exps.py                 | 29 +++++++++++++++++++---
 run/executable/executable.py |  9 ++++---
 run/executable/ftcat.py      | 21 ++++++++--------
 run/experiment.py            |  9 ++++---
 run/litmus_util.py           | 10 ++++----
 run/tracer.py                | 19 +++++++--------
 run_exps.py                  |  2 +-
 15 files changed, 138 insertions(+), 73 deletions(-)

diff --git a/common.py b/common.py
index 6d1db97..d080e1a 100644
--- a/common.py
+++ b/common.py
@@ -105,7 +105,7 @@ def recordtype(typename, field_names, default=0):
     namespace = {}
     try:
         exec template in namespace
-    except SyntaxError, e:
+    except SyntaxError as e:
         raise SyntaxError(e.message + ':\n' + template)
 
     cls = namespace[typename]
diff --git a/gen/generators.py b/gen/generators.py
index 09ae979..dd6f1cc 100644
--- a/gen/generators.py
+++ b/gen/generators.py
@@ -53,7 +53,7 @@ GenOption = namedtuple('GenOption', ['name', 'types', 'default', 'help'])
 class BaseGenerator(object):
     '''Creates sporadic task sets with the most common Litmus options.'''
     def __init__(self, name, templates, options, params):
-        self.options = self.__make_options() + options
+        self.options = self.__make_options(params) + options
 
         self.__setup_params(params)
 
@@ -61,11 +61,14 @@ class BaseGenerator(object):
         self.template = "\n".join([TP_RM] + templates)
         self.name = name
 
-    def __make_options(self):
+    def __make_options(self, params):
         '''Return generic Litmus options.'''
 
         # Guess defaults using the properties of this computer
-        cpus = lu.num_cpus()
+        if 'cpus' in params:
+            cpus = min(map(int, params['cpus']))
+        else:
+            cpus = lu.num_cpus()
         try:
             config = get_config_option("RELEASE_MASTER") and True
         except:
@@ -127,9 +130,10 @@ class BaseGenerator(object):
             f.write(str(Template(self.template, searchList=[exp_params])))
 
         del exp_params['task_set']
+        del exp_params['num_tasks']
         exp_params_file = out_dir + "/" + DEFAULTS['params_file']
         with open(exp_params_file, 'wa') as f:
-            exp_params['scheduler'] = 'CEDF'
+            exp_params['scheduler'] = self.name
             f.write(str(exp_params))
 
     def __setup_params(self, params):
@@ -195,7 +199,7 @@ class BaseGenerator(object):
         col_map = builder.build()
 
         for dp in DesignPointGenerator(self.params):
-            dir_leaf = "sched=%s_%s" % (self.name, col_map.get_encoding(dp))
+            dir_leaf = "sched=%s_%s" % (self.name, col_map.encode(dp))
             dir_path = "%s/%s" % (out_dir, dir_leaf.strip('_'))
 
             if os.path.exists(dir_path):
@@ -225,10 +229,10 @@ class BaseGenerator(object):
                 i+= len(word)
                 res += [word]
             if i > 80:
-                print ", ".join(res[:-1])
+                print(", ".join(res[:-1]))
                 res = ["\t\t "+res[-1]]
                 i = line.index("'")
-        print ", ".join(res)
+        print(", ".join(res))
 
 class PartitionedGenerator(BaseGenerator):
     def __init__(self, name, templates, options, params):
@@ -243,7 +247,7 @@ class PartitionedGenerator(BaseGenerator):
 
 class PedfGenerator(PartitionedGenerator):
     def __init__(self, params={}):
-        super(PedfGenerator, self).__init__("P-EDF", [], [], params)
+        super(PedfGenerator, self).__init__("PSN-EDF", [], [], params)
 
 class CedfGenerator(PartitionedGenerator):
     LEVEL_OPTION = GenOption('level', ['L2', 'L3', 'All'], ['L2'],
@@ -255,4 +259,4 @@ class CedfGenerator(PartitionedGenerator):
 
 class GedfGenerator(BaseGenerator):
     def __init__(self, params={}):
-        super(GedfGenerator, self).__init__("G-EDF", [TP_GLOB_TASK], [], params)
+        super(GedfGenerator, self).__init__("GSN-EDF", [TP_GLOB_TASK], [], params)
diff --git a/parse/dir_map.py b/parse/dir_map.py
index 1c17f40..601dd3b 100644
--- a/parse/dir_map.py
+++ b/parse/dir_map.py
@@ -46,7 +46,7 @@ class DirMap(object):
 
     def remove_childless(self):
         def remove_childless2(node):
-            for key, child in node:
+            for key, child in node.children.items():
                 remove_childless2(child)
                 if not (child.children or child.values):
                     node.children.pop(key)
diff --git a/parse/ft.py b/parse/ft.py
index 5293b00..19453d1 100644
--- a/parse/ft.py
+++ b/parse/ft.py
@@ -3,6 +3,7 @@ import numpy as np
 import os
 import re
 import shutil as sh
+import sys
 import subprocess
 
 from point import Measurement,Type
@@ -28,7 +29,6 @@ def parse_overhead(result, overhead_bin, overhead, cycles, out_dir, err_file):
         raise Exception("Failed (%d) with command: %s" % (ret, " ".join(cmd)))
     if not size:
         os.remove(ovh_fname)
-
     if size and not ret:
         # Map and sort file for stats
         data = np.memmap(ovh_fname, dtype="float32", mode='c')
@@ -47,19 +47,22 @@ def parse_overhead(result, overhead_bin, overhead, cycles, out_dir, err_file):
 
 def sort_ft(ft_file, err_file, out_dir):
     '''Create and return file with sorted overheads from @ft_file.'''
-    out_fname = "{}/{}".format("%s/%s" % (os.getcwd(), out_dir), FT_SORTED_NAME)
+    out_fname = "{}/{}".format(out_dir, FT_SORTED_NAME)
 
     # Sort happens in-place
     sh.copyfile(ft_file, out_fname)
     cmd = [conf.BINS['ftsort'], out_fname]
-    ret = subprocess.call(cmd, cwd="%s/%s" % (os.getcwd(), out_dir), stderr=err_file, stdout=err_file)
+    ret = subprocess.call(cmd, cwd=out_dir, stderr=err_file, stdout=err_file)
 
     if ret:
-        raise Exception("Sort failed with command: %s" % " ".join(cmd))
+        raise Exception("Sort failed (%d) with command: %s" % (ret, " ".join(cmd)))
 
     return out_fname
 
 def extract_ft_data(result, data_dir, work_dir, cycles):
+    data_dir = os.path.abspath(data_dir)
+    work_dir = os.path.abspath(work_dir)
+
     freg = conf.FILES['ft_data'] + "$"
     bins = [f for f in os.listdir(data_dir) if re.match(freg, f)]
 
@@ -67,6 +70,9 @@ def extract_ft_data(result, data_dir, work_dir, cycles):
         return False
 
     bin_file = "{}/{}".format(data_dir, bins[0])
+    if not os.path.getsize(bin_file):
+        sys.stderr.write("Empty feather trace file %s!" % bin_file)
+        return False
 
     with open("%s/%s" % (work_dir, FT_ERR_NAME), 'w') as err_file:
         sorted_bin = sort_ft(bin_file, err_file, work_dir)
diff --git a/parse/sched.py b/parse/sched.py
index ba0df5e..2da0149 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -2,6 +2,7 @@ import config.config as conf
 import os
 import re
 import struct
+import sys
 import subprocess
 
 from collections import defaultdict,namedtuple
@@ -66,6 +67,10 @@ def register_record(name, id, method, fmt, fields):
 
 def make_iterator(fname):
     '''Iterate over (parsed record, processing method) in a sched-trace file.'''
+    if not os.path.getsize(fname):
+        sys.stderr.write("Empty sched_trace file %s!" % fname)
+        return
+
     f = open(fname, 'rb')
     max_type = len(record_map)
 
@@ -182,4 +187,7 @@ def extract_sched_data(result, data_dir, work_dir):
 
     # Summarize value groups
     for name, data in stat_data.iteritems():
+        if not data:
+            continue
         result[name] = Measurement(str(name)).from_array(data)
+
diff --git a/parse/tuple_table.py b/parse/tuple_table.py
index 86baa08..ee94772 100644
--- a/parse/tuple_table.py
+++ b/parse/tuple_table.py
@@ -3,8 +3,6 @@ from collections import defaultdict,namedtuple
 from point import SummaryPoint,Type
 from dir_map import DirMap
 from col_map import ColMap,ColMapBuilder
-
-
 from pprint import pprint
 
 class TupleTable(object):
@@ -88,7 +86,6 @@ class ReducedTupleTable(TupleTable):
             self.__add_to_dirmap(dir_map, col, kv, point)
 
         dir_map.remove_childless()
-        print("wrote: %s" % self)
         return dir_map
 
     @staticmethod
diff --git a/parse_exps.py b/parse_exps.py
index f27021a..4cdc0a1 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -8,13 +8,13 @@ import parse.sched as st
 import pickle
 import shutil as sh
 import sys
+import traceback
 
 from collections import namedtuple
 from common import load_params
 from optparse import OptionParser
-from parse.dir_map import DirMap
 from parse.point import ExpPoint
-from parse.tuple_table import TupleTable,ReducedTupleTable
+from parse.tuple_table import TupleTable
 from parse.col_map import ColMapBuilder
 from multiprocessing import Pool, cpu_count
 
@@ -23,7 +23,6 @@ def parse_args():
     parser = OptionParser("usage: %prog [options] [data_dir]...")
 
     print("default to no params.py")
-    print("save measurements in temp directory for faster reloading")
 
     parser.add_option('-o', '--out', dest='out',
                       help='file or directory for data output', default='parse-data')
@@ -85,16 +84,24 @@ def load_exps(exp_dirs, cm_builder, clean):
 
     return exps
 
-def parse_exp(exp, force):
+def parse_exp(exp_force):
+    # Tupled for multiprocessing
+    exp, force = exp_force
+
     result_file = exp.work_dir + "/exp_point.pkl"
     should_load = not force and os.path.exists(result_file)
 
-    mode = 'r' if should_load else 'w'
-    with open(result_file, mode + 'b') as f:
-        if should_load:
-            # No need to go through this work twice
-            result = pickle.load(f)
-        else:
+    result = None
+    if should_load:
+        with open(result_file, 'rb') as f:
+            try:
+                # No need to go through this work twice
+                result = pickle.load(f)
+            except:
+                pass
+
+    if not result:
+        try:
             result = ExpPoint(exp.path)
             cycles = exp.params[conf.PARAMS['cycles']]
 
@@ -104,7 +111,10 @@ def parse_exp(exp, force):
 
             # Write scheduling statistics into result
             st.extract_sched_data(result, exp.path, exp.work_dir)
 
-            pickle.dump(result, f)
+            with open(result_file, 'wb') as f:
+                pickle.dump(result, f)
+        except:
+            traceback.print_exc()
 
     return (exp, result)
@@ -128,14 +138,24 @@ def main():
     sys.stderr.write("Parsing data...\n")
 
     procs = min(len(exps), cpu_count()/2)
-    pool = Pool(processes=procs)
-    enum = pool.imap_unordered(parse_exp, exps, [opts.force]*len(exps))
-    for i, (exp, result) in enumerate(enum):
-        if opts.verbose:
-            print(result)
-        else:
-            sys.stderr.write('\r {0:.2%}'.format(float(i)/len(exps)))
-        result_table[exp.params] += [result]
+    pool = Pool(processes=procs)
+    pool_args = zip(exps, [opts.force]*len(exps))
+    enum = pool.imap_unordered(parse_exp, pool_args, 1)
+
+    try:
+        for i, (exp, result) in enumerate(enum):
+            if opts.verbose:
+                print(result)
+            else:
+                sys.stderr.write('\r {0:.2%}'.format(float(i)/len(exps)))
+            result_table[exp.params] += [result]
+        pool.close()
+    except:
+        pool.terminate()
+        traceback.print_exc()
+        raise Exception("Failed parsing!")
+    finally:
+        pool.join()
 
     sys.stderr.write('\n')
 
diff --git a/plot/style.py b/plot/style.py
index ca7a112..fd1fa97 100644
--- a/plot/style.py
+++ b/plot/style.py
@@ -16,6 +16,9 @@ class StyleMap(object):
         self.field_map = {}
 
         for field, values in self.__get_all()._asdict().iteritems():
+            if not col_list:
+                break
+
             next_column = col_list.pop(0)
             value_dict = {}
 
diff --git a/plot_exps.py b/plot_exps.py
index 8fbef99..49cc729 100755
--- a/plot_exps.py
+++ b/plot_exps.py
@@ -5,6 +5,7 @@ import matplotlib.pyplot as plot
 import os
 import shutil as sh
 import sys
+import traceback
 from collections import namedtuple
 from optparse import OptionParser
 from parse.col_map import ColMap,ColMapBuilder
@@ -83,6 +84,15 @@ def plot_by_variable(details):
 
     plot.savefig(details.out, format=OUT_FORMAT)
 
+    return True
+
+def plot_wrapper(details):
+    '''Wrap exceptions in named method for printing in multiprocessing pool.'''
+    try:
+        return plot_by_variable(details)
+    except:
+        traceback.print_exc()
+
 def plot_dir(data_dir, out_dir, force):
     sys.stderr.write("Reading data...\n")
     dir_map = DirMap.read(data_dir)
@@ -102,11 +112,24 @@ def plot_dir(data_dir, out_dir, force):
         if force or not os.path.exists(details.out):
             plot_details += [details]
 
+    if not plot_details:
+        return
+
     procs = min(len(plot_details), cpu_count()/2)
     pool = Pool(processes=procs)
-    enum = pool.imap_unordered(plot_by_variable, plot_details)
-    for i, _ in enumerate(enum):
-        sys.stderr.write('\r {0:.2%}'.format(float(i)/num_plots))
+    enum = pool.imap_unordered(plot_wrapper, plot_details)
+
+    try:
+        for i, _ in enumerate(enum):
+            sys.stderr.write('\r {0:.2%}'.format(float(i)/num_plots))
+        pool.close()
+    except:
+        pool.terminate()
+        traceback.print_exc()
+        raise Exception("Failed plotting!")
+    finally:
+        pool.join()
+
     sys.stderr.write('\n')
 
 def main():
diff --git a/run/executable/executable.py b/run/executable/executable.py
index bc8edd7..0a408b7 100644
--- a/run/executable/executable.py
+++ b/run/executable/executable.py
@@ -44,7 +44,6 @@ class Executable(object):
         return full_command
 
     def __str__(self):
-        print("Full command: %s" % self.__get_full_command())
         return " ".join(self.__get_full_command())
 
     def execute(self):
@@ -63,7 +62,7 @@ class Executable(object):
         '''Send the terminate signal to the binary.'''
         self.sp.terminate()
 
-    def wait(self):
+    def wait(self, error=True):
         '''Wait until the executable is finished, checking return code.
 
         If the exit status is non-zero, raise an exception.
@@ -71,8 +70,10 @@ class Executable(object):
         '''
         self.sp.wait()
 
-        if self.sp.returncode != 0:
-            print >>sys.stderr, "Non-zero return: %s %s" % (self.exec_file, " ".join(self.extra_args))
+        if self.sp.returncode != 0 and error:
+            print >>sys.stderr, "Non-zero return %d: %s %s" % (self.sp.returncode,
+                                                               self.exec_file,
+                                                               " ".join(self.extra_args))
             return 0
         else:
             return 1
diff --git a/run/executable/ftcat.py b/run/executable/ftcat.py
index 5da8fa7..1f0420b 100644
--- a/run/executable/ftcat.py
+++ b/run/executable/ftcat.py
@@ -1,18 +1,15 @@
 import os
 import stat
 
-from executable import Executable
+from .executable import Executable
 
 class FTcat(Executable):
     '''Used to wrap the ftcat binary in the Experiment object.'''
 
    def __init__(self, ft_cat_bin, stdout_file, stderr_file, dev, events, cpu=None):
         '''Extends the Executable initializer method with ftcat attributes.'''
+        super(FTcat, self).__init__('/usr/bin/taskset')
 
-        # hack to run FTCat at higher priority
-        chrt_bin = '/usr/bin/chrt'
-
-        super(FTcat, self).__init__(chrt_bin)
 
         self.stdout_file = stdout_file
         self.stderr_file = stderr_file
@@ -23,11 +20,15 @@ class FTcat(Executable):
         if events is None:
             raise Exception('No events!')
 
-        # hack to run FTCat at higher priority
-        self.extra_args = ['-f', '40']
         if cpu is not None:
-            # and bind it to a CPU
-            self.extra_args.extend(['/usr/bin/taskset', '-c', str(cpu)])
+            # Execute only on the given CPU
+            self.extra_args = ['-c', str(cpu)]
+        else:
+            # Execute on any cpu
+            self.extra_args = ['0xFFFFFFFF']
+
 
         events_str_arr = map(str, events)
-        self.extra_args.extend([ft_cat_bin, dev] + events_str_arr)
+        ft_cat_cmd = [ft_cat_bin, dev] + list(events_str_arr)
+
+        self.extra_args.extend(ft_cat_cmd)
diff --git a/run/experiment.py b/run/experiment.py
index c8fc228..ecb0241 100644
--- a/run/experiment.py
+++ b/run/experiment.py
@@ -1,8 +1,9 @@
 import os
 import time
-import litmus_util as lu
+import run.litmus_util as lu
+import shutil as sh
 from operator import methodcaller
-from tracer import SchedTracer, LogTracer, PerfTracer, LinuxTracer, OverheadTracer
+from run.tracer import SchedTracer, LogTracer, PerfTracer, LinuxTracer, OverheadTracer
 
 class ExperimentException(Exception):
     '''Used to indicate when there are problems with an experiment.'''
@@ -78,6 +79,8 @@ class Experiment(object):
                                  Experiment.INTERRUPTED_DIR)
         interrupted = "%s/%s" % (os.path.split(self.working_dir)[0],
                                  Experiment.INTERRUPTED_DIR)
+        if os.path.exists(interrupted):
+            sh.rmtree(interrupted)
         os.rename(self.working_dir, interrupted)
 
         os.mkdir(self.working_dir)
@@ -154,7 +157,7 @@ class Experiment(object):
         os.rename(self.working_dir, self.finished_dir)
 
     def log(self, msg):
-        print "[Exp %s]: %s" % (self.name, msg)
+        print("[Exp %s]: %s" % (self.name, msg))
 
     def run_exp(self):
         succ = False
diff --git a/run/litmus_util.py b/run/litmus_util.py
index ec1700e..8a7f87d 100644
--- a/run/litmus_util.py
+++ b/run/litmus_util.py
@@ -4,7 +4,6 @@ import subprocess
 import os
 import stat
 import config.config as conf
-from common import get_config_option
 
 def num_cpus():
     '''Return the number of CPUs in the system.'''
@@ -19,11 +18,12 @@ def num_cpus():
     return cpus
 
 def ft_freq():
-    '''The frequency (in MHz) of the clock used by feather trace.'''
-    if get_config_option('CPU_V7') == 'y':
+    umachine = subprocess.check_output(["uname", "-m"])
+
+    if re.match("armv7", umachine):
         # Arm V7s use a millisecond timer
         freq = 1000.0
-    elif get_config_option('X86') == 'y':
+    elif re.match("x86", umachine):
         # X86 timer is equal to processor clock
         reg = re.compile(r'^cpu MHz\s*:\s*(?P\d+)', re.M)
         with open('/proc/cpuinfo', 'r') as f:
@@ -76,7 +76,7 @@ def is_device(dev):
     return not (not mode & stat.S_IFCHR)
 
 def waiting_tasks():
-    reg = re.compile(r'^ready.*(?P\d+)$', re.M)
+    reg = re.compile(r'^ready.*?(?P\d+)$', re.M)
     with open('/proc/litmus/stats', 'r') as f:
         data = f.read()
 
diff --git a/run/tracer.py b/run/tracer.py
index 5d00e86..723bcad 100644
--- a/run/tracer.py
+++ b/run/tracer.py
@@ -1,10 +1,9 @@
-import litmus_util
+from . import litmus_util
 import os
 import config.config as conf
 
 from operator import methodcaller
-from executable.ftcat import FTcat,Executable
-
+from run.executable.ftcat import FTcat,Executable
 
 class Tracer(object):
     def __init__(self, name, output_dir):
@@ -19,7 +18,6 @@ class Tracer(object):
         map(methodcaller('terminate'), self.bins)
         map(methodcaller('wait'), self.bins)
 
-
 class LinuxTracer(Tracer):
     EVENT_ROOT = "/sys/kernel/debug/tracing"
     LITMUS_EVENTS = "%s/events/litmus" % EVENT_ROOT
@@ -45,7 +43,6 @@ class LinuxTracer(Tracer):
         map(methodcaller('interrupt'), self.bins)
         map(methodcaller('wait'), self.bins)
 
-
 class LogTracer(Tracer):
     DEVICE_STR = '/dev/litmus/log'
 
@@ -63,6 +60,9 @@ class LogTracer(Tracer):
     def enabled():
         return litmus_util.is_device(LogTracer.DEVICE_STR)
 
+    def stop_tracing(self):
+        map(methodcaller('interrupt'), self.bins)
+        map(methodcaller('wait', False), self.bins)
 
 class SchedTracer(Tracer):
     DEVICE_STR = '/dev/litmus/sched_trace'
@@ -76,14 +76,14 @@ class SchedTracer(Tracer):
                 stdout_f = open('%s/st-%d.bin' % (self.output_dir, cpu), 'w')
                 stderr_f = open('%s/st-%d-stderr.txt' % (self.output_dir, cpu), 'w')
                 dev = '{0}{1}'.format(SchedTracer.DEVICE_STR, cpu)
-                ftc = FTcat(conf.BINS['ftcat'], stdout_f, stderr_f, dev, conf.SCHED_EVENTS, cpu=cpu)
+                ftc = FTcat(conf.BINS['ftcat'], stdout_f, stderr_f, dev,
+                            conf.SCHED_EVENTS, cpu=cpu)
                 self.bins.append(ftc)
 
     @staticmethod
     def enabled():
-        return litmus_util.is_device("%s%d" % (SchedTracer.DEVICE_STR, 0))
-
+        return litmus_util.is_device("%s%d" % (SchedTracer.DEVICE_STR, 0))
 
 class OverheadTracer(Tracer):
     DEVICE_STR = '/dev/litmus/ft_trace0'
@@ -100,8 +100,7 @@ class OverheadTracer(Tracer):
 
     @staticmethod
     def enabled():
-        return litmus_util.is_device(OverheadTracer.DEVICE_STR)
-
+        return litmus_util.is_device(OverheadTracer.DEVICE_STR)
 
 class PerfTracer(Tracer):
     def __init__(self, output_dir):
diff --git a/run_exps.py b/run_exps.py
index 84e2b4c..195d3f8 100755
--- a/run_exps.py
+++ b/run_exps.py
@@ -50,7 +50,7 @@ def convert_data(data):
         r"(?P[\w\-\/]+)"
         r"\s*{\s*(?P.*?)\s*?}$)|"
         r"(?P^"
-        r"(?:(?P\w+) )?\s*"
+        r"(?:(?P[^\d\-]\w*?) )?\s*"
         r"(?P[\w\-_\d\. \=]+)\s*$)",
         re.S|re.I|re.M)
 
--
cgit v1.2.2
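
Note on the worker-pool changes above: parse_exps.py and plot_exps.py now drive their multiprocessing pools the same way. The parent calls close() on success, terminate() on error, and join() in a finally block; failures inside a worker are printed with traceback.print_exc() instead of being silently lost; and in parse_exps.py the two worker arguments are packed into one tuple because imap_unordered hands each worker a single object. The code below is a minimal, self-contained sketch of that pattern only; process_item, process_all, and the doubling "work" are hypothetical stand-ins, not code from this repository.

import sys
import traceback
from multiprocessing import Pool, cpu_count

def process_item(item_force):
    '''Worker: arguments arrive as one tuple, as with parse_exp above.'''
    item, force = item_force
    try:
        return (item, item * 2 if force else item)  # stand-in for real parsing/plotting work
    except Exception:
        # Print the traceback here; exceptions raised inside a pool worker
        # otherwise surface in the parent with very little context.
        traceback.print_exc()
        return (item, None)

def process_all(items, force=False):
    procs = max(1, min(len(items), cpu_count() // 2))
    pool = Pool(processes=procs)
    results = []
    try:
        work = zip(items, [force] * len(items))
        for i, (item, res) in enumerate(pool.imap_unordered(process_item, work, 1)):
            sys.stderr.write('\r {0:.2%}'.format(float(i) / len(items)))
            results.append((item, res))
        pool.close()      # normal shutdown: no more tasks will be submitted
    except:
        pool.terminate()  # bare except so Ctrl-C also tears the pool down
        raise
    finally:
        pool.join()       # always reap the worker processes
        sys.stderr.write('\n')
    return results

if __name__ == '__main__':
    print(process_all(list(range(20)), force=True))

The __main__ guard matters for multiprocessing on platforms that spawn rather than fork worker processes, and the max(1, ...) guard is only there because cpu_count() // 2 can evaluate to zero on a single-CPU machine.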