-rw-r--r--   common.py                            |  74
-rw-r--r--   config/config.example.py             |  39
-rw-r--r--   experiment/executable/executable.py  |  10
-rw-r--r--   experiment/executable/ftcat.py       |   4
-rw-r--r--   experiment/experiment.py             |  10
-rw-r--r--   experiment/litmus_util.py            |  12
-rw-r--r--   parse/ft.py                          |  47
-rw-r--r--   parse/point.py                       |   4
-rw-r--r--   parse/sched.py                       | 445
-rwxr-xr-x   parse_exps.py                        | 122
-rwxr-xr-x   run_exps.py                          |   4
11 files changed, 327 insertions, 444 deletions
diff --git a/common.py b/common.py
@@ -1,4 +1,78 @@
1 | import sys | ||
1 | from collections import defaultdict | 2 | from collections import defaultdict |
3 | from textwrap import dedent | ||
4 | |||
5 | def recordtype(typename, field_names, default=0): | ||
6 | ''' Mutable namedtuple. Recipe from George Sakkis of MIT.''' | ||
7 | field_names = tuple(map(str, field_names)) | ||
8 | # Create and fill-in the class template | ||
9 | numfields = len(field_names) | ||
10 | argtxt = ', '.join(field_names) | ||
11 | reprtxt = ', '.join('%s=%%r' % f for f in field_names) | ||
12 | dicttxt = ', '.join('%r: self.%s' % (f,f) for f in field_names) | ||
13 | tupletxt = repr(tuple('self.%s' % f for f in field_names)).replace("'",'') | ||
14 | inittxt = '; '.join('self.%s=%s' % (f,f) for f in field_names) | ||
15 | itertxt = '; '.join('yield self.%s' % f for f in field_names) | ||
16 | eqtxt = ' and '.join('self.%s==other.%s' % (f,f) for f in field_names) | ||
17 | template = dedent(''' | ||
18 | class %(typename)s(object): | ||
19 | '%(typename)s(%(argtxt)s)' | ||
20 | |||
21 | __slots__ = %(field_names)r | ||
22 | |||
23 | def __init__(self, %(argtxt)s): | ||
24 | %(inittxt)s | ||
25 | |||
26 | def __len__(self): | ||
27 | return %(numfields)d | ||
28 | |||
29 | def __iter__(self): | ||
30 | %(itertxt)s | ||
31 | |||
32 | def __getitem__(self, index): | ||
33 | return getattr(self, self.__slots__[index]) | ||
34 | |||
35 | def __setitem__(self, index, value): | ||
36 | return setattr(self, self.__slots__[index], value) | ||
37 | |||
38 | def todict(self): | ||
39 | 'Return a new dict which maps field names to their values' | ||
40 | return {%(dicttxt)s} | ||
41 | |||
42 | def __repr__(self): | ||
43 | return '%(typename)s(%(reprtxt)s)' %% %(tupletxt)s | ||
44 | |||
45 | def __eq__(self, other): | ||
46 | return isinstance(other, self.__class__) and %(eqtxt)s | ||
47 | |||
48 | def __ne__(self, other): | ||
49 | return not self==other | ||
50 | |||
51 | def __getstate__(self): | ||
52 | return %(tupletxt)s | ||
53 | |||
54 | def __setstate__(self, state): | ||
55 | %(tupletxt)s = state | ||
56 | ''') % locals() | ||
57 | # Execute the template string in a temporary namespace | ||
58 | namespace = {} | ||
59 | try: | ||
60 | exec template in namespace | ||
61 | except SyntaxError, e: | ||
62 | raise SyntaxError(e.message + ':\n' + template) | ||
63 | cls = namespace[typename] | ||
64 | |||
65 | # Setup defaults | ||
66 | init_defaults = tuple(default for f in field_names) | ||
67 | cls.__init__.im_func.func_defaults = init_defaults | ||
68 | |||
69 | # For pickling to work, the __module__ variable needs to be set to the frame | ||
70 | # where the named tuple is created. Bypass this step in environments where | ||
71 | # sys._getframe is not defined (Jython for example). | ||
72 | if hasattr(sys, '_getframe') and sys.platform != 'cli': | ||
73 | cls.__module__ = sys._getframe(1).f_globals['__name__'] | ||
74 | |||
75 | return cls | ||
2 | 76 | ||
3 | def load_params(fname): | 77 | def load_params(fname): |
4 | params = defaultdict(int) | 78 | params = defaultdict(int) |
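For reference, a minimal sketch of how the recordtype() factory added above behaves (Python 2 only, since the factory relies on the exec statement; the TaskStats name and its fields are made up for illustration):

    from common import recordtype

    TaskStats = recordtype('TaskStats', ['jobs', 'misses'])  # every field defaults to 0
    t = TaskStats(jobs=10)        # misses falls back to the default of 0
    t.misses = 2                  # unlike namedtuple, fields are mutable
    print(t)                      # TaskStats(jobs=10, misses=2)
    print(t.todict())             # {'jobs': 10, 'misses': 2} (key order may vary)
    print(t == TaskStats(10, 2))  # True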
diff --git a/config/config.example.py b/config/config.example.py
index 50d30ba..9f24097 100644
--- a/config/config.example.py
+++ b/config/config.example.py
@@ -3,56 +3,43 @@ import os
3 | import sys | 3 | import sys |
4 | import itertools | 4 | import itertools |
5 | 5 | ||
6 | """ | 6 | ''' |
7 | These are paths to repository directories. | 7 | These are paths to repository directories. |
8 | 8 | ||
9 | """ | 9 | ''' |
10 | REPOS = {'liblitmus' : '/home/hermanjl/git/liblitmus', | 10 | REPOS = {'liblitmus' : '/home/hermanjl/git/liblitmus', |
11 | 'sched_trace' : '/home/hermanjl/git/sched_trace', | 11 | 'sched_trace' : '/home/hermanjl/git/sched_trace', |
12 | 'analysis' : '/home/hermanjl/git/overhead-analysis-cjk', | ||
13 | 'ft_tools' : '/home/hermanjl/git/feather-trace-tools', | 12 | 'ft_tools' : '/home/hermanjl/git/feather-trace-tools', |
14 | 'trace-cmd' : '/home/hermanjl/git/trace-cmd'} | 13 | 'trace-cmd' : '/home/hermanjl/git/trace-cmd'} |
15 | 14 | ||
16 | BINS = {'bespin' : '{}/bespin'.format(REPOS['liblitmus']), | 15 | BINS = {'rtspin' : '{}/rtspin'.format(REPOS['liblitmus']), |
17 | 'colorspin' : '{}/colorspin'.format(REPOS['liblitmus']), | ||
18 | 'rtspin' : '{}/rtspin'.format(REPOS['liblitmus']), | ||
19 | 'release' : '{}/release_ts'.format(REPOS['liblitmus']), | 16 | 'release' : '{}/release_ts'.format(REPOS['liblitmus']), |
20 | 'ftcat' : '{}/ftcat'.format(REPOS['ft_tools']), | 17 | 'ftcat' : '{}/ftcat'.format(REPOS['ft_tools']), |
18 | 'ftsplit' : '{}/ft2csv'.format(REPOS['ft_tools']), | ||
19 | 'ftsort' : '{}/ftsort'.format(REPOS['ft_tools']), | ||
21 | 'st_trace' : '{}/st_trace'.format(REPOS['ft_tools']), | 20 | 'st_trace' : '{}/st_trace'.format(REPOS['ft_tools']), |
22 | 'split' : '{}/split'.format(REPOS['analysis']), | ||
23 | 'sort' : '{}/sort-all'.format(REPOS['analysis']), | ||
24 | 'analyze' : '{}/analyze'.format(REPOS['analysis']), | ||
25 | 'trace-cmd' : '{}/trace-cmd'.format(REPOS['trace-cmd']), | 21 | 'trace-cmd' : '{}/trace-cmd'.format(REPOS['trace-cmd']), |
26 | 'st_show' : '{}/st_show'.format(REPOS['sched_trace'])} | 22 | 'st_show' : '{}/st_show'.format(REPOS['sched_trace'])} |
27 | 23 | ||
28 | DEFAULTS = {'params_file' : 'params.py', | 24 | DEFAULTS = {'params_file' : 'params.py', |
29 | 'sched_file' : 'sched.py', | 25 | 'sched_file' : 'sched.py', |
30 | 'exps_file' : 'exps.py', | 26 | 'exps_file' : 'exps.py', |
31 | 'duration' : '10', | 27 | 'duration' : 10, |
32 | 'spin' : 'rtspin'} | 28 | 'spin' : 'rtspin', |
29 | 'cycles' : 2000} | ||
33 | 30 | ||
34 | FILES = {'ft_data' : 'ft.bin', | 31 | FILES = {'ft_data' : 'ft.bin', |
35 | 'linux_data' : 'trace.dat', | 32 | 'linux_data' : 'trace.dat', |
36 | 'sched_data' : 'st-{}.bin', | 33 | 'sched_data' : 'st-{}.bin', |
37 | 'log_data' : 'trace.slog',} | 34 | 'log_data' : 'trace.slog',} |
38 | 35 | ||
39 | PARAMS = {'sched' : 'scheduler', | 36 | PARAMS = {'sched' : 'scheduler', |
40 | 'dur' : 'duration', | 37 | 'dur' : 'duration', |
41 | 'kernel' : 'uname'} | 38 | 'kernel': 'uname', |
39 | 'cycles' : 'cpu-frequency'} | ||
42 | 40 | ||
43 | SCHED_EVENTS = range(501, 513) | 41 | SCHED_EVENTS = range(501, 513) |
44 | BASE_EVENTS = ['SCHED', 'RELEASE', 'SCHED2', 'TICK', 'CXS', 'SEND_RESCHED'] | 42 | BASE_EVENTS = ['SCHED', 'RELEASE', 'SCHED2', 'TICK', 'CXS'] |
45 | BASE_EVENTS += ['CQ_ENQUEUE_READ', 'CQ_ENQUEUE_FLUSH', 'CQ_SUBMIT_WORK', | ||
46 | 'CQ_LOOP_WORK_CHECK', 'CQ_LOOP_PEACE_OUT', 'CQ_LOOP_BRANCH', | ||
47 | 'CQ_WORK_DO_WORK', 'CQ_WORK_NOTIFY', 'CQ_PHASE_WAIT'] | ||
48 | |||
49 | # Expand for mixed-crit | ||
50 | # TODO don't use split | ||
51 | CRIT_EVENTS = ['LVL{}_SCHED', 'LVL{}_RELEASE'] | ||
52 | CRIT_LEVELS = ['A', 'B', 'C'] | ||
53 | BASE_EVENTS += [s.format(l) for (l,s) in | ||
54 | itertools.product(CRIT_LEVELS, CRIT_EVENTS)] | ||
55 | |||
56 | ALL_EVENTS = ["%s_%s" % (e, t) for (e,t) in | 43 | ALL_EVENTS = ["%s_%s" % (e, t) for (e,t) in |
57 | itertools.product(BASE_EVENTS, ["START","END"])] | 44 | itertools.product(BASE_EVENTS, ["START","END"])] |
58 | ALL_EVENTS += ['RELEASE_LATENCY'] | 45 | ALL_EVENTS += ['RELEASE_LATENCY'] |
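With the trimmed BASE_EVENTS list above, the event-name expansion works out as in this small illustration:

    import itertools

    BASE_EVENTS = ['SCHED', 'RELEASE', 'SCHED2', 'TICK', 'CXS']
    ALL_EVENTS = ["%s_%s" % (e, t) for (e, t) in
                  itertools.product(BASE_EVENTS, ["START", "END"])]
    ALL_EVENTS += ['RELEASE_LATENCY']
    print(ALL_EVENTS[:4])   # ['SCHED_START', 'SCHED_END', 'RELEASE_START', 'RELEASE_END']
    print(len(ALL_EVENTS))  # 11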
diff --git a/experiment/executable/executable.py b/experiment/executable/executable.py
index b964699..628f711 100644
--- a/experiment/executable/executable.py
+++ b/experiment/executable/executable.py
@@ -4,7 +4,7 @@ import signal
4 | from ..litmus_util import is_executable | 4 | from ..litmus_util import is_executable |
5 | 5 | ||
6 | class Executable(object): | 6 | class Executable(object): |
7 | """Parent object that represents an executable for use in task-sets.""" | 7 | '''Parent object that represents an executable for use in task-sets.''' |
8 | 8 | ||
9 | def __init__(self, exec_file, extra_args=None, stdout_file = None, stderr_file = None): | 9 | def __init__(self, exec_file, extra_args=None, stdout_file = None, stderr_file = None): |
10 | self.exec_file = exec_file | 10 | self.exec_file = exec_file |
@@ -47,7 +47,7 @@ class Executable(object):
47 | return " ".join(self.__get_full_command()) | 47 | return " ".join(self.__get_full_command()) |
48 | 48 | ||
49 | def execute(self): | 49 | def execute(self): |
50 | """Execute the binary.""" | 50 | '''Execute the binary.''' |
51 | full_command = self.__get_full_command() | 51 | full_command = self.__get_full_command() |
52 | self.sp = subprocess.Popen(full_command, stdout=self.stdout_file, | 52 | self.sp = subprocess.Popen(full_command, stdout=self.stdout_file, |
53 | stderr=self.stderr_file, cwd=self.cwd) | 53 | stderr=self.stderr_file, cwd=self.cwd) |
@@ -59,15 +59,15 @@ class Executable(object):
59 | self.sp.send_signal(signal.SIGINT) | 59 | self.sp.send_signal(signal.SIGINT) |
60 | 60 | ||
61 | def terminate(self): | 61 | def terminate(self): |
62 | """Send the terminate signal to the binary.""" | 62 | '''Send the terminate signal to the binary.''' |
63 | self.sp.terminate() | 63 | self.sp.terminate() |
64 | 64 | ||
65 | def wait(self): | 65 | def wait(self): |
66 | """Wait until the executable is finished, checking return code. | 66 | '''Wait until the executable is finished, checking return code. |
67 | 67 | ||
68 | If the exit status is non-zero, raise an exception. | 68 | If the exit status is non-zero, raise an exception. |
69 | 69 | ||
70 | """ | 70 | ''' |
71 | 71 | ||
72 | self.sp.wait() | 72 | self.sp.wait() |
73 | if self.sp.returncode != 0: | 73 | if self.sp.returncode != 0: |
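As a rough usage sketch (the binary, its arguments, and the import path below are assumptions, not part of this diff), the wrapper is driven like this:

    from experiment.executable.executable import Executable

    e = Executable('/bin/sleep', extra_args=['2'])
    e.execute()   # spawns the binary via subprocess.Popen
    e.wait()      # blocks until it exits; raises if the return code is non-zero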
diff --git a/experiment/executable/ftcat.py b/experiment/executable/ftcat.py
index 9966312..5da8fa7 100644
--- a/experiment/executable/ftcat.py
+++ b/experiment/executable/ftcat.py
@@ -4,10 +4,10 @@ import stat
4 | from executable import Executable | 4 | from executable import Executable |
5 | 5 | ||
6 | class FTcat(Executable): | 6 | class FTcat(Executable): |
7 | """Used to wrap the ftcat binary in the Experiment object.""" | 7 | '''Used to wrap the ftcat binary in the Experiment object.''' |
8 | 8 | ||
9 | def __init__(self, ft_cat_bin, stdout_file, stderr_file, dev, events, cpu=None): | 9 | def __init__(self, ft_cat_bin, stdout_file, stderr_file, dev, events, cpu=None): |
10 | """Extends the Executable initializer method with ftcat attributes.""" | 10 | '''Extends the Executable initializer method with ftcat attributes.''' |
11 | 11 | ||
12 | # hack to run FTCat at higher priority | 12 | # hack to run FTCat at higher priority |
13 | chrt_bin = '/usr/bin/chrt' | 13 | chrt_bin = '/usr/bin/chrt' |
diff --git a/experiment/experiment.py b/experiment/experiment.py
index deb4ff2..4bd47c6 100644
--- a/experiment/experiment.py
+++ b/experiment/experiment.py
@@ -5,19 +5,19 @@ from operator import methodcaller
5 | from tracer import SchedTracer, LogTracer, PerfTracer, LinuxTracer, OverheadTracer | 5 | from tracer import SchedTracer, LogTracer, PerfTracer, LinuxTracer, OverheadTracer |
6 | 6 | ||
7 | class ExperimentException(Exception): | 7 | class ExperimentException(Exception): |
8 | """Used to indicate when there are problems with an experiment.""" | 8 | '''Used to indicate when there are problems with an experiment.''' |
9 | def __init__(self, name): | 9 | def __init__(self, name): |
10 | self.name = name | 10 | self.name = name |
11 | 11 | ||
12 | 12 | ||
13 | class ExperimentDone(ExperimentException): | 13 | class ExperimentDone(ExperimentException): |
14 | """Raised when an experiment looks like it's been run already.""" | 14 | '''Raised when an experiment looks like it's been run already.''' |
15 | def __str__(self): | 15 | def __str__(self): |
16 | return "Experiment finished already: %d" % self.name | 16 | return "Experiment finished already: %d" % self.name |
17 | 17 | ||
18 | 18 | ||
19 | class ExperimentInterrupted(ExperimentException): | 19 | class ExperimentInterrupted(ExperimentException): |
20 | """Raised when an experiment appears to be interrupted (partial results).""" | 20 | '''Raised when an experiment appears to be interrupted (partial results).''' |
21 | def __str__(self): | 21 | def __str__(self): |
22 | return "Experiment was interrupted in progress: %d" % self.name | 22 | return "Experiment was interrupted in progress: %d" % self.name |
23 | 23 | ||
@@ -28,11 +28,11 @@ class ExperimentFailed(ExperimentException):
28 | 28 | ||
29 | 29 | ||
30 | class Experiment(object): | 30 | class Experiment(object): |
31 | """Execute one task-set and save the results. Experiments have unique IDs.""" | 31 | '''Execute one task-set and save the results. Experiments have unique IDs.''' |
32 | INTERRUPTED_DIR = ".interrupted" | 32 | INTERRUPTED_DIR = ".interrupted" |
33 | 33 | ||
34 | def __init__(self, name, scheduler, working_dir, finished_dir, proc_entries, executables): | 34 | def __init__(self, name, scheduler, working_dir, finished_dir, proc_entries, executables): |
35 | """Run an experiment, optionally wrapped in tracing.""" | 35 | '''Run an experiment, optionally wrapped in tracing.''' |
36 | 36 | ||
37 | self.name = name | 37 | self.name = name |
38 | self.scheduler = scheduler | 38 | self.scheduler = scheduler |
diff --git a/experiment/litmus_util.py b/experiment/litmus_util.py
index 42d3e5f..fb2b341 100644
--- a/experiment/litmus_util.py
+++ b/experiment/litmus_util.py
@@ -6,7 +6,7 @@ import stat
6 | import config.config as conf | 6 | import config.config as conf |
7 | 7 | ||
8 | def num_cpus(): | 8 | def num_cpus(): |
9 | """Return the number of CPUs in the system.""" | 9 | '''Return the number of CPUs in the system.''' |
10 | 10 | ||
11 | lnx_re = re.compile(r'^(processor|online)') | 11 | lnx_re = re.compile(r'^(processor|online)') |
12 | cpus = 0 | 12 | cpus = 0 |
@@ -18,9 +18,9 @@ def num_cpus():
18 | return cpus | 18 | return cpus |
19 | 19 | ||
20 | def cpu_freq(): | 20 | def cpu_freq(): |
21 | """ | 21 | ''' |
22 | The frequency (in MHz) of the CPU. | 22 | The frequency (in MHz) of the CPU. |
23 | """ | 23 | ''' |
24 | reg = re.compile(r'^cpu MHz\s*:\s*(\d+)', re.M) | 24 | reg = re.compile(r'^cpu MHz\s*:\s*(\d+)', re.M) |
25 | with open('/proc/cpuinfo', 'r') as f: | 25 | with open('/proc/cpuinfo', 'r') as f: |
26 | data = f.read() | 26 | data = f.read() |
@@ -31,12 +31,12 @@ def cpu_freq():
31 | return int(match.group(1)) | 31 | return int(match.group(1)) |
32 | 32 | ||
33 | def switch_scheduler(switch_to_in): | 33 | def switch_scheduler(switch_to_in): |
34 | """Switch the scheduler to whatever is passed in. | 34 | '''Switch the scheduler to whatever is passed in. |
35 | 35 | ||
36 | This methods sleeps for two seconds to give Linux the chance to execute | 36 | This methods sleeps for two seconds to give Linux the chance to execute |
37 | schedule switching code. Raises an exception if the switch does not work. | 37 | schedule switching code. Raises an exception if the switch does not work. |
38 | 38 | ||
39 | """ | 39 | ''' |
40 | 40 | ||
41 | switch_to = str(switch_to_in).strip() | 41 | switch_to = str(switch_to_in).strip() |
42 | 42 | ||
@@ -57,7 +57,7 @@ def uname_matches(reg):
57 | return bool( re.match(reg, data) ) | 57 | return bool( re.match(reg, data) ) |
58 | 58 | ||
59 | def is_executable(fname): | 59 | def is_executable(fname): |
60 | """Return whether the file passed in is executable""" | 60 | '''Return whether the file passed in is executable''' |
61 | mode = os.stat(fname)[stat.ST_MODE] | 61 | mode = os.stat(fname)[stat.ST_MODE] |
62 | return mode & stat.S_IXUSR and mode & stat.S_IRUSR | 62 | return mode & stat.S_IXUSR and mode & stat.S_IRUSR |
63 | 63 | ||
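For illustration, the regular expression in cpu_freq() pulls the whole-MHz value out of /proc/cpuinfo lines like the made-up sample below:

    import re

    reg = re.compile(r'^cpu MHz\s*:\s*(\d+)', re.M)
    sample = "processor\t: 0\ncpu MHz\t\t: 2394.132\nflags\t\t: fpu vme\n"
    print(reg.findall(sample))  # ['2394']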
diff --git a/parse/ft.py b/parse/ft.py
index c5f1522..fea246a 100644
--- a/parse/ft.py
+++ b/parse/ft.py
@@ -11,27 +11,8 @@ FT_SPLIT_NAME = "overhead={}.bin"
11 | FT_SORTED_NAME = "sorted-ft.bin" | 11 | FT_SORTED_NAME = "sorted-ft.bin" |
12 | FT_ERR_NAME = "err-ft" | 12 | FT_ERR_NAME = "err-ft" |
13 | 13 | ||
14 | def extract_ft_data(result, data_dir, cycles, tmp_dir): | ||
15 | freg = conf.FILES['ft_data'] + "$" | ||
16 | bins = [f for f in os.listdir(data_dir) if re.match(freg, f)] | ||
17 | |||
18 | if not len(bins): | ||
19 | return False | ||
20 | |||
21 | bin_file = "{}/{}".format(data_dir, bins[0]) | ||
22 | |||
23 | with open("%s/%s" % (tmp_dir, FT_ERR_NAME), 'w') as err_file: | ||
24 | sorted_bin = sort_ft(bin_file, err_file, tmp_dir) | ||
25 | |||
26 | for event in conf.BASE_EVENTS: | ||
27 | parse_overhead(result, sorted_bin, event, cycles, | ||
28 | tmp_dir, err_file) | ||
29 | |||
30 | os.remove(sorted_bin) | ||
31 | |||
32 | return True | ||
33 | |||
34 | def parse_overhead(result, overhead_bin, overhead, cycles, out_dir, err_file): | 14 | def parse_overhead(result, overhead_bin, overhead, cycles, out_dir, err_file): |
15 | '''Store statistics for @overhead in @overhead_bin into @result.''' | ||
35 | ovh_fname = "{}/{}".format(out_dir, FT_SPLIT_NAME).format(overhead) | 16 | ovh_fname = "{}/{}".format(out_dir, FT_SPLIT_NAME).format(overhead) |
36 | 17 | ||
37 | if os.path.exists(ovh_fname): | 18 | if os.path.exists(ovh_fname): |
@@ -39,7 +20,7 @@ def parse_overhead(result, overhead_bin, overhead, cycles, out_dir, err_file): | |||
39 | ovh_file = open(ovh_fname, 'w') | 20 | ovh_file = open(ovh_fname, 'w') |
40 | 21 | ||
41 | # Extract matching overhead events into a seperate file | 22 | # Extract matching overhead events into a seperate file |
42 | cmd = [conf.BINS["split"], "-r", "-b", overhead, overhead_bin] | 23 | cmd = [conf.BINS["ftsplit"], "-r", "-b", overhead, overhead_bin] |
43 | ret = subprocess.call(cmd, cwd=out_dir, stderr=err_file, stdout=ovh_file) | 24 | ret = subprocess.call(cmd, cwd=out_dir, stderr=err_file, stdout=ovh_file) |
44 | size = os.stat(ovh_fname).st_size | 25 | size = os.stat(ovh_fname).st_size |
45 | 26 | ||
@@ -65,9 +46,7 @@ def parse_overhead(result, overhead_bin, overhead, cycles, out_dir, err_file): | |||
65 | os.remove(ovh_fname) | 46 | os.remove(ovh_fname) |
66 | 47 | ||
67 | def sort_ft(ft_file, err_file, out_dir): | 48 | def sort_ft(ft_file, err_file, out_dir): |
68 | """ | 49 | '''Create and return file with sorted overheads from @ft_file.''' |
69 | Create and return file with sorted overheads from @ft_file. | ||
70 | """ | ||
71 | out_fname = "{}/{}".format(out_dir, FT_SORTED_NAME) | 50 | out_fname = "{}/{}".format(out_dir, FT_SORTED_NAME) |
72 | 51 | ||
73 | # Sort happens in-place | 52 | # Sort happens in-place |
@@ -79,3 +58,23 @@
79 | raise Exception("Sort failed with command: %s" % " ".join(cmd)) | 58 | raise Exception("Sort failed with command: %s" % " ".join(cmd)) |
80 | 59 | ||
81 | return out_fname | 60 | return out_fname |
61 | |||
62 | def extract_ft_data(result, data_dir, work_dir, cycles): | ||
63 | freg = conf.FILES['ft_data'] + "$" | ||
64 | bins = [f for f in os.listdir(data_dir) if re.match(freg, f)] | ||
65 | |||
66 | if not len(bins): | ||
67 | return False | ||
68 | |||
69 | bin_file = "{}/{}".format(data_dir, bins[0]) | ||
70 | |||
71 | with open("%s/%s" % (work_dir, FT_ERR_NAME), 'w') as err_file: | ||
72 | sorted_bin = sort_ft(bin_file, err_file, work_dir) | ||
73 | |||
74 | for event in conf.BASE_EVENTS: | ||
75 | parse_overhead(result, sorted_bin, event, cycles, | ||
76 | work_dir, err_file) | ||
77 | |||
78 | os.remove(sorted_bin) | ||
79 | |||
80 | return True | ||
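For context, this is roughly the command line that parse_overhead() builds for a single event once ftsort has produced the sorted trace; the paths and the CXS event below are illustrative only:

    import subprocess

    # conf.BINS['ftsplit'] resolves to feather-trace-tools' ft2csv
    ftsplit = '/home/hermanjl/git/feather-trace-tools/ft2csv'
    with open('tmp/overhead=CXS.bin', 'w') as ovh, open('tmp/err-ft', 'w') as err:
        subprocess.call([ftsplit, '-r', '-b', 'CXS', 'tmp/sorted-ft.bin'],
                        stdout=ovh, stderr=err)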
diff --git a/parse/point.py b/parse/point.py
index d5f4a5e..8e27869 100644
--- a/parse/point.py
+++ b/parse/point.py
@@ -1,6 +1,6 @@
1 | """ | 1 | ''' |
2 | Too much duplicate code in this file | 2 | Too much duplicate code in this file |
3 | """ | 3 | ''' |
4 | 4 | ||
5 | import copy | 5 | import copy |
6 | import numpy as np | 6 | import numpy as np |
diff --git a/parse/sched.py b/parse/sched.py
index 3e30880..ffc6224 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -1,306 +1,183 @@
1 | """ | ||
2 | TODO: No longer very pythonic, lot of duplicate code | ||
3 | print out task execution times or something | ||
4 | get miss ratio and tardiness directly from schedule OR | ||
5 | email list about turning on optional summary statistics OR | ||
6 | set up run exps to only get release and completions to get these stats | ||
7 | """ | ||
8 | |||
9 | import config.config as conf | 1 | import config.config as conf |
10 | import os | 2 | import os |
11 | import re | 3 | import re |
12 | import numpy as np | 4 | import struct |
13 | import subprocess | 5 | import subprocess |
14 | 6 | ||
15 | from collections import namedtuple,defaultdict | 7 | from collections import defaultdict,namedtuple |
16 | from operator import methodcaller | 8 | from common import recordtype |
17 | from point import Measurement,Type | 9 | from point import Measurement |
18 | |||
19 | PARAM_RECORD = r"(?P<RECORD>" +\ | ||
20 | r"PARAM *?(?P<PID>\d+)\/.*?" +\ | ||
21 | r"cost.*?(?P<WCET>[\d\.]+)ms.*?" +\ | ||
22 | r"period.*?(?P<PERIOD>[\d.]+)ms.*?" +\ | ||
23 | r"part.*?(?P<CPU>\d+)[, ]*" +\ | ||
24 | r"(?:class=(?P<CLASS>\w+))?[, ]*" +\ | ||
25 | r"(?:level=(?P<LEVEL>\w+))?).*$" | ||
26 | EXIT_RECORD = r"(?P<RECORD>" +\ | ||
27 | r"TASK_EXIT *?(?P<PID>\d+)/.*?" +\ | ||
28 | r"Avg.*?(?P<AVG>\d+).*?" +\ | ||
29 | r"Max.*?(?P<MAX>\d+))" | ||
30 | TARDY_RECORD = r"(?P<RECORD>" +\ | ||
31 | r"TASK_TARDY.*?(?P<PID>\d+)/(?P<JOB>\d+).*?" +\ | ||
32 | r"Tot.*?(?P<TOTAL>[\d\.]+).*?ms.*?" +\ | ||
33 | r"(?P<MAX>[\d\.]+).*?ms.*?" +\ | ||
34 | r"(?P<MISSES>[\d\.]+))" | ||
35 | COMPLETION_RECORD = r"(?P<RECORD>" +\ | ||
36 | r"COMPLETION.*?(?P<PID>\d+)/.*?" +\ | ||
37 | r"exec:.*?(?P<EXEC>[\d\.]+)ms.*?" +\ | ||
38 | r"flush:.*?(?P<FLUSH>[\d\.]+)ms.*?" +\ | ||
39 | r"flush_work:.*?(?P<FLUSH_WORK>[\d]+).*?" +\ | ||
40 | r"load:.*?(?P<LOAD>[\d\.]+)ms.*?" +\ | ||
41 | r"load_work:.*?(?P<LOAD_WORK>[\d]+))" | ||
42 | |||
43 | TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period','type','level']) | ||
44 | Task = namedtuple('Task', ['pid', 'config', 'run']) | ||
45 | |||
46 | class LeveledArray(object): | ||
47 | """ | ||
48 | Groups statistics by the level of the task to which they apply | ||
49 | """ | ||
50 | def __init__(self, name): | ||
51 | self.name = name | ||
52 | self.vals = defaultdict(lambda:[]) | ||
53 | |||
54 | def add(self, task, value): | ||
55 | self.vals[task.config.level] += [value] | ||
56 | |||
57 | |||
58 | def write_measurements(self, result): | ||
59 | for level, arr in self.vals.iteritems(): | ||
60 | name = "%s%s" % ("%s-" % level if level else "", self.name) | ||
61 | result[name] = Measurement(name).from_array(arr) | ||
62 | |||
63 | def get_st_output(data_dir, out_dir, force=False): | ||
64 | """ | ||
65 | Create and return files containing unpacked sched data | ||
66 | """ | ||
67 | bin_files = conf.FILES['sched_data'].format(".*") | ||
68 | bins = [f for f in os.listdir(data_dir) if re.match(bin_files, f)] | ||
69 | 10 | ||
70 | output_file = "%s/out-st" % out_dir | 11 | class TimeTracker: |
71 | 12 | '''Store stats for durations of time demarcated by sched_trace records.''' | |
72 | if os.path.isfile(output_file): | 13 | def __init__(self): |
73 | if force: | 14 | self.begin = self.avg = self.max = self.num = self.job = 0 |
74 | os.remove(output_file) | ||
75 | else: | ||
76 | return output_file | ||
77 | |||
78 | if len(bins) != 0: | ||
79 | cmd_arr = [conf.BINS['st_show']] | ||
80 | cmd_arr.extend(bins) | ||
81 | with open(output_file, "w") as f: | ||
82 | subprocess.call(cmd_arr, cwd=data_dir, stdout=f) | ||
83 | else: | ||
84 | return None | ||
85 | return output_file | ||
86 | |||
87 | def get_tasks(data): | ||
88 | ret = [] | ||
89 | for match in re.finditer(PARAM_RECORD, data, re.M): | ||
90 | try: | ||
91 | t = Task( int(match.group('PID')), | ||
92 | TaskConfig( int(match.group('CPU')), | ||
93 | float(match.group('WCET')), | ||
94 | float(match.group('PERIOD')), | ||
95 | match.group("CLASS"), | ||
96 | match.group("LEVEL")), []) | ||
97 | if not (t.config.period and t.pid): | ||
98 | raise Exception() | ||
99 | ret += [t] | ||
100 | except Exception as e: | ||
101 | raise Exception("Invalid task record: %s\nparsed:\n\t%s\n\t%s" % | ||
102 | (e, match.groupdict(), match.group('RECORD'))) | ||
103 | return ret | ||
104 | |||
105 | def get_task_dict(data): | ||
106 | tasks_list = get_tasks(data) | ||
107 | tasks_dict = {} | ||
108 | for t in tasks_list: | ||
109 | tasks_dict[t.pid] = t | ||
110 | return tasks_dict | ||
111 | |||
112 | def get_task_exits(data): | ||
113 | ret = [] | ||
114 | for match in re.finditer(EXIT_RECORD, data): | ||
115 | try: | ||
116 | m = Measurement( int(match.group('PID')), | ||
117 | {Type.Max : float(match.group('MAX')), | ||
118 | Type.Avg : float(match.group('AVG'))}) | ||
119 | except: | ||
120 | raise Exception("Invalid exit record, parsed:\n\t%s\n\t%s" % | ||
121 | (match.groupdict(), match.group('RECORD'))) | ||
122 | 15 | ||
123 | ret += [m] | 16 | def store_time(self, record): |
124 | return ret | 17 | '''End duration of time.''' |
18 | dur = record.when - self.begin | ||
125 | 19 | ||
20 | if self.job == record.job and dur > 0: | ||
21 | self.max = max(self.max, dur) | ||
22 | self.avg *= float(self.num / (self.num + 1)) | ||
23 | self.num += 1 | ||
24 | self.avg += dur / float(self.num) | ||
126 | 25 | ||
127 | def extract_tardy_vals(task_dict, data, exp_point): | 26 | self.begin = 0 |
128 | ratios = LeveledArray("miss-ratio") | 27 | self.job = 0 |
129 | avg_tards = LeveledArray("avg-rel-tardiness") | 28 | |
130 | max_tards = LeveledArray("max-rel-tardiness") | 29 | def start_time(self, record): |
30 | '''Start duration of time.''' | ||
31 | self.begin = record.when | ||
32 | self.job = record.job | ||
33 | |||
34 | # Data stored for each task | ||
35 | TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu']) | ||
36 | TaskData = recordtype('TaskData', ['params', 'jobs', 'blocks', 'misses']) | ||
37 | |||
38 | # Map of event ids to corresponding class, binary format, and processing methods | ||
39 | RecordInfo = namedtuple('RecordInfo', ['clazz', 'fmt', 'method']) | ||
40 | record_map = [0]*10 | ||
41 | |||
42 | # Common to all records | ||
43 | HEADER_FORMAT = '<bbhi' | ||
44 | HEADER_FIELDS = ['type', 'cpu', 'pid', 'job'] | ||
45 | RECORD_SIZE = 24 | ||
46 | |||
47 | NSEC_PER_MSEC = 1000000 | ||
48 | |||
49 | def register_record(name, id, method, fmt, fields): | ||
50 | '''Create record description from @fmt and @fields and map to @id, using | ||
51 | @method to process parsed record.''' | ||
52 | # Format of binary data (see python struct documentation) | ||
53 | rec_fmt = HEADER_FORMAT + fmt | ||
54 | |||
55 | # Corresponding field data | ||
56 | rec_fields = HEADER_FIELDS + fields | ||
57 | if "when" not in rec_fields: # Force a "when" field for everything | ||
58 | rec_fields += ["when"] | ||
59 | |||
60 | # Create mutable class with the given fields | ||
61 | field_class = recordtype(name, list(rec_fields)) | ||
62 | clazz = type(name, (field_class, object), {}) | ||
63 | |||
64 | record_map[id] = RecordInfo(clazz, rec_fmt, method) | ||
65 | |||
66 | def make_iterator(fname): | ||
67 | '''Iterate over (parsed record, processing method) in a | ||
68 | sched-trace file.''' | ||
69 | f = open(fname, 'rb') | ||
70 | max_type = len(record_map) | ||
71 | |||
72 | while True: | ||
73 | data = f.read(RECORD_SIZE) | ||
131 | 74 | ||
132 | for match in re.finditer(TARDY_RECORD, data): | ||
133 | try: | ||
134 | pid = int(match.group("PID")) | ||
135 | jobs = int(match.group("JOB")) | ||
136 | misses = int(match.group("MISSES")) | ||
137 | total_tard = float(match.group("TOTAL")) | ||
138 | max_tard = float(match.group("MAX")) | ||
139 | |||
140 | if not (jobs and pid): raise Exception() | ||
141 | except: | ||
142 | raise Exception("Invalid tardy record:\n\t%s\n\t%s" % | ||
143 | (match.groupdict(), match.group("RECORD"))) | ||
144 | |||
145 | if pid not in task_dict: | ||
146 | raise Exception("Invalid pid '%d' in tardy record:\n\t%s" % | ||
147 | (pid, match.group("RECORD"))) | ||
148 | |||
149 | t = task_dict[pid] | ||
150 | avg_tards.add(t, total_tard / (jobs * t.config.period)) | ||
151 | max_tards.add(t, max_tard / t.config.period) | ||
152 | ratios.add(t, misses / jobs) | ||
153 | |||
154 | map(methodcaller('write_measurements', exp_point), | ||
155 | [ratios, avg_tards, max_tards]) | ||
156 | |||
157 | # TODO: rename | ||
158 | def extract_variance(task_dict, data, exp_point): | ||
159 | varz = LeveledArray("exec-variance") | ||
160 | flushes = LeveledArray("cache-flush") | ||
161 | loads = LeveledArray("cache-load") | ||
162 | fworks = LeveledArray("flush-work") | ||
163 | lworks = LeveledArray("load-work") | ||
164 | |||
165 | completions = defaultdict(lambda: []) | ||
166 | missed = defaultdict(lambda: int()) | ||
167 | |||
168 | for match in re.finditer(COMPLETION_RECORD, data): | ||
169 | try: | 75 | try: |
170 | pid = int(match.group("PID")) | 76 | type_num = struct.unpack_from('b',data)[0] |
171 | duration = float(match.group("EXEC")) | 77 | except struct.error: |
172 | load = float(match.group("LOAD")) | 78 | break |
173 | flush = float(match.group("FLUSH")) | 79 | |
174 | lwork = int(match.group("LOAD_WORK")) | 80 | rdata = record_map[type_num] if type_num <= max_type else 0 |
175 | fwork = int(match.group("FLUSH_WORK")) | 81 | if not rdata: |
176 | |||
177 | if load: | ||
178 | loads.add(task_dict[pid], load) | ||
179 | lworks.add(task_dict[pid], lwork) | ||
180 | if not lwork: raise Exception() | ||
181 | if flush: | ||
182 | flushes.add(task_dict[pid], flush) | ||
183 | fworks.add(task_dict[pid], fwork) | ||
184 | if not fwork: raise Exception() | ||
185 | |||
186 | # Last (exit) record often has exec time of 0 | ||
187 | missed[pid] += not bool(duration) | ||
188 | |||
189 | if missed[pid] > 1 or not pid: #TODO: fix, raise Exception() | ||
190 | continue | ||
191 | except: | ||
192 | raise Exception("Invalid completion record, missed: %d:" | ||
193 | "\n\t%s\n\t%s" % (missed[pid], match.groupdict(), | ||
194 | match.group("RECORD"))) | ||
195 | completions[pid] += [duration] | ||
196 | |||
197 | for pid, durations in completions.iteritems(): | ||
198 | m = Measurement(pid).from_array(durations) | ||
199 | |||
200 | # TODO: not this, please | ||
201 | if not task_dict[pid].run: | ||
202 | task_dict[pid].run.append(m) | ||
203 | |||
204 | job_times = np.array(durations) | ||
205 | mean = job_times.mean() | ||
206 | |||
207 | if not mean or not durations: | ||
208 | continue | 82 | continue |
209 | 83 | ||
210 | # Coefficient of variation | 84 | try: |
211 | cv = job_times.std() / job_times.mean() | 85 | values = struct.unpack_from(rdata.fmt, data) |
212 | # Correction, assuming normal distributions | 86 | except struct.error: |
213 | corrected = (1 + 1/(4 * len(job_times))) * cv | ||
214 | |||
215 | varz.add(task_dict[pid], corrected) | ||
216 | # varz.add(task_dict[pid], m[Type.Var]) | ||
217 | |||
218 | if exp_point: | ||
219 | map(methodcaller('write_measurements', exp_point), | ||
220 | [varz, flushes, loads, fworks, lworks]) | ||
221 | |||
222 | def config_exit_stats(task_dict, data): | ||
223 | # # Dictionary of task exit measurements by pid | ||
224 | # exits = get_task_exits(data) | ||
225 | # exit_dict = dict((e.id, e) for e in exits) | ||
226 | extract_variance(task_dict, data, None) | ||
227 | |||
228 | # Dictionary where keys are configurations, values are list | ||
229 | # of tasks with those configuratino | ||
230 | config_dict = defaultdict(lambda: []) | ||
231 | for t in task_dict.itervalues(): | ||
232 | config_dict[t.config] += [t] | ||
233 | |||
234 | for config in config_dict: | ||
235 | task_list = sorted(config_dict[config]) | ||
236 | |||
237 | # # Replace tasks with corresponding exit stats | ||
238 | # if not t.pid in exit_dict: | ||
239 | # raise Exception("Missing exit record for task '%s' in '%s'" % | ||
240 | # (t, file.name)) | ||
241 | # exit_list = [exit_dict[t.pid] for t in task_list] | ||
242 | exit_list = [t.run[0] for t in task_list] | ||
243 | config_dict[config] = exit_list | ||
244 | |||
245 | return config_dict | ||
246 | |||
247 | saved_stats = {} | ||
248 | def get_base_stats(base_file): | ||
249 | if base_file in saved_stats: | ||
250 | return saved_stats[base_file] | ||
251 | with open(base_file, 'r') as f: | ||
252 | data = f.read() | ||
253 | task_dict = get_task_dict(data) | ||
254 | |||
255 | result = config_exit_stats(task_dict, data) | ||
256 | saved_stats[base_file] = result | ||
257 | return result | ||
258 | |||
259 | def extract_scaling_data(task_dict, data, result, base_file): | ||
260 | # Generate trees of tasks with matching configurations | ||
261 | data_stats = config_exit_stats(task_dict, data) | ||
262 | base_stats = get_base_stats(base_file) | ||
263 | |||
264 | # Scaling factors are calculated by matching groups of tasks with the same | ||
265 | # config, then comparing task-to-task exec times in order of PID within | ||
266 | # each group | ||
267 | max_scales = LeveledArray("max-scaling") | ||
268 | avg_scales = LeveledArray("avg-scaling") | ||
269 | |||
270 | for config in data_stats: | ||
271 | if len(data_stats[config]) != len(base_stats[config]): | ||
272 | # Quit, we are missing a record and can't guarantee | ||
273 | # a task-to-task comparison | ||
274 | continue | 87 | continue |
275 | 88 | ||
276 | for data_stat, base_stat in zip(data_stats[config],base_stats[config]): | 89 | obj = rdata.clazz(*values) |
277 | if not base_stat[Type.Avg] or not base_stat[Type.Max] or \ | 90 | yield (obj, rdata.method) |
278 | not data_stat[Type.Avg] or not data_stat[Type.Max]: | 91 | |
279 | continue | 92 | def read_data(task_dict, fnames): |
280 | # How much larger is their exec stat than ours? | 93 | '''Read records from @fnames and store per-pid stats in @task_dict.''' |
281 | avg_scale = float(base_stat[Type.Avg]) / float(data_stat[Type.Avg]) | 94 | buff = [] |
282 | max_scale = float(base_stat[Type.Max]) / float(data_stat[Type.Max]) | 95 | |
96 | def add_record(itera): | ||
97 | # Ordered insertion into buff | ||
98 | try: | ||
99 | next_ret = itera.next() | ||
100 | except StopIteration: | ||
101 | return | ||
283 | 102 | ||
284 | task = task_dict[data_stat.id] | 103 | arecord, method = next_ret |
104 | i = 0 | ||
105 | for (i, (brecord, m, t)) in enumerate(buff): | ||
106 | if brecord.when > arecord.when: | ||
107 | break | ||
108 | buff.insert(i, (arecord, method, itera)) | ||
285 | 109 | ||
286 | avg_scales.add(task, avg_scale) | 110 | for fname in fnames: |
287 | max_scales.add(task, max_scale) | 111 | itera = make_iterator(fname) |
112 | add_record(itera) | ||
288 | 113 | ||
289 | avg_scales.write_measurements(result) | 114 | while buff: |
290 | max_scales.write_measurements(result) | 115 | (record, method, itera) = buff.pop(0) |
291 | 116 | ||
292 | def extract_sched_data(data_file, result, base_file): | 117 | add_record(itera) |
293 | with open(data_file, 'r') as f: | 118 | method(task_dict, record) |
294 | data = f.read() | ||
295 | 119 | ||
296 | task_dict = get_task_dict(data) | 120 | def process_completion(task_dict, record): |
121 | task_dict[record.pid].misses.store_time(record) | ||
297 | 122 | ||
298 | try: | 123 | def process_release(task_dict, record): |
299 | extract_tardy_vals(task_dict, data, result) | 124 | data = task_dict[record.pid] |
300 | extract_variance(task_dict, data, result) | 125 | data.jobs += 1 |
301 | except Exception as e: | 126 | data.misses.start_time(record) |
302 | print("Error in %s" % data_file) | ||
303 | raise e | ||
304 | 127 | ||
305 | if (base_file): | 128 | def process_param(task_dict, record): |
306 | extract_scaling_data(task_dict, data, result, base_file) | 129 | params = TaskParams(record.wcet, record.period, record.partition) |
130 | task_dict[record.pid].params = params | ||
131 | |||
132 | def process_block(task_dict, record): | ||
133 | task_dict[record.pid].blocks.start_time(record) | ||
134 | |||
135 | def process_resume(task_dict, record): | ||
136 | task_dict[record.pid].blocks.store_time(record) | ||
137 | |||
138 | register_record('ResumeRecord', 9, process_resume, 'Q8x', ['when']) | ||
139 | register_record('BlockRecord', 8, process_block, 'Q8x', ['when']) | ||
140 | register_record('CompletionRecord', 7, process_completion, 'Q8x', ['when']) | ||
141 | register_record('ReleaseRecord', 3, process_release, 'QQ', ['release', 'when']) | ||
142 | register_record('ParamRecord', 2, process_param, 'IIIcc2x', | ||
143 | ['wcet','period','phase','partition', 'task_class']) | ||
144 | |||
145 | def extract_sched_data(result, data_dir, work_dir): | ||
146 | bin_files = conf.FILES['sched_data'].format(".*") | ||
147 | output_file = "%s/out-st" % work_dir | ||
148 | |||
149 | bins = [f for f in os.listdir(data_dir) if re.match(bin_files, f)] | ||
150 | if not len(bins): | ||
151 | return | ||
152 | |||
153 | # Save an in-english version of the data for debugging | ||
154 | cmd_arr = [conf.BINS['st_show']] | ||
155 | cmd_arr.extend(bins) | ||
156 | with open(output_file, "w") as f: | ||
157 | subprocess.call(cmd_arr, cwd=data_dir, stdout=f) | ||
158 | |||
159 | task_dict = defaultdict(lambda : | ||
160 | TaskData(0, 0, TimeTracker(), TimeTracker())) | ||
161 | |||
162 | # Gather per-task values | ||
163 | read_data(task_dict, bins) | ||
164 | |||
165 | stat_data = {"avg-tard" : [], "max-tard" : [], | ||
166 | "avg-block" : [], "max-block" : [], | ||
167 | "miss-ratio" : []} | ||
168 | |||
169 | # Group per-task values | ||
170 | for tdata in task_dict.itervalues(): | ||
171 | miss_ratio = float(tdata.misses.num) / tdata.jobs | ||
172 | # Scale average down to account for jobs with 0 tardiness | ||
173 | avg_tard = tdata.misses.avg * miss_ratio | ||
174 | |||
175 | stat_data["miss-ratio"].append(miss_ratio) | ||
176 | stat_data["avg-tard" ].append(avg_tard / tdata.params.wcet) | ||
177 | stat_data["max-tard" ].append(tdata.misses.max / tdata.params.wcet) | ||
178 | stat_data["avg-block" ].append(tdata.blocks.avg / NSEC_PER_MSEC) | ||
179 | stat_data["max-block" ].append(tdata.blocks.max / NSEC_PER_MSEC) | ||
180 | |||
181 | # Summarize value groups | ||
182 | for name, data in stat_data.iteritems(): | ||
183 | result[name] = Measurement(str(name)).from_array(data) | ||
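For reference, a standalone sketch (with made-up values) of how one of the fixed-size 24-byte records registered above is laid out, using the same struct formats as this file:

    import struct

    # CompletionRecord: common header '<bbhi' (type, cpu, pid, job) plus payload 'Q8x' (when, padding)
    fmt = '<bbhiQ8x'
    raw = struct.pack(fmt, 7, 0, 1234, 5, 1500000)  # type 7, cpu 0, pid 1234, job 5, when in ns
    assert len(raw) == 24                           # matches RECORD_SIZE
    print(struct.unpack_from(fmt, raw))             # (7, 0, 1234, 5, 1500000)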
diff --git a/parse_exps.py b/parse_exps.py
index d932b0d..c8cd8b1 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -2,11 +2,9 @@
2 | from __future__ import print_function | 2 | from __future__ import print_function |
3 | 3 | ||
4 | import config.config as conf | 4 | import config.config as conf |
5 | import copy | ||
6 | import os | 5 | import os |
7 | import parse.ft as ft | 6 | import parse.ft as ft |
8 | import parse.sched as st | 7 | import parse.sched as st |
9 | import re | ||
10 | import shutil as sh | 8 | import shutil as sh |
11 | import sys | 9 | import sys |
12 | 10 | ||
@@ -22,13 +20,8 @@ def parse_args():
22 | 20 | ||
23 | parser.add_option('-o', '--out', dest='out', | 21 | parser.add_option('-o', '--out', dest='out', |
24 | help='file or directory for data output', default='parse-data') | 22 | help='file or directory for data output', default='parse-data') |
25 | |||
26 | # TODO: this means nothing, also remove dests | ||
27 | parser.add_option('-c', '--clean', action='store_true', default=False, | 23 | parser.add_option('-c', '--clean', action='store_true', default=False, |
28 | dest='clean', help='do not output single-point csvs') | 24 | dest='clean', help='do not output single-point csvs') |
29 | parser.add_option('-s', '--scale-against', dest='scale_against', | ||
30 | metavar='PARAM=VALUE', default="", | ||
31 | help='calculate task scaling factors against these configs') | ||
32 | parser.add_option('-i', '--ignore', metavar='[PARAM...]', default="", | 25 | parser.add_option('-i', '--ignore', metavar='[PARAM...]', default="", |
33 | help='ignore changing parameter values') | 26 | help='ignore changing parameter values') |
34 | parser.add_option('-f', '--force', action='store_true', default=False, | 27 | parser.add_option('-f', '--force', action='store_true', default=False, |
@@ -41,136 +34,89 @@
41 | 34 | ||
42 | return parser.parse_args() | 35 | return parser.parse_args() |
43 | 36 | ||
44 | ExpData = namedtuple('ExpData', ['name', 'params', 'data_files', 'is_base']) | 37 | ExpData = namedtuple('ExpData', ['path', 'params', 'work_dir']) |
45 | DataFiles = namedtuple('DataFiles', ['st']) | ||
46 | 38 | ||
47 | def get_exp_params(data_dir, col_map): | 39 | def get_exp_params(data_dir, col_map): |
48 | param_file = "%s/%s" % (data_dir, conf.DEFAULTS['params_file']) | 40 | param_file = "%s/%s" % (data_dir, conf.DEFAULTS['params_file']) |
49 | if not os.path.isfile: | 41 | if not os.path.isfile: |
50 | raise Exception("No param file '%s' exists!" % param_file) | 42 | raise Exception("No param file '%s' exists!" % param_file) |
51 | 43 | ||
52 | # Keep only params that uniquely identify the experiment | 44 | # Ignore 'magic' parameters used by these scripts |
53 | params = load_params(param_file) | 45 | params = load_params(param_file) |
54 | for ignored in conf.PARAMS.itervalues(): | 46 | for ignored in conf.PARAMS.itervalues(): |
55 | # Always include cycles or overhead parsing fails | 47 | # With the exception of cycles which is used by overhead parsing |
56 | if ignored in params and ignored != conf.PARAMS['cycles']: | 48 | if ignored in params and ignored != conf.PARAMS['cycles']: |
57 | params.pop(ignored) | 49 | params.pop(ignored) |
58 | 50 | ||
59 | # Track all changed params | 51 | # Store parameters in col_map, which will track which parameters change |
52 | # across experiments | ||
60 | for key, value in params.iteritems(): | 53 | for key, value in params.iteritems(): |
61 | col_map.try_add(key, value) | 54 | col_map.try_add(key, value) |
62 | 55 | ||
56 | # Cycles must be present | ||
63 | if conf.PARAMS['cycles'] not in params: | 57 | if conf.PARAMS['cycles'] not in params: |
64 | params[conf.PARAMS['cycles']] = conf.DEFAULTS['cycles'] | 58 | params[conf.PARAMS['cycles']] = conf.DEFAULTS['cycles'] |
65 | 59 | ||
66 | return params | 60 | return params |
67 | 61 | ||
68 | 62 | ||
69 | def gen_exp_data(exp_dirs, base_conf, col_map, force): | 63 | def load_exps(exp_dirs, col_map, clean): |
70 | plain_exps = [] | 64 | exps = [] |
71 | scaling_bases = [] | ||
72 | 65 | ||
73 | sys.stderr.write("Generating data...\n") | 66 | sys.stderr.write("Loading experiments...\n") |
74 | 67 | ||
75 | for i, data_dir in enumerate(exp_dirs): | 68 | for data_dir in exp_dirs: |
76 | if not os.path.isdir(data_dir): | 69 | if not os.path.isdir(data_dir): |
77 | raise IOError("Invalid experiment '%s'" % os.path.abspath(data_dir)) | 70 | raise IOError("Invalid experiment '%s'" % os.path.abspath(data_dir)) |
78 | 71 | ||
79 | tmp_dir = data_dir + "/tmp" | 72 | # Used to store error output and debugging info |
80 | if not os.path.exists(tmp_dir): | 73 | work_dir = data_dir + "/tmp" |
81 | os.mkdir(tmp_dir) | ||
82 | |||
83 | # Read and translate exp output files | ||
84 | params = get_exp_params(data_dir, col_map) | ||
85 | st_output = st.get_st_output(data_dir, tmp_dir, force) | ||
86 | |||
87 | if base_conf and base_conf.viewitems() & params.viewitems(): | ||
88 | if not st_output: | ||
89 | raise Exception("Scaling base '%s' useless without sched data!" | ||
90 | % data_dir) | ||
91 | is_base = True | ||
92 | |||
93 | base_params = copy.deepcopy(params) | ||
94 | base_params.pop(base_conf.keys()[0]) | ||
95 | 74 | ||
96 | base_exp = ExpData(data_dir, base_params, | 75 | if not os.path.exists(work_dir): |
97 | DataFiles(st_output), True) | 76 | os.mkdir(work_dir) |
98 | scaling_bases += [base_exp] | 77 | elif clean: |
99 | else: | 78 | sh.rmtree(work_dir) |
100 | is_base = False | ||
101 | 79 | ||
102 | # Create experiment named after the data dir | 80 | params = get_exp_params(data_dir, col_map) |
103 | exp_data = ExpData(data_dir, params, | ||
104 | DataFiles(st_output), is_base) | ||
105 | 81 | ||
106 | plain_exps += [exp_data] | 82 | exps += [ ExpData(data_dir, params, work_dir) ] |
107 | 83 | ||
108 | sys.stderr.write('\r {0:.2%}'.format(float(i)/len(exp_dirs))) | 84 | return exps |
109 | sys.stderr.write('\n') | ||
110 | return (plain_exps, scaling_bases) | ||
111 | 85 | ||
112 | def main(): | 86 | def main(): |
113 | opts, args = parse_args() | 87 | opts, args = parse_args() |
114 | 88 | ||
115 | args = args or [os.getcwd()] | 89 | args = args or [os.getcwd()] |
116 | 90 | ||
117 | # Configuration key for task systems used to calculate task | 91 | # Load exp parameters into col_map |
118 | # execution scaling factors | ||
119 | base_conf = dict(re.findall("(.*)=(.*)", opts.scale_against)) | ||
120 | |||
121 | col_map = ColMap() | 92 | col_map = ColMap() |
93 | exps = load_exps(args, col_map, opts.force) | ||
122 | 94 | ||
123 | (plain_exps, scaling_bases) = gen_exp_data(args, base_conf, col_map, opts.force) | 95 | # Don't track changes in ignored parameters |
124 | |||
125 | if base_conf and base_conf.keys()[0] not in col_map: | ||
126 | raise IOError("Base column '%s' not present in any parameters!" % | ||
127 | base_conf.keys()[0]) | ||
128 | |||
129 | base_map = copy.deepcopy(col_map) | ||
130 | if opts.ignore: | 96 | if opts.ignore: |
131 | for param in opts.ignore.split(","): | 97 | for param in opts.ignore.split(","): |
132 | col_map.try_remove(param) | 98 | col_map.try_remove(param) |
133 | 99 | ||
134 | base_table = TupleTable(base_map) # For tracking 'base' experiments | 100 | result_table = TupleTable(col_map) |
135 | result_table = TupleTable(col_map) # For generating output | ||
136 | |||
137 | # Used to find matching scaling_base for each experiment | ||
138 | for base in scaling_bases: | ||
139 | base_table.add_exp(base.params, base) | ||
140 | 101 | ||
141 | sys.stderr.write("Parsing data...\n") | 102 | sys.stderr.write("Parsing data...\n") |
142 | for exp in args: | 103 | for i,exp in enumerate(exps): |
143 | result = ExpPoint(exp) | 104 | result = ExpPoint(exp.path) |
144 | params = get_exp_params(exp, col_map) | 105 | cycles = exp.params[conf.PARAMS['cycles']] |
145 | # Write overheads into result | ||
146 | ft.extract_ft_data(result, exp, | ||
147 | params[conf.PARAMS['cycles']], | ||
148 | exp + "/tmp") | ||
149 | |||
150 | if opts.verbose: | ||
151 | print(result) | ||
152 | |||
153 | for i,exp in enumerate(plain_exps): | ||
154 | result = ExpPoint(exp.name) | ||
155 | |||
156 | if exp.data_files.st: | ||
157 | base = None | ||
158 | if base_conf and not exp.is_base: | ||
159 | # Try to find a scaling base | ||
160 | base_params = copy.deepcopy(exp.params) | ||
161 | base_params.pop(base_conf.keys()[0]) | ||
162 | base = base_table.get_exps(base_params)[0] | ||
163 | 106 | ||
164 | # Write deadline misses / tardiness into result | 107 | # Write overheads into result |
165 | st.extract_sched_data(exp.data_files.st, result, | 108 | ft.extract_ft_data(result, exp.path, exp.work_dir, cycles) |
166 | base.data_files.st if base else None) | ||
167 | 109 | ||
168 | result_table.add_exp(exp.params, result) | 110 | # Write scheduling statistics into result |
111 | st.extract_sched_data(result, exp.path, exp.work_dir) | ||
169 | 112 | ||
170 | if opts.verbose: | 113 | if opts.verbose: |
171 | print(result) | 114 | print(result) |
172 | else: | 115 | else: |
173 | sys.stderr.write('\r {0:.2%}'.format(float(i)/len(plain_exps))) | 116 | sys.stderr.write('\r {0:.2%}'.format(float(i)/len(exps))) |
117 | |||
118 | result_table.add_exp(exp.params, result) | ||
119 | |||
174 | sys.stderr.write('\n') | 120 | sys.stderr.write('\n') |
175 | 121 | ||
176 | if opts.force and os.path.exists(opts.out): | 122 | if opts.force and os.path.exists(opts.out): |
diff --git a/run_exps.py b/run_exps.py
index 1d2cc2e..24f71e4 100755
--- a/run_exps.py
+++ b/run_exps.py
@@ -39,7 +39,7 @@ def parse_args():
39 | 39 | ||
40 | 40 | ||
41 | def convert_data(data): | 41 | def convert_data(data): |
42 | """Convert a non-python schedule file into the python format""" | 42 | '''Convert a non-python schedule file into the python format''' |
43 | regex = re.compile( | 43 | regex = re.compile( |
44 | r"(?P<PROC>^" | 44 | r"(?P<PROC>^" |
45 | r"(?P<HEADER>/proc/[\w\-]+?/)?" | 45 | r"(?P<HEADER>/proc/[\w\-]+?/)?" |
@@ -67,7 +67,7 @@ def convert_data(data):
67 | return {'proc' : procs, 'spin' : spins} | 67 | return {'proc' : procs, 'spin' : spins} |
68 | 68 | ||
69 | def fix_paths(schedule, exp_dir, sched_file): | 69 | def fix_paths(schedule, exp_dir, sched_file): |
70 | """Replace relative paths of command line arguments with absolute ones.""" | 70 | '''Replace relative paths of command line arguments with absolute ones.''' |
71 | for (idx, (spin, args)) in enumerate(schedule['spin']): | 71 | for (idx, (spin, args)) in enumerate(schedule['spin']): |
72 | for arg in re.split(" +", args): | 72 | for arg in re.split(" +", args): |
73 | abspath = "%s/%s" % (exp_dir, arg) | 73 | abspath = "%s/%s" % (exp_dir, arg) |