-rw-r--r--  common.py                     |  2
-rw-r--r--  gen/generators.py             | 22
-rw-r--r--  parse/dir_map.py              |  2
-rw-r--r--  parse/ft.py                   | 14
-rw-r--r--  parse/sched.py                |  8
-rw-r--r--  parse/tuple_table.py          |  3
-rwxr-xr-x  parse_exps.py                 | 58
-rw-r--r--  plot/style.py                 |  3
-rwxr-xr-x  plot_exps.py                  | 29
-rw-r--r--  run/executable/executable.py  |  9
-rw-r--r--  run/executable/ftcat.py       | 21
-rw-r--r--  run/experiment.py             |  9
-rw-r--r--  run/litmus_util.py            | 10
-rw-r--r--  run/tracer.py                 | 19
-rwxr-xr-x  run_exps.py                   |  2

15 files changed, 138 insertions, 73 deletions
diff --git a/common.py b/common.py
index 6d1db97..d080e1a 100644
--- a/common.py
+++ b/common.py
@@ -105,7 +105,7 @@ def recordtype(typename, field_names, default=0):
     namespace = {}
     try:
         exec template in namespace
-    except SyntaxError, e:
+    except SyntaxError as e:
         raise SyntaxError(e.message + ':\n' + template)
     cls = namespace[typename]
 
diff --git a/gen/generators.py b/gen/generators.py
index 09ae979..dd6f1cc 100644
--- a/gen/generators.py
+++ b/gen/generators.py
@@ -53,7 +53,7 @@ GenOption = namedtuple('GenOption', ['name', 'types', 'default', 'help'])
 class BaseGenerator(object):
     '''Creates sporadic task sets with the most common Litmus options.'''
     def __init__(self, name, templates, options, params):
-        self.options = self.__make_options() + options
+        self.options = self.__make_options(params) + options
 
         self.__setup_params(params)
 
@@ -61,11 +61,14 @@ class BaseGenerator(object):
         self.template = "\n".join([TP_RM] + templates)
         self.name = name
 
-    def __make_options(self):
+    def __make_options(self, params):
         '''Return generic Litmus options.'''
 
         # Guess defaults using the properties of this computer
-        cpus = lu.num_cpus()
+        if 'cpus' in params:
+            cpus = min(map(int, params['cpus']))
+        else:
+            cpus = lu.num_cpus()
         try:
             config = get_config_option("RELEASE_MASTER") and True
         except:
@@ -127,9 +130,10 @@ class BaseGenerator(object):
             f.write(str(Template(self.template, searchList=[exp_params])))
 
         del exp_params['task_set']
+        del exp_params['num_tasks']
         exp_params_file = out_dir + "/" + DEFAULTS['params_file']
         with open(exp_params_file, 'wa') as f:
-            exp_params['scheduler'] = 'CEDF'
+            exp_params['scheduler'] = self.name
             f.write(str(exp_params))
 
     def __setup_params(self, params):
@@ -195,7 +199,7 @@ class BaseGenerator(object):
         col_map = builder.build()
 
         for dp in DesignPointGenerator(self.params):
-            dir_leaf = "sched=%s_%s" % (self.name, col_map.get_encoding(dp))
+            dir_leaf = "sched=%s_%s" % (self.name, col_map.encode(dp))
             dir_path = "%s/%s" % (out_dir, dir_leaf.strip('_'))
 
             if os.path.exists(dir_path):
@@ -225,10 +229,10 @@ class BaseGenerator(object):
                 i+= len(word)
                 res += [word]
                 if i > 80:
-                    print ", ".join(res[:-1])
+                    print(", ".join(res[:-1]))
                     res = ["\t\t "+res[-1]]
                     i = line.index("'")
-            print ", ".join(res)
+            print(", ".join(res))
 
 class PartitionedGenerator(BaseGenerator):
     def __init__(self, name, templates, options, params):
@@ -243,7 +247,7 @@ class PartitionedGenerator(BaseGenerator):
 
 class PedfGenerator(PartitionedGenerator):
     def __init__(self, params={}):
-        super(PedfGenerator, self).__init__("P-EDF", [], [], params)
+        super(PedfGenerator, self).__init__("PSN-EDF", [], [], params)
 
 class CedfGenerator(PartitionedGenerator):
     LEVEL_OPTION = GenOption('level', ['L2', 'L3', 'All'], ['L2'],
@@ -255,4 +259,4 @@ class CedfGenerator(PartitionedGenerator):
 
 class GedfGenerator(BaseGenerator):
     def __init__(self, params={}):
-        super(GedfGenerator, self).__init__("G-EDF", [TP_GLOB_TASK], [], params)
+        super(GedfGenerator, self).__init__("GSN-EDF", [TP_GLOB_TASK], [], params)
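
Note: the __make_options() change above makes the guessed CPU count follow the
experiment parameters instead of always probing the local machine. A minimal
standalone sketch of that selection logic (the function name and the
detect_cpus callable are illustrative, not part of the repo):

    # Prefer an explicit 'cpus' design-point parameter over autodetection.
    def default_cpu_count(params, detect_cpus):
        if 'cpus' in params:
            # params['cpus'] is a list of candidate values; use the smallest
            return min(map(int, params['cpus']))
        return detect_cpus()

    # e.g. default_cpu_count({'cpus': ['4', '8']}, lambda: 16) -> 4
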
diff --git a/parse/dir_map.py b/parse/dir_map.py
index 1c17f40..601dd3b 100644
--- a/parse/dir_map.py
+++ b/parse/dir_map.py
@@ -46,7 +46,7 @@ class DirMap(object):
 
     def remove_childless(self):
         def remove_childless2(node):
-            for key, child in node:
+            for key, child in node.children.items():
                 remove_childless2(child)
                 if not (child.children or child.values):
                     node.children.pop(key)
diff --git a/parse/ft.py b/parse/ft.py
index 5293b00..19453d1 100644
--- a/parse/ft.py
+++ b/parse/ft.py
@@ -3,6 +3,7 @@ import numpy as np
 import os
 import re
 import shutil as sh
+import sys
 import subprocess
 
 from point import Measurement,Type
@@ -28,7 +29,6 @@ def parse_overhead(result, overhead_bin, overhead, cycles, out_dir, err_file):
         raise Exception("Failed (%d) with command: %s" % (ret, " ".join(cmd)))
     if not size:
         os.remove(ovh_fname)
-
     if size and not ret:
         # Map and sort file for stats
         data = np.memmap(ovh_fname, dtype="float32", mode='c')
@@ -47,19 +47,22 @@ def parse_overhead(result, overhead_bin, overhead, cycles, out_dir, err_file):
 
 def sort_ft(ft_file, err_file, out_dir):
     '''Create and return file with sorted overheads from @ft_file.'''
-    out_fname = "{}/{}".format("%s/%s" % (os.getcwd(), out_dir), FT_SORTED_NAME)
+    out_fname = "{}/{}".format(out_dir, FT_SORTED_NAME)
 
     # Sort happens in-place
     sh.copyfile(ft_file, out_fname)
     cmd = [conf.BINS['ftsort'], out_fname]
-    ret = subprocess.call(cmd, cwd="%s/%s" % (os.getcwd(), out_dir), stderr=err_file, stdout=err_file)
 
+    ret = subprocess.call(cmd, cwd=out_dir, stderr=err_file, stdout=err_file)
     if ret:
-        raise Exception("Sort failed with command: %s" % " ".join(cmd))
+        raise Exception("Sort failed (%d) with command: %s" % (ret, " ".join(cmd)))
 
     return out_fname
 
 def extract_ft_data(result, data_dir, work_dir, cycles):
+    data_dir = os.path.abspath(data_dir)
+    work_dir = os.path.abspath(work_dir)
+
     freg = conf.FILES['ft_data'] + "$"
     bins = [f for f in os.listdir(data_dir) if re.match(freg, f)]
 
@@ -67,6 +70,9 @@ def extract_ft_data(result, data_dir, work_dir, cycles):
         return False
 
     bin_file = "{}/{}".format(data_dir, bins[0])
+    if not os.path.getsize(bin_file):
+        sys.stderr.write("Empty feather trace file %s!" % bin_file)
+        return False
 
     with open("%s/%s" % (work_dir, FT_ERR_NAME), 'w') as err_file:
         sorted_bin = sort_ft(bin_file, err_file, work_dir)
diff --git a/parse/sched.py b/parse/sched.py
index ba0df5e..2da0149 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -2,6 +2,7 @@ import config.config as conf
 import os
 import re
 import struct
+import sys
 import subprocess
 
 from collections import defaultdict,namedtuple
@@ -66,6 +67,10 @@ def register_record(name, id, method, fmt, fields):
 def make_iterator(fname):
     '''Iterate over (parsed record, processing method) in a
     sched-trace file.'''
+    if not os.path.getsize(fname):
+        sys.stderr.write("Empty sched_trace file %s!" % fname)
+        return
+
     f = open(fname, 'rb')
     max_type = len(record_map)
 
@@ -182,4 +187,7 @@ def extract_sched_data(result, data_dir, work_dir):
 
     # Summarize value groups
     for name, data in stat_data.iteritems():
+        if not data:
+            continue
         result[name] = Measurement(str(name)).from_array(data)
+
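
Note: both parse/ft.py and parse/sched.py now skip empty trace files instead of
failing on them. The guard relies on os.path.getsize() returning 0 (falsy) for
an empty file; a minimal sketch of the pattern in isolation (the helper name is
made up):

    import os
    import sys

    def has_trace_data(path):
        # 0 bytes -> falsy, so empty traces are reported and skipped
        if not os.path.getsize(path):
            sys.stderr.write("Empty trace file %s!\n" % path)
            return False
        return True
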
diff --git a/parse/tuple_table.py b/parse/tuple_table.py
index 86baa08..ee94772 100644
--- a/parse/tuple_table.py
+++ b/parse/tuple_table.py
@@ -3,8 +3,6 @@ from collections import defaultdict,namedtuple
 from point import SummaryPoint,Type
 from dir_map import DirMap
 from col_map import ColMap,ColMapBuilder
-
-
 from pprint import pprint
 
 class TupleTable(object):
@@ -88,7 +86,6 @@ class ReducedTupleTable(TupleTable):
                 self.__add_to_dirmap(dir_map, col, kv, point)
 
         dir_map.remove_childless()
-        print("wrote: %s" % self)
         return dir_map
 
     @staticmethod
diff --git a/parse_exps.py b/parse_exps.py
index f27021a..4cdc0a1 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -8,13 +8,13 @@ import parse.sched as st
 import pickle
 import shutil as sh
 import sys
+import traceback
 
 from collections import namedtuple
 from common import load_params
 from optparse import OptionParser
-from parse.dir_map import DirMap
 from parse.point import ExpPoint
-from parse.tuple_table import TupleTable,ReducedTupleTable
+from parse.tuple_table import TupleTable
 from parse.col_map import ColMapBuilder
 from multiprocessing import Pool, cpu_count
 
@@ -23,7 +23,6 @@ def parse_args():
     parser = OptionParser("usage: %prog [options] [data_dir]...")
 
     print("default to no params.py")
-    print("save measurements in temp directory for faster reloading")
 
     parser.add_option('-o', '--out', dest='out',
                       help='file or directory for data output', default='parse-data')
@@ -85,16 +84,24 @@ def load_exps(exp_dirs, cm_builder, clean):
 
     return exps
 
-def parse_exp(exp, force):
+def parse_exp(exp_force):
+    # Tupled for multiprocessing
+    exp, force = exp_force
+
     result_file = exp.work_dir + "/exp_point.pkl"
     should_load = not force and os.path.exists(result_file)
-    mode = 'r' if should_load else 'w'
 
-    with open(result_file, mode + 'b') as f:
-        if should_load:
-            # No need to go through this work twice
-            result = pickle.load(f)
-        else:
+    result = None
+    if should_load:
+        with open(result_file, 'rb') as f:
+            try:
+                # No need to go through this work twice
+                result = pickle.load(f)
+            except:
+                pass
+
+    if not result:
+        try:
             result = ExpPoint(exp.path)
             cycles = exp.params[conf.PARAMS['cycles']]
 
@@ -104,7 +111,10 @@ def parse_exp(exp, force):
             # Write scheduling statistics into result
             st.extract_sched_data(result, exp.path, exp.work_dir)
 
-            pickle.dump(result, f)
+            with open(result_file, 'wb') as f:
+                pickle.dump(result, f)
+        except:
+            traceback.print_exc()
 
     return (exp, result)
 
@@ -128,14 +138,24 @@ def main():
     sys.stderr.write("Parsing data...\n")
 
     procs = min(len(exps), cpu_count()/2)
     pool = Pool(processes=procs)
-    enum = pool.imap_unordered(parse_exp, exps, [opts.force]*len(exps))
-    for i, (exp, result) in enumerate(enum):
-        if opts.verbose:
-            print(result)
-        else:
-            sys.stderr.write('\r {0:.2%}'.format(float(i)/len(exps)))
-        result_table[exp.params] += [result]
+    pool_args = zip(exps, [opts.force]*len(exps))
+    enum = pool.imap_unordered(parse_exp, pool_args, 1)
+
+    try:
+        for i, (exp, result) in enumerate(enum):
+            if opts.verbose:
+                print(result)
+            else:
+                sys.stderr.write('\r {0:.2%}'.format(float(i)/len(exps)))
+            result_table[exp.params] += [result]
+        pool.close()
+    except:
+        pool.terminate()
+        traceback.print_exc()
+        raise Exception("Failed parsing!")
+    finally:
+        pool.join()
 
     sys.stderr.write('\n')
 
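
Note: parse_exp() is now tupled because Pool.imap_unordered() maps a
single-argument function over one iterable; the old call passed
[opts.force]*len(exps) where the chunk-size argument goes. A minimal sketch of
the pattern, including the close/terminate/join shutdown used above (worker and
data names are illustrative):

    from multiprocessing import Pool

    def worker(job):         # must be a module-level, single-argument callable
        item, flag = job     # unpack the tuple built by zip() below
        return item * 2 if flag else item

    jobs = zip([1, 2, 3], [True] * 3)
    pool = Pool(processes=2)
    try:
        for result in pool.imap_unordered(worker, jobs, 1):  # 1 = chunk size
            print(result)
        pool.close()
    except:
        pool.terminate()
        raise
    finally:
        pool.join()
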
diff --git a/plot/style.py b/plot/style.py
index ca7a112..fd1fa97 100644
--- a/plot/style.py
+++ b/plot/style.py
@@ -16,6 +16,9 @@ class StyleMap(object):
         self.field_map = {}
 
         for field, values in self.__get_all()._asdict().iteritems():
+            if not col_list:
+                break
+
             next_column = col_list.pop(0)
             value_dict = {}
 
diff --git a/plot_exps.py b/plot_exps.py
index 8fbef99..49cc729 100755
--- a/plot_exps.py
+++ b/plot_exps.py
@@ -5,6 +5,7 @@ import matplotlib.pyplot as plot
 import os
 import shutil as sh
 import sys
+import traceback
 from collections import namedtuple
 from optparse import OptionParser
 from parse.col_map import ColMap,ColMapBuilder
@@ -83,6 +84,15 @@ def plot_by_variable(details):
 
     plot.savefig(details.out, format=OUT_FORMAT)
 
+    return True
+
+def plot_wrapper(details):
+    '''Wrap exceptions in named method for printing in multiprocessing pool.'''
+    try:
+        return plot_by_variable(details)
+    except:
+        traceback.print_exc()
+
 def plot_dir(data_dir, out_dir, force):
     sys.stderr.write("Reading data...\n")
     dir_map = DirMap.read(data_dir)
@@ -102,11 +112,24 @@ def plot_dir(data_dir, out_dir, force):
         if force or not os.path.exists(details.out):
             plot_details += [details]
 
+    if not plot_details:
+        return
+
     procs = min(len(plot_details), cpu_count()/2)
     pool = Pool(processes=procs)
-    enum = pool.imap_unordered(plot_by_variable, plot_details)
-    for i, _ in enumerate(enum):
-        sys.stderr.write('\r {0:.2%}'.format(float(i)/num_plots))
+    enum = pool.imap_unordered(plot_wrapper, plot_details)
+
+    try:
+        for i, _ in enumerate(enum):
+            sys.stderr.write('\r {0:.2%}'.format(float(i)/num_plots))
+        pool.close()
+    except:
+        pool.terminate()
+        traceback.print_exc()
+        raise Exception("Failed plotting!")
+    finally:
+        pool.join()
+
     sys.stderr.write('\n')
 
 def main():
diff --git a/run/executable/executable.py b/run/executable/executable.py
index bc8edd7..0a408b7 100644
--- a/run/executable/executable.py
+++ b/run/executable/executable.py
@@ -44,7 +44,6 @@ class Executable(object):
         return full_command
 
     def __str__(self):
-        print("Full command: %s" % self.__get_full_command())
         return " ".join(self.__get_full_command())
 
     def execute(self):
@@ -63,7 +62,7 @@ class Executable(object):
         '''Send the terminate signal to the binary.'''
         self.sp.terminate()
 
-    def wait(self):
+    def wait(self, error=True):
         '''Wait until the executable is finished, checking return code.
 
         If the exit status is non-zero, raise an exception.
@@ -71,8 +70,10 @@ class Executable(object):
         '''
 
         self.sp.wait()
-        if self.sp.returncode != 0:
-            print >>sys.stderr, "Non-zero return: %s %s" % (self.exec_file, " ".join(self.extra_args))
+        if self.sp.returncode != 0 and error:
+            print >>sys.stderr, "Non-zero return %d: %s %s" % (self.sp.returncode,
+                                                               self.exec_file,
+                                                               " ".join(self.extra_args))
             return 0
         else:
             return 1
diff --git a/run/executable/ftcat.py b/run/executable/ftcat.py
index 5da8fa7..1f0420b 100644
--- a/run/executable/ftcat.py
+++ b/run/executable/ftcat.py
@@ -1,18 +1,15 @@
 import os
 import stat
 
-from executable import Executable
+from .executable import Executable
 
 class FTcat(Executable):
     '''Used to wrap the ftcat binary in the Experiment object.'''
 
     def __init__(self, ft_cat_bin, stdout_file, stderr_file, dev, events, cpu=None):
         '''Extends the Executable initializer method with ftcat attributes.'''
+        super(FTcat, self).__init__('/usr/bin/taskset')
 
-        # hack to run FTCat at higher priority
-        chrt_bin = '/usr/bin/chrt'
-
-        super(FTcat, self).__init__(chrt_bin)
         self.stdout_file = stdout_file
         self.stderr_file = stderr_file
 
@@ -23,11 +20,15 @@ class FTcat(Executable):
         if events is None:
             raise Exception('No events!')
 
-        # hack to run FTCat at higher priority
-        self.extra_args = ['-f', '40']
         if cpu is not None:
-            # and bind it to a CPU
-            self.extra_args.extend(['/usr/bin/taskset', '-c', str(cpu)])
+            # Execute only on the given CPU
+            self.extra_args = ['-c', str(cpu)]
+        else:
+            # Execute on any cpu
+            self.extra_args = ['0xFFFFFFFF']
+
         events_str_arr = map(str, events)
-        self.extra_args.extend([ft_cat_bin, dev] + events_str_arr)
+        ft_cat_cmd = [ft_cat_bin, dev] + list(events_str_arr)
+
+        self.extra_args.extend(ft_cat_cmd)
 
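
Note: with the chrt hack gone, FTcat always execs through taskset: bound to one
CPU with -c when a CPU is given, otherwise with an all-ones affinity mask. An
illustrative reconstruction of the argv that results (binary path, device and
event IDs are made up for the example):

    ft_cat_bin = '/usr/bin/ftcat'
    dev = '/dev/litmus/sched_trace0'
    events = [501, 502]

    # pinned to CPU 0
    pinned = ['/usr/bin/taskset', '-c', '0', ft_cat_bin, dev] + [str(e) for e in events]
    # free to run on any CPU
    unpinned = ['/usr/bin/taskset', '0xFFFFFFFF', ft_cat_bin, dev] + [str(e) for e in events]
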
diff --git a/run/experiment.py b/run/experiment.py
index c8fc228..ecb0241 100644
--- a/run/experiment.py
+++ b/run/experiment.py
@@ -1,8 +1,9 @@
 import os
 import time
-import litmus_util as lu
+import run.litmus_util as lu
+import shutil as sh
 from operator import methodcaller
-from tracer import SchedTracer, LogTracer, PerfTracer, LinuxTracer, OverheadTracer
+from run.tracer import SchedTracer, LogTracer, PerfTracer, LinuxTracer, OverheadTracer
 
 class ExperimentException(Exception):
     '''Used to indicate when there are problems with an experiment.'''
@@ -78,6 +79,8 @@ class Experiment(object):
                      Experiment.INTERRUPTED_DIR)
             interrupted = "%s/%s" % (os.path.split(self.working_dir)[0],
                                      Experiment.INTERRUPTED_DIR)
+            if os.path.exists(interrupted):
+                sh.rmtree(interrupted)
             os.rename(self.working_dir, interrupted)
 
         os.mkdir(self.working_dir)
@@ -154,7 +157,7 @@ class Experiment(object):
         os.rename(self.working_dir, self.finished_dir)
 
     def log(self, msg):
-        print "[Exp %s]: %s" % (self.name, msg)
+        print("[Exp %s]: %s" % (self.name, msg))
 
     def run_exp(self):
         succ = False
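
Note: the interrupted-directory handling deletes any previous copy before the
rename because os.rename() cannot replace an existing non-empty directory. A
minimal sketch of that move-aside pattern (function name and paths are
illustrative):

    import os
    import shutil as sh

    def move_aside(src, dst):
        if os.path.exists(dst):
            sh.rmtree(dst)  # clear the old copy; rename fails on a non-empty dir
        os.rename(src, dst)
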
diff --git a/run/litmus_util.py b/run/litmus_util.py
index ec1700e..8a7f87d 100644
--- a/run/litmus_util.py
+++ b/run/litmus_util.py
@@ -4,7 +4,6 @@ import subprocess
 import os
 import stat
 import config.config as conf
-from common import get_config_option
 
 def num_cpus():
     '''Return the number of CPUs in the system.'''
@@ -19,11 +18,12 @@ def num_cpus():
     return cpus
 
 def ft_freq():
-    '''The frequency (in MHz) of the clock used by feather trace.'''
-    if get_config_option('CPU_V7') == 'y':
+    umachine = subprocess.check_output(["uname", "-m"])
+
+    if re.match("armv7", umachine):
         # Arm V7s use a millisecond timer
         freq = 1000.0
-    elif get_config_option('X86') == 'y':
+    elif re.match("x86", umachine):
         # X86 timer is equal to processor clock
         reg = re.compile(r'^cpu MHz\s*:\s*(?P<FREQ>\d+)', re.M)
         with open('/proc/cpuinfo', 'r') as f:
@@ -76,7 +76,7 @@ def is_device(dev):
     return not (not mode & stat.S_IFCHR)
 
 def waiting_tasks():
-    reg = re.compile(r'^ready.*(?P<READY>\d+)$', re.M)
+    reg = re.compile(r'^ready.*?(?P<READY>\d+)$', re.M)
     with open('/proc/litmus/stats', 'r') as f:
         data = f.read()
 
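
Note: the waiting_tasks() regex needs the non-greedy .*? so that the READY group
captures the whole number rather than only its last digit. A toy demonstration
(the sample line is made up; the real /proc/litmus/stats format may differ):

    import re

    line = "ready for release = 42"
    greedy = re.search(r'^ready.*(?P<READY>\d+)$', line, re.M)
    lazy = re.search(r'^ready.*?(?P<READY>\d+)$', line, re.M)
    print(greedy.group('READY'))  # '2'  -- greedy .* consumed the '4'
    print(lazy.group('READY'))    # '42'
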
diff --git a/run/tracer.py b/run/tracer.py
index 5d00e86..723bcad 100644
--- a/run/tracer.py
+++ b/run/tracer.py
@@ -1,10 +1,9 @@
-import litmus_util
+from . import litmus_util
 import os
 import config.config as conf
 
 from operator import methodcaller
-from executable.ftcat import FTcat,Executable
-
+from run.executable.ftcat import FTcat,Executable
 
 class Tracer(object):
     def __init__(self, name, output_dir):
@@ -19,7 +18,6 @@ class Tracer(object):
         map(methodcaller('terminate'), self.bins)
         map(methodcaller('wait'), self.bins)
 
-
 class LinuxTracer(Tracer):
     EVENT_ROOT = "/sys/kernel/debug/tracing"
     LITMUS_EVENTS = "%s/events/litmus" % EVENT_ROOT
@@ -45,7 +43,6 @@ class LinuxTracer(Tracer):
         map(methodcaller('interrupt'), self.bins)
         map(methodcaller('wait'), self.bins)
 
-
 class LogTracer(Tracer):
     DEVICE_STR = '/dev/litmus/log'
 
@@ -63,6 +60,9 @@ class LogTracer(Tracer):
     def enabled():
         return litmus_util.is_device(LogTracer.DEVICE_STR)
 
+    def stop_tracing(self):
+        map(methodcaller('interrupt'), self.bins)
+        map(methodcaller('wait', False), self.bins)
 
 class SchedTracer(Tracer):
     DEVICE_STR = '/dev/litmus/sched_trace'
@@ -76,14 +76,14 @@ class SchedTracer(Tracer):
                 stdout_f = open('%s/st-%d.bin' % (self.output_dir, cpu), 'w')
                 stderr_f = open('%s/st-%d-stderr.txt' % (self.output_dir, cpu), 'w')
                 dev = '{0}{1}'.format(SchedTracer.DEVICE_STR, cpu)
-                ftc = FTcat(conf.BINS['ftcat'], stdout_f, stderr_f, dev, conf.SCHED_EVENTS, cpu=cpu)
+                ftc = FTcat(conf.BINS['ftcat'], stdout_f, stderr_f, dev,
+                            conf.SCHED_EVENTS, cpu=cpu)
 
                 self.bins.append(ftc)
 
     @staticmethod
     def enabled():
         return litmus_util.is_device("%s%d" % (SchedTracer.DEVICE_STR, 0))
-
 
 class OverheadTracer(Tracer):
     DEVICE_STR = '/dev/litmus/ft_trace0'
@@ -100,8 +100,7 @@ class OverheadTracer(Tracer):
 
     @staticmethod
     def enabled():
         return litmus_util.is_device(OverheadTracer.DEVICE_STR)
-
 
 class PerfTracer(Tracer):
     def __init__(self, output_dir):
diff --git a/run_exps.py b/run_exps.py
index 84e2b4c..195d3f8 100755
--- a/run_exps.py
+++ b/run_exps.py
@@ -50,7 +50,7 @@ def convert_data(data):
50 r"(?P<ENTRY>[\w\-\/]+)" 50 r"(?P<ENTRY>[\w\-\/]+)"
51 r"\s*{\s*(?P<CONTENT>.*?)\s*?}$)|" 51 r"\s*{\s*(?P<CONTENT>.*?)\s*?}$)|"
52 r"(?P<SPIN>^" 52 r"(?P<SPIN>^"
53 r"(?:(?P<TYPE>\w+) )?\s*" 53 r"(?:(?P<TYPE>[^\d\-]\w*?) )?\s*"
54 r"(?P<ARGS>[\w\-_\d\. \=]+)\s*$)", 54 r"(?P<ARGS>[\w\-_\d\. \=]+)\s*$)",
55 re.S|re.I|re.M) 55 re.S|re.I|re.M)
56 56
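
Note: the tightened TYPE pattern stops a bare numeric argument list from being
parsed as a program name, since TYPE may no longer start with a digit or '-'.
A toy comparison using only the SPIN branch of the expression (simplified; the
real regex has more alternatives and flags):

    import re

    old = re.compile(r"^(?:(?P<TYPE>\w+) )?\s*(?P<ARGS>[\w\-_\d\. \=]+)\s*$")
    new = re.compile(r"^(?:(?P<TYPE>[^\d\-]\w*?) )?\s*(?P<ARGS>[\w\-_\d\. \=]+)\s*$")

    print(old.match("10 20").group('TYPE'))       # '10'  -- digits mistaken for a program
    print(new.match("10 20").group('TYPE'))       # None  -- treated as arguments only
    print(new.match("spin 10 20").group('TYPE'))  # 'spin'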