author    Jonathan Herman <hermanjl@cs.unc.edu>    2013-04-27 15:34:13 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>    2013-04-27 15:34:13 -0400
commit    3f706cd240d039365cbb8f9975ad97480d8b6145 (patch)
tree      1cdcbaf78b84c7a6383505896e89e29574cf65b2
parent    15f231a79320cbc97cd88d8a4751515a47ce223e (diff)
Fixes.
-rw-r--r--  parse/col_map.py    5
-rw-r--r--  parse/sched.py     85
-rwxr-xr-x  parse_exps.py      50
-rw-r--r--  plot/style.py      16
4 files changed, 95 insertions, 61 deletions
diff --git a/parse/col_map.py b/parse/col_map.py
index 15e1d64..ceb8867 100644
--- a/parse/col_map.py
+++ b/parse/col_map.py
@@ -11,12 +11,15 @@ class ColMapBuilder(object):
         return ColMap(col_list, self.value_map)
 
     def try_add(self, column, value):
-        self.value_map[column].add( value )
+        self.value_map[column].add( str(value) )
 
     def try_remove(self, column):
         if column in self.value_map:
             del(self.value_map[column])
 
+    def __contains__(self, col):
+        return col in self.value_map
+
 class ColMap(object):
     def __init__(self, col_list, values = None):
         self.col_list = col_list
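Note: the new __contains__ hook is what lets later code ask "param in builder" directly (parse_exps.py below relies on it). A minimal runnable sketch of the builder's membership behavior, assuming value_map is a dict of per-column value sets, consistent with try_add() above:

from collections import defaultdict

class ColMapBuilder(object):
    def __init__(self):
        # Assumption: value_map maps column name -> set of seen values
        self.value_map = defaultdict(set)

    def try_add(self, column, value):
        # Values are stringified so mixed types compare consistently
        self.value_map[column].add(str(value))

    def __contains__(self, col):
        return col in self.value_map

builder = ColMapBuilder()
builder.try_add('scheduler', 'MC')
print('scheduler' in builder)  # True, via __contains__
print('period' in builder)     # False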
diff --git a/parse/sched.py b/parse/sched.py
index 5a36da9..6e1fbe6 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -41,29 +41,23 @@ class TimeTracker:
         # any task is always skipped
         self.last_record = None
 
-        self.stored_dur = 0
-
     def store_time(self, next_record):
         '''End duration of time.'''
         dur = (self.last_record.when - self.begin) if self.last_record else -1
-        dur += self.stored_dur
 
         if self.next_job == next_record.job:
-            self.last_record = next_record
-
             if self.last_record:
                 self.matches += 1
 
-            if self.join_job and self.next_job == self.last_record.job:
-                self.stored_dur += dur
-            elif dur > 0:
+            self.last_record = next_record
+
+            if dur > 0:
                 self.max = max(self.max, dur)
                 self.avg *= float(self.num / (self.num + 1))
                 self.num += 1
                 self.avg += dur / float(self.num)
 
             self.begin = 0
-            self.stored_dur = 0
             self.next_job = 0
         else:
             self.disjoints += 1
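The surviving arithmetic in store_time maintains a running mean without keeping samples: scale the old mean by num/(num+1), then add the new duration's share. Note the tracked code computes float(self.num / (self.num + 1)), which truncates to zero under Python 2 integer division unless num is stored as a float; the sketch below keeps the division in floating point. A minimal check of the update rule:

def push_mean(avg, num, dur):
    # Fold one new duration into a running average; same update as
    # TimeTracker.store_time, with the division kept floating point
    avg *= float(num) / (num + 1)
    num += 1
    avg += dur / float(num)
    return avg, num

avg, num = 0.0, 0
for d in [10, 20, 30]:
    avg, num = push_mean(avg, num, d)
print(avg)  # 20.0, the arithmetic mean of the three samples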
@@ -81,7 +75,6 @@ class TimeTracker:
 class LeveledArray(object):
     """Groups statistics by the level of the task to which they apply"""
     def __init__(self):
-        self.name = name
         self.vals = defaultdict(lambda: defaultdict(lambda:[]))
 
     def add(self, name, level, value):
@@ -92,12 +85,12 @@ class LeveledArray(object):
     def write_measurements(self, result):
         for stat_name, stat_data in self.vals.iteritems():
             for level, values in stat_data.iteritems():
-                if not values or not sum(values):
-                    log_once(SKIP_MSG, SKIP_MSG % stat_name)
-                    continue
+                # if not values or not sum(values):
+                #     log_once(SKIP_MSG, SKIP_MSG % stat_name)
+                #     continue
 
-                name = "%s%s" % ("%s-" % level if level else "", stat_name)
-                result[name] = Measurement(name).from_array(arr)
+                name = "%s%s" % ("%s-" % level.capitalize() if level else "", stat_name)
+                result[name] = Measurement(name).from_array(values)
 
 # Map of event ids to corresponding class and format
 record_map = {}
@@ -201,8 +194,9 @@ class ParamRecord(SchedRecord):
               ('class', c_uint8), ('level', c_uint8)]
 
     def process(self, task_dict):
+        level = chr(97 + self.level)
         params = TaskParams(self.wcet, self.period,
-                            self.partition, self.level)
+                            self.partition, level)
         task_dict[self.pid].params = params
 
 class ReleaseRecord(SchedRecord):
@@ -214,11 +208,13 @@ class ReleaseRecord(SchedRecord):
         if data.params:
             data.misses.start_time(self, self.when + data.params.period)
 
+NSEC_PER_USEC = 1000
 class CompletionRecord(SchedRecord):
-    FIELDS = [('when', c_uint64)]
+    FIELDS = [('when', c_uint64), ('load', c_uint64)]
 
     def process(self, task_dict):
         task_dict[self.pid].misses.store_time(self)
+        task_dict[self.pid].loads += [float(self.load) / NSEC_PER_USEC ]
 
 class BlockRecord(SchedRecord):
     FIELDS = [('when', c_uint64)]
@@ -232,9 +228,24 @@ class ResumeRecord(SchedRecord):
     def process(self, task_dict):
         task_dict[self.pid].blocks.store_time(self)
 
+class SwitchToRecord(SchedRecord):
+    FIELDS = [('when', c_uint64)]
+
+    def process(self, task_dict):
+        task_dict[self.pid].execs.start_time(self)
+
+class SwitchAwayRecord(SchedRecord):
+    FIELDS = [('when', c_uint64)]
+
+    def process(self, task_dict):
+        task_dict[self.pid].execs.store_time(self)
+
+
 # Map records to sched_trace ids (see include/litmus/sched_trace.h
 register_record(2, ParamRecord)
 register_record(3, ReleaseRecord)
+register_record(5, SwitchToRecord)
+register_record(6, SwitchAwayRecord)
 register_record(7, CompletionRecord)
 register_record(8, BlockRecord)
 register_record(9, ResumeRecord)
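SwitchToRecord and SwitchAwayRecord plug into the existing id-to-class dispatch. A sketch of that mechanism, assuming record_map simply maps each sched_trace event id to its record class (the ids themselves come from the registrations above; dispatch details are assumed):

record_map = {}

def register_record(rid, cls):
    # Parsing can then look a record class up by the event id found
    # in each binary record's header
    record_map[rid] = cls

class SwitchToRecord(object):
    def process(self, task_dict):
        task_dict[self.pid].execs.start_time(self)

register_record(5, SwitchToRecord)
print(record_map[5].__name__)  # SwitchToRecord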
@@ -250,7 +261,8 @@ def create_task_dict(data_dir, work_dir = None):
     output_file = "%s/out-st" % work_dir
 
     task_dict = defaultdict(lambda :
-                            TaskData(None, 1, TimeTracker(), TimeTracker()))
+                            TaskData(None, 1, [], TimeTracker(),
+                                     TimeTracker(), TimeTracker(True)))
 
     bin_names = [f for f in os.listdir(data_dir) if re.match(bin_files, f)]
     if not len(bin_names):
@@ -281,11 +293,11 @@ def extract_sched_data(result, data_dir, work_dir):
             # Currently unknown where these invalid tasks come from...
             continue
 
-        level = tdata.config.level
+        level = tdata.params.level
         miss = tdata.misses
 
         record_loss = float(miss.disjoints)/(miss.matches + miss.disjoints)
-        stat_data("record-loss", level, record_loss)
+        stat_data.add("record-loss", level, record_loss)
 
         if record_loss > conf.MAX_RECORD_LOSS:
             log_once(LOSS_MSG)
@@ -294,26 +306,27 @@ def extract_sched_data(result, data_dir, work_dir):
         miss_ratio = float(miss.num) / miss.matches
         avg_tard = miss.avg * miss_ratio
 
-        stat_data("miss-ratio", level, miss_ratio)
+        stat_data.add("miss-ratio", level, miss_ratio)
 
-        stat_data("max-tard", level, miss.max / tdata.params.period)
-        stat_data("avg-tard", level, avg_tard / tdata.params.period)
+        stat_data.add("max-tard", level, miss.max / tdata.params.period)
+        stat_data.add("avg-tard", level, avg_tard / tdata.params.period)
 
-        stat_data("avg-block", level, tdata.blocks.avg / NSEC_PER_MSEC)
-        stat_data("max-block", level, tdata.blocks.max / NSEC_PER_MSEC)
+        stat_data.add("avg-block", level, tdata.blocks.avg / NSEC_PER_MSEC)
+        stat_data.add("max-block", level, tdata.blocks.max / NSEC_PER_MSEC)
 
-    stat_data.write_measurements(result)
+        if tdata.params.level == 'b':
+            stat_data.add('LOAD', tdata.params.level, tdata.loads)
 
-def extract_mc_data(result, data_dir, base_dir):
-    task_dict = get_task_data(data_dir)
-    base_dict = get_task_data(base_dir)
+    stat_data.write_measurements(result)
 
-    stat_data = LeveledArray()
+def extract_scaling_data(result, data_dir, base_dir):
+    log_once("Scaling factor extraction currently broken, disabled.")
+    return
 
-    # Only level B loads are measured
-    for tdata in filter(task_dict.iteritems(), lambda x: x.level == 'b'):
-        stat_data.add('load', tdata.config.level, tdata.loads)
+    task_dict = create_task_dict(data_dir)
+    base_dict = create_task_dict(base_dir)
 
+    stat_data = LeveledArray()
     tasks_by_config = defaultdict(lambda: ScaleData([], []))
 
     # Add task execution times in order of pid to tasks_by_config
@@ -324,11 +337,11 @@ def extract_mc_data(result, data_dir, base_dir):
         for pid in sorted(tasks.keys()):
             tdata = tasks[pid]
 
             tlist = getattr(tasks_by_config[tdata.params], field)
             tlist += [tdata.execs]
 
     # Write scaling factors
-    for config, scale_data in tasks_by_config:
+    for config, scale_data in tasks_by_config.iteritems():
         if len(scale_data.reg_tasks) != len(scale_data.base_tasks):
             # Can't make comparison if different numbers of tasks!
             continue
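The scaling-factor path pairs each run's tasks against a base run with identical parameters, and bails out when the task counts differ. A hypothetical sketch of the per-task comparison; defining the factor as a ratio of measured execution maxima is an assumption for illustration, not taken from this commit:

def scaling_factors(reg_tasks, base_tasks):
    # Assumed layout: both lists hold per-task trackers sorted by pid
    if len(reg_tasks) != len(base_tasks):
        return []  # can't compare different numbers of tasks
    return [float(r.max) / b.max
            for r, b in zip(reg_tasks, base_tasks) if b.max]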
diff --git a/parse_exps.py b/parse_exps.py
index 7a99d8a..98f95df 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -52,7 +52,7 @@ ExpData = namedtuple('ExpData', ['path', 'params', 'work_dir'])
 
 def parse_exp(exp_force_base):
     # Tupled for multiprocessing
-    exp, force, base_table = exp_force_base
+    exp, force, base_exp = exp_force_base
 
     result_file = exp.work_dir + "/exp_point.pkl"
     should_load = not force and os.path.exists(result_file)
@@ -81,11 +81,9 @@ def parse_exp(exp_force_base):
     # Write scheduling statistics into result
     st.extract_sched_data(result, exp.path, exp.work_dir)
 
-    # Write scaling factors into result
-    if base_table and exp.params in base_table:
-        base_exp = base_table[exp.params]
-        if base_exp != exp:
-            st.extract_mc_data(result, exp.path, base_exp.path)
+    if base_exp:
+        # Write scaling factors into result
+        st.extract_scaling_data(result, exp.path, base_exp.path)
 
     with open(result_file, 'wb') as f:
         pickle.dump(result, f)
@@ -138,18 +136,22 @@ def load_exps(exp_dirs, cm_builder, force):
     return exps
 
 
-def make_base_table(cmd_scale, col_map, exps):
+def make_base_table(cmd_scale, builder, exps):
     if not cmd_scale:
         return None
 
     # Configuration key for task systems used to calculate task
     # execution scaling factors
-    [(param, value)] = dict(re.findall("(.*)=(.*)", cmd_scale))
+    [(param, value)] = re.findall("(.*)=(.*)", cmd_scale)
 
-    if param not in col_map:
-        raise IOError("Base column '%s' not present in any parameters!" % param)
 
-    base_table = TupleTable(copy.deepcopy(col_map))
+    if param not in builder:
+        com.log_once("Base column '%s' not present in any parameters!" % param)
+        com.log_once("Scaling factors will not be included.")
+
+    builder.try_remove(param)
+    base_map = builder.build()
+    base_table = TupleTable(base_map)
 
     # Fill table with exps who we will scale against
     for exp in exps:
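Rebuilding the column map without the scaling parameter is what makes the lookup work: an experiment and its base run differ only in that one column, so once it is dropped from the key they collide in base_table. A toy illustration with a hypothetical 'base' column, assuming TupleTable keys rows by a tuple of the remaining column values:

def key_for(params, cols):
    # Assumed keying scheme: tuple of the remaining column values
    return tuple(params.get(c) for c in cols)

cols = ['scheduler', 'tasks']  # 'base' removed by try_remove()
exp      = {'scheduler': 'MC', 'tasks': 20, 'base': False}
base_exp = {'scheduler': 'MC', 'tasks': 20, 'base': True}

print(key_for(exp, cols) == key_for(base_exp, cols))  # True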
@@ -171,23 +173,35 @@ def get_dirs(args):
     return [os.getcwd()]
 
 
-def fill_table(table, exps, opts):
-    sys.stderr.write("Parsing data...\n")
+def get_bases(builder, exps, opts):
+    '''Return a matching array of experiments against which scaling factors
+    will be calculated.'''
+    bases = [None]*len(exps)
+
+    base_table = make_base_table(opts.scale_against, builder, exps)
+    if not base_table:
+        return bases
+
+    for i,exp in enumerate(exps):
+        if exp.params in base_table and base_table[exp.params] != exp:
+            bases[i] = base_table[exp.params]
 
+    return bases
+
+
+def fill_table(table, builder, exps, opts):
     procs = min(len(exps), opts.processors)
     logged = multiprocessing.Manager().list()
+    bases = get_bases(builder, exps, opts)
 
     sys.stderr.write("Parsing data...\n")
 
-    base_table = make_base_table(opts.scale_against,
-                                 table.get_col_map(), exps)
-
     pool = multiprocessing.Pool(processes=procs,
                                 # Share a list of previously logged messages amongst processes
                                 # This is for the com.log_once method to use
                                 initializer=com.set_logged_list, initargs=(logged,))
 
-    pool_args = zip(exps, [opts.force]*len(exps), [base_table]*len(exps))
+    pool_args = zip(exps, [opts.force]*len(exps), bases)
     enum = pool.imap_unordered(parse_exp, pool_args, 1)
 
     try:
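get_bases returns a list aligned index-by-index with exps, so zip hands each worker a self-contained (exp, force, base) tuple; parse_exp unpacks it because Pool.imap_unordered passes exactly one argument. A stand-in sketch of that pairing (the names are illustrative, not the real ExpData objects):

def parse_exp(exp_force_base):
    exp, force, base = exp_force_base  # tupled for multiprocessing
    return '%s scaled against %s' % (exp, base)

exps  = ['exp-a', 'exp-b', 'exp-c']
bases = [None, 'base-b', None]  # same length as exps, index-aligned

for out in map(parse_exp, zip(exps, [False]*len(exps), bases)):
    print(out)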
@@ -259,7 +273,7 @@ def main():
     col_map = builder.build()
     table = TupleTable(col_map)
 
-    fill_table(table, exps, opts)
+    fill_table(table, builder, exps, opts)
 
     if not table:
         sys.stderr.write("Found no data to parse!")
diff --git a/plot/style.py b/plot/style.py
index 4e2057f..5c2d661 100644
--- a/plot/style.py
+++ b/plot/style.py
@@ -1,7 +1,7 @@
 from collections import namedtuple
 import matplotlib.pyplot as plot
 
-class Style(namedtuple('SS', ['marker', 'line', 'color'])):
+class Style(namedtuple('SS', ['marker', 'color', 'line'])):
     def fmt(self):
         return self.marker + self.line + self.color
 
@@ -24,11 +24,12 @@ class StyleMap(object):
                 t = float if float(value) % 1.0 else int
             except:
                 t = bool if value in ['True','False'] else str
-            return StyleMap.ORDER.index(t)
-        col_list = sorted(col_list, key=type_priority)
+            # return StyleMap.ORDER.index(t)
+            return len(col_values[column])
+        col_list = sorted(col_list, key=type_priority, reverse=True)
 
         # TODO: undo this, switch to popping mechanism
-        for field, values in reversed([x for x in self.__get_all()._asdict().iteritems()]):
+        for field, values in [x for x in self.__get_all()._asdict().iteritems()]:
             if not col_list:
                 break
 
@@ -36,7 +37,10 @@ class StyleMap(object):
             value_dict = {}
 
             for value in sorted(col_values[next_column]):
-                value_dict[value] = values.pop(0)
+                try:
+                    value_dict[value] = values.pop(0)
+                except Exception as e:
+                    raise e
 
             self.value_map[next_column] = value_dict
             self.field_map[next_column] = field
@@ -44,7 +48,7 @@ class StyleMap(object):
     def __get_all(self):
         '''A Style holding all possible values for each property.'''
         return Style(marker=list('.,ov^<>1234sp*hH+xDd|_'),
-                     line=['-', ':', '--'],
+                     line=['-', ':', '--', '_'],
                      color=list('bgrcmyk'))
 
     def get_style(self, kv):
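Reordering the Style fields matters because StyleMap walks __get_all()._asdict() in namedtuple field order when assigning plot properties to columns, so with color ahead of line, colors are handed out before line styles. The fmt() concatenation is unchanged and still yields a valid matplotlib format string; a small check:

from collections import namedtuple

class Style(namedtuple('SS', ['marker', 'color', 'line'])):
    def fmt(self):
        return self.marker + self.line + self.color

s = Style(marker='o', color='b', line='--')
print(s.fmt())  # 'o--b': circle markers, dashed line, blue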