Diffstat (limited to 'parse/sched.py')
-rw-r--r--  parse/sched.py  134
1 file changed, 108 insertions, 26 deletions
diff --git a/parse/sched.py b/parse/sched.py
index 4933037..1033989 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -9,6 +9,21 @@ from common import recordtype,log_once
 from point import Measurement
 from ctypes import *
 
+LOSS_MSG = """Found task missing more than %d%% of its scheduling records.
+These won't be included in scheduling statistics!"""%(100*conf.MAX_RECORD_LOSS)
+SKIP_MSG = """Measurement '%s' has no non-zero values.
+Measurements like these are not included in scheduling statistics.
+If a measurement is missing, this is why."""
+SCALE_MSG = """Task in {} with config {} has < 1.0 scale!
+These scales are skipped in measurements."""
+
+# Data stored for each task
+TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu', 'level'])
+TaskData = recordtype('TaskData', ['params', 'jobs', 'loads',
+                                   'blocks', 'misses', 'execs'])
+
+ScaleData = namedtuple('ScaleData', ['reg_tasks', 'base_tasks'])
+
 class TimeTracker:
     '''Store stats for durations of time demarcated by sched_trace records.'''
     def __init__(self):
@@ -55,9 +70,27 @@ class TimeTracker:
 
         self.next_job = record.job
 
-# Data stored for each task
-TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu'])
-TaskData = recordtype('TaskData', ['params', 'jobs', 'blocks', 'misses'])
+
+class LeveledArray(object):
+    """Groups statistics by the level of the task to which they apply"""
+    def __init__(self):
+        # Maps stat name -> task level -> list of values
+        self.vals = defaultdict(lambda: defaultdict(lambda: []))
+
+    def add(self, name, level, value):
+        if type(value) != type([]):
+            value = [value]
+        self.vals[name][level] += value
+
+    def write_measurements(self, result):
+        for stat_name, stat_data in self.vals.iteritems():
+            for level, values in stat_data.iteritems():
+                if not values or not sum(values):
+                    log_once(SKIP_MSG, SKIP_MSG % stat_name)
+                    continue
+
+                name = "%s%s" % ("%s-" % level if level else "", stat_name)
+                result[name] = Measurement(name).from_array(values)
 
 # Map of event ids to corresponding class and format
 record_map = {}
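
A minimal, self-contained sketch of how the new LeveledArray is meant to be used. Here `summarize` is a hypothetical stand-in for `Measurement(name).from_array(values)` from the repo's point module, and the skip/log behavior is reduced to a comment:

    from collections import defaultdict

    class LeveledArray(object):
        """Groups statistics by the criticality level of the task."""
        def __init__(self):
            self.vals = defaultdict(lambda: defaultdict(list))

        def add(self, name, level, value):
            # Accept either a single value or a list of values
            if not isinstance(value, list):
                value = [value]
            self.vals[name][level] += value

        def write_measurements(self, result):
            for stat_name, stat_data in self.vals.items():
                for level, values in stat_data.items():
                    if not values or not sum(values):
                        continue  # all-zero stats are skipped (see SKIP_MSG)
                    # Prefix the level, if any: "b-miss-ratio" vs "miss-ratio"
                    name = "%s%s" % ("%s-" % level if level else "", stat_name)
                    result[name] = summarize(values)

    def summarize(values):
        # Hypothetical stand-in for Measurement(name).from_array(values)
        return {'avg': sum(values) / float(len(values)), 'max': max(values)}

    result = {}
    stats = LeveledArray()
    stats.add("miss-ratio", 'b', [0.1, 0.3])  # level-B task values
    stats.add("miss-ratio", None, 0.2)        # task with no level
    stats.write_measurements(result)
    print(result)
    # e.g. {'b-miss-ratio': {'avg': 0.2, 'max': 0.3}, 'miss-ratio': {'avg': 0.2, 'max': 0.2}}
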
@@ -157,10 +190,12 @@ class SchedRecord(object):
 
 class ParamRecord(SchedRecord):
     FIELDS = [('wcet', c_uint32), ('period', c_uint32),
-              ('phase', c_uint32), ('partition', c_uint8)]
+              ('phase', c_uint32), ('partition', c_uint8),
+              ('class', c_uint8), ('level', c_uint8)]
 
     def process(self, task_dict):
-        params = TaskParams(self.wcet, self.period, self.partition)
+        params = TaskParams(self.wcet, self.period,
+                            self.partition, self.level)
         task_dict[self.pid].params = params
 
 class ReleaseRecord(SchedRecord):
@@ -197,8 +232,13 @@ register_record(7, CompletionRecord)
197register_record(8, BlockRecord) 232register_record(8, BlockRecord)
198register_record(9, ResumeRecord) 233register_record(9, ResumeRecord)
199 234
235__all_dicts = {}
236
200def create_task_dict(data_dir, work_dir = None): 237def create_task_dict(data_dir, work_dir = None):
201 '''Parse sched trace files''' 238 '''Parse sched trace files'''
239 if data_dir in __all_dicts:
240 return __all_dicts[data_dir]
241
202 bin_files = conf.FILES['sched_data'].format(".*") 242 bin_files = conf.FILES['sched_data'].format(".*")
203 output_file = "%s/out-st" % work_dir 243 output_file = "%s/out-st" % work_dir
204 244
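
The `__all_dicts` cache above memoizes parsing per data directory, which is what lets extract_mc_data (below) call the parser on both a regular and a base directory without re-reading trace files. A toy sketch of the pattern, with illustrative names not taken from the repo:

    _parsed = {}

    def parse_once(data_dir):
        if data_dir in _parsed:
            return _parsed[data_dir]   # later callers get the cached dict

        print("parsing %s ..." % data_dir)
        task_dict = {}                 # stands in for the real read_data() work
        _parsed[data_dir] = task_dict
        return task_dict

    a = parse_once("run-data/exp-1")
    b = parse_once("run-data/exp-1")   # cache hit: no second parse
    assert a is b
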
@@ -211,7 +251,7 @@ def create_task_dict(data_dir, work_dir = None):
 
     # Save an in-english version of the data for debugging
     # This is optional and will only be done if 'st_show' is in PATH
-    if conf.BINS['st_show']:
+    if work_dir and conf.BINS['st_show']:
         cmd_arr = [conf.BINS['st_show']]
         cmd_arr.extend(bin_names)
         with open(output_file, "w") as f:
@@ -221,28 +261,24 @@ def create_task_dict(data_dir, work_dir = None):
     bin_paths = ["%s/%s" % (data_dir,f) for f in bin_names]
     read_data(task_dict, bin_paths)
 
-    return task_dict
+    __all_dicts[data_dir] = task_dict
 
-LOSS_MSG = """Found task missing more than %d%% of its scheduling records.
-These won't be included in scheduling statistics!"""%(100*conf.MAX_RECORD_LOSS)
-SKIP_MSG = """Measurement '%s' has no non-zero values.
-Measurements like these are not included in scheduling statistics.
-If a measurement is missing, this is why."""
+    return task_dict
 
 def extract_sched_data(result, data_dir, work_dir):
     task_dict = create_task_dict(data_dir, work_dir)
-    stat_data = defaultdict(list)
+    stat_data = LeveledArray()
 
-    # Group per-task values
     for tdata in task_dict.itervalues():
         if not tdata.params:
             # Currently unknown where these invalid tasks come from...
             continue
 
-        miss = tdata.misses
+        level = tdata.params.level
+        miss = tdata.misses
 
         record_loss = float(miss.disjoints)/(miss.matches + miss.disjoints)
-        stat_data["record-loss"].append(record_loss)
+        stat_data.add("record-loss", level, record_loss)
 
         if record_loss > conf.MAX_RECORD_LOSS:
             log_once(LOSS_MSG)
@@ -251,17 +287,63 @@ def extract_sched_data(result, data_dir, work_dir):
         miss_ratio = float(miss.num) / miss.matches
         avg_tard = miss.avg * miss_ratio
 
-        stat_data["miss-ratio" ].append(miss_ratio)
+        stat_data.add("miss-ratio", level, miss_ratio)
+
+        stat_data.add("max-tard", level, miss.max / tdata.params.period)
+        stat_data.add("avg-tard", level, avg_tard / tdata.params.period)
+
+        stat_data.add("avg-block", level, tdata.blocks.avg / NSEC_PER_MSEC)
+        stat_data.add("max-block", level, tdata.blocks.max / NSEC_PER_MSEC)
+
+    stat_data.write_measurements(result)
+
+def extract_mc_data(result, data_dir, base_dir):
+    task_dict = create_task_dict(data_dir)
+    base_dict = create_task_dict(base_dir)
 
-        stat_data["max-tard"].append(miss.max / tdata.params.period)
-        stat_data["avg-tard"].append(avg_tard / tdata.params.period)
+    stat_data = LeveledArray()
 
-        stat_data["avg-block"].append(tdata.blocks.avg / NSEC_PER_MSEC)
-        stat_data["max-block"].append(tdata.blocks.max / NSEC_PER_MSEC)
+    # Only level-B loads are measured
+    for tdata in filter(lambda t: t.params.level == 'b', task_dict.itervalues()):
+        stat_data.add('load', tdata.params.level, tdata.loads)
 
-    # Summarize value groups
-    for name, data in stat_data.iteritems():
-        if not data or not sum(data):
-            log_once(SKIP_MSG, SKIP_MSG % name)
+    tasks_by_config = defaultdict(lambda: ScaleData([], []))
+
+    # Add task execution times in order of pid to tasks_by_config
+    for tasks, field in ((task_dict, 'reg_tasks'), (base_dict, 'base_tasks')):
+        # Sorted for tie breaking: if 3 regular tasks have the same config
+        # (so 3 base tasks also have the same config), match the first-pid
+        # regular task with the first-pid base task, etc., as the kernel does
+        for pid in sorted(tasks.keys()):
+            tdata = tasks[pid]
+
+            tlist = getattr(tasks_by_config[tdata.params], field)
+            tlist += [tdata.execs]
+
+    # Write scaling factors
+    for config, scale_data in tasks_by_config.iteritems():
+        if len(scale_data.reg_tasks) != len(scale_data.base_tasks):
+            # Can't make a comparison with different numbers of tasks!
             continue
-        result[name] = Measurement(str(name)).from_array(data)
+
+        # Tuples of (regular task execution times, base task execution times)
+        # where each has the same configuration
+        all_pairs = zip(scale_data.reg_tasks, scale_data.base_tasks)
+
+        for reg_execs, base_execs in all_pairs:
+            if not reg_execs.max or not reg_execs.avg or\
+               not base_execs.max or not base_execs.avg:
+                # This was an issue at some point, not sure if it still is
+                continue
+
+            max_scale = float(base_execs.max) / reg_execs.max
+            avg_scale = float(base_execs.avg) / reg_execs.avg
+
+            if (avg_scale < 1 or max_scale < 1) and config.level == "b":
+                log_once(SCALE_MSG, SCALE_MSG.format(data_dir, config))
+                continue
+
+            stat_data.add('max-scale', config.level, max_scale)
+            stat_data.add('avg-scale', config.level, avg_scale)
+
+    stat_data.write_measurements(result)
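
To make the pairing logic in extract_mc_data easier to follow, here is a self-contained sketch under the same assumptions: tasks with identical params are matched to base tasks in pid order, and each scale is a base execution time divided by the corresponding regular execution time. The function and input shapes (`{pid: (TaskParams, Execs)}`) are illustrative, not the repo's API:

    from collections import defaultdict, namedtuple

    TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu', 'level'])
    Execs = namedtuple('Execs', ['avg', 'max'])

    def scaling_factors(reg_tasks, base_tasks):
        # Group regular and base execution stats by identical task params
        by_config = defaultdict(lambda: ([], []))
        for tasks, idx in ((reg_tasks, 0), (base_tasks, 1)):
            for pid in sorted(tasks):          # pid order breaks ties
                params, execs = tasks[pid]
                by_config[params][idx].append(execs)

        out = []
        for config, (regs, bases) in by_config.items():
            if len(regs) != len(bases):
                continue                       # can't pair unequal groups
            for reg, base in zip(regs, bases):
                if not (reg.max and reg.avg and base.max and base.avg):
                    continue                   # skip tasks with no samples
                out.append((config.level,
                            float(base.max) / reg.max,    # max-scale
                            float(base.avg) / reg.avg))   # avg-scale
        return out

    p = TaskParams(wcet=10, period=100, cpu=0, level='b')
    print(scaling_factors({1: (p, Execs(4.0, 6.0))},
                          {9: (p, Execs(6.0, 9.0))}))
    # [('b', 1.5, 1.5)]

Scales below 1.0 at level B would mean the "base" (higher-criticality) run executed faster than the regular run, which the commit treats as suspect and logs via SCALE_MSG rather than recording.
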