Diffstat (limited to 'parse/sched.py')
 parse/sched.py | 133 ++++++++++++++++++++++++++++++++++++----------
 1 file changed, 108 insertions(+), 25 deletions(-)
diff --git a/parse/sched.py b/parse/sched.py
index 1213f0d..1f07751 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -2,8 +2,8 @@ import config.config as conf
 import os
 import re
 import struct
-import sys
 import subprocess
+import sys
 
 from collections import defaultdict,namedtuple
 from common import recordtype
@@ -33,8 +33,9 @@ class TimeTracker:
         self.job = record.job
 
 # Data stored for each task
-TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu'])
-TaskData = recordtype('TaskData', ['params', 'jobs', 'blocks', 'misses'])
+TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu', 'level'])
+TaskData = recordtype('TaskData', ['params', 'jobs', 'loads',
+                                   'blocks', 'misses', 'execs'])
 
 # Map of event ids to corresponding class, binary format, and processing methods
 RecordInfo = namedtuple('RecordInfo', ['clazz', 'fmt', 'method'])
@@ -124,6 +125,7 @@ def read_data(task_dict, fnames):
 
 def process_completion(task_dict, record):
     task_dict[record.pid].misses.store_time(record)
+    task_dict[record.pid].loads += [record.load]
 
 def process_release(task_dict, record):
     data = task_dict[record.pid]
@@ -131,7 +133,9 @@ def process_release(task_dict, record):
     data.misses.start_time(record)
 
 def process_param(task_dict, record):
-    params = TaskParams(record.wcet, record.period, record.partition)
+    level  = chr(97 + record.level)
+    params = TaskParams(record.wcet, record.period,
+                        record.partition, level)
     task_dict[record.pid].params = params
 
 def process_block(task_dict, record):
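
Note: chr(97 + record.level) leans on ord('a') == 97, mapping the trace's integer criticality levels onto the lowercase letters used as statistic prefixes later in this patch:

    >>> [chr(97 + lvl) for lvl in range(3)]
    ['a', 'b', 'c']
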
@@ -140,14 +144,27 @@ def process_block(task_dict, record):
 def process_resume(task_dict, record):
     task_dict[record.pid].blocks.store_time(record)
 
+def process_switch_to(task_dict, record):
+    task_dict[record.pid].execs.start_time(record)
+
+def process_switch_away(task_dict, record):
+    task_dict[record.pid].execs.store_time(record)
+
 register_record('ResumeRecord', 9, process_resume, 'Q8x', ['when'])
 register_record('BlockRecord', 8, process_block, 'Q8x', ['when'])
-register_record('CompletionRecord', 7, process_completion, 'Q8x', ['when'])
+register_record('CompletionRecord', 7, process_completion, 'QQ', ['when', 'load'])
 register_record('ReleaseRecord', 3, process_release, 'QQ', ['release', 'when'])
+register_record('SwitchToRecord', 5, process_switch_to, 'Q8x', ['when'])
+register_record('SwitchAwayRecord', 6, process_switch_away, 'Q8x', ['when'])
-register_record('ParamRecord', 2, process_param, 'IIIcc2x',
-                ['wcet','period','phase','partition', 'task_class'])
+register_record('ParamRecord', 2, process_param, 'IIIcccx',
+                ['wcet','period','phase','partition', 'task_class', 'level'])
+
+saved_stats = {}
+def get_task_data(data_dir, work_dir = None):
+    '''Parse sched trace files'''
+    if data_dir in saved_stats:
+        return saved_stats[data_dir]
 
-def extract_sched_data(result, data_dir, work_dir):
     bin_files = conf.FILES['sched_data'].format(".*")
     output_file = "%s/out-st" % work_dir
 
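
Note: the format strings are Python struct specs: 'Q' is an unsigned 64-bit field and 'x' a pad byte, so 'Q8x' and 'QQ' both describe 16-byte payloads; the CompletionRecord change reinterprets the old padding as the new load field. A minimal decoding sketch (payload stands in for a hypothetical 16-byte record body):

    import struct

    when,      = struct.unpack('Q8x', payload)  # one u64 followed by 8 pad bytes
    when, load = struct.unpack('QQ',  payload)  # two u64s: 'when' and 'load'
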
@@ -157,24 +174,46 @@ def extract_sched_data(result, data_dir, work_dir):
 
     # Save an in-english version of the data for debugging
     # This is optional and will only be done if 'st_show' is in PATH
-    if conf.BINS['st_show']:
+    if work_dir and conf.BINS['st_show']:
         cmd_arr = [conf.BINS['st_show']]
         cmd_arr.extend(bins)
         with open(output_file, "w") as f:
             print("calling %s" % cmd_arr)
             subprocess.call(cmd_arr, cwd=data_dir, stdout=f)
 
-    task_dict = defaultdict(lambda :
-                            TaskData(0, 0, TimeTracker(), TimeTracker()))
+    task_dict = defaultdict(lambda: TaskData(0, 0, [], TimeTracker(),
+                                             TimeTracker(), TimeTracker()))
 
     # Gather per-task values
     read_data(task_dict, bins)
 
-    stat_data = {"avg-tard" : [], "max-tard" : [],
-                 "avg-block" : [], "max-block" : [],
-                 "miss-ratio" : []}
+    saved_stats[data_dir] = task_dict
+    return task_dict
+
+class LeveledArray(object):
+    """Groups statistics by the level of the task to which they apply"""
+    def __init__(self):
+        # vals[stat name][level] -> list of recorded values
+        self.vals = defaultdict(lambda: defaultdict(lambda: []))
+
+    def add(self, name, level, value):
+        if type(value) != type([]):
+            value = [value]
+        self.vals[name][level] += value
 
-    # Group per-task values
+    def write_measurements(self, result):
+        for stat_name, stat_data in self.vals.iteritems():
+            for level, values in stat_data.iteritems():
+                if not values:
+                    continue
+
+                name = "%s%s" % ("%s-" % level if level else "", stat_name)
+                result[name] = Measurement(name).from_array(values)
+
+def extract_sched_data(result, data_dir, work_dir):
+    task_dict = get_task_data(data_dir, work_dir)
+
+    stat_data = LeveledArray()
     for tdata in task_dict.itervalues():
         if not tdata.params:
             # Currently unknown where these invalid tasks come from...
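
Note: LeveledArray replaces the flat stat_data dict from the old extract_sched_data: values are bucketed per criticality level, and write_measurements emits one Measurement per non-empty (statistic, level) pair, prefixing the key with the level letter. A rough usage sketch, assuming Measurement.from_array summarizes a list of samples as in the old code:

    stats = LeveledArray()
    stats.add('miss-ratio', 'a', 0.25)        # scalars get wrapped in a list
    stats.add('miss-ratio', 'b', [0.0, 0.5])  # lists are appended wholesale

    result = {}
    stats.write_measurements(result)          # fills 'a-miss-ratio', 'b-miss-ratio'
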
@@ -184,15 +223,59 @@ def extract_sched_data(result, data_dir, work_dir):
         # Scale average down to account for jobs with 0 tardiness
         avg_tard = tdata.misses.avg * miss_ratio
 
-        stat_data["miss-ratio"].append(miss_ratio)
-        stat_data["avg-tard" ].append(avg_tard / tdata.params.wcet)
-        stat_data["max-tard" ].append(tdata.misses.max / tdata.params.wcet)
-        stat_data["avg-block" ].append(tdata.blocks.avg / NSEC_PER_MSEC)
-        stat_data["max-block" ].append(tdata.blocks.max / NSEC_PER_MSEC)
-
-        # Summarize value groups
-        for name, data in stat_data.iteritems():
-            if not data:
+        level = tdata.params.level
+        stat_data.add("miss-ratio", level, miss_ratio)
+        stat_data.add("avg-tard", level, avg_tard / tdata.params.wcet)
+        stat_data.add("max-tard", level, tdata.misses.max / tdata.params.wcet)
+        stat_data.add("avg-block", level, tdata.blocks.avg / NSEC_PER_MSEC)
+        stat_data.add("max-block", level, tdata.blocks.max / NSEC_PER_MSEC)
+
+    stat_data.write_measurements(result)
+
+ScaleData = namedtuple('ScaleData', ['reg_tasks', 'base_tasks'])
+def extract_mc_data(result, data_dir, base_dir):
+    task_dict = get_task_data(data_dir)
+    base_dict = get_task_data(base_dir)
+
+    stat_data = LeveledArray()
+
+    # Only level B loads are measured
+    for tdata in filter(lambda x: x.params.level == 'b', task_dict.itervalues()):
+        stat_data.add('load', tdata.params.level, tdata.loads)
+
+    tasks_by_config = defaultdict(lambda: ScaleData([], []))
+
+    # Add tasks in order of pid to tasks_by_config
+    # Tasks must be ordered by pid or we can't make 1 to 1 comparisons
+    # when multiple tasks have the same config in each task set
+    for tasks, field in ((task_dict, 'reg_tasks'), (base_dict, 'base_tasks')):
+        for pid in sorted(tasks.keys()):
+            tdata = tasks[pid]
+            tlist = getattr(tasks_by_config[tdata.params], field)
+            tlist += [tdata.execs]
+
+    # Write scaling factors
+    for config, scale_data in tasks_by_config.iteritems():
+        if len(scale_data.reg_tasks) != len(scale_data.base_tasks):
+            # Can't make comparison if different numbers of tasks!
             continue
-        result[name] = Measurement(str(name)).from_array(data)
 
+        all_pairs = zip(scale_data.reg_tasks, scale_data.base_tasks)
+        for reg_execs, base_execs in all_pairs:
+            if not reg_execs.max or not reg_execs.avg or\
+               not base_execs.max or not base_execs.avg:
+                # This was an issue at some point, not sure if it still is
+                continue
+
+            max_scale = float(base_execs.max) / reg_execs.max
+            avg_scale = float(base_execs.avg) / reg_execs.avg
+
+            if (avg_scale < 1 or max_scale < 1) and config.level == "b":
+                sys.stderr.write("Task in {} with config {} has <1.0 scale!"
+                                 .format(data_dir, config))
+                continue
+
+            stat_data.add('max-scale', config.level, max_scale)
+            stat_data.add('avg-scale', config.level, avg_scale)
+
+    stat_data.write_measurements(result)
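
Note: the scaling factors compare each task's execution-time statistics against the counterpart with identical TaskParams in the base trace. A quick worked example of the arithmetic, with made-up numbers:

    # matched pair: base run max/avg = 30000/20000 ns, regular run = 24000/16000 ns
    max_scale = float(30000) / 24000  # 1.25
    avg_scale = float(20000) / 16000  # 1.25
    # scales below 1.0 for a level-B task are reported on stderr above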