Diffstat (limited to 'parse/sched.py')
-rw-r--r--  parse/sched.py  60
1 file changed, 41 insertions, 19 deletions
diff --git a/parse/sched.py b/parse/sched.py
index 7dd80e0..cbb051e 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -1,5 +1,6 @@
1""" 1"""
2TODO: No longer very pythonic, lot of duplicate code 2TODO: No longer very pythonic, lot of duplicate code
3print out task execution times
3""" 4"""
4 5
5import config.config as conf 6import config.config as conf
@@ -9,6 +10,7 @@ import numpy as np
 import subprocess
 
 from collections import namedtuple,defaultdict
+from operator import methodcaller
 from point import Measurement,Type
 
 PARAM_RECORD = r"(?P<RECORD>" +\
@@ -29,12 +31,14 @@ TARDY_RECORD = r"(?P<RECORD>" +\
29 r"(?P<MISSES>[\d\.]+))" 31 r"(?P<MISSES>[\d\.]+))"
30COMPLETION_RECORD = r"(?P<RECORD>" +\ 32COMPLETION_RECORD = r"(?P<RECORD>" +\
31 r"COMPLETION.*?(?P<PID>\d+)/.*?" +\ 33 r"COMPLETION.*?(?P<PID>\d+)/.*?" +\
32 r"exec.*?(?P<EXEC>[\d\.]+)ms.*?" +\ 34 r"exec:.*?(?P<EXEC>[\d\.]+)ms.*?" +\
33 r"flush.*?(?P<FLUSH>[\d\.]+)ms.*?" +\ 35 r"flush:.*?(?P<FLUSH>[\d\.]+)ms.*?" +\
34 r"load.*?(?P<LOAD>[\d\.]+)ms)" 36 r"flush_work:.*?(?P<FLUSH_WORK>[\d]+).*?" +\
37 r"load:.*?(?P<LOAD>[\d\.]+)ms.*?" +\
38 r"load_work:.*?(?P<LOAD_WORK>[\d]+))"
35 39
36TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period','type','level']) 40TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period','type','level'])
37Task = namedtuple('Task', ['pid', 'config']) 41Task = namedtuple('Task', ['pid', 'config', 'run'])
38 42
39class LeveledArray(object): 43class LeveledArray(object):
40 """ 44 """
@@ -86,7 +90,7 @@ def get_tasks(data):
                         float(match.group('WCET')),
                         float(match.group('PERIOD')),
                         match.group("CLASS"),
-                        match.group("LEVEL")))
+                        match.group("LEVEL")), [])
             if not (t.config.period and t.pid):
                 raise Exception()
             ret += [t]
@@ -144,15 +148,16 @@ def extract_tardy_vals(task_dict, data, exp_point):
         max_tards.add(t, max_tard / t.config.period)
         ratios.add(t, misses / jobs)
 
-    ratios.write_measurements(exp_point)
-    avg_tards.write_measurements(exp_point)
-    max_tards.write_measurements(exp_point)
+    map(methodcaller('write_measurements', exp_point),
+        [ratios, avg_tards, max_tards])
 
 # TODO: rename
 def extract_variance(task_dict, data, exp_point):
     varz = LeveledArray("exec-variance")
     flushes = LeveledArray("cache-flush")
     loads = LeveledArray("cache-load")
+    fworks = LeveledArray("flush-work")
+    lworks = LeveledArray("load-work")
 
     completions = defaultdict(lambda: [])
     missed = defaultdict(lambda: int())
@@ -163,11 +168,17 @@ def extract_variance(task_dict, data, exp_point):
         duration = float(match.group("EXEC"))
         load = float(match.group("LOAD"))
         flush = float(match.group("FLUSH"))
+        lwork = int(match.group("LOAD_WORK"))
+        fwork = int(match.group("FLUSH_WORK"))
 
         if load:
             loads.add(task_dict[pid], load)
+            lworks.add(task_dict[pid], lwork)
+            if not lwork: raise Exception()
         if flush:
             flushes.add(task_dict[pid], flush)
+            fworks.add(task_dict[pid], fwork)
+            if not fwork: raise Exception()
 
         # Last (exit) record often has exec time of 0
         missed[pid] += not bool(duration)
@@ -181,6 +192,9 @@ def extract_variance(task_dict, data, exp_point):
             completions[pid] += [duration]
 
     for pid, durations in completions.iteritems():
+        # TODO: not this, please
+        task_dict[pid].run.append(Measurement(pid).from_array(durations))
+
         job_times = np.array(durations)
         mean = job_times.mean()
 
@@ -194,14 +208,15 @@ def extract_variance(task_dict, data, exp_point):
 
         varz.add(task_dict[pid], corrected)
 
-    varz.write_measurements(exp_point)
-    flushes.write_measurements(exp_point)
-    loads.write_measurements(exp_point)
+    if exp_point:
+        map(methodcaller('write_measurements', exp_point),
+            [varz, flushes, loads, fworks, lworks])
 
 def config_exit_stats(task_dict, data):
-    # Dictionary of task exit measurements by pid
-    exits = get_task_exits(data)
-    exit_dict = dict((e.id, e) for e in exits)
+    # # Dictionary of task exit measurements by pid
+    # exits = get_task_exits(data)
+    # exit_dict = dict((e.id, e) for e in exits)
+    extract_variance(task_dict, data, None)
 
     # Dictionary where keys are configurations, values are list
     # of tasks with those configuratino
@@ -212,11 +227,12 @@ def config_exit_stats(task_dict, data):
     for config in config_dict:
         task_list = sorted(config_dict[config])
 
-        # Replace tasks with corresponding exit stats
-        if not t.pid in exit_dict:
-            raise Exception("Missing exit record for task '%s' in '%s'" %
-                            (t, file.name))
-        exit_list = [exit_dict[t.pid] for t in task_list]
+        # # Replace tasks with corresponding exit stats
+        # if not t.pid in exit_dict:
+        #     raise Exception("Missing exit record for task '%s' in '%s'" %
+        #                     (t, file.name))
+        # exit_list = [exit_dict[t.pid] for t in task_list]
+        exit_list = [t.run[0] for t in task_list]
         config_dict[config] = exit_list
 
     return config_dict
@@ -228,6 +244,7 @@ def get_base_stats(base_file):
     with open(base_file, 'r') as f:
         data = f.read()
     task_dict = get_task_dict(data)
+
     result = config_exit_stats(task_dict, data)
     saved_stats[base_file] = result
     return result
@@ -248,16 +265,21 @@ def extract_scaling_data(task_dict, data, result, base_file):
             # Quit, we are missing a record and can't guarantee
             # a task-to-task comparison
             continue
+
         for data_stat, base_stat in zip(data_stats[config],base_stats[config]):
             if not base_stat[Type.Avg] or not base_stat[Type.Max] or \
                not data_stat[Type.Avg] or not data_stat[Type.Max]:
+                print("missing a thing: {},{}".format(base_stat, data_stat))
                 continue
             # How much larger is their exec stat than ours?
+            print("%s vs %s" % (base_stat, data_stat))
             avg_scale = float(base_stat[Type.Avg]) / float(data_stat[Type.Avg])
             max_scale = float(base_stat[Type.Max]) / float(data_stat[Type.Max])
 
             task = task_dict[data_stat.id]
 
+            print("scaling for %s" % data_stat.id)
+
             avg_scales.add(task, avg_scale)
             max_scales.add(task, max_scale)
 