author    Jonathan Herman <hermanjl@cs.unc.edu>  2012-11-19 14:53:04 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2012-11-19 14:53:04 -0500
commit    ad684697958471077d325ae144a48ad49acd7716 (patch)
tree      abb7426e3f4e12417e5baca56aac4e94408c1bf0
parent    53cfcf10531256d0e4411a7e0bda431ec27f28e7 (diff)

Bug fixes for scaling factors. (wip-mc)
-rw-r--r--  parse/dir_map.py |  4
-rw-r--r--  parse/sched.py   | 35
-rwxr-xr-x  parse_exps.py    | 38
3 files changed, 68 insertions(+), 9 deletions(-)
diff --git a/parse/dir_map.py b/parse/dir_map.py
index 319a5de..499c712 100644
--- a/parse/dir_map.py
+++ b/parse/dir_map.py
@@ -44,7 +44,7 @@ class DirMap(object):
             if not base_type in measurement:
                 continue
             # Ex: wcet/avg/max/vary-type/other-stuff.csv
             path = [ stat, summary_type, base_type, "vary-%s" % vary ]
             result = measurement[base_type]
 
             self.__update_node(path, keys, (vary_value, result))
@@ -68,7 +68,7 @@ class DirMap(object):
         if node.values:
             # Leaf
             with open("/".join(path), "w") as f:
-                arr = [",".join([str(b) for b in n]) for n in node.values]
+                arr = [", ".join([str(b) for b in n]) for n in sorted(node.values, key=lambda node: int(node[0]))]
                 f.write("\n".join(arr) + "\n")
         elif not os.path.isdir(out_path):
             os.mkdir(out_path)
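The sorted() call matters because node.values holds CSV rows whose first field is the vary value recorded as a string, and an unsorted (or lexicographically sorted) dump would order "10" before "2". A minimal standalone sketch of the difference, using hypothetical row data:

    rows = [("10", 3.1), ("2", 0.9), ("33", 7.4)]
    sorted(rows)                               # [('10', ...), ('2', ...), ('33', ...)] - lexicographic
    sorted(rows, key=lambda row: int(row[0]))  # [('2', ...), ('10', ...), ('33', ...)] - numeric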
diff --git a/parse/sched.py b/parse/sched.py
index bbf6e10..65df8ac 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -15,7 +15,7 @@ import pprint
 
 from collections import namedtuple,defaultdict
 from operator import methodcaller
-from point import Measurement,Type
+from point import Measurement,Type,ExpPoint
 
 PARAM_RECORD = r"(?P<RECORD>" +\
     r"PARAM *?(?P<PID>\d+)\/.*?" +\
@@ -43,6 +43,7 @@ COMPLETION_RECORD = r"(?P<RECORD>" +\
 
 TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period','type','level'])
 Task = namedtuple('Task', ['pid', 'config', 'run'])
+SchedReturn = namedtuple('SchedReturn', ['util', 'wcet_scales'])
 
 class LeveledArray(object):
     """
@@ -193,7 +194,8 @@ def extract_variance(task_dict, data, exp_point):
             raise Exception("Invalid completion record, missed: %d:"
                             "\n\t%s\n\t%s" % (missed[pid], match.groupdict(),
                                               match.group("RECORD")))
-        completions[pid] += [duration]
+        if duration or not completions[pid]:
+            completions[pid] += [duration]
 
     for pid, durations in completions.iteritems():
         m = Measurement(pid).from_array(durations)
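The guard keeps zero-length completion durations out of the variance statistics once at least one sample has been recorded, while still recording something for a task whose first observation is zero. A quick trace of the filtering, assuming durations arrive as plain numbers:

    completions = {101: []}
    for duration in [0, 12, 0, 15]:
        if duration or not completions[101]:
            completions[101] += [duration]
    # completions[101] == [0, 12, 15]; only a leading zero survives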
@@ -257,11 +259,21 @@ def get_base_stats(base_file):
     saved_stats[base_file] = result
     return result
 
+def compute_util(task_dict):
+    util = 0
+    for task in task_dict.itervalues():
+        if task.config.level.lower() == "b":
+            util += float(task.config.wcet) / task.config.period
+    return util
+
 def extract_scaling_data(task_dict, data, result, base_file):
     # Generate trees of tasks with matching configurations
     data_stats = config_exit_stats(task_dict, data)
     base_stats = get_base_stats(base_file)
 
+    util = compute_util(task_dict)
+    by_wcet = defaultdict(lambda:[])
+
     # Scaling factors are calculated by matching groups of tasks with the same
     # config, then comparing task-to-task exec times in order of PID within
     # each group
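compute_util sums wcet/period over level-B tasks only; the result is carried through SchedReturn so parse_exps.py can scale loop counts by it. A small sketch with hypothetical task parameters (Python 2, matching the itervalues() above; assumes compute_util and the namedtuples from this file are in scope):

    # Two level-B tasks and one level-A task (level-A is ignored)
    task_dict = {
        1: Task(1, TaskConfig(0, 10, 100, 'periodic', 'B'), []),
        2: Task(2, TaskConfig(1, 30, 100, 'periodic', 'b'), []),
        3: Task(3, TaskConfig(0, 5, 50, 'periodic', 'A'), []),
    }
    util = compute_util(task_dict)  # 10/100 + 30/100 = 0.4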
@@ -269,7 +281,8 @@ def extract_scaling_data(task_dict, data, result, base_file):
     avg_scales = LeveledArray("avg-scaling")
 
     for config in data_stats:
-        if len(data_stats[config]) != len(base_stats[config]):
+        # if len(data_stats[config]) != len(base_stats[config]):
+        if len(data_stats[config]) > 1 or len(base_stats[config]) > 1:
             # Quit, we are missing a record and can't guarantee
             # a task-to-task comparison
             continue
@@ -282,14 +295,25 @@ def extract_scaling_data(task_dict, data, result, base_file):
         avg_scale = float(base_stat[Type.Avg]) / float(data_stat[Type.Avg])
         max_scale = float(base_stat[Type.Max]) / float(data_stat[Type.Max])
 
+        if (avg_scale < 1 or max_scale < 1) and config.level.lower() == "b":
+            print("Task with config {} has sub 1.0 scaling factors!".format(config))
+            continue
+
         task = task_dict[data_stat.id]
 
         avg_scales.add(task, avg_scale)
         max_scales.add(task, max_scale)
 
+        name = "scaling-exp-{}".format(config.level)
+        loop_data = {Type.Avg:avg_scale, Type.Max:max_scale, Type.Var:0}
+        loop_exp = ExpPoint("scaling-id", {name: Measurement("", loop_data)})
+        by_wcet[config.wcet] += [loop_exp]
+
     avg_scales.write_measurements(result)
     max_scales.write_measurements(result)
 
+    return SchedReturn(util, by_wcet)
+
 def extract_sched_data(data_file, result, base_file):
     with open(data_file, 'r') as f:
         data = f.read()
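A scaling factor here is base execution time divided by measured execution time, so a value below 1.0 means the task ran slower than in its base experiment; for level-B tasks that signals a bad pairing, which is why such configs are reported and skipped rather than written out. A worked example with hypothetical exit statistics:

    base_avg, base_max = 950.0, 1200.0   # base experiment
    data_avg, data_max = 1000.0, 1100.0  # experiment under analysis

    avg_scale = base_avg / data_avg      # 0.95  - sub-1.0, trips the level-B check
    max_scale = base_max / data_max      # ~1.09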
@@ -300,8 +324,9 @@ def extract_sched_data(data_file, result, base_file):
         extract_tardy_vals(task_dict, data, result)
         extract_variance(task_dict, data, result)
     except Exception as e:
-        print("Error in %s" % data_file)
         raise e
 
     if (base_file):
-        extract_scaling_data(task_dict, data, result, base_file)
+        return extract_scaling_data(task_dict, data, result, base_file)
+    else:
+        return None
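extract_sched_data now returns a SchedReturn(util, wcet_scales) when a base file is supplied and None otherwise, so callers have to branch on the result. A minimal caller sketch, mirroring how parse_exps.py consumes it below:

    sret = extract_sched_data(data_file, result, base_file)
    if sret:
        # wcet_scales maps each wcet to the ExpPoints collected for that group
        for wcet, points in sret.wcet_scales.iteritems():
            print("wcet %s: %d scaling points (util %.2f)" % (wcet, len(points), sret.util))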
diff --git a/parse_exps.py b/parse_exps.py
index 87d0783..6a07ebc 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -16,6 +16,8 @@ from optparse import OptionParser
 from parse.point import ExpPoint
 from parse.tuple_table import ColMap,TupleTable
 
+# TODO: make 2-level graph optional
+
 def parse_args():
     # TODO: convert data-dir to proper option, clean 'dest' options
     parser = OptionParser("usage: %prog [options] [data_dir]...")
@@ -107,6 +109,17 @@ def gen_exp_data(exp_dirs, base_conf, col_map, force):
         sys.stderr.write('\n')
     return (plain_exps, scaling_bases)
 
+PAGE_SIZE = 4092
+PROCS = 4
+def compute_loops(util, dist, cost, wss):
+    scale = PROCS * dist / util
+    cost *= scale
+    pages = wss / PAGE_SIZE
+    per_loop = .023 * pages
+    loops = int(cost / per_loop) + 1
+    loops = loops + (5 - loops % 5)
+    return loops
+
 def main():
     opts, args = parse_args()
 
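compute_loops converts a target execution cost into a loop count for a working-set-touching job body: the cost is scaled by PROCS * dist / util, the per-loop cost is estimated at .023 time units per page of the working set, and the count is rounded up to the strictly next multiple of five. PAGE_SIZE is presumably meant to be the standard 4096-byte page (4092 looks like a typo); for whole-page working sets the division comes out the same either way. A worked example with hypothetical inputs:

    loops = compute_loops(util=2.0, dist=0.5, cost=10.0, wss=409600)
    # scale    = 4 * 0.5 / 2.0 = 1.0
    # pages    = 409600 / 4092 = 100 (integer division)
    # per_loop = .023 * 100    = 2.3
    # loops    = int(10.0 / 2.3) + 1 = 5, then 5 + (5 - 5 % 5) = 10
    # Note the rounding always moves to the strictly next multiple of 5.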
@@ -129,8 +142,14 @@ def main():
     for param in opts.ignore.split(","):
         col_map.try_remove(param)
 
+    # Begin coupling
+    loop_map = copy.deepcopy(col_map)
+    loop_map.try_remove('num_tasks')
+    loop_map.force_add('loops')
+
     base_table = TupleTable(base_map)    # For tracking 'base' experiments
     result_table = TupleTable(col_map)   # For generating output
+    loop_table = TupleTable(loop_map)    # For by-reuse scaling factor
 
     # Used to find matching scaling_base for each experiment
     for base in scaling_bases:
@@ -153,8 +172,17 @@ def main():
             base = base_table.get_exps(base_params)[0]
 
         # Write deadline misses / tardiness into result
-        st.extract_sched_data(exp.data_files.st, result,
-                              base.data_files.st if base else None)
+        sret = st.extract_sched_data(exp.data_files.st, result,
+                                     base.data_files.st if base else None)
+        # Terrible
+        if sret:
+            for wcet, points in sret.wcet_scales.iteritems():
+                loop_params = copy.deepcopy(exp.params)
+                # loops = compute_loops(sret.util, float(exp.params['hrt_dist']),
+                #                       wcet, float(exp.params['wss']))
+                # loop_params['loops'] = loops  # * int(exp.params['wss'])
+                for point in points:
+                    loop_table.add_exp(loop_params, point)
 
         result_table.add_exp(exp.params, result)
 
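The loop above fans each experiment's scaling ExpPoints out into loop_table under a copy of the experiment's parameters; with the compute_loops call still commented out, the 'loops' column forced into loop_map never actually receives a value. A sketch of what that path would do once enabled, using hypothetical parameter values (wcet and util stand in for the loop variable and sret.util):

    loop_params = {'hrt_dist': '0.5', 'wss': '409600'}  # subset of exp.params
    wcet, util = 10.0, 2.0

    loops = compute_loops(util, float(loop_params['hrt_dist']),
                          wcet, float(loop_params['wss']))
    loop_params['loops'] = loops  # fills the column added by loop_map.force_add('loops')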
@@ -168,14 +196,20 @@ def main():
         sh.rmtree(opts.out)
 
     result_table.reduce()
+    loop_table.reduce()
 
     sys.stderr.write("Writing result...\n")
     if opts.write_map:
         # Write summarized results into map
         result_table.write_map(opts.out)
+        if base_conf:
+            loop_table.write_map(opts.out)
     else:
         # Write out csv directories for all variable params
         result_table.write_csvs(opts.out)
+        if base_conf:
+            loop_table.write_csvs(opts.out)
+
 
 if __name__ == '__main__':
     main()