diff options
| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2012-11-19 14:53:04 -0500 |
|---|---|---|
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2012-11-19 14:53:04 -0500 |
| commit | ad684697958471077d325ae144a48ad49acd7716 (patch) | |
| tree | abb7426e3f4e12417e5baca56aac4e94408c1bf0 /parse | |
| parent | 53cfcf10531256d0e4411a7e0bda431ec27f28e7 (diff) | |
Bug fixes for scaling factors. (branch: wip-mc)
Diffstat (limited to 'parse')
| -rw-r--r-- | parse/dir_map.py | 4 | ||||
| -rw-r--r-- | parse/sched.py | 35 |
2 files changed, 32 insertions, 7 deletions
diff --git a/parse/dir_map.py b/parse/dir_map.py
index 319a5de..499c712 100644
--- a/parse/dir_map.py
+++ b/parse/dir_map.py
| @@ -44,7 +44,7 @@ class DirMap(object): | |||
| 44 | if not base_type in measurement: | 44 | if not base_type in measurement: |
| 45 | continue | 45 | continue |
| 46 | # Ex: wcet/avg/max/vary-type/other-stuff.csv | 46 | # Ex: wcet/avg/max/vary-type/other-stuff.csv |
| 47 | path = [ stat, summary_type, base_type, "vary-%s" % vary ] | 47 | path = [ stat, summary_type, base_type, "vary-%s" % vary ] |
| 48 | result = measurement[base_type] | 48 | result = measurement[base_type] |
| 49 | 49 | ||
| 50 | self.__update_node(path, keys, (vary_value, result)) | 50 | self.__update_node(path, keys, (vary_value, result)) |
| @@ -68,7 +68,7 @@ class DirMap(object): | |||
| 68 | if node.values: | 68 | if node.values: |
| 69 | # Leaf | 69 | # Leaf |
| 70 | with open("/".join(path), "w") as f: | 70 | with open("/".join(path), "w") as f: |
| 71 | arr = [",".join([str(b) for b in n]) for n in node.values] | 71 | arr = [", ".join([str(b) for b in n]) for n in sorted(node.values, key=lambda node: int(node[0]))] |
| 72 | f.write("\n".join(arr) + "\n") | 72 | f.write("\n".join(arr) + "\n") |
| 73 | elif not os.path.isdir(out_path): | 73 | elif not os.path.isdir(out_path): |
| 74 | os.mkdir(out_path) | 74 | os.mkdir(out_path) |
diff --git a/parse/sched.py b/parse/sched.py
index bbf6e10..65df8ac 100644
--- a/parse/sched.py
+++ b/parse/sched.py
| @@ -15,7 +15,7 @@ import pprint | |||
| 15 | 15 | ||
| 16 | from collections import namedtuple,defaultdict | 16 | from collections import namedtuple,defaultdict |
| 17 | from operator import methodcaller | 17 | from operator import methodcaller |
| 18 | from point import Measurement,Type | 18 | from point import Measurement,Type,ExpPoint |
| 19 | 19 | ||
| 20 | PARAM_RECORD = r"(?P<RECORD>" +\ | 20 | PARAM_RECORD = r"(?P<RECORD>" +\ |
| 21 | r"PARAM *?(?P<PID>\d+)\/.*?" +\ | 21 | r"PARAM *?(?P<PID>\d+)\/.*?" +\ |
| @@ -43,6 +43,7 @@ COMPLETION_RECORD = r"(?P<RECORD>" +\ | |||
| 43 | 43 | ||
| 44 | TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period','type','level']) | 44 | TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period','type','level']) |
| 45 | Task = namedtuple('Task', ['pid', 'config', 'run']) | 45 | Task = namedtuple('Task', ['pid', 'config', 'run']) |
| 46 | SchedReturn = namedtuple('SchedReturn', ['util', 'wcet_scales']) | ||
| 46 | 47 | ||
| 47 | class LeveledArray(object): | 48 | class LeveledArray(object): |
| 48 | """ | 49 | """ |
| @@ -193,7 +194,8 @@ def extract_variance(task_dict, data, exp_point): | |||
| 193 | raise Exception("Invalid completion record, missed: %d:" | 194 | raise Exception("Invalid completion record, missed: %d:" |
| 194 | "\n\t%s\n\t%s" % (missed[pid], match.groupdict(), | 195 | "\n\t%s\n\t%s" % (missed[pid], match.groupdict(), |
| 195 | match.group("RECORD"))) | 196 | match.group("RECORD"))) |
| 196 | completions[pid] += [duration] | 197 | if duration or not completions[pid]: |
| 198 | completions[pid] += [duration] | ||
| 197 | 199 | ||
| 198 | for pid, durations in completions.iteritems(): | 200 | for pid, durations in completions.iteritems(): |
| 199 | m = Measurement(pid).from_array(durations) | 201 | m = Measurement(pid).from_array(durations) |
| @@ -257,11 +259,21 @@ def get_base_stats(base_file): | |||
| 257 | saved_stats[base_file] = result | 259 | saved_stats[base_file] = result |
| 258 | return result | 260 | return result |
| 259 | 261 | ||
| 262 | def compute_util(task_dict): | ||
| 263 | util = 0 | ||
| 264 | for task in task_dict.itervalues(): | ||
| 265 | if task.config.level.lower() == "b": | ||
| 266 | util += float(task.config.wcet) / task.config.period | ||
| 267 | return util | ||
| 268 | |||
| 260 | def extract_scaling_data(task_dict, data, result, base_file): | 269 | def extract_scaling_data(task_dict, data, result, base_file): |
| 261 | # Generate trees of tasks with matching configurations | 270 | # Generate trees of tasks with matching configurations |
| 262 | data_stats = config_exit_stats(task_dict, data) | 271 | data_stats = config_exit_stats(task_dict, data) |
| 263 | base_stats = get_base_stats(base_file) | 272 | base_stats = get_base_stats(base_file) |
| 264 | 273 | ||
| 274 | util = compute_util(task_dict) | ||
| 275 | by_wcet = defaultdict(lambda:[]) | ||
| 276 | |||
| 265 | # Scaling factors are calculated by matching groups of tasks with the same | 277 | # Scaling factors are calculated by matching groups of tasks with the same |
| 266 | # config, then comparing task-to-task exec times in order of PID within | 278 | # config, then comparing task-to-task exec times in order of PID within |
| 267 | # each group | 279 | # each group |
| @@ -269,7 +281,8 @@ def extract_scaling_data(task_dict, data, result, base_file): | |||
| 269 | avg_scales = LeveledArray("avg-scaling") | 281 | avg_scales = LeveledArray("avg-scaling") |
| 270 | 282 | ||
| 271 | for config in data_stats: | 283 | for config in data_stats: |
| 272 | if len(data_stats[config]) != len(base_stats[config]): | 284 | # if len(data_stats[config]) != len(base_stats[config]): |
| 285 | if len(data_stats[config]) >1 or len(base_stats[config]) > 1: | ||
| 273 | # Quit, we are missing a record and can't guarantee | 286 | # Quit, we are missing a record and can't guarantee |
| 274 | # a task-to-task comparison | 287 | # a task-to-task comparison |
| 275 | continue | 288 | continue |
| @@ -282,14 +295,25 @@ def extract_scaling_data(task_dict, data, result, base_file): | |||
| 282 | avg_scale = float(base_stat[Type.Avg]) / float(data_stat[Type.Avg]) | 295 | avg_scale = float(base_stat[Type.Avg]) / float(data_stat[Type.Avg]) |
| 283 | max_scale = float(base_stat[Type.Max]) / float(data_stat[Type.Max]) | 296 | max_scale = float(base_stat[Type.Max]) / float(data_stat[Type.Max]) |
| 284 | 297 | ||
| 298 | if (avg_scale < 1 or max_scale < 1) and config.level.lower() == "b": | ||
| 299 | print("Task with config {} has sub 1.0 scaling factors!".format(config)) | ||
| 300 | continue | ||
| 301 | |||
| 285 | task = task_dict[data_stat.id] | 302 | task = task_dict[data_stat.id] |
| 286 | 303 | ||
| 287 | avg_scales.add(task, avg_scale) | 304 | avg_scales.add(task, avg_scale) |
| 288 | max_scales.add(task, max_scale) | 305 | max_scales.add(task, max_scale) |
| 289 | 306 | ||
| 307 | name = "scaling-exp-{}".format(config.level) | ||
| 308 | loop_data = {Type.Avg:avg_scale, Type.Max:max_scale, Type.Var:0} | ||
| 309 | loop_exp = ExpPoint("scaling-id",{name: Measurement("", loop_data)}) | ||
| 310 | by_wcet[config.wcet] += [loop_exp] | ||
| 311 | |||
| 290 | avg_scales.write_measurements(result) | 312 | avg_scales.write_measurements(result) |
| 291 | max_scales.write_measurements(result) | 313 | max_scales.write_measurements(result) |
| 292 | 314 | ||
| 315 | return SchedReturn(util, by_wcet) | ||
| 316 | |||
| 293 | def extract_sched_data(data_file, result, base_file): | 317 | def extract_sched_data(data_file, result, base_file): |
| 294 | with open(data_file, 'r') as f: | 318 | with open(data_file, 'r') as f: |
| 295 | data = f.read() | 319 | data = f.read() |
| @@ -300,8 +324,9 @@ def extract_sched_data(data_file, result, base_file): | |||
| 300 | extract_tardy_vals(task_dict, data, result) | 324 | extract_tardy_vals(task_dict, data, result) |
| 301 | extract_variance(task_dict, data, result) | 325 | extract_variance(task_dict, data, result) |
| 302 | except Exception as e: | 326 | except Exception as e: |
| 303 | print("Error in %s" % data_file) | ||
| 304 | raise e | 327 | raise e |
| 305 | 328 | ||
| 306 | if (base_file): | 329 | if (base_file): |
| 307 | extract_scaling_data(task_dict, data, result, base_file) | 330 | return extract_scaling_data(task_dict, data, result, base_file) |
| 331 | else: | ||
| 332 | return None | ||
