| field | value | date |
|---|---|---|
| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2012-10-29 09:59:35 -0400 |
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2012-10-29 09:59:35 -0400 |
| commit | 5b50c58ea4881dd185897dfa93860c60f551d815 (patch) | |
| tree | 1eee3271b5ab9fad7774a073110287b27e4d32fd /parse | |
| parent | f1e90e1a5f7b148cf8113fe463615bd95d5bf26d (diff) | |
Prettied up parse output.
Diffstat (limited to 'parse')
| file | mode | lines changed |
|---|---|---|
| parse/ft.py | -rw-r--r-- | 1 |
| parse/point.py | -rw-r--r-- | 11 |
| parse/sched.py | -rw-r--r-- | 60 |
| parse/tuple_table.py | -rw-r--r-- | 5 |
4 files changed, 56 insertions, 21 deletions
```diff
diff --git a/parse/ft.py b/parse/ft.py
index 2c2b597..feb338f 100644
--- a/parse/ft.py
+++ b/parse/ft.py
@@ -41,6 +41,7 @@ def get_ft_output(data_dir, out_dir, force=False):
         # Analyze will summarize those
         # todo pass in f
         cmd_arr = [conf.BINS['analyze']]
+        print("cmd arr: %s-%s" % (cmd_arr, bins))
         cmd_arr.extend(bins)
         with open(output_file, "w") as f:
             subprocess.call(cmd_arr, cwd=out_dir, stdout=f, stderr=err_file)
```
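The added `print` is only a debug aid; the surrounding lines build the `analyze` command line and redirect its stdout into the output file. A minimal sketch of that pattern, with hypothetical binary and file names standing in for `conf.BINS['analyze']`, `bins`, and the real paths:

```python
import subprocess

# Hypothetical stand-ins for conf.BINS['analyze'] and the trace files.
cmd_arr = ["/usr/local/bin/ft_analyze"]
cmd_arr.extend(["ft_host.bin", "ft_dev.bin"])

with open("ft-output.txt", "w") as f, open("ft-err.txt", "w") as err:
    # Everything analyze writes to stdout lands in ft-output.txt;
    # the child process runs with out_dir as its working directory.
    subprocess.call(cmd_arr, cwd="/tmp/out-dir", stdout=f, stderr=err)
```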
```diff
diff --git a/parse/point.py b/parse/point.py
index 30fcd97..8fdd115 100644
--- a/parse/point.py
+++ b/parse/point.py
@@ -16,7 +16,14 @@ def make_typemap():
     return copy.deepcopy(default_typemap)
 
 def dict_str(adict, sep = "\n"):
-    return sep.join(["%s: %s" % (k, str(v)) for (k,v) in sorted(adict.iteritems())])
+    def num_str(v):
+        try:
+            float(v)
+            return "%6.3f" % v
+        except:
+            return v
+    size = 20 if sep == "\n" else 4
+    return sep.join([("%" + str(size) + "s: %9s") % (k, num_str(v)) for (k,v) in sorted(adict.iteritems())])
 
 class Measurement(object):
     def __init__(self, id = None, kv = {}):
@@ -52,7 +59,7 @@ class Measurement(object):
         self.stats[type] = value
 
     def __str__(self):
-        return "<Measurement-%s> %s" % (self.id, dict_str(self.stats, " "))
+        return "%s" % dict_str(self.stats, " ")
 
 
 class Summary(Measurement):
```
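The rewritten `dict_str` right-aligns keys (20 columns for multi-line output, 4 inline) and pins numeric values to three decimal places via the nested `num_str` helper. A standalone sketch of the new behavior (Python 2, matching the codebase's `iteritems`); note that for a numeric *string*, `float(v)` succeeds but `"%6.3f" % v` raises `TypeError`, so the bare `except` quietly returns the value unformatted:

```python
def num_str(v):
    try:
        float(v)              # raises ValueError for non-numeric values
        return "%6.3f" % v    # raises TypeError if v is a numeric string
    except:
        return v

def dict_str(adict, sep="\n"):
    size = 20 if sep == "\n" else 4
    return sep.join([("%" + str(size) + "s: %9s") % (k, num_str(v))
                     for (k, v) in sorted(adict.iteritems())])

print(dict_str({"Avg": 1.23456, "Max": 9.87, "id": "rtspin"}, " "))
# e.g. " Avg:     1.235  Max:     9.870   id:    rtspin"
```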
```diff
diff --git a/parse/sched.py b/parse/sched.py
index 7dd80e0..cbb051e 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -1,5 +1,6 @@
 """
 TODO: No longer very pythonic, lot of duplicate code
+print out task execution times
 """
 
 import config.config as conf
@@ -9,6 +10,7 @@ import numpy as np
 import subprocess
 
 from collections import namedtuple,defaultdict
+from operator import methodcaller
 from point import Measurement,Type
 
 PARAM_RECORD = r"(?P<RECORD>" +\
@@ -29,12 +31,14 @@ TARDY_RECORD = r"(?P<RECORD>" +\
             r"(?P<MISSES>[\d\.]+))"
 COMPLETION_RECORD = r"(?P<RECORD>" +\
             r"COMPLETION.*?(?P<PID>\d+)/.*?" +\
-            r"exec.*?(?P<EXEC>[\d\.]+)ms.*?" +\
-            r"flush.*?(?P<FLUSH>[\d\.]+)ms.*?" +\
-            r"load.*?(?P<LOAD>[\d\.]+)ms)"
+            r"exec:.*?(?P<EXEC>[\d\.]+)ms.*?" +\
+            r"flush:.*?(?P<FLUSH>[\d\.]+)ms.*?" +\
+            r"flush_work:.*?(?P<FLUSH_WORK>[\d]+).*?" +\
+            r"load:.*?(?P<LOAD>[\d\.]+)ms.*?" +\
+            r"load_work:.*?(?P<LOAD_WORK>[\d]+))"
 
 TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period','type','level'])
-Task = namedtuple('Task', ['pid', 'config'])
+Task = namedtuple('Task', ['pid', 'config', 'run'])
 
 class LeveledArray(object):
     """
```
```diff
@@ -86,7 +90,7 @@ def get_tasks(data):
                          float(match.group('WCET')),
                          float(match.group('PERIOD')),
                          match.group("CLASS"),
-                         match.group("LEVEL")))
+                         match.group("LEVEL")), [])
             if not (t.config.period and t.pid):
                 raise Exception()
             ret += [t]
@@ -144,15 +148,16 @@ def extract_tardy_vals(task_dict, data, exp_point):
         max_tards.add(t, max_tard / t.config.period)
         ratios.add(t, misses / jobs)
 
-    ratios.write_measurements(exp_point)
-    avg_tards.write_measurements(exp_point)
-    max_tards.write_measurements(exp_point)
+    map(methodcaller('write_measurements', exp_point),
+        [ratios, avg_tards, max_tards])
 
 # TODO: rename
 def extract_variance(task_dict, data, exp_point):
     varz = LeveledArray("exec-variance")
     flushes = LeveledArray("cache-flush")
     loads = LeveledArray("cache-load")
+    fworks = LeveledArray("flush-work")
+    lworks = LeveledArray("load-work")
 
     completions = defaultdict(lambda: [])
     missed = defaultdict(lambda: int())
```
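Folding the three `write_measurements` calls into one `map` over `operator.methodcaller` works because Python 2's `map` is eager; under Python 3 it returns a lazy iterator and nothing would be written. A toy equivalent with stub objects (the array names here are illustrative only):

```python
from operator import methodcaller

class Stub(object):
    def __init__(self, name):
        self.name = name
    def write_measurements(self, point):
        print("%s -> %s" % (self.name, point))

arrays = [Stub("miss-ratio"), Stub("avg-rel-tard"), Stub("max-rel-tard")]

# Identical effect to a plain loop calling a.write_measurements(point).
map(methodcaller('write_measurements', "exp_point"), arrays)
```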
```diff
@@ -163,11 +168,17 @@ def extract_variance(task_dict, data, exp_point):
         duration = float(match.group("EXEC"))
         load = float(match.group("LOAD"))
         flush = float(match.group("FLUSH"))
+        lwork = int(match.group("LOAD_WORK"))
+        fwork = int(match.group("FLUSH_WORK"))
 
         if load:
             loads.add(task_dict[pid], load)
+            lworks.add(task_dict[pid], lwork)
+            if not lwork: raise Exception()
         if flush:
             flushes.add(task_dict[pid], flush)
+            fworks.add(task_dict[pid], fwork)
+            if not fwork: raise Exception()
 
         # Last (exit) record often has exec time of 0
         missed[pid] += not bool(duration)
@@ -181,6 +192,9 @@ def extract_variance(task_dict, data, exp_point):
         completions[pid] += [duration]
 
     for pid, durations in completions.iteritems():
+        # TODO: not this, please
+        task_dict[pid].run.append(Measurement(pid).from_array(durations))
+
         job_times = np.array(durations)
         mean = job_times.mean()
 
@@ -194,14 +208,15 @@ def extract_variance(task_dict, data, exp_point):
 
         varz.add(task_dict[pid], corrected)
 
-    varz.write_measurements(exp_point)
-    flushes.write_measurements(exp_point)
-    loads.write_measurements(exp_point)
+    if exp_point:
+        map(methodcaller('write_measurements', exp_point),
+            [varz, flushes, loads, fworks, lworks])
 
 def config_exit_stats(task_dict, data):
-    # Dictionary of task exit measurements by pid
-    exits = get_task_exits(data)
-    exit_dict = dict((e.id, e) for e in exits)
+    # # Dictionary of task exit measurements by pid
+    # exits = get_task_exits(data)
+    # exit_dict = dict((e.id, e) for e in exits)
+    extract_variance(task_dict, data, None)
 
     # Dictionary where keys are configurations, values are list
     # of tasks with those configuratino
@@ -212,11 +227,12 @@ def config_exit_stats(task_dict, data):
     for config in config_dict:
         task_list = sorted(config_dict[config])
 
-        # Replace tasks with corresponding exit stats
-        if not t.pid in exit_dict:
-            raise Exception("Missing exit record for task '%s' in '%s'" %
-                            (t, file.name))
-        exit_list = [exit_dict[t.pid] for t in task_list]
+        # # Replace tasks with corresponding exit stats
+        # if not t.pid in exit_dict:
+        #     raise Exception("Missing exit record for task '%s' in '%s'" %
+        #                     (t, file.name))
+        # exit_list = [exit_dict[t.pid] for t in task_list]
+        exit_list = [t.run[0] for t in task_list]
         config_dict[config] = exit_list
 
     return config_dict
@@ -228,6 +244,7 @@ def get_base_stats(base_file):
     with open(base_file, 'r') as f:
         data = f.read()
     task_dict = get_task_dict(data)
+
     result = config_exit_stats(task_dict, data)
     saved_stats[base_file] = result
     return result
@@ -248,16 +265,21 @@ def extract_scaling_data(task_dict, data, result, base_file):
             # Quit, we are missing a record and can't guarantee
             # a task-to-task comparison
            continue
+
         for data_stat, base_stat in zip(data_stats[config],base_stats[config]):
             if not base_stat[Type.Avg] or not base_stat[Type.Max] or \
                not data_stat[Type.Avg] or not data_stat[Type.Max]:
+                print("missing a thing: {},{}".format(base_stat, data_stat))
                 continue
             # How much larger is their exec stat than ours?
+            print("%s vs %s" % (base_stat, data_stat))
             avg_scale = float(base_stat[Type.Avg]) / float(data_stat[Type.Avg])
             max_scale = float(base_stat[Type.Max]) / float(data_stat[Type.Max])
 
             task = task_dict[data_stat.id]
 
+            print("scaling for %s" % data_stat.id)
+
             avg_scales.add(task, avg_scale)
             max_scales.add(task, max_scale)
```
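Stashing per-task exit stats in the new `run` field leans on a `namedtuple` quirk: field bindings are immutable, but a mutable list stored in a field can still be mutated in place, which is exactly what the `# TODO: not this, please` line does. A minimal illustration:

```python
from collections import namedtuple

Task = namedtuple('Task', ['pid', 'config', 'run'])
t = Task(pid=1234, config=None, run=[])

# Rebinding is forbidden: t.run = [...] raises AttributeError.
# Mutating the list object itself is allowed:
t.run.append("exit-stats-for-1234")
print(t.run)  # ['exit-stats-for-1234']
```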
```diff
diff --git a/parse/tuple_table.py b/parse/tuple_table.py
index cb5a72a..434eb22 100644
--- a/parse/tuple_table.py
+++ b/parse/tuple_table.py
@@ -48,6 +48,7 @@ class TupleTable(object):
         self.table = defaultdict(lambda: [])
         self.reduced = False
 
+    # TODO: rename, make exp agnostic, extend for exps
     def add_exp(self, kv, point):
         key = self.col_map.get_key(kv)
         self.table[key] += [point]
@@ -56,6 +57,10 @@ class TupleTable(object):
         key = self.col_map.get_key(kv)
         return self.table[key]
 
+    def __contains__(self, kv):
+        key = self.col_map.get_key(kv)
+        return key in self.table
+
     def reduce(self):
         if self.reduced:
             raise Exception("cannot reduce twice!")
```
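The new `__contains__` lets callers test membership with the natural `kv in table` instead of probing results directly. A self-contained sketch; `StubColMap` is a hypothetical stand-in for the real `ColMap`, which presumably reduces a kv-dict to a hashable key:

```python
from collections import defaultdict

class StubColMap(object):
    def get_key(self, kv):
        # Any stable, hashable encoding of the kv-dict works here.
        return tuple(sorted(kv.iteritems()))

class TupleTable(object):
    def __init__(self, col_map):
        self.col_map = col_map
        self.table = defaultdict(lambda: [])

    def add_exp(self, kv, point):
        self.table[self.col_map.get_key(kv)] += [point]

    def __contains__(self, kv):
        return self.col_map.get_key(kv) in self.table

table = TupleTable(StubColMap())
table.add_exp({'scheduler': 'MC', 'cpus': 4}, "point0")
print({'scheduler': 'MC', 'cpus': 4} in table)       # True
print({'scheduler': 'PSN-EDF', 'cpus': 4} in table)  # False
```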
