 experiment/experiment.py |   2
 parse/ft.py              |   2
 parse/sched.py           | 123
 parse/tuple_table.py     |   8
 parse_exps.py            |  67
 run_exps.py              |   2
 6 files changed, 165 insertions(+), 39 deletions(-)
diff --git a/experiment/experiment.py b/experiment/experiment.py
index 5ed6480..a95ca42 100644
--- a/experiment/experiment.py
+++ b/experiment/experiment.py
@@ -161,7 +161,7 @@ class Experiment(object):
 
         self.log("Starting %d tracers" % len(self.tracers))
         map(methodcaller('start_tracing'), self.tracers)
-        time.sleep(2)
+        time.sleep(4)
 
     def teardown(self):
         sleep_time = 5
diff --git a/parse/ft.py b/parse/ft.py
index 9837898..868c8ca 100644
--- a/parse/ft.py
+++ b/parse/ft.py
@@ -41,7 +41,7 @@ def get_ft_output(data_dir, out_dir):
         return None
     return output_file
 
-def get_ft_data(data_file, result, overheads):
+def extract_ft_data(data_file, result, overheads):
     rstr = r",(?:\s+[^\s]+){3}.*?([\d\.]+).*?([\d\.]+),(?:\s+[^\s]+){3}.*?([\d\.]+)"
 
     with open(data_file) as f:
diff --git a/parse/sched.py b/parse/sched.py
index a84aece..94ab000 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -1,5 +1,6 @@
 """
 TODO: make regexes indexable by name
+
 """
 
 import config.config as conf
@@ -8,10 +9,11 @@ import re
 import numpy as np
 import subprocess
 
-from collections import namedtuple
-from point import Measurement
+from collections import namedtuple,defaultdict
+from point import Measurement,Type
 
-Task = namedtuple('Task', ['pid', 'period'])
+TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period'])
+Task = namedtuple('Task', ['pid', 'config'])
 
 def get_st_output(data_dir, out_dir):
     bin_files = conf.FILES['sched_data'].format(".*")
@@ -32,32 +34,54 @@ def get_st_output(data_dir, out_dir):
     return output_file
 
 def get_tasks(data):
-    reg = r"PARAM.*?(\d+).*?cost:\s+[\d\.]+ms.*?period.*?([\d.]+)"
-    return [Task(x[0], x[1]) for x in re.findall(reg, data)]
+    reg = r"PARAM *?(\d+)\/.*?cost:\s+([\d\.]+)ms.*?period.*?([\d.]+)ms.*?part.*?(\d+)"
+    ret = []
+    for match in re.findall(reg, data):
+        t = Task(match[0], TaskConfig(match[3],match[1],match[2]))
+        ret += [t]
+    return ret
+
+def get_task_exits(data):
+    reg = r"TASK_EXIT *?(\d+)/.*?Avg.*?(\d+).*?Max.*?(\d+)"
+    ret = []
+    for match in re.findall(reg, data):
+        m = Measurement(match[0], {Type.Max : match[2], Type.Avg : match[1]})
+        ret += [m]
+    return ret
+
 
 def extract_tardy_vals(data, exp_point):
     ratios = []
-    tards = []
+    avg_tards = []
+    max_tards = []
 
     for t in get_tasks(data):
-        reg = r"TARDY.*?" + t.pid + "/(\d+).*?Tot.*?([\d.]+).*?ms.*([\d.]+).*?ms.*?([\d.]+)"
+        reg = r"TARDY.*?" + t.pid + "/(\d+).*?Tot.*?([\d\.]+).*?ms.*([\d\.]+).*?ms.*?([\d\.]+)"
         matches = re.findall(reg, data)
         if len(matches) != 0:
             jobs = float(matches[0][0])
+
             total_tard = float(matches[0][1])
-            # max_tard = float(matches[0][2])
+            print("total tard: %s" % total_tard)
+            avg_tard = (total_tard / jobs) / float(t.config.period)
+            max_tard = float(matches[0][2]) / float(t.config.period)
+
+            print("avg tard: %s" % avg_tard)
+
             misses = float(matches[0][3])
-            rel_tard = (total_tard / jobs) / float(t.period)
             if misses != 0:
                 miss_ratio = (misses / jobs)
+                print("misses is %d, jobs is %d" % (misses, jobs))
             else:
                 miss_ratio = 0
 
-            ratios.append(miss_ratio)
-            tards.append(rel_tard)
+            ratios += [miss_ratio]
+            avg_tards += [avg_tard]
+            max_tards += [max_tard]
 
-    for (array, name) in ((tards, "rel-tard"), (ratios, "miss-ratio")):
-        exp_point[name] = Measurement().from_array(array)
+    exp_point["avg-rel-tard"] = Measurement().from_array(avg_tards)
+    exp_point["max-rel-tard"] = Measurement().from_array(max_tards)
+    exp_point["miss-ratio"] = Measurement().from_array(ratios)
 
 def extract_variance(data, exp_point):
     varz = []
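Note: the reworked get_tasks() and get_task_exits() pull per-task parameters and exit statistics out of the sched_trace summary text produced by get_st_output(). A minimal sketch of how the two regexes behave, run against fabricated lines invented here to match them (not verbatim trace output):

    import re

    sample = """
    PARAM 1234/rtspin: cost: 10.5ms; period 100.0ms; part 2
    TASK_EXIT 1234/rtspin: Avg 9123 Max 10877
    """

    param_reg = r"PARAM *?(\d+)\/.*?cost:\s+([\d\.]+)ms.*?period.*?([\d.]+)ms.*?part.*?(\d+)"
    exit_reg = r"TASK_EXIT *?(\d+)/.*?Avg.*?(\d+).*?Max.*?(\d+)"

    print(re.findall(param_reg, sample))  # [('1234', '10.5', '100.0', '2')]
    print(re.findall(exit_reg, sample))   # [('1234', '9123', '10877')]

extract_tardy_vals() then normalizes tardiness by the period taken from that config: for example, 42.0 ms total tardiness over 100 jobs with a 100.0 ms period gives an avg-rel-tard of (42.0 / 100) / 100.0 = 0.0042.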
@@ -77,17 +101,70 @@ def extract_variance(data, exp_point):
 
         varz.append(corrected)
 
-    exp_point['var'] = Measurement().from_array(varz)
+    exp_point['exec-var'] = Measurement().from_array(varz)
 
-def get_sched_data(data_file, result):
+def extract_sched_data(data_file, result):
     with open(data_file, 'r') as f:
         data = f.read()
 
-    # if conf != BASE:
-    #     (our_values, their_values) = extract_exec_vals(our_data, their_data)
-    #     conf_result = get_stats(our_values, their_values)
-    #     for key in conf_result.keys():
-    #         result[key][conf] = conf_result[key]
+    extract_tardy_vals(data, result)
+    extract_variance(data, result)
 
-    extract_tardy_vals(data, result)
-    extract_variance(data, result)
+def config_exit_stats(file):
+    with open(file, 'r') as f:
+        data = f.read()
+
+    tasks = get_tasks(data)
+
+    # Dictionary of task exit measurements by pid
+    exits = get_task_exits(data)
+    exit_dict = dict((e.id, e) for e in exits)
+
+    # Dictionary where keys are configurations, values are lists
+    # of tasks with those configurations
+    config_dict = defaultdict(lambda: [])
+    for t in tasks:
+        config_dict[t.config] += [t]
+
+    for config in config_dict:
+        task_list = sorted(config_dict[config])
+
+        # Replace tasks with corresponding exit stats
+        exit_list = [exit_dict[t.pid] for t in task_list]
+        config_dict[config] = exit_list
+
+    return config_dict
+
+saved_stats = {}
+def get_base_stats(base_file):
+    if base_file in saved_stats:
+        return saved_stats[base_file]
+    result = config_exit_stats(base_file)
+    saved_stats[base_file] = result
+    return result
+
+def extract_scaling_data(data_file, base_file, result):
+    # Generate trees of tasks with matching configurations
+    data_stats = config_exit_stats(data_file)
+    base_stats = get_base_stats(base_file)
+
+    # Scaling factors are calculated by matching groups of tasks with the same
+    # config, then comparing task-to-task exec times in order of PID within
+    # each group
+    max_scales = []
+    avg_scales = []
+    for config in data_stats:
+        if len(data_stats[config]) != len(base_stats[config]):
+            # Skip this config: a record is missing, so we can't guarantee
+            # a task-to-task comparison
+            continue
+        for data_stat, base_stat in zip(data_stats[config], base_stats[config]):
+            # How much larger is this run's exec stat than the base's?
+            avg_scale = float(data_stat[Type.Avg]) / float(base_stat[Type.Avg])
+            max_scale = float(data_stat[Type.Max]) / float(base_stat[Type.Max])
+
+            avg_scales += [avg_scale]
+            max_scales += [max_scale]
+
+    result['max-scale'] = Measurement().from_array(max_scales)
+    result['avg-scale'] = Measurement().from_array(avg_scales)
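Note: config_exit_stats() groups each task's exit Measurement by its TaskConfig, and extract_scaling_data() then pairs tasks group-by-group between the measured run and the memoized base run. A minimal sketch of that pairing logic, with plain dicts and invented numbers standing in for point.Measurement (whose interface is assumed here, since it is not part of this diff):

    from collections import namedtuple

    TaskConfig = namedtuple('TaskConfig', ['cpu', 'wcet', 'period'])
    cfg = TaskConfig('0', '10', '100')

    # Fabricated per-task exec-time stats; the real code stores Measurement
    # objects indexed by Type.Avg / Type.Max.
    base_stats = {cfg: [{'Avg': 9.0, 'Max': 11.0}]}
    data_stats = {cfg: [{'Avg': 13.5, 'Max': 16.5}]}

    avg_scales, max_scales = [], []
    for config in data_stats:
        if len(data_stats[config]) != len(base_stats.get(config, [])):
            continue  # cannot pair tasks one-to-one, skip this config
        for data_stat, base_stat in zip(data_stats[config], base_stats[config]):
            avg_scales.append(data_stat['Avg'] / base_stat['Avg'])  # 1.5
            max_scales.append(data_stat['Max'] / base_stat['Max'])  # 1.5

get_base_stats() only memoizes the parse of the base file, so one scaling base can be shared across many experiments without re-reading it.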
diff --git a/parse/tuple_table.py b/parse/tuple_table.py
index 0cf6bec..b56fa6c 100644
--- a/parse/tuple_table.py
+++ b/parse/tuple_table.py
@@ -21,8 +21,8 @@ class ColMap(object):
                 added += 1
                 key += (kv[col],)
 
-        if added != len(kv):
-            raise Exception("column map '%s' missed field in map\n%s" %
+        if added < len(kv):
+            raise Exception("column map '%s' missed field in map '%s'" %
                             (self.col_list, kv))
 
         return key
@@ -51,6 +51,10 @@ class TupleTable(object):
         key = self.col_map.get_key(kv)
         self.table[key] += [point]
 
+    def get_exps(self, kv):
+        key = self.col_map.get_key(kv)
+        return self.table[key]
+
     def __reduce(self):
         if self.reduced:
             raise Exception("cannot reduce twice!")
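Note: get_exps() is the read-side counterpart of add_exp(), which is what lets main() look up the scaling base whose key matches an experiment's remaining params. A rough usage sketch, assuming ColMap and TupleTable are imported from parse.tuple_table, that col_map has already been populated with the columns below (parse_exps.py does this while reading experiment params), and with invented field names; base_exp stands for an ExpData record:

    from parse.tuple_table import ColMap, TupleTable

    col_map = ColMap()
    base_table = TupleTable(col_map)

    params = {'scheduler': 'MC', 'tasks': '20'}
    base_table.add_exp(params, base_exp)    # stored under col_map.get_key(params)
    matches = base_table.get_exps(params)   # -> [base_exp], same key lookup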
diff --git a/parse_exps.py b/parse_exps.py
index ecb1cac..3a1d1b9 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -2,9 +2,11 @@
 from __future__ import print_function
 
 import config.config as conf
+import copy
 import os
 import parse.ft as ft
 import parse.sched as st
+import re
 
 from collections import namedtuple
 from common import load_params
@@ -17,6 +19,9 @@ def parse_args():
 
     parser.add_option('-o', '--out-dir', dest='out_dir',
                       help='directory for data output', default=os.getcwd())
+    parser.add_option('-s', '--scale-against', dest='scale_against',
+                      metavar='PARAM=VALUE', default="",
+                      help='calculate task scaling factors against these configs')
 
     return parser.parse_args()
 
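Note: main() turns the new -s/--scale-against value into a one-entry dict with a simple regex split (see the base_conf line further down in this diff). A quick illustration, using a hypothetical scheduler name:

    import re

    scale_against = "scheduler=MC-MERGE"    # e.g. passed as -s scheduler=MC-MERGE
    base_conf = dict(re.findall("(.*)=(.*)", scale_against))
    # -> {'scheduler': 'MC-MERGE'}; the default empty string yields {}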
@@ -41,8 +46,10 @@ def get_exp_params(data_dir, col_map):
     return params
 
 
-def gen_exp_data(exp_dirs, col_map):
-    exps = []
+def gen_exp_data(exp_dirs, base_conf, col_map):
+    plain_exps = []
+    scaling_bases = []
+
     for data_dir in exp_dirs:
         if not os.path.isdir(data_dir):
             raise IOError("Invalid experiment '%s'" % os.path.abspath(data_dir))
@@ -51,34 +58,72 @@ def gen_exp_data(exp_dirs, col_map):
         if not os.path.exists(tmp_dir):
             os.mkdir(tmp_dir)
 
+        # Read and translate exp output files
         params = get_exp_params(data_dir, col_map)
         st_output = st.get_st_output(data_dir, tmp_dir)
         ft_output = ft.get_ft_output(data_dir, tmp_dir)
 
+        # Create experiment named after the data dir
         exp_data = ExpData(data_dir, params, DataFiles(ft_output, st_output))
-        exps += [exp_data]
 
-    return exps
+        if base_conf and base_conf.viewitems() & params.viewitems():
+            if not st_output:
+                raise Exception("Scaling base '%s' useless without sched data!"
+                                % data_dir)
+            params.pop(base_conf.keys()[0])
+            scaling_bases += [exp_data]
+        else:
+            plain_exps += [exp_data]
+
+    return (plain_exps, scaling_bases)
 
 def main():
     opts, args = parse_args()
 
     args = args or [os.getcwd()]
+
+    # Configuration key for task systems used to calculate task
+    # execution scaling factors
+    base_conf = dict(re.findall("(.*)=(.*)", opts.scale_against))
+
     col_map = ColMap()
-    exps = gen_exp_data(args, col_map)
 
-    table = TupleTable(col_map)
+    (plain_exps, scaling_bases) = gen_exp_data(args, base_conf, col_map)
+
+    base_table = TupleTable(col_map)
+    result_table = TupleTable(col_map)
 
-    for exp in exps:
+    # Used to find matching scaling_base for each experiment
+    for base in scaling_bases:
+        base_table.add_exp(base.params, base)
+
+    for exp in plain_exps:
         result = ExpPoint(exp.name)
+
         if exp.data_files.ft:
-            ft.get_ft_data(exp.data_files.ft, result, conf.BASE_EVENTS)
+            # Write overheads into result
+            ft.extract_ft_data(exp.data_files.ft, result, conf.BASE_EVENTS)
+
         if exp.data_files.st:
-            st.get_sched_data(exp.data_files.st, result)
+            if base_conf:
+                # Try to find a scaling base
+                base_params = copy.deepcopy(exp.params)
+                base_params.pop(base_conf.keys()[0])
+                base = base_table.get_exps(base_params)[0]
+                if base:
+                    # Write scaling factor (vs base) into result
+                    st.extract_scaling_data(exp.data_files.st,
+                                            base.data_files.st,
+                                            result)
+            # Write deadline misses / tardiness into result
+            st.extract_sched_data(exp.data_files.st, result)
+
+        result_table.add_exp(exp.params, result)
+
+        print(result)
 
-        table.add_exp(exp.params, result)
 
-    table.write_result(opts.out_dir)
+    result_table.write_result(opts.out_dir)
 
 if __name__ == '__main__':
     main()
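Note: worked through on invented params, the base lookup in main() goes like this: the -s key is popped from a copy of the experiment's params, and the remainder is used as the table key, which matches a scaling base because gen_exp_data() popped the same key from the base's params before storing it:

    import copy

    base_conf  = {'scheduler': 'MC'}                       # from -s scheduler=MC
    exp_params = {'scheduler': 'MC-MERGE', 'tasks': '20'}  # a measured experiment

    base_params = copy.deepcopy(exp_params)
    base_params.pop(base_conf.keys()[0])                   # Python 2: keys() is a list
    # base_params == {'tasks': '20'}, the same key a base run with
    # scheduler='MC' was stored under, so base_table.get_exps(base_params)
    # returns that base's ExpData.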
diff --git a/run_exps.py b/run_exps.py
index bda0e40..4484952 100755
--- a/run_exps.py
+++ b/run_exps.py
@@ -218,7 +218,7 @@ def main():
     print(" Successful:\t\t%d" % succ)
     print(" Failed:\t\t%d" % failed)
     print(" Already Done:\t\t%d" % done)
-    print(" Invalid Kernel:\t\t%d" % invalid)
+    print(" Wrong Kernel:\t\t%d" % invalid)
 
 
 if __name__ == '__main__':