Diffstat (limited to 'parse/sched.py')

 parse/sched.py | 65 ++++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 44 insertions(+), 21 deletions(-)
diff --git a/parse/sched.py b/parse/sched.py
index a65f001..7dd80e0 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -13,7 +13,7 @@ from point import Measurement,Type
 
 PARAM_RECORD = r"(?P<RECORD>" +\
                r"PARAM *?(?P<PID>\d+)\/.*?" +\
-               r"cost:\s+(?P<WCET>[\d\.]+)ms.*?" +\
+               r"cost.*?(?P<WCET>[\d\.]+)ms.*?" +\
                r"period.*?(?P<PERIOD>[\d.]+)ms.*?" +\
                r"part.*?(?P<CPU>\d+)[, ]*" +\
                r"(?:class=(?P<CLASS>\w+))?[, ]*" +\
@@ -23,13 +23,15 @@ EXIT_RECORD = r"(?P<RECORD>" +\
               r"Avg.*?(?P<AVG>\d+).*?" +\
               r"Max.*?(?P<MAX>\d+))"
 TARDY_RECORD = r"(?P<RECORD>" +\
-               r"TARDY.*?(?P<PID>\d+)/(?P<JOB>\d+).*?" +\
+               r"TASK_TARDY.*?(?P<PID>\d+)/(?P<JOB>\d+).*?" +\
                r"Tot.*?(?P<TOTAL>[\d\.]+).*?ms.*?" +\
                r"(?P<MAX>[\d\.]+).*?ms.*?" +\
                r"(?P<MISSES>[\d\.]+))"
 COMPLETION_RECORD = r"(?P<RECORD>" +\
                     r"COMPLETION.*?(?P<PID>\d+)/.*?" +\
-                    r"(?P<EXEC>[\d\.]+)ms)"
+                    r"exec.*?(?P<EXEC>[\d\.]+)ms.*?" +\
+                    r"flush.*?(?P<FLUSH>[\d\.]+)ms.*?" +\
+                    r"load.*?(?P<LOAD>[\d\.]+)ms)"
 
 TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period','type','level'])
 Task = namedtuple('Task', ['pid', 'config'])
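
The COMPLETION_RECORD extension captures cache flush and load overheads alongside the execution time. Since the full pattern appears in this hunk, it can be exercised directly; the record line below is hypothetical:

```python
import re

COMPLETION_RECORD = (r"(?P<RECORD>"
                     r"COMPLETION.*?(?P<PID>\d+)/.*?"
                     r"exec.*?(?P<EXEC>[\d\.]+)ms.*?"
                     r"flush.*?(?P<FLUSH>[\d\.]+)ms.*?"
                     r"load.*?(?P<LOAD>[\d\.]+)ms)")

# Hypothetical record line, for illustration only.
line = "COMPLETION 1234/37: exec = 9.85ms, flush = 0.12ms, load = 0.30ms"
m = re.search(COMPLETION_RECORD, line)
print(m.group('PID'), m.group('EXEC'), m.group('FLUSH'), m.group('LOAD'))
# -> 1234 9.85 0.12 0.30
```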
@@ -107,11 +109,9 @@ def get_task_exits(data):
             m = Measurement( int(match.group('PID')),
                              {Type.Max : float(match.group('MAX')),
                               Type.Avg : float(match.group('AVG'))})
-            for (type, value) in m:
-                if not value: raise Exception()
         except:
             raise Exception("Invalid exit record, parsed:\n\t%s\n\t%s" %
-                            (match.groupdict(), m.group('RECORD')))
+                            (match.groupdict(), match.group('RECORD')))
 
         ret += [m]
     return ret
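
The deleted loop aborted parsing whenever an exit record carried a zero Avg or Max; after this change such records survive parsing, and the zero-stat guard added to extract_scaling_data further down skips them instead. For reference, a sketch of what the surviving code builds; only the tail of EXIT_RECORD is visible in this diff, so the pattern head and sample line here are assumptions:

```python
import re

# Assumed shape: the hunk above only shows the Avg/Max tail of
# EXIT_RECORD, so both the pattern and the sample line are guesses.
EXIT_TAIL = r"(?P<PID>\d+).*?Avg.*?(?P<AVG>\d+).*?Max.*?(?P<MAX>\d+)"

match = re.search(EXIT_TAIL, "TASK_EXIT 1234/... Avg: 9 Max: 14")
stats = {'Avg': float(match.group('AVG')), 'Max': float(match.group('MAX'))}
print(int(match.group('PID')), stats)  # -> 1234 {'Avg': 9.0, 'Max': 14.0}
```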
@@ -137,7 +137,7 @@ def extract_tardy_vals(task_dict, data, exp_point):
 
         if pid not in task_dict:
             raise Exception("Invalid pid '%d' in tardy record:\n\t%s" %
-                            match.group("RECORD"))
+                            (pid, match.group("RECORD")))
 
         t = task_dict[pid]
         avg_tards.add(t, total_tard / (jobs * t.config.period))
@@ -148,8 +148,12 @@ def extract_tardy_vals(task_dict, data, exp_point):
     avg_tards.write_measurements(exp_point)
     max_tards.write_measurements(exp_point)
 
+# TODO: rename
 def extract_variance(task_dict, data, exp_point):
     varz = LeveledArray("exec-variance")
+    flushes = LeveledArray("cache-flush")
+    loads = LeveledArray("cache-load")
+
     completions = defaultdict(lambda: [])
     missed = defaultdict(lambda: int())
 
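
The two new accumulators mirror the existing `varz` usage: values are added per task while parsing and written out at the end. LeveledArray itself is defined elsewhere in these scripts; a minimal stand-in for the two methods this diff calls might look like:

```python
from collections import defaultdict

class LeveledArrayStandIn(object):
    # Illustrative stand-in only; it mimics just the add() and
    # write_measurements() calls made in this diff.
    def __init__(self, name):
        self.name = name
        self.vals = defaultdict(list)  # task -> list of measurements

    def add(self, task, value):
        self.vals[task].append(value)

    def write_measurements(self, exp_point):
        # The real class records per-level statistics on exp_point;
        # printing per-task averages here is only a placeholder.
        for task, vals in self.vals.items():
            print("%s %s avg=%.3f" % (self.name, task,
                                      sum(vals) / len(vals)))
```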
@@ -157,19 +161,31 @@ def extract_variance(task_dict, data, exp_point):
         try:
             pid = int(match.group("PID"))
             duration = float(match.group("EXEC"))
+            load = float(match.group("LOAD"))
+            flush = float(match.group("FLUSH"))
+
+            if load:
+                loads.add(task_dict[pid], load)
+            if flush:
+                flushes.add(task_dict[pid], flush)
 
             # Last (exit) record often has exec time of 0
             missed[pid] += not bool(duration)
 
-            if missed[pid] > 1 or not pid: raise Exception()
+            if missed[pid] > 1 or not pid: #TODO: fix, raise Exception()
+                continue
         except:
-            raise Exception("Invalid completion record, missed - %d:"
+            raise Exception("Invalid completion record, missed: %d:"
                             "\n\t%s\n\t%s" % (missed[pid], match.groupdict(),
                                               match.group("RECORD")))
         completions[pid] += [duration]
 
     for pid, durations in completions.iteritems():
         job_times = np.array(durations)
+        mean = job_times.mean()
+
+        if not mean or not durations:
+            continue
 
         # Coefficient of variation
         cv = job_times.std() / job_times.mean()
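
The new mean/empty guard avoids a division by zero for tasks whose every completion logged 0 ms. Just past the hunk, `cv` is bias-adjusted into `corrected` (new line 195); since that line is outside the hunk, the sketch below assumes the common (1 + 1/4n) small-sample correction:

```python
import numpy as np

def corrected_cv(durations):
    # Coefficient of variation with a small-sample bias correction.
    if not durations:
        return None
    job_times = np.array(durations)
    mean = job_times.mean()
    if not mean:  # mirrors the new guard in the hunk above
        return None
    cv = job_times.std() / mean
    # The (1 + 1/4n) factor is an assumption; the line computing
    # `corrected` falls outside the hunk shown above.
    return (1 + 1.0 / (4 * len(job_times))) * cv

print(corrected_cv([9.8, 10.1, 10.0, 9.9]))  # -> ~0.0119
```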
@@ -179,11 +195,10 @@ def extract_variance(task_dict, data, exp_point):
         varz.add(task_dict[pid], corrected)
 
     varz.write_measurements(exp_point)
+    flushes.write_measurements(exp_point)
+    loads.write_measurements(exp_point)
 
-def config_exit_stats(task_dict, file):
-    with open(file, 'r') as f:
-        data = f.read()
-
+def config_exit_stats(task_dict, data):
     # Dictionary of task exit measurements by pid
     exits = get_task_exits(data)
     exit_dict = dict((e.id, e) for e in exits)
@@ -200,7 +215,7 @@ def config_exit_stats(task_dict, file):
         # Replace tasks with corresponding exit stats
         if not t.pid in exit_dict:
             raise Exception("Missing exit record for task '%s' in '%s'" %
-                            (t, file))
+                            (t, file.name))
         exit_list = [exit_dict[t.pid] for t in task_list]
         config_dict[config] = exit_list
 
@@ -212,13 +227,14 @@ def get_base_stats(base_file):
         return saved_stats[base_file]
     with open(base_file, 'r') as f:
         data = f.read()
-    result = config_exit_stats(data)
+    task_dict = get_task_dict(data)
+    result = config_exit_stats(task_dict, data)
     saved_stats[base_file] = result
     return result
 
 def extract_scaling_data(task_dict, data, result, base_file):
     # Generate trees of tasks with matching configurations
-    data_stats = config_exit_stats(data)
+    data_stats = config_exit_stats(task_dict, data)
     base_stats = get_base_stats(base_file)
 
     # Scaling factors are calculated by matching groups of tasks with the same
@@ -233,9 +249,12 @@ def extract_scaling_data(task_dict, data, result, base_file):
             # a task-to-task comparison
             continue
         for data_stat, base_stat in zip(data_stats[config],base_stats[config]):
+            if not base_stat[Type.Avg] or not base_stat[Type.Max] or \
+               not data_stat[Type.Avg] or not data_stat[Type.Max]:
+                continue
             # How much larger is their exec stat than ours?
-            avg_scale = float(base_stat[Type.Avg]) / float(base_stat[Type.Avg])
-            max_scale = float(base_stat[Type.Max]) / float(base_stat[Type.Max])
+            avg_scale = float(base_stat[Type.Avg]) / float(data_stat[Type.Avg])
+            max_scale = float(base_stat[Type.Max]) / float(data_stat[Type.Max])
 
             task = task_dict[data_stat.id]
 
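
This hunk fixes an outright bug: both ratios previously divided base_stat by itself, so every scaling factor came out as 1.0. With the fix, the base run is compared against the current data, and the new guard skips pairs containing a zero statistic. With hypothetical values by way of example:

```python
# Hypothetical Measurement values, keyed like base_stat[Type.Avg] above.
base_stat = {'Avg': 12.0, 'Max': 18.0}
data_stat = {'Avg': 10.0, 'Max': 12.0}

# The new guard skips the pair if any statistic is zero.
if all([base_stat['Avg'], base_stat['Max'],
        data_stat['Avg'], data_stat['Max']]):
    avg_scale = float(base_stat['Avg']) / float(data_stat['Avg'])
    max_scale = float(base_stat['Max']) / float(data_stat['Max'])
    print(avg_scale, max_scale)  # -> 1.2 1.5
```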
@@ -251,8 +270,12 @@ def extract_sched_data(data_file, result, base_file):
 
     task_dict = get_task_dict(data)
 
-    extract_tardy_vals(task_dict, data, result)
-    extract_variance(task_dict, data, result)
+    try:
+        extract_tardy_vals(task_dict, data, result)
+        extract_variance(task_dict, data, result)
+    except Exception as e:
+        print("Error in %s" % data_file)
+        raise e
 
     if (base_file):
         extract_scaling_data(task_dict, data, result, base_file)