diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2014-01-08 18:26:42 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2014-01-08 18:26:42 -0500 |
commit | 573bbba0c0cddacf4ffcd0392c32fb7335561806 (patch) | |
tree | 2259714d9714a8f884f82cd8706aeab227fb7123 /ecrts14/ecrts14.py | |
parent | 49463510def7ba6e951dcce97db7a47e79b8d497 (diff) |
1) store latency info. 2) heur. aggress. dp param
Two changes. (1) Compute comparative latency information
for task sets that are schedulable under all partitioning
methods. (2) Make heuristic aggressiveness a design point
parameter.
Diffstat (limited to 'ecrts14/ecrts14.py')
-rwxr-xr-x | ecrts14/ecrts14.py | 155 |
1 files changed, 98 insertions, 57 deletions
diff --git a/ecrts14/ecrts14.py b/ecrts14/ecrts14.py index 85c2ff6..9955227 100755 --- a/ecrts14/ecrts14.py +++ b/ecrts14/ecrts14.py | |||
@@ -107,15 +107,17 @@ TESTS = [ | |||
107 | (1, "MaximizeParallelism", tests.test_partition_parallel), | 107 | (1, "MaximizeParallelism", tests.test_partition_parallel), |
108 | (2, "CacheAware", tests.test_partition_cache_aware), | 108 | (2, "CacheAware", tests.test_partition_cache_aware), |
109 | (3, "CacheAwareEdges", tests.test_partition_cache_aware_edges), | 109 | (3, "CacheAwareEdges", tests.test_partition_cache_aware_edges), |
110 | # ("MaximizeParallelismCacheAware", tests.test_partition_parallel2) | ||
111 | (4, "CacheAwareBFSEdges", tests.test_partition_cache_aware_bfs), | 110 | (4, "CacheAwareBFSEdges", tests.test_partition_cache_aware_bfs), |
112 | (5, "CacheAwareDFSEdges", tests.test_partition_cache_aware_dfs) | 111 | (5, "CacheAwareDFSEdges", tests.test_partition_cache_aware_dfs) |
112 | # (6, "MaximizeParallelismCacheAware", tests.test_partition_parallel2) | ||
113 | ] | 113 | ] |
114 | 114 | ||
115 | MIN_SAMPLES = 200 | 115 | #MIN_SAMPLES = 2 |
116 | MAX_SAMPLES = 500 | 116 | #MAX_SAMPLES = 2 |
117 | #MIN_SAMPLES = 1000 | 117 | #MIN_SAMPLES = 200 |
118 | #MAX_SAMPLES = 10000 | 118 | #MAX_SAMPLES = 500 |
119 | MIN_SAMPLES = 500 | ||
120 | MAX_SAMPLES = 10000 | ||
119 | MAX_CI = 0.05 | 121 | MAX_CI = 0.05 |
120 | CONFIDENCE = 0.95 | 122 | CONFIDENCE = 0.95 |
121 | 123 | ||
@@ -281,64 +283,90 @@ def process_dp(_dp): | |||
281 | dp.sched = 'edf' | 283 | dp.sched = 'edf' |
282 | dp.walk = 'seq' | 284 | dp.walk = 'seq' |
283 | 285 | ||
284 | results = defaultdict(float) | 286 | __avg_sched = defaultdict(float) |
285 | 287 | ||
286 | # avg_nr_graphs = defaultdict(float) | 288 | __avg_nr_graphs = defaultdict(float) |
287 | # avg_graph_k = defaultdict(float) | 289 | __avg_k = defaultdict(float) |
288 | avg_latencies = defaultdict(float) | 290 | __avg_latencies = defaultdict(float) |
289 | avg_ideal_ratios = defaultdict(float) | 291 | __avg_tard_ratios = defaultdict(float) |
290 | avg_hrt_ratios = defaultdict(float) | 292 | __avg_hrt_ratios = defaultdict(float) |
291 | nsched = defaultdict(int) | 293 | |
292 | 294 | n_methods = len(TESTS) | |
293 | n = 0 | 295 | n = 0 |
296 | n_all_sched = 0 | ||
294 | 297 | ||
295 | overheads = get_overheads(dp, dp.system) | 298 | overheads = get_overheads(dp, dp.system) |
296 | 299 | ||
297 | while not complete(results, n): | 300 | while not complete(__avg_sched, n): |
298 | ts, graphs, subts = create_pgm_task_set(dp) | 301 | ts, graphs, subts = create_pgm_task_set(dp) |
299 | 302 | ||
300 | # hrt_ideal_response_times = map(graph.compute_hrt_ideal_response_time, graphs) | 303 | num_graphs = len(graphs) |
301 | # num_graphs = len(graphs) | 304 | avg_depth = sum([g.depth for g in graphs])/float(num_graphs) |
305 | hrt_ideal_response_times = map(graph.compute_hrt_ideal_response_time, graphs) | ||
306 | |||
307 | this_task_set = {} | ||
302 | 308 | ||
303 | if dp.nr_clusters != 1: | 309 | if dp.nr_clusters != 1: |
304 | for method, _, test in TESTS: | 310 | for method, _, test in TESTS: |
305 | result, processed_ts = test(ts, graphs, subts, dp, overheads) | 311 | is_sched, processed_ts = test(ts, graphs, subts, dp, overheads) |
306 | if result: | 312 | this_method = {} |
307 | # ideal_response_times = map(graph.compute_ideal_response_time, graphs) | 313 | this_method['sched'] = is_sched |
308 | # srt_response_times = map(graph.bound_graph_response_time, graphs) | 314 | if is_sched: |
309 | # ideal_ratio = 0.0 | 315 | this_method['latencies'] = map(graph.bound_graph_response_time, graphs) |
310 | # hrt_ratio = 0.0 | 316 | this_method['ideal_latencies'] = map(graph.compute_ideal_response_time, graphs) |
311 | # for i, h, s in zip(ideal_response_times, hrt_ideal_response_times, srt_response_times): | 317 | this_task_set[method] = this_method |
312 | # ideal_ratio += s/i | ||
313 | # hrt_ratio += s/h | ||
314 | # | ||
315 | # ideal_ratio /= num_graphs | ||
316 | # hrt_ratio /= num_graphs | ||
317 | # avg_latency = sum(srt_response_times)/num_graphs | ||
318 | # avg_latencies[method] = update_mean(avg_latencies[method], nsched[method], avg_latency) | ||
319 | # avg_ideal_ratios[method] = update_mean(avg_ideal_ratios[method], nsched[method], ideal_ratio) | ||
320 | # avg_hrt_ratios[method] = update_mean(avg_hrt_ratios[method], nsched[method], hrt_ratio) | ||
321 | nsched[method] += 1 | ||
322 | results[method] = update_mean(results[method], n, result) | ||
323 | |||
324 | # if there is no partitioning, then same results hold for all tests | ||
325 | else: | 318 | else: |
326 | result, processed_ts = TESTS[0][2](ts, graphs, subts, dp, overheads) | 319 | # global. no partitioning. all methods equivalent |
327 | if result: | 320 | is_sched, processed_ts = TESTS[0][2](ts, graphs, subts, dp, overheads) |
328 | for method, name, test in TESTS: | 321 | this_method = {} |
329 | nsched[method] += 1 | 322 | this_method['sched'] = is_sched |
330 | for method, name, test in TESTS: | 323 | if is_sched: |
331 | results[method] = update_mean(results[method], n, result) | 324 | this_method['latencies'] = map(graph.bound_graph_response_time, graphs) |
332 | 325 | this_method['ideal_latencies'] = map(graph.compute_ideal_response_time, graphs) | |
326 | for method, _, _ in TESTS: | ||
327 | this_task_set[method] = this_method | ||
328 | |||
329 | num_method_sched = sum([1 for sched_data in this_task_set.itervalues() if sched_data['sched']]) | ||
330 | all_sched = True if num_method_sched == n_methods else False | ||
331 | |||
332 | # process the results | ||
333 | |||
334 | for method, sched_data in this_task_set.iteritems(): | ||
335 | is_sched = sched_data['sched'] | ||
336 | __avg_sched[method] = update_mean(__avg_sched[method], n, is_sched) | ||
337 | |||
338 | # only include latency data for task sets that were schedulable for all methods | ||
339 | if all_sched: | ||
340 | avg_latency = sum(sched_data['latencies'])/float(num_graphs) | ||
341 | avg_tard_ratio = 0.0 | ||
342 | avg_hrt_tard_ratio = 0.0 | ||
343 | for latency, ideal_latency, hrt_latency in zip(sched_data['latencies'], sched_data['ideal_latencies'], hrt_ideal_response_times): | ||
344 | avg_tard_ratio += (latency / ideal_latency) | ||
345 | avg_hrt_tard_ratio += (latency / hrt_latency) | ||
346 | avg_tard_ratio /= float(num_graphs) | ||
347 | avg_hrt_tard_ratio /= float(num_graphs) | ||
348 | |||
349 | __avg_latencies[method] = update_mean(__avg_latencies[method], n_all_sched, avg_latency) | ||
350 | __avg_tard_ratios[method] = update_mean(__avg_tard_ratios[method], n_all_sched, avg_tard_ratio) | ||
351 | __avg_hrt_ratios[method] = update_mean(__avg_hrt_ratios[method], n_all_sched, avg_hrt_tard_ratio) | ||
352 | |||
353 | # we could share these values across all methods | ||
354 | __avg_nr_graphs[method] = update_mean(__avg_nr_graphs[method], n_all_sched, num_graphs) | ||
355 | __avg_k[method] = update_mean(__avg_k[method], n_all_sched, avg_depth) | ||
356 | |||
357 | if all_sched: | ||
358 | n_all_sched += 1 | ||
333 | n += 1 | 359 | n += 1 |
334 | 360 | ||
335 | # for method, _, _ in TESTS: | 361 | if n_all_sched == 0: |
336 | # if nsched[method] == 0: | 362 | for method, _, _ in TESTS: |
337 | # avg_latencies[method] = -1.0 | 363 | __avg_latencies[method] = -1.0 |
338 | # avg_ideal_ratios[method] = -1.0 | 364 | __avg_tard_ratios[method] = -1.0 |
339 | # avg_hrt_ratios[method] = -1.0 | 365 | __avg_hrt_ratios[method] = -1.0 |
366 | __avg_nr_graphs[method] = 0.0 | ||
367 | __avg_k[method] = 0.0 | ||
340 | 368 | ||
341 | return results, avg_latencies, avg_ideal_ratios, avg_hrt_ratios | 369 | return __avg_sched, __avg_latencies, __avg_tard_ratios, __avg_hrt_ratios, __avg_nr_graphs, __avg_k |
342 | 370 | ||
343 | def process_design_points(args): | 371 | def process_design_points(args): |
344 | try: | 372 | try: |
@@ -347,13 +375,25 @@ def process_design_points(args): | |||
347 | while True: | 375 | while True: |
348 | dp = db.get_design_point(db_name) | 376 | dp = db.get_design_point(db_name) |
349 | if dp and not db.already_processed(dp, db_name = db_name): | 377 | if dp and not db.already_processed(dp, db_name = db_name): |
350 | (sched, avg_lat, avg_idl_rat, avg_hrt_rat) = process_dp(dp) | 378 | (avg_sched, avg_lat, avg_tard_ratio, avg_hrt_tard_ratio, avg_nr_graphs, avg_k) = process_dp(dp) |
351 | db.store_sched_results(db_name, dp, sched) | 379 | |
380 | sched_data = {} | ||
381 | for m, _, _ in TESTS: | ||
382 | results = storage() | ||
383 | results.avg_sched = avg_sched[m] | ||
384 | results.avg_latency = avg_lat[m] | ||
385 | results.avg_tard_ratio = avg_tard_ratio[m] | ||
386 | results.avg_hrt_tard_ratio = avg_hrt_tard_ratio[m] | ||
387 | results.avg_nr_graphs = avg_nr_graphs[m] | ||
388 | results.avg_k = avg_k[m] | ||
389 | sched_data[m] = results | ||
390 | |||
391 | db.store_sched_results(db_name, dp, sched_data) | ||
352 | nr_processed += 1 | 392 | nr_processed += 1 |
353 | else: | 393 | else: |
354 | break | 394 | break |
355 | except lite.OperationalError: | 395 | except lite.OperationalError: |
356 | print "FUCK!" | 396 | print "CRAP. Database Error!" |
357 | print traceback.format_exc() | 397 | print traceback.format_exc() |
358 | return nr_processed | 398 | return nr_processed |
359 | 399 | ||
@@ -390,14 +430,14 @@ def main(): | |||
390 | # system parameters | 430 | # system parameters |
391 | exp.processors = [int(cpus)] | 431 | exp.processors = [int(cpus)] |
392 | # exp.nr_clusters = [1, 4, 12, 24] | 432 | # exp.nr_clusters = [1, 4, 12, 24] |
393 | exp.nr_clusters = [1, 4, 12] | 433 | exp.nr_clusters = [1, 4, 12, 24] |
394 | exp.host = ['ludwig'] | 434 | exp.host = ['ludwig'] |
395 | exp.polluters = [False] | 435 | exp.polluters = [False] |
396 | exp.ovh_type = ['max'] | 436 | exp.ovh_type = ['max'] |
397 | 437 | ||
398 | # task parameters | 438 | # task parameters |
399 | step_size = 0.2 | 439 | step_size = 0.1 |
400 | exp.sys_util = [float(v) for v in arange(6.0, cpus+step_size, step_size)] | 440 | exp.sys_util = [float(v) for v in arange(step_size, cpus+step_size, step_size)] |
401 | exp.task_util = ['uni-medium'] | 441 | exp.task_util = ['uni-medium'] |
402 | exp.period = ['uni-long'] | 442 | exp.period = ['uni-long'] |
403 | exp.wcycle = [0] | 443 | exp.wcycle = [0] |
@@ -411,6 +451,7 @@ def main(): | |||
411 | exp.edge_distance = ['geometric_3'] | 451 | exp.edge_distance = ['geometric_3'] |
412 | exp.wss = ['bimo-medium'] | 452 | exp.wss = ['bimo-medium'] |
413 | exp.fan_in_cap = [3] | 453 | exp.fan_in_cap = [3] |
454 | exp.heur_aggressiveness = [0.6, 0.75, 0.9] | ||
414 | 455 | ||
415 | design_points = [dp for dp in DesignPointGenerator(exp, is_valid = valid)] | 456 | design_points = [dp for dp in DesignPointGenerator(exp, is_valid = valid)] |
416 | 457 | ||