author:    Glenn Elliott <gelliott@cs.unc.edu>  2014-01-10 13:19:44 -0500
committer: Glenn Elliott <gelliott@cs.unc.edu>  2014-01-10 13:21:19 -0500
commit:    6eaf83e1b470e8d0a72661b54bed2c9886ac7b64
tree:      9da2a9688c67f47455b8de9e574162ffbe0c0941 /ecrts14/ecrts14.py
parent:    573bbba0c0cddacf4ffcd0392c32fb7335561806

Track avg graph size, taskset size. D.P. for job split
Diffstat (limited to 'ecrts14/ecrts14.py')
-rwxr-xr-x | ecrts14/ecrts14.py | 60
1 file changed, 45 insertions, 15 deletions
diff --git a/ecrts14/ecrts14.py b/ecrts14/ecrts14.py
index 9955227..ff772b5 100755
--- a/ecrts14/ecrts14.py
+++ b/ecrts14/ecrts14.py
@@ -57,12 +57,12 @@ NAMED_PERIODS_US = {
 #based off of a 24-core system
 # fewer graphs = harder partitioning
 NAMED_NUM_GRAPHS = {
-    'uni-many' : graph.uniform(24, 24*4),
-    'uni-medium' : graph.uniform(12, 48),
+    'uni-many' : graph.uniform(24, 24*3),
+    'uni-medium' : graph.uniform(12, 24),
     'uni-few' : graph.uniform(1,12),
 
-    'bimo-many' : graph.binomial(24, 24*4),
-    'bimo-medium' : graph.binomial(12, 48),
+    'bimo-many' : graph.binomial(24, 24*3),
+    'bimo-medium' : graph.binomial(12, 24),
     'bimo-few' : graph.binomial(1,12),
 }
 
@@ -81,13 +81,15 @@ NAMED_HEIGHT_FACTORS = {
 NAMED_FAN = {
     'none' : graph.uniform(1,1),
     'uniform_3' : graph.uniform(1,3),
+    'uniform_6' : graph.uniform(1,6),
     'geometric_3' : graph.geometric(1,3),
+    'geometric_6' : graph.geometric(1,3),
 }
 
 NAMED_EDGE_HOP = {
     'none' : graph.uniform(1,1),
     'uniform_3' : graph.uniform(1,3),
     'uniform_deep' : graph.uniform(1,100),
     'geometric_3': graph.geometric(1,3),
 }
 
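The NAMED_* tables map configuration names to random generators from the experiment's graph module, which is not part of this diff. The sketch below is only an assumption of what such uniform/geometric factories might look like; the names come from the hunk above, the implementation does not. (Note that the new 'geometric_6' entry above reuses the (1,3) range of 'geometric_3'.)

```python
# Hypothetical sketch of factories like graph.uniform()/graph.geometric();
# the real ecrts14 graph module is not shown in this diff.
import random

def uniform(lo, hi):
    """Return a callable that draws an integer uniformly from [lo, hi]."""
    return lambda: random.randint(lo, hi)

def geometric(lo, hi, p=0.5):
    """Return a callable drawing from a geometric-like distribution,
    truncated to [lo, hi], so small fan-outs/edge hops dominate."""
    def draw():
        v = lo
        while v < hi and random.random() > p:
            v += 1
        return v
    return draw

# Usage mirroring the NAMED_FAN table above:
NAMED_FAN = {
    'none'        : uniform(1, 1),
    'uniform_6'   : uniform(1, 6),
    'geometric_3' : geometric(1, 3),
}
print(NAMED_FAN['uniform_6']())   # e.g. 4
```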
@@ -116,7 +118,7 @@ TESTS = [
 #MAX_SAMPLES = 2
 #MIN_SAMPLES = 200
 #MAX_SAMPLES = 500
-MIN_SAMPLES = 500
+MIN_SAMPLES = 1000
 MAX_SAMPLES = 10000
 MAX_CI = 0.05
 CONFIDENCE = 0.95
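MIN_SAMPLES is raised from 500 to 1000 here. Together with MAX_SAMPLES, MAX_CI, and CONFIDENCE, these constants suggest that each design point is sampled until a confidence interval on the schedulability estimate is tight enough. The actual loop sits outside this hunk; the sketch below is one plausible reading, assuming a normal-approximation interval on a proportion and treating MAX_CI as the allowed half-width.

```python
# Plausible sketch of a CI-driven stopping rule for the constants above;
# the script's real sampling loop is not part of this diff.
import math

MIN_SAMPLES = 1000
MAX_SAMPLES = 10000
MAX_CI = 0.05        # assumed: allowed half-width of the interval
Z_95 = 1.96          # normal quantile matching CONFIDENCE = 0.95

def needs_more_samples(num_sched, n):
    """True while more task sets should be generated for this design point."""
    if n < MIN_SAMPLES:
        return True
    if n >= MAX_SAMPLES:
        return False
    p = num_sched / float(n)
    half_width = Z_95 * math.sqrt(p * (1.0 - p) / n)
    return half_width > MAX_CI

print(needs_more_samples(450, 900))    # True: still below MIN_SAMPLES
print(needs_more_samples(500, 1000))   # False: half-width ~0.031 <= 0.05
```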
@@ -285,7 +287,9 @@ def process_dp(_dp):
 
     __avg_sched = defaultdict(float)
 
+    __avg_ts_size = defaultdict(float)
     __avg_nr_graphs = defaultdict(float)
+    __avg_graph_size = defaultdict(float)
     __avg_k = defaultdict(float)
     __avg_latencies = defaultdict(float)
     __avg_tard_ratios = defaultdict(float)
@@ -302,6 +306,7 @@ def process_dp(_dp):
 
     num_graphs = len(graphs)
     avg_depth = sum([g.depth for g in graphs])/float(num_graphs)
+    avg_graph_size = sum([len(g.nodes) for g in graphs])/float(num_graphs)
     hrt_ideal_response_times = map(graph.compute_hrt_ideal_response_time, graphs)
 
     this_task_set = {}
@@ -326,7 +331,7 @@ def process_dp(_dp):
     for method, _, _ in TESTS:
         this_task_set[method] = this_method
 
-    num_method_sched = sum([1 for sched_data in this_task_set.itervalues() if sched_data['sched']])
+    num_method_sched = sum([1 for sched_data in this_task_set.itervalues() if sched_data['sched'] == True])
     all_sched = True if num_method_sched == n_methods else False
 
     # process the results
@@ -341,6 +346,8 @@ def process_dp(_dp):
         avg_tard_ratio = 0.0
         avg_hrt_tard_ratio = 0.0
         for latency, ideal_latency, hrt_latency in zip(sched_data['latencies'], sched_data['ideal_latencies'], hrt_ideal_response_times):
+            if ideal_latency == 0.0:
+                print 'ecrts14.py: bad latency. latency values:',sched_data
             avg_tard_ratio += (latency / ideal_latency)
             avg_hrt_tard_ratio += (latency / hrt_latency)
         avg_tard_ratio /= float(num_graphs)
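The added check above only prints a diagnostic when ideal_latency is zero; the division on the next line would still raise ZeroDivisionError in that case. A defensive variant (not what this commit does) would skip such graphs and average over the ones actually counted, e.g.:

```python
# Hypothetical defensive version of the ratio computation; the commit itself
# only prints a warning before dividing.
def tardiness_ratios(latencies, ideal_latencies, hrt_latencies):
    """Return (avg_tard_ratio, avg_hrt_tard_ratio), skipping zero ideals."""
    tard, hrt_tard, counted = 0.0, 0.0, 0
    for latency, ideal, hrt in zip(latencies, ideal_latencies, hrt_latencies):
        if ideal == 0.0 or hrt == 0.0:
            continue                      # skip instead of dividing by zero
        tard += latency / ideal
        hrt_tard += latency / hrt
        counted += 1
    if counted == 0:
        return 0.0, 0.0
    return tard / counted, hrt_tard / counted

print(tardiness_ratios([2.0, 3.0], [1.0, 0.0], [2.0, 2.0]))   # (2.0, 1.0)
```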
@@ -351,7 +358,9 @@ def process_dp(_dp):
         __avg_hrt_ratios[method] = update_mean(__avg_hrt_ratios[method], n_all_sched, avg_hrt_tard_ratio)
 
         # we could share these values across all methods
+        __avg_ts_size[method] = update_mean(__avg_ts_size[method], n_all_sched, len(ts))
         __avg_nr_graphs[method] = update_mean(__avg_nr_graphs[method], n_all_sched, num_graphs)
+        __avg_graph_size[method] = update_mean(__avg_graph_size[method], n_all_sched, avg_graph_size)
         __avg_k[method] = update_mean(__avg_k[method], n_all_sched, avg_depth)
 
         if all_sched:
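The new per-method statistics (__avg_ts_size, __avg_graph_size) are folded in with update_mean(old, n, value), whose definition is outside this hunk. Presumably it is an incremental running mean, roughly as sketched below.

```python
# Assumed behavior of update_mean(); the real helper is defined elsewhere
# in ecrts14.py and may differ.
def update_mean(old_mean, n, new_value):
    """Fold the (n+1)-th observation into a running mean, where n is the
    number of observations already reflected in old_mean."""
    return old_mean + (new_value - old_mean) / float(n + 1)

# Example: the mean of [10, 20, 30] built one sample at a time.
m = 0.0
for i, x in enumerate([10.0, 20.0, 30.0]):
    m = update_mean(m, i, x)
print(m)   # 20.0
```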
@@ -363,10 +372,12 @@ def process_dp(_dp):
             __avg_latencies[method] = -1.0
             __avg_tard_ratios[method] = -1.0
             __avg_hrt_ratios[method] = -1.0
+            __avg_ts_size[method] = 0.0
             __avg_nr_graphs[method] = 0.0
+            __avg_graph_size[method] = 0.0
             __avg_k[method] = 0.0
 
-    return __avg_sched, __avg_latencies, __avg_tard_ratios, __avg_hrt_ratios, __avg_nr_graphs, __avg_k
+    return __avg_sched, __avg_latencies, __avg_tard_ratios, __avg_hrt_ratios, __avg_ts_size, __avg_nr_graphs, __avg_graph_size, __avg_k
 
 def process_design_points(args):
     try:
@@ -375,7 +386,7 @@ def process_design_points(args):
         while True:
             dp = db.get_design_point(db_name)
             if dp and not db.already_processed(dp, db_name = db_name):
-                (avg_sched, avg_lat, avg_tard_ratio, avg_hrt_tard_ratio, avg_nr_graphs, avg_k) = process_dp(dp)
+                (avg_sched, avg_lat, avg_tard_ratio, avg_hrt_tard_ratio, avg_ts_size, avg_nr_graphs, avg_size, avg_k) = process_dp(dp)
 
                 sched_data = {}
                 for m, _, _ in TESTS:
@@ -384,7 +395,9 @@ def process_design_points(args):
                     results.avg_latency = avg_lat[m]
                     results.avg_tard_ratio = avg_tard_ratio[m]
                     results.avg_hrt_tard_ratio = avg_hrt_tard_ratio[m]
+                    results.avg_ts_size = avg_ts_size[m]
                     results.avg_nr_graphs = avg_nr_graphs[m]
+                    results.avg_graph_size = avg_size[m]
                     results.avg_k = avg_k[m]
                     sched_data[m] = results
 
@@ -400,6 +413,17 @@ def process_design_points(args):
 def valid(dp):
     return True
 
+# TODO:
+#XXX 1. Track average graph size.
+#XXX 2. Increase minimum number of task sets
+#XXX 3. Remove 'mean' overhead type
+#XXX 4. Explore more branchy graphs
+#XXX 5. Pick one heur_aggress value (0.75)
+#XXX 6. Add wss parameters.
+#XXX 7. Remove polluters (for now).
+# 8. Why are graphs so shallow?
+#XXX 9. Job splitting
+
 def main():
     random.seed(12345)
 
@@ -432,26 +456,29 @@ def main():
     # exp.nr_clusters = [1, 4, 12, 24]
     exp.nr_clusters = [1, 4, 12, 24]
     exp.host = ['ludwig']
+    # exp.polluters = [False, True]
     exp.polluters = [False]
     exp.ovh_type = ['max']
 
     # task parameters
     step_size = 0.1
     exp.sys_util = [float(v) for v in arange(step_size, cpus+step_size, step_size)]
-    exp.task_util = ['uni-medium']
+    exp.task_util = ['uni-light', 'uni-medium']
     exp.period = ['uni-long']
+    # exp.job_splitting = [True, False]
+    exp.job_splitting = [True]
     exp.wcycle = [0]
 
     # graph parameters
-    exp.num_graphs = ['bimo-few', 'bimo-medium']
+    exp.num_graphs = ['uni-few', 'uni-medium']
     # exp.depth_factor = ['uni-medium']
-    exp.depth_factor = ['uni-short', 'uni-tall']
+    exp.depth_factor = ['uni-short', 'uni-medium', 'uni-tall']
     exp.node_placement = ['binomial']
-    exp.fan_out = ['geometric_3']
+    exp.fan_out = ['uniform_3', 'uniform_6']
     exp.edge_distance = ['geometric_3']
-    exp.wss = ['bimo-medium']
+    exp.wss = ['uni-light', 'uni-medium', 'bimo-medium']
     exp.fan_in_cap = [3]
-    exp.heur_aggressiveness = [0.6, 0.75, 0.9]
+    exp.heur_aggressiveness = [0.75]
 
     design_points = [dp for dp in DesignPointGenerator(exp, is_valid = valid)]
 
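main() describes the experiment as lists of parameter values and expands them with DesignPointGenerator(exp, is_valid = valid). That generator is defined elsewhere; conceptually it enumerates the Cartesian product of all parameter lists and keeps the combinations accepted by the filter, roughly as below (the generator body is an assumption; only the usage pattern comes from the hunk above).

```python
# Rough model of the design-point expansion; hypothetical implementation.
import itertools

def design_points(params, is_valid=lambda dp: True):
    """params maps parameter name -> list of values; yields one dict per
    accepted combination."""
    names = sorted(params)
    for combo in itertools.product(*(params[n] for n in names)):
        dp = dict(zip(names, combo))
        if is_valid(dp):
            yield dp

params = {
    'nr_clusters': [1, 4, 12, 24],
    'task_util':   ['uni-light', 'uni-medium'],
    'fan_out':     ['uniform_3', 'uniform_6'],
}
print(len(list(design_points(params))))   # 4 * 2 * 2 = 16 design points
```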
@@ -464,6 +491,9 @@ def main():
     if args.pretend or args.initonly:
         exit(0)
 
+    if args.worker:
+        print "Running as worker process."
+
     total_nr_processed = 0
     if args.processors > 1:
         pool = Pool(processes = args.processors)
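With args.processors > 1, the design points are handed to a multiprocessing.Pool; the actual dispatch code falls outside this hunk. A minimal sketch of that pattern, assuming the pool simply maps a worker over chunks of design points (the chunking and the worker below are placeholders, not the script's process_design_points), is:

```python
# Minimal sketch of parallel processing with multiprocessing.Pool; the
# worker is a placeholder, not the script's real per-design-point work.
from multiprocessing import Pool

def process_chunk(chunk):
    # Placeholder work: report the size of each design point.
    return [len(dp) for dp in chunk]

def run(design_points, processors):
    if processors > 1:
        # Stride the design points into one chunk per worker process.
        chunks = [design_points[i::processors] for i in range(processors)]
        pool = Pool(processes=processors)
        try:
            results = pool.map(process_chunk, chunks)
        finally:
            pool.close()
            pool.join()
        return [r for chunk in results for r in chunk]
    return process_chunk(design_points)

if __name__ == '__main__':
    print(run([{'a': 1}, {'b': 2, 'c': 3}], processors=2))   # [1, 2]
```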