author    Glenn Elliott <gelliott@cs.unc.edu>  2014-01-21 17:51:21 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2014-01-21 17:51:21 -0500
commit    b917354f1522f020212765647d2ada20a275c41d
tree      82d102a086670634f47a1c83b03c1095cc8f6f08
parent    f56f4982aa3db2c62e52cdff26d0f5caf5e8c7ab
parent    424917db79a1e8855c5e867bcc602476899fa28e

Merge branch 'wip-ecrts14-pgm' of ssh://rtsrv.cs.unc.edu/home/litmus/experiment-scripts into wip-ecrts14-pgm

Conflicts:
	gen/edf_generators.py
	gen/generator.py

-rw-r--r--  gen/edf_generators.py |  10
-rw-r--r--  gen/generator.py      | 100
2 files changed, 98 insertions(+), 12 deletions(-)
diff --git a/gen/edf_generators.py b/gen/edf_generators.py
index 0e5cb03..e8507f6 100644
--- a/gen/edf_generators.py
+++ b/gen/edf_generators.py
@@ -85,6 +85,9 @@ class EdfPgmGenerator(gen.Generator):
                 gen.Generator._dist_option('depth_factor', ['pipeline'],
                                            ecrts14.NAMED_HEIGHT_FACTORS,
                                            'Depth of graphs.'),
+                gen.Generator._dist_option('clustering', ['L1', 'L2', 'L3', 'ALL'],
+                                           {},
+                                           'Clustering configurations'),
                 gen.Generator._dist_option('partitions', ['no_cache', 'parallel', 'cache_aware'],
                                            PARTITION_METHOD,
                                            'Partition methods.'),
@@ -108,7 +111,7 @@ class EdfPgmGenerator(gen.Generator):
                 gen.Generator._dist_option('task_util', ['uni-medium'],
                                            NAMED_UTILIZATIONS,
                                            'Task utilization.'),
-                gen.Generator._dist_option('polluters', False,
+                gen.Generator._dist_option('polluters', [False, True],
                                            {},
                                            'Polluters.'),
                 gen.Generator._dist_option('job_splitting', True,
@@ -247,6 +250,8 @@ class CflSplitPgmGenerator(EdfPgmGenerator):
         else:
             assert False
 
+        exp_params['fan_in_cap'] = int(exp_params['fan_in_cap'])
+
         dp.nr_clusters = cpus / cluster_sz
         assert dp.nr_clusters * cluster_sz == cpus
 
@@ -261,6 +266,7 @@ class CflSplitPgmGenerator(EdfPgmGenerator):
         # compute split factor
         working_ts = ts
         partitions = get_partitions(working_ts, dp.nr_clusters, cluster_sz)
-        is_srt_sched = split.compute_splits_nolock(overheads, False, working_ts, partitions, bypass_split = not dp.job_splitting)
+        do_splits = dp.job_splitting == 'True'
+        is_srt_sched = split.compute_splits_nolock(overheads, False, working_ts, partitions, bypass_split = not do_splits)
 
         return True, working_ts
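Both changes above work around the same pitfall: design-point values read back from the experiment description arrive as strings, so fan_in_cap is coerced with int() and job_splitting is compared against the literal 'True' (note that bool('False') evaluates to True in Python). The snippet below is a minimal sketch of that conversion, not code from the repository; parse_param is a hypothetical helper name.

    # Minimal sketch (hypothetical): coerce string-encoded design-point values
    # back to their intended types before using them in scheduling decisions.
    def parse_param(value):
        """Return an int or bool for string-encoded values, else the value unchanged."""
        if value in ('True', 'False'):        # string-encoded booleans
            return value == 'True'
        try:
            return int(value)                 # string-encoded integers, e.g. a fan-in cap
        except (TypeError, ValueError):
            return value                      # leave everything else untouched

    assert parse_param('3') == 3
    assert parse_param('False') is False
    assert parse_param('uni-medium') == 'uni-medium'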
diff --git a/gen/generator.py b/gen/generator.py
index 043124b..409cd43 100644
--- a/gen/generator.py
+++ b/gen/generator.py
@@ -3,6 +3,7 @@ import os
 import sys
 import copy
 import math
+import itertools
 import pprint
 import schedcat.generator.tasks as tasks
 import shutil as sh
@@ -361,17 +362,44 @@ class Generator(object):
         exp.sched = ['edf']
         exp.update(self.params)
 
+        # extract the parameters we want to test the same task set under
+        polluter_method = exp['polluters']
+        split_method = exp['job_splitting']
+        del exp['polluters']
+        del exp['job_splitting']
+
         # Track changing values so only relevant parameters are included
         # in directory names
         for dp in PgmDesignPointGenerator(exp):
             for k, v in dp.iteritems():
                 builder.try_add(k, v)
         col_map = builder.build()
 
+        # extract the parameters we want to test the same task set under
         partition_method = exp['partitions']
+        cluster_method = exp['clustering']
         del exp['partitions']
+        del exp['clustering']
+
+        shared_params = []
+        for part, clust, pol, splt in list(itertools.product(partition_method, cluster_method, polluter_method, split_method)):
+            if clust == 'ALL' and part != 'no_cache':
+                # skip over partition methods when there is no clustering/partitioning
+                continue
+            p = storage()
+            p.partitioning = part
+            p.clustering = clust
+            p.polluting = pol
+            p.splitting = splt
+            shared_params.append(p)
 
         for _dp in PgmDesignPointGenerator(exp):
+
+            # TODO: Find out why fan_in_cap is set to a string. >:(
+            # Force it to be int.
+            for i,c in enumerate(_dp.fan_in_cap):
+                _dp.fan_in_cap = int(c)
+
             for trial in xrange(trials):
                 dp = copy.deepcopy(_dp)
                 dp.num_graphs = NAMED_NUM_GRAPHS[dp.num_graphs]
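The new shared_params block enumerates every combination of partitioning, clustering, polluter, and job-splitting settings with itertools.product and drops combinations that make no sense under global scheduling. The sketch below mirrors that pattern; it is not repository code: the value lists are illustrative (taken from the option defaults visible in the diff, with [False, True] assumed for splitting), and a namedtuple stands in for the repo's storage() helper.

    # Minimal sketch: cross product of experiment dimensions with a filter.
    import itertools
    from collections import namedtuple

    SharedParams = namedtuple('SharedParams',
                              ['partitioning', 'clustering', 'polluting', 'splitting'])

    partition_method = ['no_cache', 'parallel', 'cache_aware']
    cluster_method = ['L1', 'L2', 'L3', 'ALL']
    polluter_method = [False, True]
    split_method = [False, True]

    shared_params = [
        SharedParams(part, clust, pol, splt)
        for part, clust, pol, splt in itertools.product(
            partition_method, cluster_method, polluter_method, split_method)
        # under global scheduling ('ALL') only the no_cache setting applies
        if not (clust == 'ALL' and part != 'no_cache')
    ]

    print(len(shared_params))   # 40 of the 48 raw combinations survive the filter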
@@ -398,14 +426,8 @@ class Generator(object):
                 ts, graphs, subts = self._create_tasks(dp)
                 dp.tasks = len(ts)
 
-                # TODO: Reuse the same task set and partition with polluter overheads
-                # and with and without job splitting.
-                # That is, same task set for:
-                # {poluters, no polluters} x {job splitting, no job splitting} x
-                # {cluster levels} x {partition methods}
-
-                levels = ['L1', 'L2', 'L3', 'ALL']
                 try:
+<<<<<<< HEAD
                     for lvl in levels:
                         dp['level'] = lvl
                         _dp['level'] = lvl
@@ -455,12 +477,70 @@ class Generator(object):
                         # just generate one experiment for global
                         if dp['level'] == 'ALL':
                             break
+=======
+                    for shp in shared_params:
+                        dp['level'] = shp.clustering
+                        _dp['level'] = shp.clustering
+
+                        # load in the shared parameters
+                        dp.partitions = shp.partitioning
+                        dp.cluster = shp.clustering
+                        dp.polluters = shp.polluting
+                        dp.job_splitting = shp.splitting
+
+                        # Create directory name from relevant parameters
+                        dir_parts = []
+                        dir_parts.append("sched=%s" % self.scheduler)
+                        dir_parts.append("cluster=%s" % shp.clustering)
+                        dir_parts.append("polluterovh=%s" % shp.polluting)
+                        dir_parts.append("splitting=%s" % shp.splitting)
+                        others = col_map.encode(dp)
+                        if others != "":
+                            dir_parts.append(others)
+                        if trials > 1:
+                            dir_parts.append("trial=%d" % trial)
+                        dir_leaf = "_".join(dir_parts)
+                        dir_path = "%s/%s" % (out_dir, dir_leaf)
+
+                        print("Generating %s" % dir_leaf)
+
+                        if os.path.exists(dir_path):
+                            if force:
+                                sh.rmtree(dir_path)
+                            else:
+                                print("Skipping existing experiment: '%s'" % dir_path)
+                                continue
+
+                        os.mkdir(dir_path)
+                        created_dirs.append(dir_path)
+
+                        if trials > 1:
+                            dp[PARAMS['trial']] = trial
+                            _dp[PARAMS['trial']] = trial
+                        self.out_dir = dir_path
+
+                        _dp.system = topology.Topology(machines[dp.host])
+                        _dp.partitions = dp.partitions
+                        _dp.polluters = dp.polluters
+                        _dp.job_splitting = dp.job_splitting
+
+                        # Write a sched.py and param.py for each partition method
+                        ret = self._create_exp(_dp, ts, graphs, subts)
+                        if not ret:
+                            print("Bin-packing fails for " + dir_leaf)
+                            last_failed = dir_leaf
+                            raise Exception("Failed to partition.")
+                        del(self.out_dir)
+                        if PARAMS['trial'] in dp:
+                            del dp[PARAMS['trial']]
+                            del _dp[PARAMS['trial']]
+>>>>>>> 424917db79a1e8855c5e867bcc602476899fa28e
                     success = True
                 except Exception, e:
                     for d in created_dirs:
                         sh.rmtree(d)
             if not success:
-                print("Failed to generate experiments. Last failed: %s" % last_failed)
+                print("Failed to generate experiment (%s). Try count = %d" % (last_failed, tries))
 
 
     def print_help(self):
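The incoming branch builds each experiment's directory name by joining "key=value" parts for the shared parameters plus the encoded column map, then either skips or removes an existing directory depending on the force flag. The sketch below illustrates that naming and skip/overwrite policy; make_dir_leaf and prepare_dir are hypothetical helper names and 'C-FL-split' is only an illustrative scheduler label, though the key format strings mirror the diff.

    # Minimal sketch (hypothetical helpers) of the directory-naming scheme above.
    import os
    import shutil

    def make_dir_leaf(scheduler, clustering, polluting, splitting, others="", trial=None):
        parts = ["sched=%s" % scheduler,
                 "cluster=%s" % clustering,
                 "polluterovh=%s" % polluting,
                 "splitting=%s" % splitting]
        if others:
            parts.append(others)
        if trial is not None:
            parts.append("trial=%d" % trial)
        return "_".join(parts)

    def prepare_dir(out_dir, dir_leaf, force=False):
        """Create the experiment directory, honoring the force/skip policy."""
        dir_path = os.path.join(out_dir, dir_leaf)
        if os.path.exists(dir_path):
            if force:
                shutil.rmtree(dir_path)
            else:
                print("Skipping existing experiment: '%s'" % dir_path)
                return None
        os.mkdir(dir_path)
        return dir_path

    leaf = make_dir_leaf('C-FL-split', 'L2', True, False, others='fan_in_cap=3', trial=0)
    print(leaf)   # sched=C-FL-split_cluster=L2_polluterovh=True_splitting=False_fan_in_cap=3_trial=0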