author    Glenn Elliott <gelliott@cs.unc.edu>  2014-01-17 00:23:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2014-01-17 00:23:13 -0500
commit    1e7eade9baeb53fc629cc2db98367897d0fd7b13 (patch)
tree      7107339b6fa9c00729d3c82e2fbea1d2878e7aec
parent    2f20d388126dc907d2e0759a94d300ebca6cb93c (diff)
Use same taskset for L1-ALL partitionings.
We want to compare the partitioning heuristics among themselves, as well as across the different clustering options. Use the same task set to span these combinations so that direct comparisons are easier to make.
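
In outline, the generator now builds one task set per trial and then sweeps the clustering levels (L1, L2, L3, ALL) over that same set, deep-copying the design point so that per-level edits do not leak between iterations. A minimal sketch of that pattern, assuming hypothetical generate_task_set and run_experiment helpers in place of the real methods in gen/generator.py:

    import copy

    LEVELS = ['L1', 'L2', 'L3', 'ALL']

    def sweep_levels(base_dp, generate_task_set, run_experiment):
        # Build the task set once so every clustering level is evaluated
        # against identical tasks.
        ts = generate_task_set(base_dp)
        for lvl in LEVELS:
            # Copy the design point so 'level' and other per-iteration
            # edits do not contaminate later iterations (hypothetical keys).
            dp = copy.deepcopy(base_dp)
            dp['level'] = lvl
            run_experiment(dp, ts)

The same idea appears in the diff below as the copy.deepcopy(_dp) per trial and the new "for lvl in levels" loop wrapped around the partitioning code.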
-rw-r--r--  gen/edf_generators.py  18
-rw-r--r--  gen/generator.py       87
2 files changed, 58 insertions, 47 deletions
diff --git a/gen/edf_generators.py b/gen/edf_generators.py
index 27bc4e6..7a30b4f 100644
--- a/gen/edf_generators.py
+++ b/gen/edf_generators.py
@@ -108,9 +108,9 @@ class EdfPgmGenerator(gen.Generator):
         gen.Generator._dist_option('polluters', False,
             {},
             'Polluters.'),
-        gen.Generator._dist_option('release_master', False,
-            {},
-            'Release master.'),
+#        gen.Generator._dist_option('release_master', False,
+#            {},
+#            'Release master.'),
         gen.Generator._dist_option('job_splitting', True,
             {},
             'Job splitting.'),
@@ -120,9 +120,9 @@ class EdfPgmGenerator(gen.Generator):
         gen.Generator._dist_option('heur_aggressiveness', 0.75,
             {},
             'heur_aggressiveness.'),
-        gen.Generator._dist_option('sys_util', [20.0, 9.0, 8.0, 7.0],
+        gen.Generator._dist_option('sys_util', [20.0, 16.0, 12.0, 9.0],
             {},
-            'System utilization.')]
+            'Task set utilization.')]
 
     def _create_exp(self, dp, ts, graphs, subts):
         '''Create a single experiment with @exp_params in @out_dir.'''
@@ -131,7 +131,7 @@ class EdfPgmGenerator(gen.Generator):
 
         if ret:
             self._write_pgm_schedule(dict(dp.items() + [('task_set', ts)] + [('graphs', graphs)] + [('sub_task_set', subts)]))
-            self._write_params(dict(dp.items() + [('num_tasks', len(ts))]))
+            self._write_params(dict(dp.items() + [('num_tasks', len(ts)), ('num_graphs', len(graphs))]))
 
         return ret
 
@@ -223,13 +223,13 @@ class GedfGenerator(EdfGenerator):
 
 class CflSplitPgmGenerator(EdfPgmGenerator):
     TP_CLUSTER = "plugins/C-FL-split/cluster{$level}"
-    CLUSTER_OPTION = gen.GenOption('level', ['L1', 'L2', 'L3', 'ALL'], 'L2',
-        'Cache clustering level.',)
+#    CLUSTER_OPTION = gen.GenOption('level', ['L1', 'L2', 'L3', 'ALL'], 'L2',
+#        'Cache clustering level.',)
 
     def __init__(self, params={}):
         super(CflSplitPgmGenerator, self).__init__("C-FL-split",
                 [CflSplitPgmGenerator.TP_CLUSTER],
-                [CflSplitPgmGenerator.CLUSTER_OPTION],
+                [],
                 params)
 
     def _customize(self, ts, graphs, subts, dp):
diff --git a/gen/generator.py b/gen/generator.py
index 2c84aa3..7b91a93 100644
--- a/gen/generator.py
+++ b/gen/generator.py
@@ -1,5 +1,6 @@
 import gen.rv as rv
 import os
+import copy
 import pprint
 import schedcat.generator.tasks as tasks
 import shutil as sh
@@ -364,8 +365,9 @@ class Generator(object):
         partition_method = exp['partitions']
         del exp['partitions']
 
-        for dp in PgmDesignPointGenerator(exp):
+        for _dp in PgmDesignPointGenerator(exp):
             for trial in xrange(trials):
+                dp = copy.deepcopy(_dp)
                 dp.num_graphs = NAMED_NUM_GRAPHS[dp.num_graphs]
                 dp.depth_factor = NAMED_HEIGHT_FACTORS[dp.depth_factor]
                 dp.node_placement = NAMED_SHAPES[dp.node_placement]
@@ -386,44 +388,53 @@ class Generator(object):
                 ts, graphs, subts = self._create_tasks(dp)
                 dp.tasks = len(ts)
 
+                levels = ['L1', 'L2', 'L3', 'ALL']
                 try:
-                    for pm in partition_method:
-                        dp.partitions = pm
-                        # Create directory name from relevant parameters
-                        dir_leaf = "sched=%s_%s" % (self.scheduler, col_map.encode(dp))
-                        dir_leaf = dir_leaf.strip('_') # If there are none
-                        dir_leaf += ("_trial=%s" % trial) if trials > 1 else ""
-
-                        dir_path = "%s/%s" % (out_dir, dir_leaf.strip('_'))
-
-                        if os.path.exists(dir_path):
-                            if force:
-                                sh.rmtree(dir_path)
-                            else:
-                                print("Skipping existing experiment: '%s'" % dir_path)
-                                continue
-
-                        os.mkdir(dir_path)
-                        created_dirs.append(dir_path)
-
-                        if trials > 1:
-                            dp[PARAMS['trial']] = trial
-                        self.out_dir = dir_path
-
-                        dp.system = topology.Topology(machines[dp.host])
-
-                        # Write a sched.py and param.py for each partition method
-                        ret = self._create_exp(dp, ts, graphs, subts)
-                        if not ret:
-                            print("Bin-packing fails for " + dir_leaf)
-                            last_failed = dir_leaf
-                            raise Exception("Failed to partition.")
-                        del(self.out_dir)
-                        if PARAMS['trial'] in dp:
-                            del dp[PARAMS['trial']]
-                        # just generate one experiment for global
-                        if dp['level'] == 'ALL':
-                            break
+                    for lvl in levels:
+                        dp['level'] = lvl
+                        _dp['level'] = lvl
+                        for pm in partition_method:
+                            dp.partitions = pm
+                            # Create directory name from relevant parameters
+                            dir_leaf = "sched=%s_cluster=%s_%s" % (self.scheduler, lvl, col_map.encode(dp))
+                            dir_leaf = dir_leaf.strip('_') # If there are none
+                            dir_leaf += ("_trial=%s" % trial) if trials > 1 else ""
+
+                            dir_path = "%s/%s" % (out_dir, dir_leaf.strip('_'))
+
+                            print("Generating for %s" % dir_path)
+
+                            if os.path.exists(dir_path):
+                                if force:
+                                    sh.rmtree(dir_path)
+                                else:
+                                    print("Skipping existing experiment: '%s'" % dir_path)
+                                    continue
+
+                            os.mkdir(dir_path)
+                            created_dirs.append(dir_path)
+
+                            if trials > 1:
+                                dp[PARAMS['trial']] = trial
+                                _dp[PARAMS['trial']] = trial
+                            self.out_dir = dir_path
+
+                            _dp.system = topology.Topology(machines[dp.host])
+                            _dp.partitions = pm
+
+                            # Write a sched.py and param.py for each partition method
+                            ret = self._create_exp(_dp, ts, graphs, subts)
+                            if not ret:
+                                print("Bin-packing fails for " + dir_leaf)
+                                last_failed = dir_leaf
+                                raise Exception("Failed to partition.")
+                            del(self.out_dir)
+                            if PARAMS['trial'] in dp:
+                                del dp[PARAMS['trial']]
+                                del _dp[PARAMS['trial']]
+                            # just generate one experiment for global
+                            if dp['level'] == 'ALL':
+                                break
                     success = True
                 except Exception, e:
                     for d in created_dirs: