author     Glenn Elliott <gelliott@cs.unc.edu>    2014-01-17 16:58:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>    2014-01-17 16:58:13 -0500
commit     cdfc95b4a4fe1bbccac56ad40c8b18e898c8f684
tree       e3fa8a51309c003a911237f1852da65f9ab6aa4d
parent     d90a8d25b1026ea41e4cd3041ad2ba03732b99a3
parent     a12e11ba151e058c79b79432a18bba1e8946f336

Merge branch 'wip-ecrts14-pgm' of ssh://rtsrv.cs.unc.edu/home/litmus/experiment-scripts into wip-ecrts14-pgm

-rw-r--r--  config/config.py       |  20
-rw-r--r--  gen/edf_generators.py  |  89
-rw-r--r--  gen/generator.py       | 167
-rwxr-xr-x  gen_pgm_exps.py        |  12
4 files changed, 179 insertions, 109 deletions
diff --git a/config/config.py b/config/config.py
index f2d068c..5196228 100644
--- a/config/config.py
+++ b/config/config.py
@@ -53,16 +53,16 @@ DEFAULTS = {'duration' : 10,
 # task name - 501
 # task param - 502
 # release - 503
-# switch to - 504
-# switch from - 505
-# job completion - 506
-# block - 507
-# resume - 508
-# action - 509
-# sys release - 510
-# pgm param - 511
-# pgm release - 512
-SCHED_EVENTS = [501, 502, 503, 506, 510, 511, 512]
+# switch to - 505
+# switch from - 506
+# job completion - 507
+# block - 508
+# resume - 509
+# action - 510
+# sys release - 511
+# pgm param - 512
+# pgm release - 513
+SCHED_EVENTS = [501, 502, 503, 507, 511, 512, 513]
 
 '''Overhead events.'''
 OVH_BASE_EVENTS = ['SCHED', 'RELEASE', 'SCHED2', 'TICK', 'CXS', 'LOCK', 'UNLOCK']
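The renumbering above shifts every trace event after 'release' up by one (presumably a new event now occupies ID 504), so SCHED_EVENTS is updated to match. Spelled out by name, the IDs still treated as scheduling events are summarized below; this mapping is only an illustration derived from the comments, not code from the commit:

    # Illustrative summary of the new numbering (names taken from the comments above).
    SCHED_EVENT_NAMES = {
        501: 'task name',
        502: 'task param',
        503: 'release',
        507: 'job completion',
        511: 'sys release',
        512: 'pgm param',
        513: 'pgm release',
    }
    SCHED_EVENTS = sorted(SCHED_EVENT_NAMES)  # [501, 502, 503, 507, 511, 512, 513]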
diff --git a/gen/edf_generators.py b/gen/edf_generators.py
index 58cbb0d..7a30b4f 100644
--- a/gen/edf_generators.py
+++ b/gen/edf_generators.py
@@ -2,12 +2,15 @@ import generator as gen
 import random
 import ecrts14.partition as partition
 import schedcat.sched.split_heuristic as split
+import ecrts14.ecrts14 as ecrts14
 
 from ecrts14.ecrts14 import create_pgm_task_set
 from ecrts14.ecrts14 import get_overheads
 from config.config import FILES,PARAMS
 from schedcat.overheads.model import Overheads, CacheDelay, ConsumerOverheads, ProducerOverheads
 from ecrts14.tests import get_partitions
+from schedcat.generator.tasksets import NAMED_UTILIZATIONS
+from schedcat.mapping.binpack import DidNotFit
 
 TP_TBASE = """#for $t in $task_set
 {} $t.cost $t.period
@@ -72,30 +75,69 @@ class EdfPgmGenerator(gen.Generator):
 
     def __make_options(self):
         '''Return generic EDF options.'''
-        return [gen.Generator._dist_option('utils', 'uni-medium',
-                    gen.NAMED_UTILIZATIONS,
-                    'Task utilization distributions.'),
-                gen.Generator._dist_option('periods', 'harmonic',
-                    gen.NAMED_PERIODS,
-                    'Task period distributions.')]
+        return [gen.Generator._dist_option('period', ['uni-long'],
+                    ecrts14.NAMED_PERIODS_US,
+                    'Task period distributions.'),
+                gen.Generator._dist_option('num_graphs', 'uni-medium',
+                    ecrts14.NAMED_NUM_GRAPHS,
+                    'Number of graphs.'),
+                gen.Generator._dist_option('depth_factor', ['uni-medium'],
+                    ecrts14.NAMED_HEIGHT_FACTORS,
+                    'Depth of graphs.'),
+                gen.Generator._dist_option('partitions', ['no_cache', 'parallel', 'cache_aware'],
+                    PARTITION_METHOD,
+                    'Partition methods.'),
+                gen.Generator._dist_option('node_placement', ['uniform'],
+                    ecrts14.NAMED_SHAPES,
+                    'The node placement of a graph.'),
+                gen.Generator._dist_option('fan_out', ['uniform_3'],
+                    ecrts14.NAMED_FAN,
+                    'The number of out edges of a node.'),
+                gen.Generator._dist_option('fan_in_cap', [3],
+                    {},
+                    'The maximum number of in-edges of a node.'),
+                gen.Generator._dist_option('edge_distance', ['uniform_3'],
+                    ecrts14.NAMED_EDGE_HOP,
+                    'The number of hops for an edge.'),
+                gen.Generator._dist_option('wss', ['uni-medium'],
+                    ecrts14.NAMED_EDGE_WSS,
+                    'Working set size.'),
+                gen.Generator._dist_option('task_util', ['uni-light'],
+                    NAMED_UTILIZATIONS,
+                    'Task utilization.'),
+                gen.Generator._dist_option('polluters', False,
+                    {},
+                    'Polluters.'),
+                # gen.Generator._dist_option('release_master', False,
+                #     {},
+                #     'Release master.'),
+                gen.Generator._dist_option('job_splitting', True,
+                    {},
+                    'Job splitting.'),
+                gen.Generator._dist_option('ovh_type', 'max',
+                    {},
+                    'Overhead type.'),
+                gen.Generator._dist_option('heur_aggressiveness', 0.75,
+                    {},
+                    'heur_aggressiveness.'),
+                gen.Generator._dist_option('sys_util', [20.0, 16.0, 12.0, 9.0],
+                    {},
+                    'Task set utilization.')]
 
     def _create_exp(self, dp, ts, graphs, subts):
         '''Create a single experiment with @exp_params in @out_dir.'''
 
-        ts = self._customize(ts, graphs, subts, dp)
+        ret, ts = self._customize(ts, graphs, subts, dp)
 
-        self._write_pgm_schedule(dict(dp.items() + [('task_set', ts)] + [('graphs', graphs)] + [('sub_task_set', subts)]))
-        self._write_params(dict(dp.items() + [('num_tasks', len(ts))]))
+        if ret:
+            self._write_pgm_schedule(dict(dp.items() + [('task_set', ts)] + [('graphs', graphs)] + [('sub_task_set', subts)]))
+            self._write_params(dict(dp.items() + [('num_tasks', len(ts)), ('num_graphs', len(graphs))]))
+
+        return ret
 
     def _create_tasks(self, dp):
         '''Create a task set.'''
         ts, graphs, subts = create_pgm_task_set(dp)
-        # convert to ms
-        for t in ts:
-            t.cost = t.cost / 1000.0
-            t.period = t.period / 1000.0
-            t.deadline = t.deadline /1000.0
-
         return ts, graphs, subts
 
     def _customize(self, taskset, exp_params):
@@ -131,7 +173,7 @@ class PedfGenerator(PartitionedGenerator):
 
 class CedfGenerator(PartitionedGenerator):
     TP_CLUSTER = "plugins/C-EDF/cluster{$level}"
-    CLUSTER_OPTION = gen.GenOption('level', ['L2', 'L3', 'All'], 'L2',
+    CLUSTER_OPTION = gen.GenOption('level', ['L2', 'L3', 'ALL'], 'L2',
         'Cache clustering level.',)
 
     def __init__(self, params={}):
@@ -181,13 +223,13 @@ class GedfGenerator(EdfGenerator):
 
 class CflSplitPgmGenerator(EdfPgmGenerator):
     TP_CLUSTER = "plugins/C-FL-split/cluster{$level}"
-    CLUSTER_OPTION = gen.GenOption('level', ['L1', 'L2', 'L3', 'ALL'], 'L2',
-        'Cache clustering level.',)
+    # CLUSTER_OPTION = gen.GenOption('level', ['L1', 'L2', 'L3', 'ALL'], 'L2',
+    #     'Cache clustering level.',)
 
     def __init__(self, params={}):
         super(CflSplitPgmGenerator, self).__init__("C-FL-split",
             [CflSplitPgmGenerator.TP_CLUSTER],
-            [CflSplitPgmGenerator.CLUSTER_OPTION],
+            [],
             params)
 
     def _customize(self, ts, graphs, subts, dp):
@@ -211,11 +253,14 @@ class CflSplitPgmGenerator(EdfPgmGenerator):
         overheads = get_overheads(dp, dp.system)
         # do the partition here
         ts = partition.clear_partitioning(ts)
-        ts = PARTITION_METHOD[exp_params['partitions']](ts, graphs, subts, cluster_sz, dp.nr_clusters, dp.system, dp.heur_aggressiveness, overheads)
+        try:
+            ts = PARTITION_METHOD[exp_params['partitions']](ts, graphs, subts, cluster_sz, dp.nr_clusters, dp.system, dp.heur_aggressiveness, overheads)
+        except DidNotFit:
+            return False, ts
 
         # compute split factor
         working_ts = ts
         partitions = get_partitions(working_ts, dp.nr_clusters, cluster_sz)
         is_srt_sched = split.compute_splits_nolock(overheads, False, working_ts, partitions, bypass_split = not dp.job_splitting)
 
-        return working_ts
+        return True, working_ts
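Taken together, the edf_generators.py changes introduce an explicit success flag: _customize() wraps the bin-packing call in try/except DidNotFit and returns (False, ts) when the task set cannot be partitioned, and _create_exp() only writes the sched/param files (and returns True) when partitioning succeeded. A rough sketch of how a driver can consume the new contract, mirroring the retry bound used in gen/generator.py (illustrative only, not code from this commit):

    # Hypothetical driver: retry task-set generation until bin-packing succeeds.
    MAX_TRIES = 100                                 # same bound as gen/generator.py
    for attempt in range(MAX_TRIES):
        ts, graphs, subts = gen._create_tasks(dp)   # fresh random task set (in microseconds)
        if gen._create_exp(dp, ts, graphs, subts):  # False if PARTITION_METHOD raised DidNotFit
            break
    else:
        print("no partitionable task set found for %s" % dp)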
diff --git a/gen/generator.py b/gen/generator.py
index 7456021..7b91a93 100644
--- a/gen/generator.py
+++ b/gen/generator.py
@@ -1,5 +1,6 @@
 import gen.rv as rv
 import os
+import copy
 import pprint
 import schedcat.generator.tasks as tasks
 import shutil as sh
@@ -15,6 +16,7 @@ from parse.col_map import ColMapBuilder
 from numpy import arange
 from schedcat.util.storage import storage
 from ecrts14.machines import machines
+from ecrts14.ecrts14 import NAMED_NUM_GRAPHS, NAMED_SHAPES, NAMED_HEIGHT_FACTORS, NAMED_FAN, NAMED_EDGE_HOP, NAMED_EDGE_WSS
 
 NAMED_PERIODS = {
     'harmonic' : rv.uniform_choice([25, 50, 100, 200]),
@@ -81,9 +83,7 @@ class Generator(object):
         release_master = list(set([False, bool(rm_config)]))
 
 
-        return [GenOption('partitions', str, ['no_cache', 'parallel', 'cache_aware', 'cache_aware_edges', 'cache_aware_bfs', 'cache_aware_dfs'],
-                    'Partition methods.'),
-                GenOption('tasks', int, [0],
+        return [GenOption('tasks', int, [0],
                     'Number of tasks'),
                 GenOption('cpus', int, [cpus],
                     'Number of processors on target system.'),
@@ -138,6 +138,8 @@
         # make pgmrt arguments using graphs and tasks.
         sched_file = self.out_dir + "/" + FILES['sched_file']
 
+        # task set is in microseconds. we must convert to milliseconds
+
         graph_desc_arg = []
         rates_arg = []
         exec_arg = []
@@ -154,13 +156,17 @@
             split_arg_t = []
 
             for n in g.nodes:
-                assert n.graph.id == g.id
+                # task set is in microseconds. we must convert to milliseconds
+
                 cluster_arg_t.append('node_' + str(n.id) + ':' + str(n.task.partition))
-                exec_arg_t.append('node_' + str(n.id) + ':' + str(n.task.cost))
-                split_arg_t.append('node_' + str(n.id) + ':' + str(n.task.split))
+                cost_str = format(n.task.cost/1000.0, '.4f').rstrip('0').rstrip('.')
+                exec_arg_t.append('node_' + str(n.id) + ':' + cost_str)
+                if n.task.split != 1:
+                    split_arg_t.append('node_' + str(n.id) + ':' + str(n.task.split))
                 if n.isSrc == True:
                     # assume that x=1
-                    rates_arg_t.append('node_' + str(n.id) + ':1:' + str(n.task.period))
+                    period_str = format(n.task.period/1000.0, '.4f').rstrip('0').rstrip('.')
+                    rates_arg_t.append('node_' + str(n.id) + ':1:' + period_str)
             # get cluster size
             clustersz_arg_t = str(pgm_params['cpus'] / pgm_params['nr_clusters'])
             if len(g.nodes) == 1:
@@ -169,7 +175,8 @@
                 graph_desc_arg_t.append('node_' + str(n.id) + ':node_' + str(succ.id))
                 # wss parameter
                 for e in n.outEdges:
-                    wss_arg_t.append('node_' + str(n.id) + ':node_' + str(e.s.id) + ':' + str(e.wss))
+                    wss_kb_str = format(e.wss, '.4f').rstrip('0').rstrip('.')
+                    wss_arg_t.append('node_' + str(n.id) + ':node_' + str(e.s.id) + ':' + wss_kb_str)
 
             # combine arguments to a comma-separated string
             cluster_arg_t = ','.join(cluster_arg_t)
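Because _create_tasks() no longer divides costs and periods by 1000, the microsecond-to-millisecond conversion now happens here, when the pgmrt arguments are emitted: format(x/1000.0, '.4f').rstrip('0').rstrip('.') produces a compact millisecond string with at most four decimal places. A few worked examples of that expression (plain Python, shown only to illustrate the formatting):

    def us_to_ms_str(us):
        # same expression as used in the hunk above
        return format(us / 1000.0, '.4f').rstrip('0').rstrip('.')

    print(us_to_ms_str(12345.0))  # '12.345'
    print(us_to_ms_str(500.0))    # '0.5'
    print(us_to_ms_str(1000.0))   # '1'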
@@ -192,9 +199,10 @@
         pgm_args = []
         for i in range(len(pgm_params['graphs'])):
             pgm_args_t = '';
-            pgm_args_t += '--wait --cluster ' + cluster_arg[i] + ' --clusterSize ' + clustersz_arg[i] + ' --name graph_' + str(pgm_params['graphs'][i].id)
+            pgm_args_t += '--wait --cluster ' + cluster_arg[i] + ' --clusterSize ' + clustersz_arg[i] + ' --name graph_' + str(i)
             pgm_args_t += ' --graph ' + graph_desc_arg[i] + ' --rates ' + rates_arg[i] + ' --execution ' + exec_arg[i]
-            pgm_args_t += ' --split ' + split_arg[i]
+            if len(split_arg[i]) != 0:
+                pgm_args_t += ' --split ' + split_arg[i]
             if len(wss_arg[i]) != 0:
                 pgm_args_t += ' --wss ' + wss_arg[i]
 
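With the two length checks above, --split and --wss are only appended when they carry information: split factors of 1 are no longer emitted per node, and an empty wss list adds nothing. For a hypothetical two-node graph the assembled pgmrt argument string would look roughly like this (the flags are exactly the ones built in this function; the values are made up):

    --wait --cluster node_0:0,node_1:1 --clusterSize 4 --name graph_0 --graph node_0:node_1 --rates node_0:1:25 --execution node_0:1.5,node_1:2 --wss node_0:node_1:8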
@@ -334,79 +342,106 @@
         out_dir = opts.out_dir
         force = opts.force
         trials = opts.trials
 
-        # hard coded here
+        # Hardcoded design points
         exp = storage()
         exp.host = ['ludwig']
         cpus = 24.0
         exp.processors = [cpus]
-        exp.task_util = ['uni-medium']
-        exp.period = ['uni-long']
-        exp.sched = ['edf']
-        exp.sys_util = [ 20.0 ] #arange(1, cpus+0.1, 0.1)
-
         exp.wcycle = [ 0 ]
         exp.walk = ['seq']
         exp.huge_pages = [False]
         exp.uncached = [False]
-        exp.polluters = [False]
-        exp.ovh_type = ['max']
-        exp.release_master = [False]
-        exp.heur_aggressiveness = [0.75]
-        exp.job_splitting = [True]
+        exp.sched = ['edf']
         exp.update(self.params)
 
         # Track changing values so only relevant parameters are included
         # in directory names
         for dp in PgmDesignPointGenerator(exp):
             for k, v in dp.iteritems():
                 builder.try_add(k, v)
         col_map = builder.build()
+
+        partition_method = exp['partitions']
+        del exp['partitions']
 
-        for trial in xrange(trials):
-            dp.num_graphs = graph.uniform(opts.num_graphs, opts.num_graphs)
-            dp.depth_factor = [1.0/3.0, 2.0/3.0]
-            dp.node_placement = graph.uniform(opts.node_placement, opts.node_placement)
-            dp.fan_out = graph.geometric(1, opts.fan_out)
-            dp.fan_in_cap = opts.fan_in_cap
-            dp.edge_distance = graph.geometric(1, opts.edge_distance)
-            dp.nr_source = graph.uniform(opts.nr_source, opts.nr_source)
-            dp.nr_sink = graph.uniform(opts.nr_sink, opts.nr_sink)
-            dp.wss = tasks.multimodal([(tasks.uniform_int(1,2), 6), (tasks.uniform_int(2, 8), 3)])
-
-            # Generate a task set
-            ts, graphs, subts = self._create_tasks(dp)
-            dp.tasks = len(ts)
-
-        for dp in PgmDesignPointGenerator(exp):
-            # Create directory name from relevant parameters
-            dir_leaf = "sched=%s_%s" % (self.scheduler, col_map.encode(dp))
-            dir_leaf = dir_leaf.strip('_') # If there are none
-            dir_leaf += ("_trial=%s" % trial) if trials > 1 else ""
-
-            dir_path = "%s/%s" % (out_dir, dir_leaf.strip('_'))
-
-            if os.path.exists(dir_path):
-                if force:
-                    sh.rmtree(dir_path)
-                else:
-                    print("Skipping existing experiment: '%s'" % dir_path)
-                    continue
-
-            os.mkdir(dir_path)
-
-            if trials > 1:
-                dp[PARAMS['trial']] = trial
-            self.out_dir = dir_path
-
-            dp.system = topology.Topology(machines[dp.host])
-
-            # Write a sched.py and param.py for each partition method
-            self._create_exp(dp, ts, graphs, subts)
+        for _dp in PgmDesignPointGenerator(exp):
+            for trial in xrange(trials):
+                dp = copy.deepcopy(_dp)
+                dp.num_graphs = NAMED_NUM_GRAPHS[dp.num_graphs]
+                dp.depth_factor = NAMED_HEIGHT_FACTORS[dp.depth_factor]
+                dp.node_placement = NAMED_SHAPES[dp.node_placement]
+                dp.fan_out = NAMED_FAN[dp.fan_out]
+                dp.edge_distance = NAMED_EDGE_HOP[dp.edge_distance]
+                dp.nr_source = graph.uniform(opts.nr_source, opts.nr_source)
+                dp.nr_sink = graph.uniform(opts.nr_sink, opts.nr_sink)
+                dp.wss = NAMED_EDGE_WSS[dp.wss]
+
+                last_failed = ''
+                tries = 0
+                success = False
+                while tries < 100 and not success:
+                    created_dirs = []
+                    tries += 1
+
+                    # Generate a task set
+                    ts, graphs, subts = self._create_tasks(dp)
+                    dp.tasks = len(ts)
+
+                    levels = ['L1', 'L2', 'L3', 'ALL']
+                    try:
+                        for lvl in levels:
+                            dp['level'] = lvl
+                            _dp['level'] = lvl
+                            for pm in partition_method:
+                                dp.partitions = pm
+                                # Create directory name from relevant parameters
+                                dir_leaf = "sched=%s_cluster=%s_%s" % (self.scheduler, lvl, col_map.encode(dp))
+                                dir_leaf = dir_leaf.strip('_') # If there are none
+                                dir_leaf += ("_trial=%s" % trial) if trials > 1 else ""
+
+                                dir_path = "%s/%s" % (out_dir, dir_leaf.strip('_'))
+
+                                print("Generating for %s" % dir_path)
+
+                                if os.path.exists(dir_path):
+                                    if force:
+                                        sh.rmtree(dir_path)
+                                    else:
+                                        print("Skipping existing experiment: '%s'" % dir_path)
+                                        continue
+
+                                os.mkdir(dir_path)
+                                created_dirs.append(dir_path)
+
+                                if trials > 1:
+                                    dp[PARAMS['trial']] = trial
+                                    _dp[PARAMS['trial']] = trial
+                                self.out_dir = dir_path
+
+                                _dp.system = topology.Topology(machines[dp.host])
+                                _dp.partitions = pm
+
+                                # Write a sched.py and param.py for each partition method
+                                ret = self._create_exp(_dp, ts, graphs, subts)
+                                if not ret:
+                                    print("Bin-packing fails for " + dir_leaf)
+                                    last_failed = dir_leaf
+                                    raise Exception("Failed to partition.")
+                                del(self.out_dir)
+                                if PARAMS['trial'] in dp:
+                                    del dp[PARAMS['trial']]
+                                    del _dp[PARAMS['trial']]
+                                # just generate one experiment for global
+                                if dp['level'] == 'ALL':
+                                    break
+                        success = True
+                    except Exception, e:
+                        for d in created_dirs:
+                            sh.rmtree(d)
+                if not success:
+                    print("Failed to generate experiments. Last failed: %s" % last_failed)
 
-        del(self.out_dir)
-        if PARAMS['trial'] in dp:
-            del dp[PARAMS['trial']]
 
     def print_help(self):
         display_options = [o for o in self.options if not o.hidden]
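The rewritten generation loop iterates over design points, trials, cluster levels (L1/L2/L3/ALL, with ALL generating just one experiment), and partition methods; it retries with a fresh task set, up to 100 times, whenever bin-packing fails, and deletes every directory it created for the failed attempt. Stripped of the experiment-specific details, the control flow follows this generate-and-roll-back pattern (simplified sketch under those assumptions, not the literal code):

    import shutil as sh

    def generate_with_rollback(make_task_set, emit_experiments, max_tries=100):
        '''Simplified sketch of the retry/rollback pattern used above.'''
        for _ in range(max_tries):
            created = []                       # directories created during this attempt
            try:
                ts = make_task_set()           # draw a fresh random task set
                emit_experiments(ts, created)  # appends each mkdir'd path; raises if a config does not fit
                return True                    # every configuration was generated
            except Exception:
                for d in created:              # roll back partially generated experiments
                    sh.rmtree(d)
        return False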
diff --git a/gen_pgm_exps.py b/gen_pgm_exps.py
index 62723ec..c6f6198 100755
--- a/gen_pgm_exps.py
+++ b/gen_pgm_exps.py
@@ -30,19 +30,9 @@ def parse_args():
     parser.add_option('-d', '--describe-generators', metavar='generator[,..]',
                       dest='described', default=None,
                       help='describe parameters for generator(s)')
-    parser.add_option('-m', '--num-graphs', default=24, type='int', dest='num_graphs',
-                      help='number of graphs for a taskset')
-    parser.add_option('-p', '--node-placement', default=1, type='int', dest='node_placement',
-                      help='node placement of the graph')
-    parser.add_option('-t', '--fan-out', default=3, type='int', dest='fan_out',
-                      help='fan out of a node')
-    parser.add_option('-i', '--fan-in-cap', default=3, type='int', dest='fan_in_cap',
-                      help='fan in cap')
-    parser.add_option('-g', '--edge-distance', default=3, type='int', dest='edge_distance',
-                      help='edge distance')
     parser.add_option('-u', '--nr-source', default=1, type='int', dest='nr_source',
                       help='number of source nodes')
-    parser.add_option('-v', '--nr_sink', default=1, type='int', dest='nr_sink',
+    parser.add_option('-v', '--nr-sink', default=1, type='int', dest='nr_sink',
                       help='number of sink nodes')
 
 
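After this cleanup the graph-shape parameters (-m, -p, -t, -i, -g) are no longer command-line options; they come from the named distributions configured in EdfPgmGenerator. Only the source and sink counts stay on the command line, and the -v long option is renamed to the consistent --nr-sink spelling. An invocation might now look like the following (illustrative; any other arguments the script accepts are left elided):

    ./gen_pgm_exps.py --nr-source 1 --nr-sink 1 ...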