author    Glenn Elliott <gelliott@cs.unc.edu>    2014-01-14 15:02:33 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>    2014-01-14 15:02:33 -0500
commit    f3106e83c7404e9de96117770f210cf6a207cc2d (patch)
tree      ee2343ebfd8cdf41c3d85d4c654faa0d65f06cd6
parent    1a000342dacd3dce7859c07dcf664852b21d7793 (diff)
Manual merge of Namhoon's PGM exp generation
This patch merges in the work Namhoon did to support PGM-based task set experiments.
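
A typical workflow with the new front-end (illustrative; the flag values
below are examples, and C-FL-split is the only generator left registered
by this patch):

    # generate PGM task-set experiments
    ./gen_pgm_exps.py -o exps -m 24 -t 3 -i 3 -g 3 C-FL-split

    # run them; run_exps.py appends the actual --duration value to each
    # pgmrt command line (-d now defaults to 30 seconds)
    ./run_exps.py -d 30 exps/*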
-rw-r--r--  config/config.py      |   3
-rw-r--r--  gen/__init__.py       |   9
-rw-r--r--  gen/edf_generators.py | 102
-rw-r--r--  gen/generator.py      | 176
-rwxr-xr-x  gen_pgm_exps.py       | 122
-rw-r--r--  run/experiment.py     |  21
-rwxr-xr-x  run_exps.py           |  11
7 files changed, 414 insertions, 30 deletions
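
For reference, the new _write_pgm_schedule() in gen/generator.py (below)
emits one pgmrt argument string per graph into each experiment's sched.py.
A sketch of one such line, with made-up node IDs, partitions, rates, and
costs (wrapped here for readability; the file holds it on a single line):

    --wait --cluster node_0:0,node_1:1 --clusterSize 2 --name graph_0
        --graph node_0:node_1 --rates node_0:1:100
        --execution node_0:4.5,node_1:2.1 --split node_0:1,node_1:1
        --wss node_0:node_1:8 --duration

The trailing --duration is deliberately left without a value; run_exps.py
supplies it at run time.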
diff --git a/config/config.py b/config/config.py
index 976974a..f2d068c 100644
--- a/config/config.py
+++ b/config/config.py
@@ -38,7 +38,8 @@ PARAMS = {'sched' : 'scheduler', # Scheduler used by run_exps
 
 '''Default values for program options.'''
 DEFAULTS = {'duration'  : 10,
-            'prog'      : 'rtspin',
+#            'prog'      : 'rtspin',
+            'prog'      : 'pgmrt',
             'out-gen'   : 'exps',
             'out-run'   : 'run-data',
             'out-parse' : 'parse-data',
diff --git a/gen/__init__.py b/gen/__init__.py
index 654ac19..58f5cfa 100644
--- a/gen/__init__.py
+++ b/gen/__init__.py
@@ -1,7 +1,8 @@
 import generator as gen
 import edf_generators as edf
 
-gen.register_generator("G-EDF", edf.GedfGenerator)
-gen.register_generator("P-EDF", edf.PedfGenerator)
-gen.register_generator("C-EDF", edf.CedfGenerator)
-gen.register_generator("C-FL-split", edf.CflSplitGenerator)
+#gen.register_generator("G-EDF", edf.GedfGenerator)
+#gen.register_generator("P-EDF", edf.PedfGenerator)
+#gen.register_generator("C-EDF", edf.CedfGenerator)
+#gen.register_generator("C-FL-split", edf.CflSplitGenerator)
+gen.register_generator("C-FL-split", edf.CflSplitPgmGenerator)
diff --git a/gen/edf_generators.py b/gen/edf_generators.py
index ce99d05..58cbb0d 100644
--- a/gen/edf_generators.py
+++ b/gen/edf_generators.py
@@ -1,5 +1,13 @@
 import generator as gen
 import random
+import ecrts14.partition as partition
+import schedcat.sched.split_heuristic as split
+
+from ecrts14.ecrts14 import create_pgm_task_set
+from ecrts14.ecrts14 import get_overheads
+from config.config import FILES,PARAMS
+from schedcat.overheads.model import Overheads, CacheDelay, ConsumerOverheads, ProducerOverheads
+from ecrts14.tests import get_partitions
 
 TP_TBASE = """#for $t in $task_set
 {} $t.cost $t.period
@@ -8,6 +16,15 @@ TP_GLOB_TASK = TP_TBASE.format("")
 TP_PART_TASK = TP_TBASE.format("-p $t.cpu")
 TP_CLST_TASK = TP_TBASE.format("-p $t.cluster -z $t.cluster_sz")
 
+PARTITION_METHOD = {
+    'no_cache' : partition.partition_no_cache,
+    'parallel' : partition.partition_parallel,
+    'cache_aware' : partition.partition_cache_aware,
+    'cache_aware_edges' : partition.partition_cache_aware_edges,
+    'cache_aware_bfs' : partition.partition_cache_aware_bfs,
+    'cache_aware_dfs' : partition.partition_cache_aware_dfs,
+}
+
 class EdfGenerator(gen.Generator):
     '''Creates sporadic task sets with the most common Litmus options.'''
     def __init__(self, scheduler, templates, options, params):
@@ -39,12 +56,51 @@ class EdfGenerator(gen.Generator):
         self._customize(ts, exp_params)
 
         self._write_schedule(dict(exp_params.items() + [('task_set', ts)]))
+
         self._write_params(exp_params)
 
     def _customize(self, taskset, exp_params):
         '''Configure a generated taskset with extra parameters.'''
         pass
 
+class EdfPgmGenerator(gen.Generator):
+    '''Creates PGM-based task sets with the most common Litmus options.'''
+    def __init__(self, scheduler, templates, options, params):
+        super(EdfPgmGenerator, self).__init__(scheduler, templates,
+                                              self.__make_options() + options,
+                                              params)
+
+    def __make_options(self):
+        '''Return generic EDF options.'''
+        return [gen.Generator._dist_option('utils', 'uni-medium',
+                                           gen.NAMED_UTILIZATIONS,
+                                           'Task utilization distributions.'),
+                gen.Generator._dist_option('periods', 'harmonic',
+                                           gen.NAMED_PERIODS,
+                                           'Task period distributions.')]
+
+    def _create_exp(self, dp, ts, graphs, subts):
+        '''Create a single experiment for design point @dp in @out_dir.'''
+
+        ts = self._customize(ts, graphs, subts, dp)
+
+        self._write_pgm_schedule(dict(dp.items() + [('task_set', ts)] + [('graphs', graphs)] + [('sub_task_set', subts)]))
+        self._write_params(dict(dp.items() + [('num_tasks', len(ts))]))
+
+    def _create_tasks(self, dp):
+        '''Create a task set.'''
+        ts, graphs, subts = create_pgm_task_set(dp)
+        # convert to ms
+        for t in ts:
+            t.cost = t.cost / 1000.0
+            t.period = t.period / 1000.0
+            t.deadline = t.deadline / 1000.0
+
+        return ts, graphs, subts
+
+    def _customize(self, taskset, exp_params):
+        '''Configure a generated taskset with extra parameters.'''
+        pass
 
 class PartitionedGenerator(EdfGenerator):
     def __init__(self, scheduler, templates, options, params):
@@ -88,9 +144,6 @@ class CflSplitGenerator(EdfGenerator):
     TP_CLUSTER = "plugins/C-FL-split/cluster{$level}"
     CLUSTER_OPTION = gen.GenOption('level', ['L1', 'L2', 'L3', 'ALL'], 'L2',
                                    'Cache clustering level.',)
-#    CLUSTER_SZ_OPTION = gen.GenOption('cluster_size', ['1', '2', '6', '24'], '2',
-#                                      'Cluster size.',)
-
     def __init__(self, params={}):
         super(CflSplitGenerator, self).__init__("C-FL-split",
                 [CflSplitGenerator.TP_CLUSTER, TP_CLST_TASK],
@@ -99,8 +152,6 @@ class CflSplitGenerator(EdfGenerator):
 
     def _customize(self, taskset, exp_params):
         cpus = int(exp_params['cpus'])
-#        cluster_sz = int(exp_params['cluster_size'])
-
         if exp_params['level'] == 'L1':
             cluster_sz = 1
         elif exp_params['level'] == 'L2':
@@ -127,3 +178,44 @@ class GedfGenerator(EdfGenerator):
     def __init__(self, params={}):
         super(GedfGenerator, self).__init__("GSN-EDF", [TP_GLOB_TASK],
                                             [], params)
+
+class CflSplitPgmGenerator(EdfPgmGenerator):
+    TP_CLUSTER = "plugins/C-FL-split/cluster{$level}"
+    CLUSTER_OPTION = gen.GenOption('level', ['L1', 'L2', 'L3', 'ALL'], 'L2',
+                                   'Cache clustering level.',)
+
+    def __init__(self, params={}):
+        super(CflSplitPgmGenerator, self).__init__("C-FL-split",
+                [CflSplitPgmGenerator.TP_CLUSTER],
+                [CflSplitPgmGenerator.CLUSTER_OPTION],
+                params)
+
+    def _customize(self, ts, graphs, subts, dp):
+        exp_params = dict(dp.items())
+        cpus = int(exp_params['cpus'])
+
+        if exp_params['level'] == 'L1':
+            cluster_sz = 1
+        elif exp_params['level'] == 'L2':
+            cluster_sz = 2
+        elif exp_params['level'] == 'L3':
+            cluster_sz = 6
+        elif exp_params['level'] == 'ALL':
+            cluster_sz = 24
+        else:
+            assert False
+
+        dp.nr_clusters = cpus / cluster_sz
+        assert dp.nr_clusters * cluster_sz == cpus
+
+        overheads = get_overheads(dp, dp.system)
+        # partition the task set among the clusters
+        ts = partition.clear_partitioning(ts)
+        ts = PARTITION_METHOD[exp_params['partitions']](ts, graphs, subts, cluster_sz, dp.nr_clusters, dp.system, dp.heur_aggressiveness, overheads)
+
+        # compute split factor
+        working_ts = ts
+        partitions = get_partitions(working_ts, dp.nr_clusters, cluster_sz)
+        is_srt_sched = split.compute_splits_nolock(overheads, False, working_ts, partitions, bypass_split = not dp.job_splitting)
+
+        return working_ts
diff --git a/gen/generator.py b/gen/generator.py
index 0999e84..7456021 100644
--- a/gen/generator.py
+++ b/gen/generator.py
@@ -3,12 +3,18 @@ import os
 import pprint
 import schedcat.generator.tasks as tasks
 import shutil as sh
+import ecrts14.topology as topology
+import ecrts14.graph as graph
 
 from Cheetah.Template import Template
 from common import get_config_option,num_cpus,recordtype
 from config.config import FILES,PARAMS
 from gen.dp import DesignPointGenerator
+from ecrts14.generator import DesignPointGenerator as PgmDesignPointGenerator
 from parse.col_map import ColMapBuilder
+from numpy import arange
+from schedcat.util.storage import storage
+from ecrts14.machines import machines
 
 NAMED_PERIODS = {
     'harmonic' : rv.uniform_choice([25, 50, 100, 200]),
@@ -75,13 +81,14 @@ class Generator(object):
         release_master = list(set([False, bool(rm_config)]))
 
 
-        return [GenOption('tasks', int, range(cpus, 5*cpus, cpus),
-                    'Number of tasks per experiment.'),
+        return [GenOption('partitions', str, ['no_cache', 'parallel', 'cache_aware', 'cache_aware_edges', 'cache_aware_bfs', 'cache_aware_dfs'],
+                    'Partition methods.'),
+                GenOption('tasks', int, [0],
+                    'Number of tasks'),
                 GenOption('cpus', int, [cpus],
                     'Number of processors on target system.'),
                 GenOption('release_master', [True,False], release_master,
-                    'Redirect release interrupts to a single CPU.'),
-                GenOption('duration', float, [30], 'Experiment duration.')]
+                    'Redirect release interrupts to a single CPU.')]
 
     @staticmethod
     def _dist_option(name, default, distribution, help):
@@ -122,14 +129,93 @@ class Generator(object):
     def _write_schedule(self, params):
         '''Write schedule file using current template for @params.'''
         sched_file = self.out_dir + "/" + FILES['sched_file']
+
         with open(sched_file, 'wa') as f:
             f.write(str(Template(self.template, searchList=[params])))
 
+    def _write_pgm_schedule(self, pgm_params):
+        '''Write schedule file using current template for @pgm_params.'''
+        # build pgmrt arguments from the graphs and tasks
+        sched_file = self.out_dir + "/" + FILES['sched_file']
+
+        graph_desc_arg = []
+        rates_arg = []
+        exec_arg = []
+        cluster_arg = []
+        clustersz_arg = []
+        wss_arg = []
+        split_arg = []
+        for g in pgm_params['graphs']:
+            cluster_arg_t = []
+            graph_desc_arg_t = []
+            exec_arg_t = []
+            rates_arg_t = []
+            wss_arg_t = []
+            split_arg_t = []
+
+            for n in g.nodes:
+                assert n.graph.id == g.id
+                cluster_arg_t.append('node_' + str(n.id) + ':' + str(n.task.partition))
+                exec_arg_t.append('node_' + str(n.id) + ':' + str(n.task.cost))
+                split_arg_t.append('node_' + str(n.id) + ':' + str(n.task.split))
+                if n.isSrc == True:
+                    # assume that x=1
+                    rates_arg_t.append('node_' + str(n.id) + ':1:' + str(n.task.period))
+                # get cluster size
+                clustersz_arg_t = str(pgm_params['cpus'] / pgm_params['nr_clusters'])
+                if len(g.nodes) == 1:
+                    graph_desc_arg_t.append('node_' + str(n.id))
+                for succ in n.succ:
+                    graph_desc_arg_t.append('node_' + str(n.id) + ':node_' + str(succ.id))
+                # wss parameter
+                for e in n.outEdges:
+                    wss_arg_t.append('node_' + str(n.id) + ':node_' + str(e.s.id) + ':' + str(e.wss))
+
+            # combine arguments into comma-separated strings
+            cluster_arg_t = ','.join(cluster_arg_t)
+            graph_desc_arg_t = ','.join(graph_desc_arg_t)
+            exec_arg_t = ','.join(exec_arg_t)
+            wss_arg_t = ','.join(wss_arg_t)
+            split_arg_t = ','.join(split_arg_t)
+            rates_arg_t = ','.join(rates_arg_t)
+
+            cluster_arg.append(cluster_arg_t)
+            exec_arg.append(exec_arg_t)
+            graph_desc_arg.append(graph_desc_arg_t)
+            wss_arg.append(wss_arg_t)
+            split_arg.append(split_arg_t)
+            rates_arg.append(rates_arg_t)
+            clustersz_arg.append(clustersz_arg_t)
+        #print('----------')
+        #print(pgm_params)
+
+        pgm_args = []
+        for i in range(len(pgm_params['graphs'])):
+            pgm_args_t = ''
+            pgm_args_t += '--wait --cluster ' + cluster_arg[i] + ' --clusterSize ' + clustersz_arg[i] + ' --name graph_' + str(pgm_params['graphs'][i].id)
+            pgm_args_t += ' --graph ' + graph_desc_arg[i] + ' --rates ' + rates_arg[i] + ' --execution ' + exec_arg[i]
+            pgm_args_t += ' --split ' + split_arg[i]
+            if len(wss_arg[i]) != 0:
+                pgm_args_t += ' --wss ' + wss_arg[i]
+
+#            pgm_args_t += ' --duration ' + str(pgm_params['duration'])
+            # last argument must always be --duration; the actual value is appended by run_exps.py
+            pgm_args_t += ' --duration'
+
+            pgm_args.append(pgm_args_t)
+
+        with open(sched_file, 'wa') as f:
+            f.write(str(Template(self.template, searchList=[pgm_params])) + '\n')
+            for s in pgm_args:
+                f.write(s + '\n')
 
     def _write_params(self, params):
         '''Write out file with relevant parameters.'''
         # Don't include this in the parameters. It will be automatically added
         # in run_exps.py
+        if 'system' in params:
+            del params['system']
+
         if 'tasks' in params:
             tasks = params.pop('tasks')
         else:
@@ -240,6 +326,88 @@ class Generator(object):
 
     HELP_INDENT = 17
 
+    def create_pgm_exps(self, opts):
+        '''Create experiments for all possible combinations of params in
+        @out_dir. Overwrite existing files if @force is True.'''
+        builder = ColMapBuilder()
+
+        out_dir = opts.out_dir
+        force = opts.force
+        trials = opts.trials
+
+        # hard-coded design-point defaults
+        exp = storage()
+        exp.host = ['ludwig']
+        cpus = 24.0
+        exp.processors = [cpus]
+        exp.task_util = ['uni-medium']
+        exp.period = ['uni-long']
+        exp.sched = ['edf']
+        exp.sys_util = [ 20.0 ] #arange(1, cpus+0.1, 0.1)
+
+        exp.wcycle = [ 0 ]
+        exp.walk = ['seq']
+        exp.huge_pages = [False]
+        exp.uncached = [False]
+        exp.polluters = [False]
+        exp.ovh_type = ['max']
+        exp.release_master = [False]
+        exp.heur_aggressiveness = [0.75]
+        exp.job_splitting = [True]
+        exp.update(self.params)
+
+        # Track changing values so only relevant parameters are included
+        # in directory names
+        for dp in PgmDesignPointGenerator(exp):
+            for k, v in dp.iteritems():
+                builder.try_add(k, v)
+        col_map = builder.build()
+
+        for trial in xrange(trials):
+            dp.num_graphs = graph.uniform(opts.num_graphs, opts.num_graphs)
+            dp.depth_factor = [1.0/3.0, 2.0/3.0]
+            dp.node_placement = graph.uniform(opts.node_placement, opts.node_placement)
+            dp.fan_out = graph.geometric(1, opts.fan_out)
+            dp.fan_in_cap = opts.fan_in_cap
+            dp.edge_distance = graph.geometric(1, opts.edge_distance)
+            dp.nr_source = graph.uniform(opts.nr_source, opts.nr_source)
+            dp.nr_sink = graph.uniform(opts.nr_sink, opts.nr_sink)
+            dp.wss = tasks.multimodal([(tasks.uniform_int(1, 2), 6), (tasks.uniform_int(2, 8), 3)])
+
+            # Generate a task set
+            ts, graphs, subts = self._create_tasks(dp)
+            dp.tasks = len(ts)
+
+            for dp in PgmDesignPointGenerator(exp):
+                # Create directory name from relevant parameters
+                dir_leaf = "sched=%s_%s" % (self.scheduler, col_map.encode(dp))
+                dir_leaf = dir_leaf.strip('_') # If there are none
+                dir_leaf += ("_trial=%s" % trial) if trials > 1 else ""
+
+                dir_path = "%s/%s" % (out_dir, dir_leaf.strip('_'))
+
+                if os.path.exists(dir_path):
+                    if force:
+                        sh.rmtree(dir_path)
+                    else:
+                        print("Skipping existing experiment: '%s'" % dir_path)
+                        continue
+
+                os.mkdir(dir_path)
+
+                if trials > 1:
+                    dp[PARAMS['trial']] = trial
+                self.out_dir = dir_path
+
+                dp.system = topology.Topology(machines[dp.host])
+
+                # Write a sched.py and params.py for each partition method
+                self._create_exp(dp, ts, graphs, subts)
+
+                del(self.out_dir)
+                if PARAMS['trial'] in dp:
+                    del dp[PARAMS['trial']]
+
     def print_help(self):
         display_options = [o for o in self.options if not o.hidden]
         s = str(Template("""scheduler $scheduler:
diff --git a/gen_pgm_exps.py b/gen_pgm_exps.py
new file mode 100755
index 0000000..62723ec
--- /dev/null
+++ b/gen_pgm_exps.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+from __future__ import print_function
+
+import gen.generator as gen
+import os
+import re
+import shutil as sh
+import sys
+
+from config.config import DEFAULTS
+from optparse import OptionParser
+
+import ecrts14.graph as graph
+import schedcat.generator.tasks as tasks
+
+def parse_args():
+    parser = OptionParser("usage: %prog [options] [files...] "
+                          "[generators...] [param=val[,val]...]")
+
+    parser.add_option('-o', '--out-dir', dest='out_dir',
+                      help='directory for data output',
+                      default=("%s/%s" % (os.getcwd(), DEFAULTS['out-gen'])))
+    parser.add_option('-f', '--force', action='store_true', default=True,
+                      dest='force', help='overwrite existing data')
+    parser.add_option('-n', '--num-trials', default=1, type='int', dest='trials',
+                      help='number of task systems for every config')
+    parser.add_option('-l', '--list-generators', dest='list_gens',
+                      help='list allowed generators', action='store_true',
+                      default=False)
+    parser.add_option('-d', '--describe-generators', metavar='generator[,..]',
+                      dest='described', default=None,
+                      help='describe parameters for generator(s)')
+    parser.add_option('-m', '--num-graphs', default=24, type='int', dest='num_graphs',
+                      help='number of graphs for a task set')
+    parser.add_option('-p', '--node-placement', default=1, type='int', dest='node_placement',
+                      help='node placement of the graph')
+    parser.add_option('-t', '--fan-out', default=3, type='int', dest='fan_out',
+                      help='fan-out of a node')
+    parser.add_option('-i', '--fan-in-cap', default=3, type='int', dest='fan_in_cap',
+                      help='fan-in cap')
+    parser.add_option('-g', '--edge-distance', default=3, type='int', dest='edge_distance',
+                      help='edge distance')
+    parser.add_option('-u', '--nr-source', default=1, type='int', dest='nr_source',
+                      help='number of source nodes')
+    parser.add_option('-v', '--nr-sink', default=1, type='int', dest='nr_sink',
+                      help='number of sink nodes')
+
+
+    return parser.parse_args()
+
+def load_file(fname):
+    with open(fname, 'r') as f:
+        data = f.read().strip()
+    try:
+        values = eval(data)
+        if 'generator' not in values:
+            raise ValueError()
+        generator = values['generator']
+        del values['generator']
+        return generator, values
+    except:
+        raise IOError("Invalid generation file: %s" % fname)
+
+def print_descriptions(described):
+    for generator in described.split(','):
+        if generator not in gen.get_generators():
+            sys.stderr.write("No generator '%s'\n" % generator)
+        else:
+            print("Generator '%s', " % generator)
+            gen.get_generators()[generator]().print_help()
+
+def main():
+    opts, args = parse_args()
+
+    # Print generator information on the command line
+    if opts.list_gens:
+        print(", ".join(gen.get_generators()))
+    if opts.described != None:
+        print_descriptions(opts.described)
+    if opts.list_gens or opts.described:
+        return 0
+
+    params = filter(lambda x : re.match("\w+=\w+", x), args)
+
+    # Ensure some generator is loaded
+    args = list(set(args) - set(params))
+    args = args or gen.get_generators().keys()
+
+    # Split into files to load and named generators
+    files = filter(os.path.exists, args)
+    gen_list = list(set(args) - set(files))
+
+    # Parse all specified parameters to be applied to every experiment
+    global_params = dict(map(lambda x : tuple(x.split("=")), params))
+    for k, v in global_params.iteritems():
+        global_params[k] = v.split(',')
+
+    exp_sets = map(load_file, files)
+    exp_sets += map(lambda x: (x, {}), gen_list)
+
+    if opts.force and os.path.exists(opts.out_dir):
+        sh.rmtree(opts.out_dir)
+    if not os.path.exists(opts.out_dir):
+        os.mkdir(opts.out_dir)
+
+    for gen_name, gen_params in exp_sets:
+        if gen_name not in gen.get_generators():
+            raise ValueError("Invalid generator '%s'" % gen_name)
+
+        sys.stderr.write("Creating experiments with %s generator...\n" % gen_name)
+
+        params = dict(gen_params.items() + global_params.items())
+        clazz = gen.get_generators()[gen_name]
+
+        generator = clazz(params=params)
+
+        generator.create_pgm_exps(opts)
+
+    sys.stderr.write("Experiments saved in %s.\n" % opts.out_dir)
+
+if __name__ == '__main__':
+    main()
diff --git a/run/experiment.py b/run/experiment.py
index da0e32e..522e490 100644
--- a/run/experiment.py
+++ b/run/experiment.py
@@ -23,7 +23,7 @@ class Experiment(object):
     INTERRUPTED_DIR = ".interrupted"
 
     def __init__(self, name, scheduler, working_dir, finished_dir,
-                 proc_entries, executables, tracer_types):
+                 proc_entries, executables, tracer_types, num_tasks):
         '''Run an experiment, optionally wrapped in tracing.'''
         self.name = name
         self.scheduler = scheduler
@@ -34,6 +34,7 @@ class Experiment(object):
         self.exec_out = None
         self.exec_err = None
         self.tracer_types = tracer_types
+        self.num_tasks = num_tasks
 
         self.regular_tracers = []
         self.exact_tracers = []
@@ -134,11 +135,11 @@ class Experiment(object):
         wait_start = time.time()
         num_ready = lu.waiting_tasks()
 
-        while num_ready < len(self.executables):
+        while num_ready < self.num_tasks:
             # Quit if too much time passes without a task becoming ready
             if time.time() - wait_start > 180.0:
                 s = "waiting: %d, submitted: %d" %\
-                    (lu.waiting_tasks(), len(self.executables))
+                    (lu.waiting_tasks(), self.num_tasks)
                 raise Exception("Too much time spent waiting for tasks! %s" % s)
 
             time.sleep(1)
@@ -153,14 +154,13 @@ class Experiment(object):
             num_ready = now_ready
 
     def __run_tasks(self):
-        self.log("Starting %d tasks" % len(self.executables))
+        self.log("Starting %d real-time tasks, %d 'pgmrt' instances" % (self.num_tasks, len(self.executables)))
 
         for i,e in enumerate(self.executables):
             try:
                 e.execute()
             except:
                 raise Exception("Executable failed to start: %s" % e)
-
         self.__wait_for_ready()
 
         # Exact tracers (like overheads) must be started right after release or
@@ -170,14 +170,14 @@ class Experiment(object):
             time.sleep(1)
 
         try:
-            self.log("Releasing %d tasks" % len(self.executables))
+            self.log("Releasing %d tasks" % self.num_tasks)
             released = lu.release_tasks()
 
-            if released != len(self.executables):
+            if released != self.num_tasks:
                 # Some tasks failed to release, kill all tasks and fail
                 # Need to release non-released tasks before they can be killed
                 raise Exception("Released %s tasks, expected %s tasks" %
-                                (released, len(self.executables)))
+                                (released, self.num_tasks))
 
             self.log("Waiting for program to finish...")
             for e in self.executables:
@@ -212,6 +212,7 @@ class Experiment(object):
 
     def __setup(self):
         self.__make_dirs()
+        self.__clean_up_shm()
         self.__assign_executable_cwds()
         self.__setup_tracers()
 
@@ -244,6 +245,10 @@ class Experiment(object):
 
         os.system('sync')
 
+    def __clean_up_shm(self):
+        self.log("Removing temporary files in /dev/shm")
+        os.system('rm -r /dev/shm/*')
+
     def log(self, msg):
         print("[Exp %s]: %s" % (self.name, msg))
 
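
Why Experiment now takes num_tasks: a single pgmrt process can host several
graph nodes, i.e. several real-time tasks, so len(self.executables) no longer
equals the number of tasks to wait for and release. The generator records the
task count in each experiment's params.py, roughly (values illustrative):

    {'scheduler': 'C-FL-split', 'num_tasks': 42, ...}

and run_exps.py (below) passes data.params.file_params['num_tasks'] through
to Experiment, which uses it when waiting for and releasing tasks.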
diff --git a/run_exps.py b/run_exps.py
index 21666a9..3fff667 100755
--- a/run_exps.py
+++ b/run_exps.py
@@ -65,7 +65,7 @@ def parse_args():
 
     parser.add_option('-s', '--scheduler', dest='scheduler',
                       help='scheduler for all experiments')
-    parser.add_option('-d', '--duration', dest='duration', type='int',
+    parser.add_option('-d', '--duration', dest='duration', type='int', default=30,
                       help='duration (seconds) of tasks')
     parser.add_option('-i', '--ignore-environment', dest='ignore',
                       action='store_true', default=False,
@@ -109,7 +109,7 @@ def convert_data(data):
109 r"\s*{\s*(?P<CONTENT>.*?)\s*?}$)|" 109 r"\s*{\s*(?P<CONTENT>.*?)\s*?}$)|"
110 r"(?P<TASK>^" 110 r"(?P<TASK>^"
111 r"(?:(?P<PROG>[^\d\-\s][\w\.]*?) )?\s*" 111 r"(?:(?P<PROG>[^\d\-\s][\w\.]*?) )?\s*"
112 r"(?P<ARGS>[\w\-_\d\. \=]+)\s*$)", 112 r"(?P<ARGS>[\w\-_\d\. \=\:\,]+)\s*$)",
113 re.S|re.I|re.M) 113 re.S|re.I|re.M)
114 114
115 procs = [] 115 procs = []
@@ -140,10 +140,8 @@ def fix_paths(schedule, exp_dir, sched_file):
         elif re.match(r'.*\w+\.[a-zA-Z]\w*', arg):
             sys.stderr.write("WARNING: non-existent file '%s' " % arg +
                              "may be referenced:\n\t%s" % sched_file)
-
         schedule['task'][idx] = (task, args)
 
-
 def load_schedule(name, fname, duration):
     '''Turn schedule file @fname into ProcEntry's and Executable's which execute
     for @duration time.'''
@@ -153,7 +151,6 @@ def load_schedule(name, fname, duration):
         schedule = eval(data)
     except:
         schedule = convert_data(data)
-
     sched_dir = os.path.split(fname)[0]
 
     # Make paths relative to the file's directory
@@ -183,7 +180,6 @@ def load_schedule(name, fname, duration):
             real_args = ['-w'] + real_args
 
         executables += [Executable(real_task, real_args)]
-
     return proc_entries, executables
 
 
@@ -287,7 +283,7 @@ def run_experiment(data, start_message, ignore, jabber):
     procs, execs = load_schedule(data.name, data.sched_file, data.params.duration)
 
     exp = Experiment(data.name, data.params.scheduler, work_dir,
-                     data.out_dir, procs, execs, data.params.tracers)
+                     data.out_dir, procs, execs, data.params.tracers, data.params.file_params['num_tasks'])
 
     exp.log(start_message)
 
@@ -484,7 +480,6 @@ def run_exps(exps, opts):
             sys.stderr.write("Failed experiment %s\n" % exp.name)
             sys.stderr.write("%s\n" % e)
             exp.state = ExpState.Failed
-
         if exp.state is ExpState.Failed and opts.retry:
             exps_remaining += [(i, exp)]
 