 gen/edf_generators.py | 30 +++++++++++++++++++-----------
 gen/generator.py      | 14 ++++++++++++--
 parse/sched.py        |  6 +++---
 run_exps.py           |  2 +-
 4 files changed, 35 insertions(+), 17 deletions(-)
diff --git a/gen/edf_generators.py b/gen/edf_generators.py
index ca77556..bee0119 100644
--- a/gen/edf_generators.py
+++ b/gen/edf_generators.py
@@ -76,13 +76,14 @@ class EdfPgmGenerator(gen.Generator):
 
     def __make_options(self):
         '''Return generic EDF options.'''
-        return [gen.Generator._dist_option('period', ['uni-long'],
+        return [gen.Generator._dist_option('period', ['uni-moderate'],
                     ecrts14.NAMED_PERIODS_US,
                     'Task period distributions.'),
-                gen.Generator._dist_option('num_graphs', 'uni-medium',
+                gen.Generator._dist_option('num_graphs', ['uni-medium'],
                     ecrts14.NAMED_NUM_GRAPHS,
                     'Number of graphs.'),
-                gen.Generator._dist_option('depth_factor', ['uni-medium'],
+                # gen.Generator._dist_option('depth_factor', ['uni-medium'],
+                gen.Generator._dist_option('depth_factor', ['pipeline'],
                     ecrts14.NAMED_HEIGHT_FACTORS,
                     'Depth of graphs.'),
                 gen.Generator._dist_option('clustering', ['L1', 'L2', 'L3', 'ALL'],
@@ -94,25 +95,27 @@ class EdfPgmGenerator(gen.Generator):
                 gen.Generator._dist_option('node_placement', ['uniform'],
                     ecrts14.NAMED_SHAPES,
                     'The node placement of a graph.'),
-                gen.Generator._dist_option('fan_out', ['uniform_3'],
+                # gen.Generator._dist_option('fan_out', ['uniform_3'],
+                gen.Generator._dist_option('fan_out', ['none'],
                     ecrts14.NAMED_FAN,
                     'The number of out edges of a node.'),
                 gen.Generator._dist_option('fan_in_cap', [3],
                     {},
                     'The maximum number of in-edges of a node.'),
-                gen.Generator._dist_option('edge_distance', ['uniform_3'],
+                # gen.Generator._dist_option('edge_distance', ['uniform_3'],
+                gen.Generator._dist_option('edge_distance', ['none'],
                     ecrts14.NAMED_EDGE_HOP,
                     'The number of hops for an edge.'),
-                gen.Generator._dist_option('wss', ['uni-medium'],
+                gen.Generator._dist_option('wss', ['uni-light', 'uni-medium', 'uni-heavy'],
                     ecrts14.NAMED_EDGE_WSS,
                     'Working set size.'),
-                gen.Generator._dist_option('task_util', ['uni-light'],
+                gen.Generator._dist_option('task_util', ['uni-medium'],
                     NAMED_UTILIZATIONS,
                     'Task utilization.'),
-                gen.Generator._dist_option('polluters', [False, True],
+                gen.Generator._dist_option('polluters', [False],
                     {},
                     'Polluters.'),
-                gen.Generator._dist_option('job_splitting', [False, True],
+                gen.Generator._dist_option('job_splitting', [True],
                     {},
                     'Job splitting.'),
                 gen.Generator._dist_option('ovh_type', 'max',
@@ -121,7 +124,7 @@ class EdfPgmGenerator(gen.Generator):
                 gen.Generator._dist_option('heur_aggressiveness', 0.75,
                     {},
                     'heur_aggressiveness.'),
-                gen.Generator._dist_option('sys_util', [20.0, 16.0, 12.0, 9.0],
+                gen.Generator._dist_option('sys_util', [18.0, 13.0, 8.0],
                     {},
                     'Task set utilization.')]
 
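Editor's note: the `num_graphs` fix above (bare string `'uni-medium'` to one-element list `['uni-medium']`) matters because option values are iterated when design points are enumerated, and iterating a bare string yields its characters. A minimal sketch of the failure mode, using `itertools.product` as a stand-in for the real design-point generator in `gen/`:

    import itertools

    periods = ['uni-moderate']

    # Old value: a bare string. Iterating it yields one design point per
    # character of 'uni-medium' -- ten bogus combinations.
    assert len(list(itertools.product(periods, 'uni-medium'))) == 10

    # Fixed value: a one-element list, yielding the single intended point.
    assert len(list(itertools.product(periods, ['uni-medium']))) == 1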
@@ -256,6 +259,11 @@ class CflSplitPgmGenerator(EdfPgmGenerator):
         overheads = get_overheads(dp, dp.system)
         # do the partition here
         ts = partition.clear_partitioning(ts)
+
+        if overheads.consumer is not None:
+            for t in ts:
+                overheads.consumer.place_production(t)
+
         try:
             ts = PARTITION_METHOD[exp_params['partitions']](ts, graphs, subts, cluster_sz, dp.nr_clusters, dp.system, dp.heur_aggressiveness, overheads)
         except DidNotFit:
@@ -264,7 +272,7 @@ class CflSplitPgmGenerator(EdfPgmGenerator):
         # compute split factor
         working_ts = ts
         partitions = get_partitions(working_ts, dp.nr_clusters, cluster_sz)
-        do_splits = dp.job_splitting == 'True'
+        do_splits = dp.job_splitting
         is_srt_sched = split.compute_splits_nolock(overheads, False, working_ts, partitions, bypass_split = not do_splits)
 
         return True, working_ts
diff --git a/gen/generator.py b/gen/generator.py
index 7b254b5..7a994e9 100644
--- a/gen/generator.py
+++ b/gen/generator.py
@@ -389,8 +389,9 @@ class Generator(object):
             p = storage()
             p.partitioning = part
             p.clustering = clust
-            p.polluting = pol
-            p.splitting = splt
+            # convert from string to bool
+            p.polluting = True if pol == 'True' else False
+            p.splitting = True if splt == 'True' else False
             shared_params.append(p)
 
         for _dp in PgmDesignPointGenerator(exp):
@@ -419,6 +420,9 @@ class Generator(object):
                 created_dirs = []
                 tries += 1
 
+                if tries > 1:
+                    print('Retrying...')
+
                 # Generate a task set
                 ts, graphs, subts = self._create_tasks(dp)
                 dp.tasks = len(ts)
@@ -435,6 +439,9 @@ class Generator(object):
                     dp.job_splitting = shp.splitting
 
                     # Create directory name from relevant parameters
+                    temp = dp.wss
+                    # slam the wss parameter to get a text-based name
+                    dp.wss = _dp.wss
                     dir_parts = []
                     dir_parts.append("sched=%s" % self.scheduler)
                     dir_parts.append("cluster=%s" % shp.clustering)
@@ -450,6 +457,8 @@ class Generator(object):
 
                     print("Generating %s" % dir_leaf)
 
+                    dp.wss = temp
+
                     if os.path.exists(dir_path):
                         if force:
                             sh.rmtree(dir_path)
@@ -482,6 +491,7 @@ class Generator(object):
                     del _dp[PARAMS['trial']]
                     success = True
                 except Exception, e:
+                    print e
                     for d in created_dirs:
                         sh.rmtree(d)
                 if not success:
diff --git a/parse/sched.py b/parse/sched.py
index d773c8a..83004f2 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -13,7 +13,7 @@ from heapq import *
 
 class TimeTracker:
     '''Store stats for durations of time demarcated by sched_trace records.'''
-    def __init__(self, is_valid_duration = lambda x: True, delay_buffer_size = 1, max_pending = 100):
+    def __init__(self, is_valid_duration = lambda x: True, delay_buffer_size = 1, max_pending = -1):
         self.validator = is_valid_duration
         self.avg = self.max = self.num = 0
 
@@ -51,13 +51,13 @@ class TimeTracker:
         # Give up on some jobs if they've been hanging around too long.
         # While not strictly needed, it helps improve performance and
         # it is unlikey to cause too much trouble.
-        if(len(self.start_records) > self.max_pending):
+        if(self.max_pending >= 0 and len(self.start_records) > self.max_pending):
             to_discard = len(self.start_records) - self.max_pending
             for i in range(to_discard):
                 # pop off the oldest jobs
                 del self.start_records[self.start_records.iterkeys().next()]
             self.discarded += to_discard
-        if(len(self.end_records) > self.max_pending):
+        if(self.max_pending >= 0 and len(self.end_records) > self.max_pending):
             to_discard = len(self.end_records) - self.max_pending
             for i in range(to_discard):
                 # pop off the oldest jobs
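Editor's note: changing the default to `max_pending = -1` turns the pending-record cap into an opt-in feature; a negative value now means "never discard". A condensed stand-in for TimeTracker's discard logic illustrating the sentinel (an assumed simplification, written for Python 3, where plain dicts preserve insertion order; the original is Python 2, where `iterkeys().next()` pops an arbitrary key):

    class PendingCap:
        def __init__(self, max_pending=-1):
            self.max_pending = max_pending   # -1 disables discarding
            self.records = {}                # job id -> start timestamp
            self.discarded = 0

        def trim(self):
            # Only trim when a non-negative cap is set and exceeded.
            if self.max_pending >= 0 and len(self.records) > self.max_pending:
                to_discard = len(self.records) - self.max_pending
                for _ in range(to_discard):
                    # pop the oldest (first-inserted) pending record
                    del self.records[next(iter(self.records))]
                self.discarded += to_discard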
diff --git a/run_exps.py b/run_exps.py
index 3fff667..8736d19 100755
--- a/run_exps.py
+++ b/run_exps.py
@@ -31,7 +31,7 @@ ExpData = com.recordtype('ExpData', ['name', 'params', 'sched_file', 'out_dir',
 ConfigResult = namedtuple('ConfigResult', ['param', 'wanted', 'actual'])
 
 '''Maximum times an experiment will be retried'''
-MAX_RETRY = 5
+MAX_RETRY = 15
 '''Location experiment retry count is stored'''
 TRIES_FNAME = ".tries.pkl"
 
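Editor's note: raising MAX_RETRY from 5 to 15 gives tightly constrained design points more chances to produce a feasible task set or experiment run, which is also why gen/generator.py now prints 'Retrying...'. A rough sketch of the retry shape, not run_exps.py's actual loop (the real one also persists the per-experiment count in TRIES_FNAME via pickle):

    MAX_RETRY = 15

    def run_with_retries(run_once):
        tries = 0
        while tries < MAX_RETRY:
            tries += 1
            if tries > 1:
                print('Retrying...')
            try:
                run_once()
                return True
            except Exception as e:
                print(e)   # surface the failure, as generator.py now does
        return False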