author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-04-27 15:34:13 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-04-27 15:34:13 -0400
commit    3f706cd240d039365cbb8f9975ad97480d8b6145 (patch)
tree      1cdcbaf78b84c7a6383505896e89e29574cf65b2 /parse_exps.py
parent    15f231a79320cbc97cd88d8a4751515a47ce223e (diff)
Fixes.
Diffstat (limited to 'parse_exps.py')
-rwxr-xr-x  parse_exps.py  50
1 files changed, 32 insertions, 18 deletions
diff --git a/parse_exps.py b/parse_exps.py
index 7a99d8a..98f95df 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -52,7 +52,7 @@ ExpData = namedtuple('ExpData', ['path', 'params', 'work_dir'])
 
 def parse_exp(exp_force_base):
     # Tupled for multiprocessing
-    exp, force, base_table = exp_force_base
+    exp, force, base_exp = exp_force_base
 
     result_file = exp.work_dir + "/exp_point.pkl"
     should_load = not force and os.path.exists(result_file)
@@ -81,11 +81,9 @@ def parse_exp(exp_force_base):
     # Write scheduling statistics into result
     st.extract_sched_data(result, exp.path, exp.work_dir)
 
-    # Write scaling factors into result
-    if base_table and exp.params in base_table:
-        base_exp = base_table[exp.params]
-        if base_exp != exp:
-            st.extract_mc_data(result, exp.path, base_exp.path)
+    if base_exp:
+        # Write scaling factors into result
+        st.extract_scaling_data(result, exp.path, base_exp.path)
 
     with open(result_file, 'wb') as f:
         pickle.dump(result, f)
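
Note: the two hunks above leave parse_exp's cache-or-recompute behavior
intact: a worker reuses the pickled exp_point.pkl unless force is set, and
re-pickles whatever it recomputes. A minimal standalone sketch of that
pattern, assuming only what the hunks show; load_or_parse and compute are
hypothetical names standing in for parse_exp's body and its st.extract_*
calls, and the recover-on-unreadable-pickle branch is an assumption not
shown in this diff:

    import os
    import pickle

    def load_or_parse(work_dir, force, compute):
        # Mirrors result_file / should_load from the hunks above
        result_file = os.path.join(work_dir, "exp_point.pkl")
        if not force and os.path.exists(result_file):
            with open(result_file, 'rb') as f:
                try:
                    return pickle.load(f)   # reuse the cached result
                except Exception:
                    pass                    # unreadable cache: recompute
        result = compute()
        with open(result_file, 'wb') as f:
            pickle.dump(result, f)          # refresh the cache
        return result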
@@ -138,18 +136,22 @@ def load_exps(exp_dirs, cm_builder, force):
     return exps
 
 
-def make_base_table(cmd_scale, col_map, exps):
+def make_base_table(cmd_scale, builder, exps):
     if not cmd_scale:
         return None
 
     # Configuration key for task systems used to calculate task
     # execution scaling factors
-    [(param, value)] = dict(re.findall("(.*)=(.*)", cmd_scale))
+    [(param, value)] = re.findall("(.*)=(.*)", cmd_scale)
 
-    if param not in col_map:
-        raise IOError("Base column '%s' not present in any parameters!" % param)
 
-    base_table = TupleTable(copy.deepcopy(col_map))
+    if param not in builder:
+        com.log_once("Base column '%s' not present in any parameters!" % param)
+        com.log_once("Scaling factors will not be included.")
+
+    builder.try_remove(param)
+    base_map = builder.build()
+    base_table = TupleTable(base_map)
 
     # Fill table with exps who we will scale against
     for exp in exps:
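
Note: removing param from the builder before building the column map is what
makes the table lookups line up: a base experiment and the runs scaled
against it then differ in no key field, so exp.params finds the base. A toy
analogue using a plain dict and a made-up 'speed' parameter; TupleTable and
ColMapBuilder are not shown in this patch, so their exact semantics here are
inferred:

    def key_without(params, param):
        # Drop the scaling parameter so base and scaled runs share a key
        return tuple(sorted((k, v) for k, v in params.items() if k != param))

    runs = [{'scheduler': 'GSN-EDF', 'speed': 'max'},   # base run
            {'scheduler': 'GSN-EDF', 'speed': 'min'}]   # run to be scaled

    # make_base_table keeps only the runs matching the "param=value" argument
    base_table = {key_without(r, 'speed'): r
                  for r in runs if r['speed'] == 'max'}

    for r in runs:
        base = base_table.get(key_without(r, 'speed'))
        print(r['speed'], '->', base and base['speed'])
    # max -> max   (matches itself; get_bases() filters this case out)
    # min -> max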
@@ -171,23 +173,35 @@ def get_dirs(args):
     return [os.getcwd()]
 
 
-def fill_table(table, exps, opts):
-    sys.stderr.write("Parsing data...\n")
+def get_bases(builder, exps, opts):
+    '''Return a matching array of experiments against which scaling factors
+    will be calculated.'''
+    bases = [None]*len(exps)
+
+    base_table = make_base_table(opts.scale_against, builder, exps)
+    if not base_table:
+        return bases
+
+    for i,exp in enumerate(exps):
+        if exp.params in base_table and base_table[exp.params] != exp:
+            bases[i] = base_table[exp.params]
 
+    return bases
+
+
+def fill_table(table, builder, exps, opts):
     procs = min(len(exps), opts.processors)
     logged = multiprocessing.Manager().list()
+    bases = get_bases(builder, exps, opts)
 
     sys.stderr.write("Parsing data...\n")
 
-    base_table = make_base_table(opts.scale_against,
-                                 table.get_col_map(), exps)
-
     pool = multiprocessing.Pool(processes=procs,
                 # Share a list of previously logged messages amongst processes
                 # This is for the com.log_once method to use
                 initializer=com.set_logged_list, initargs=(logged,))
 
-    pool_args = zip(exps, [opts.force]*len(exps), [base_table]*len(exps))
+    pool_args = zip(exps, [opts.force]*len(exps), bases)
     enum = pool.imap_unordered(parse_exp, pool_args, 1)
 
     try:
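
Note: the rewritten pool_args relies on index alignment. zip() pairs the i-th
experiment with the i-th entry of bases, so each worker receives exactly its
own base experiment (or None) rather than every worker probing a shared
table. Toy values below; only the zip shape comes from the patch:

    exps  = ['exp0', 'exp1', 'exp2']    # stand-ins for ExpData tuples
    bases = [None, 'base1', None]       # shape of get_bases() output
    force = False

    pool_args = zip(exps, [force]*len(exps), bases)
    for exp, frc, base in pool_args:
        print(exp, frc, base)
    # exp0 False None    -> parse_exp skips scaling
    # exp1 False base1   -> parse_exp calls st.extract_scaling_data
    # exp2 False None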
@@ -259,7 +273,7 @@ def main():
     col_map = builder.build()
     table = TupleTable(col_map)
 
-    fill_table(table, exps, opts)
+    fill_table(table, builder, exps, opts)
 
     if not table:
         sys.stderr.write("Found no data to parse!")