Diffstat (limited to 'parse_exps.py')

 -rwxr-xr-x  parse_exps.py | 43 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 40 insertions(+), 3 deletions(-)

diff --git a/parse_exps.py b/parse_exps.py
index cc4372a..7a99d8a 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -1,12 +1,14 @@
 #!/usr/bin/env python
 from __future__ import print_function
 
+import copy
 import common as com
 import multiprocessing
 import os
 import parse.ft as ft
 import parse.sched as st
 import pickle
+import re
 import shutil as sh
 import sys
 import traceback
@@ -38,6 +40,9 @@ def parse_args():
                       default=max(multiprocessing.cpu_count() - 1, 1),
                       type='int', dest='processors',
                       help='number of threads for processing')
+    parser.add_option('-s', '--scale-against', dest='scale_against',
+                      metavar='PARAM=VALUE', default="type=unmanaged",
+                      help='calculate task scaling factors against these configs')
 
     return parser.parse_args()
 
@@ -45,9 +50,9 @@ def parse_args():
 ExpData = namedtuple('ExpData', ['path', 'params', 'work_dir'])
 
 
-def parse_exp(exp_force):
+def parse_exp(exp_force_base):
     # Tupled for multiprocessing
-    exp, force = exp_force
+    exp, force, base_table = exp_force_base
 
     result_file = exp.work_dir + "/exp_point.pkl"
     should_load = not force and os.path.exists(result_file)
@@ -76,6 +81,12 @@ def parse_exp(exp_force):
         # Write scheduling statistics into result
         st.extract_sched_data(result, exp.path, exp.work_dir)
 
+        # Write scaling factors into result
+        if base_table and exp.params in base_table:
+            base_exp = base_table[exp.params]
+            if base_exp != exp:
+                st.extract_mc_data(result, exp.path, base_exp.path)
+
         with open(result_file, 'wb') as f:
             pickle.dump(result, f)
     except:
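The added block only records scaling factors when a baseline experiment with matching parameters exists and is a different experiment. A hedged sketch of that guard, with a plain dict keyed by a hashable params tuple standing in for the script's TupleTable and a lambda standing in for st.extract_mc_data:

    from __future__ import print_function
    from collections import namedtuple

    ExpData = namedtuple('ExpData', ['path', 'params', 'work_dir'])

    def add_scaling(result, exp, base_table, extract_mc_data):
        # Skip if no baselines were requested or none matches this exp
        if base_table and exp.params in base_table:
            base_exp = base_table[exp.params]
            if base_exp != exp:  # a baseline is never scaled against itself
                extract_mc_data(result, exp.path, base_exp.path)

    base = ExpData('run/base', ('type', 'unmanaged'), 'tmp/base')
    exp = ExpData('run/mc', ('type', 'unmanaged'), 'tmp/mc')
    table = {base.params: base}

    add_scaling({}, exp, table, lambda r, p, b: print(p, '->', b))
    add_scaling({}, base, table, None)  # matches itself, so no call is made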
@@ -127,6 +138,27 @@ def load_exps(exp_dirs, cm_builder, force):
     return exps
 
 
+def make_base_table(cmd_scale, col_map, exps):
+    if not cmd_scale:
+        return None
+
+    # Configuration key for task systems used to calculate task
+    # execution scaling factors
+    [(param, value)] = re.findall("(.*)=(.*)", cmd_scale)
+
+    if param not in col_map:
+        raise IOError("Base column '%s' not present in any parameters!" % param)
+
+    base_table = TupleTable(copy.deepcopy(col_map))
+
+    # Fill table with exps we will scale against
+    for exp in exps:
+        if exp.params[param] == value:
+            base_table[exp.params] = exp
+
+    return base_table
+
+
 def get_dirs(args):
     if args:
         return args
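make_base_table accepts exactly one PARAM=VALUE pair: re.findall returns a list of (param, value) group tuples, and unpacking it as a one-element list raises if the option matched zero or several pairs. A small standalone sketch of that parse step (split_config is a hypothetical name):

    import re

    def split_config(cmd_scale):
        # Exactly one match is allowed; anything else raises ValueError
        [(param, value)] = re.findall("(.*)=(.*)", cmd_scale)
        return param, value

    assert split_config("type=unmanaged") == ("type", "unmanaged")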
@@ -145,12 +177,17 @@ def fill_table(table, exps, opts):
     procs = min(len(exps), opts.processors)
     logged = multiprocessing.Manager().list()
 
+    sys.stderr.write("Parsing data...\n")
+
+    base_table = make_base_table(opts.scale_against,
+                                 table.get_col_map(), exps)
+
     pool = multiprocessing.Pool(processes=procs,
                 # Share a list of previously logged messages amongst processes
                 # This is for the com.log_once method to use
                 initializer=com.set_logged_list, initargs=(logged,))
 
-    pool_args = zip(exps, [opts.force]*len(exps))
+    pool_args = zip(exps, [opts.force]*len(exps), [base_table]*len(exps))
     enum = pool.imap_unordered(parse_exp, pool_args, 1)
 
     try:
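fill_table hands every worker the same force flag and base_table by repeating them with list multiplication and zipping against the experiment list; Pool.imap_unordered then feeds one packed tuple at a time to parse_exp, which is why the worker unpacks a single argument. A self-contained sketch of that fan-out (the worker body is illustrative):

    import multiprocessing

    def parse_exp(exp_force_base):
        exp, force, base_table = exp_force_base  # tupled for multiprocessing
        return '%s force=%s scaling=%s' % (exp, force, base_table is not None)

    if __name__ == '__main__':
        exps = ['exp-1', 'exp-2', 'exp-3']
        base_table = None  # stands in for the TupleTable from make_base_table
        pool = multiprocessing.Pool(processes=2)
        pool_args = zip(exps, [False] * len(exps), [base_table] * len(exps))
        for line in pool.imap_unordered(parse_exp, pool_args, 1):
            print(line)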