diff options
Diffstat (limited to 'parse_exps.py')
| -rwxr-xr-x | parse_exps.py | 39 |
1 file changed, 36 insertions, 3 deletions
diff --git a/parse_exps.py b/parse_exps.py
index c254536..9475cfc 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -2,10 +2,12 @@
| 2 | from __future__ import print_function | 2 | from __future__ import print_function |
| 3 | 3 | ||
| 4 | import config.config as conf | 4 | import config.config as conf |
| 5 | import copy | ||
| 5 | import os | 6 | import os |
| 6 | import parse.ft as ft | 7 | import parse.ft as ft |
| 7 | import parse.sched as st | 8 | import parse.sched as st |
| 8 | import pickle | 9 | import pickle |
| 10 | import re | ||
| 9 | import shutil as sh | 11 | import shutil as sh |
| 10 | import sys | 12 | import sys |
| 11 | import traceback | 13 | import traceback |
@@ -35,6 +37,9 @@ def parse_args():
| 35 | parser.add_option('-p', '--processors', default=max(cpu_count() - 1, 1), | 37 | parser.add_option('-p', '--processors', default=max(cpu_count() - 1, 1), |
| 36 | type='int', dest='processors', | 38 | type='int', dest='processors', |
| 37 | help='number of threads for processing') | 39 | help='number of threads for processing') |
| 40 | parser.add_option('-s', '--scale-against', dest='scale_against', | ||
| 41 | metavar='PARAM=VALUE', default="type=unmanaged", | ||
| 42 | help='calculate task scaling factors against these configs') | ||
| 38 | 43 | ||
| 39 | return parser.parse_args() | 44 | return parser.parse_args() |
| 40 | 45 | ||
@@ -82,9 +87,9 @@ def load_exps(exp_dirs, cm_builder, force):
| 82 | 87 | ||
| 83 | return exps | 88 | return exps |
| 84 | 89 | ||
| 85 | def parse_exp(exp_force): | 90 | def parse_exp(exp_force_base): |
| 86 | # Tupled for multiprocessing | 91 | # Tupled for multiprocessing |
| 87 | exp, force = exp_force | 92 | exp, force, base_table = exp_force_base |
| 88 | 93 | ||
| 89 | result_file = exp.work_dir + "/exp_point.pkl" | 94 | result_file = exp.work_dir + "/exp_point.pkl" |
| 90 | should_load = not force and os.path.exists(result_file) | 95 | should_load = not force and os.path.exists(result_file) |
@@ -109,6 +114,11 @@ def parse_exp(exp_force):
| 109 | # Write scheduling statistics into result | 114 | # Write scheduling statistics into result |
| 110 | st.extract_sched_data(result, exp.path, exp.work_dir) | 115 | st.extract_sched_data(result, exp.path, exp.work_dir) |
| 111 | 116 | ||
| 117 | if base_table and exp.params in base_table: | ||
| 118 | base_exp = base_table[exp.params] | ||
| 119 | if base_exp != exp: | ||
| 120 | st.extract_scaling_data(result, exp.path, base_exp.path) | ||
| 121 | |||
| 112 | with open(result_file, 'wb') as f: | 122 | with open(result_file, 'wb') as f: |
| 113 | pickle.dump(result, f) | 123 | pickle.dump(result, f) |
| 114 | except: | 124 | except: |
@@ -116,6 +126,27 @@ def parse_exp(exp_force):
| 116 | 126 | ||
| 117 | return (exp, result) | 127 | return (exp, result) |
| 118 | 128 | ||
| 129 | def make_base_table(cmd_scale, col_map, exps): | ||
| 130 | if not cmd_scale: | ||
| 131 | return None | ||
| 132 | |||
| 133 | # Configuration key for task systems used to calculate task | ||
| 134 | # execution scaling factors | ||
| 135 | [(param, value)] = dict(re.findall("(.*)=(.*)", cmd_scale)) | ||
| 136 | |||
| 137 | if param not in col_map: | ||
| 138 | raise IOError("Base column '%s' not present in any parameters!" % param) | ||
| 139 | |||
| 140 | base_map = copy.deepcopy(col_map) | ||
| 141 | base_table = TupleTable(base_map) | ||
| 142 | |||
| 143 | # Fill table with exps who we will scale against | ||
| 144 | for exp in exps: | ||
| 145 | if exp.params[param] == value: | ||
| 146 | base_table[exp.params] = exp | ||
| 147 | |||
| 148 | return base_table | ||
| 149 | |||
| 119 | def main(): | 150 | def main(): |
| 120 | opts, args = parse_args() | 151 | opts, args = parse_args() |
| 121 | 152 | ||
@@ -135,11 +166,13 @@ def main():
| 135 | col_map = builder.build() | 166 | col_map = builder.build() |
| 136 | result_table = TupleTable(col_map) | 167 | result_table = TupleTable(col_map) |
| 137 | 168 | ||
| 169 | base_table = make_base_table(opts.scale_against, col_map, exps) | ||
| 170 | |||
| 138 | sys.stderr.write("Parsing data...\n") | 171 | sys.stderr.write("Parsing data...\n") |
| 139 | 172 | ||
| 140 | procs = min(len(exps), opts.processors) | 173 | procs = min(len(exps), opts.processors) |
| 141 | pool = Pool(processes=procs) | 174 | pool = Pool(processes=procs) |
| 142 | pool_args = zip(exps, [opts.force]*len(exps)) | 175 | pool_args = zip(exps, [opts.force]*len(exps), [base_table]*len(exps)) |
| 143 | enum = pool.imap_unordered(parse_exp, pool_args, 1) | 176 | enum = pool.imap_unordered(parse_exp, pool_args, 1) |
| 144 | 177 | ||
| 145 | try: | 178 | try: |
