author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-04-17 10:43:53 -0400 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-04-17 10:43:53 -0400 |
commit | a0e4b9fe9d7fab9a50a626cfeda3c614a9a6af5d (patch) | |
tree | c2d753ec6533dfd8be46214bc19e2b4aa044af74 | |
parent | f659a11e1c888b01cce64fee5ae064a67aa4d777 (diff) |
Created infrastructure for calculating scaling factors.
-rw-r--r-- | gen/__init__.py | 2 | ||||
-rw-r--r-- | gen/generator.py | 2 | ||||
-rw-r--r-- | gen/mc_generators.py | 7 | ||||
-rw-r--r-- | parse/sched.py | 133 | ||||
-rwxr-xr-x | parse_exps.py | 39 |
5 files changed, 148 insertions, 35 deletions
diff --git a/gen/__init__.py b/gen/__init__.py index 803bb37..dbc7b12 100644 --- a/gen/__init__.py +++ b/gen/__init__.py | |||
@@ -6,4 +6,4 @@ gen.register_generator("G-EDF", edf.GedfGenerator) | |||
6 | gen.register_generator("P-EDF", edf.PedfGenerator) | 6 | gen.register_generator("P-EDF", edf.PedfGenerator) |
7 | gen.register_generator("C-EDF", edf.CedfGenerator) | 7 | gen.register_generator("C-EDF", edf.CedfGenerator) |
8 | gen.register_generator("MC", mc.McGenerator) | 8 | gen.register_generator("MC", mc.McGenerator) |
9 | gen.register_generator("Color-MC", mc.ColorMcGenerator) | 9 | gen.register_generator("COLOR-MC", mc.ColorMcGenerator) |
diff --git a/gen/generator.py b/gen/generator.py index 1205490..693e52f 100644 --- a/gen/generator.py +++ b/gen/generator.py | |||
@@ -77,7 +77,7 @@ class Generator(object): | |||
77 | 77 | ||
78 | def __make_options(self, params): | 78 | def __make_options(self, params): |
79 | '''Return generic Litmus options.''' | 79 | '''Return generic Litmus options.''' |
80 | return [GenOption('num_tasks', int, | 80 | return [GenOption('tasks', int, |
81 | range(self.cpus, 5*self.cpus, self.cpus), | 81 | range(self.cpus, 5*self.cpus, self.cpus), |
82 | 'Number of tasks per experiment.'), | 82 | 'Number of tasks per experiment.'), |
83 | GenOption('cpus', int, [self.cpus], | 83 | GenOption('cpus', int, [self.cpus], |
diff --git a/gen/mc_generators.py b/gen/mc_generators.py index bbb1ab9..704bcc3 100644 --- a/gen/mc_generators.py +++ b/gen/mc_generators.py | |||
@@ -256,7 +256,7 @@ COLOR_TYPES = ['scheduling', 'locking', 'unmanaged'] | |||
256 | 256 | ||
257 | class ColorMcGenerator(McGenerator): | 257 | class ColorMcGenerator(McGenerator): |
258 | def __init__(self, params = {}): | 258 | def __init__(self, params = {}): |
259 | super(ColorMcGenerator, self).__init__("COLOR-MC", | 259 | super(ColorMcGenerator, self).__init__("MC", |
260 | templates=[TP_TYPE, TP_CHUNK, TP_COLOR_B, TP_COLOR_C], | 260 | templates=[TP_TYPE, TP_CHUNK, TP_COLOR_B, TP_COLOR_C], |
261 | options=self.__make_options(), | 261 | options=self.__make_options(), |
262 | params=self.__extend_params(params)) | 262 | params=self.__extend_params(params)) |
@@ -313,10 +313,7 @@ class ColorMcGenerator(McGenerator): | |||
313 | 'System page size.'), | 313 | 'System page size.'), |
314 | GenOption('wss', [float, int], [.5], | 314 | GenOption('wss', [float, int], [.5], |
315 | 'Task working set sizes. Can be expressed as a fraction ' + | 315 | 'Task working set sizes. Can be expressed as a fraction ' + |
316 | 'of the cache.'), | 316 | 'of the cache.')] |
317 | GenOption('align_unmanaged', [True, False], [True], | ||
318 | 'Place all working sets of unmanaged task systems in '+ | ||
319 | 'the same location, for maximum interference.')] | ||
320 | 317 | ||
321 | 318 | ||
322 | def __get_wss_pages(self, params): | 319 | def __get_wss_pages(self, params): |
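The `wss` option sizes task working sets either absolutely or as a fraction of the cache. A minimal sketch of the fraction-to-pages conversion that `__get_wss_pages` presumably performs (the `cache_size` and `page_size` inputs here are assumptions, not the method's real signature):

```python
def wss_to_pages(wss, cache_size, page_size):
    # Values below 1 are read as a fraction of the cache,
    # anything else as an absolute size in bytes (assumed convention).
    size = wss * cache_size if wss < 1 else wss
    return max(1, int(size // page_size))
```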
diff --git a/parse/sched.py b/parse/sched.py index 1213f0d..1f07751 100644 --- a/parse/sched.py +++ b/parse/sched.py | |||
@@ -2,8 +2,8 @@ import config.config as conf | |||
2 | import os | 2 | import os |
3 | import re | 3 | import re |
4 | import struct | 4 | import struct |
5 | import sys | ||
6 | import subprocess | 5 | import subprocess |
6 | import sys | ||
7 | 7 | ||
8 | from collections import defaultdict,namedtuple | 8 | from collections import defaultdict,namedtuple |
9 | from common import recordtype | 9 | from common import recordtype |
@@ -33,8 +33,9 @@ class TimeTracker: | |||
33 | self.job = record.job | 33 | self.job = record.job |
34 | 34 | ||
35 | # Data stored for each task | 35 | # Data stored for each task |
36 | TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu']) | 36 | TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu', 'level']) |
37 | TaskData = recordtype('TaskData', ['params', 'jobs', 'blocks', 'misses']) | 37 | TaskData = recordtype('TaskData', ['params', 'jobs', 'loads', |
38 | 'blocks', 'misses', 'execs']) | ||
38 | 39 | ||
39 | # Map of event ids to corresponding class, binary format, and processing methods | 40 | # Map of event ids to corresponding class, binary format, and processing methods |
40 | RecordInfo = namedtuple('RecordInfo', ['clazz', 'fmt', 'method']) | 41 | RecordInfo = namedtuple('RecordInfo', ['clazz', 'fmt', 'method']) |
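`TaskParams` stays an immutable `namedtuple`, while `TaskData` uses the mutable `recordtype` (from the repo's `common` module) because its fields are filled in incrementally as trace records arrive. For illustration:

```python
from collections import namedtuple

TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu', 'level'])
params = TaskParams(wcet=10, period=100, cpu=0, level='b')
# params.wcet = 20   # AttributeError: namedtuple fields are read-only

# A recordtype instance behaves the same but allows reassignment,
# which is why TaskData.params can start as 0 and be set later
# once a ParamRecord is processed.
```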
@@ -124,6 +125,7 @@ def read_data(task_dict, fnames): | |||
124 | 125 | ||
125 | def process_completion(task_dict, record): | 126 | def process_completion(task_dict, record): |
126 | task_dict[record.pid].misses.store_time(record) | 127 | task_dict[record.pid].misses.store_time(record) |
128 | task_dict[record.pid].loads += [record.load] | ||
127 | 129 | ||
128 | def process_release(task_dict, record): | 130 | def process_release(task_dict, record): |
129 | data = task_dict[record.pid] | 131 | data = task_dict[record.pid] |
@@ -131,7 +133,9 @@ def process_release(task_dict, record): | |||
131 | data.misses.start_time(record) | 133 | data.misses.start_time(record) |
132 | 134 | ||
133 | def process_param(task_dict, record): | 135 | def process_param(task_dict, record): |
134 | params = TaskParams(record.wcet, record.period, record.partition) | 136 | level = chr(97 + record.level) |
137 | params = TaskParams(record.wcet, record.period, | ||
138 | record.partition, level) | ||
135 | task_dict[record.pid].params = params | 139 | task_dict[record.pid].params = params |
136 | 140 | ||
137 | def process_block(task_dict, record): | 141 | def process_block(task_dict, record): |
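`process_param` turns the numeric criticality level from the trace record into a letter; since `ord('a') == 97`, levels 0, 1, 2 map to 'a', 'b', 'c':

```python
>>> [chr(97 + lvl) for lvl in (0, 1, 2)]
['a', 'b', 'c']
```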
@@ -140,14 +144,27 @@ def process_block(task_dict, record): | |||
140 | def process_resume(task_dict, record): | 144 | def process_resume(task_dict, record): |
141 | task_dict[record.pid].blocks.store_time(record) | 145 | task_dict[record.pid].blocks.store_time(record) |
142 | 146 | ||
147 | def process_switch_to(task_dict, record): | ||
148 | task_dict[record.pid].execs.start_time(record) | ||
149 | |||
150 | def process_switch_away(task_dict, record): | ||
151 | task_dict[record.pid].execs.store_time(record) | ||
152 | |||
143 | register_record('ResumeRecord', 9, process_resume, 'Q8x', ['when']) | 153 | register_record('ResumeRecord', 9, process_resume, 'Q8x', ['when']) |
144 | register_record('BlockRecord', 8, process_block, 'Q8x', ['when']) | 154 | register_record('BlockRecord', 8, process_block, 'Q8x', ['when']) |
145 | register_record('CompletionRecord', 7, process_completion, 'Q8x', ['when']) | 155 | register_record('CompletionRecord', 7, process_completion, 'QQ', ['when', 'load']) |
146 | register_record('ReleaseRecord', 3, process_release, 'QQ', ['release', 'when']) | 156 | register_record('ReleaseRecord', 3, process_release, 'QQ', ['release', 'when']) |
147 | register_record('ParamRecord', 2, process_param, 'IIIcc2x', | 157 | register_record('SwitchToRecord', 5, process_switch_to, 'Q8x', ['when']) |
148 | ['wcet','period','phase','partition', 'task_class']) | 158 | register_record('SwitchAwayRecord', 6, process_switch_away, 'Q8x', ['when']) |
159 | register_record('ParamRecord', 2, process_param, 'IIIcccx', | ||
160 | ['wcet','period','phase','partition', 'task_class', 'level']) | ||
161 | |||
162 | saved_stats = {} | ||
163 | def get_task_data(data_dir, work_dir = None): | ||
164 | '''Parse sched trace files''' | ||
165 | if data_dir in saved_stats: | ||
166 | return saved_stats[data_dir] | ||
149 | 167 | ||
150 | def extract_sched_data(result, data_dir, work_dir): | ||
151 | bin_files = conf.FILES['sched_data'].format(".*") | 168 | bin_files = conf.FILES['sched_data'].format(".*") |
152 | output_file = "%s/out-st" % work_dir | 169 | output_file = "%s/out-st" % work_dir |
153 | 170 | ||
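The format strings are standard Python `struct` syntax: `Q8x` is one unsigned 64-bit field plus 8 pad bytes, `QQ` is two unsigned 64-bit fields, and `IIIcccx` is three unsigned 32-bit fields, three single bytes, and a pad byte, so every record body stays 16 bytes. A quick check:

```python
import struct

for fmt in ('Q8x', 'QQ', 'IIIcc2x', 'IIIcccx'):
    assert struct.calcsize(fmt) == 16

# e.g. decoding a CompletionRecord body ('when' and the new 'load'):
when, load = struct.unpack('QQ', struct.pack('QQ', 1000000, 250000))
```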
@@ -157,24 +174,46 @@ def extract_sched_data(result, data_dir, work_dir): | |||
157 | 174 | ||
158 | # Save an in-english version of the data for debugging | 175 | # Save an in-english version of the data for debugging |
159 | # This is optional and will only be done if 'st_show' is in PATH | 176 | # This is optional and will only be done if 'st_show' is in PATH |
160 | if conf.BINS['st_show']: | 177 | if work_dir and conf.BINS['st_show']: |
161 | cmd_arr = [conf.BINS['st_show']] | 178 | cmd_arr = [conf.BINS['st_show']] |
162 | cmd_arr.extend(bins) | 179 | cmd_arr.extend(bins) |
163 | with open(output_file, "w") as f: | 180 | with open(output_file, "w") as f: |
164 | print("calling %s" % cmd_arr) | 181 | print("calling %s" % cmd_arr) |
165 | subprocess.call(cmd_arr, cwd=data_dir, stdout=f) | 182 | subprocess.call(cmd_arr, cwd=data_dir, stdout=f) |
166 | 183 | ||
167 | task_dict = defaultdict(lambda : | 184 | task_dict = defaultdict(lambda : TaskData(0, 0, [], TimeTracker(),
168 | TaskData(0, 0, TimeTracker(), TimeTracker())) | 185 | TimeTracker(), TimeTracker()))
169 | 186 | ||
170 | # Gather per-task values | 187 | # Gather per-task values |
171 | read_data(task_dict, bins) | 188 | read_data(task_dict, bins) |
172 | 189 | ||
173 | stat_data = {"avg-tard" : [], "max-tard" : [], | 190 | saved_stats[data_dir] = task_dict |
174 | "avg-block" : [], "max-block" : [], | 191 | return task_dict |
175 | "miss-ratio" : []} | 192 | |
193 | class LeveledArray(object): | ||
194 | """Groups statistics by the level of the task to which they apply""" | ||
195 | def __init__(self): | ||
196 | # stat name -> level -> list of values | ||
197 | self.vals = defaultdict(lambda: defaultdict(list)) | ||
198 | |||
199 | def add(self, name, level, value): | ||
200 | if not isinstance(value, list): | ||
201 | value = [value] | ||
202 | self.vals[name][level] += value | ||
176 | 203 | ||
177 | # Group per-task values | 204 | def write_measurements(self, result): |
205 | for stat_name, stat_data in self.vals.iteritems(): | ||
206 | for level, values in stat_data.iteritems(): | ||
207 | if not values: | ||
208 | continue | ||
209 | |||
210 | name = "%s%s" % ("%s-" % level if level else "", stat_name) | ||
211 | result[name] = Measurement(name).from_array(values) | ||
212 | |||
213 | def extract_sched_data(result, data_dir, work_dir): | ||
214 | task_dict = get_task_data(data_dir, work_dir) | ||
215 | |||
216 | stat_data = LeveledArray() | ||
178 | for tdata in task_dict.itervalues(): | 217 | for tdata in task_dict.itervalues(): |
179 | if not tdata.params: | 218 | if not tdata.params: |
180 | # Currently unknown where these invalid tasks come from... | 219 | # Currently unknown where these invalid tasks come from... |
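`LeveledArray` buckets each statistic by the task's criticality level and prefixes the level onto the measurement name on output. Roughly:

```python
stat_data = LeveledArray()
stat_data.add("miss-ratio", 'b', 0.1)          # scalars are wrapped in a list
stat_data.add("miss-ratio", 'b', [0.2, 0.3])   # lists are extended in place
# write_measurements(result) then stores a Measurement named
# "b-miss-ratio" built from [0.1, 0.2, 0.3]
```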
@@ -184,15 +223,59 @@ def extract_sched_data(result, data_dir, work_dir): | |||
184 | # Scale average down to account for jobs with 0 tardiness | 223 | # Scale average down to account for jobs with 0 tardiness |
185 | avg_tard = tdata.misses.avg * miss_ratio | 224 | avg_tard = tdata.misses.avg * miss_ratio |
186 | 225 | ||
187 | stat_data["miss-ratio"].append(miss_ratio) | 226 | level = tdata.params.level |
188 | stat_data["avg-tard" ].append(avg_tard / tdata.params.wcet) | 227 | stat_data.add("miss-ratio", level, miss_ratio) |
189 | stat_data["max-tard" ].append(tdata.misses.max / tdata.params.wcet) | 228 | stat_data.add("avg-tard", level, avg_tard / tdata.params.wcet) |
190 | stat_data["avg-block" ].append(tdata.blocks.avg / NSEC_PER_MSEC) | 229 | stat_data.add("max-tard", level, tdata.misses.max / tdata.params.wcet) |
191 | stat_data["max-block" ].append(tdata.blocks.max / NSEC_PER_MSEC) | 230 | stat_data.add("avg-block", level, tdata.blocks.avg / NSEC_PER_MSEC) |
192 | 231 | stat_data.add("max-block", level, tdata.blocks.max / NSEC_PER_MSEC) | |
193 | # Summarize value groups | 232 | |
194 | for name, data in stat_data.iteritems(): | 233 | stat_data.write_measurements(result) |
195 | if not data: | 234 | |
235 | ScaleData = namedtuple('ScaleData', ['reg_tasks', 'base_tasks']) | ||
236 | def extract_mc_data(result, data_dir, base_dir): | ||
237 | task_dict = get_task_data(data_dir) | ||
238 | base_dict = get_task_data(base_dir) | ||
239 | |||
240 | stat_data = LeveledArray() | ||
241 | |||
242 | # Only level B loads are measured | ||
243 | for tdata in filter(lambda x: x.params.level == 'b', task_dict.itervalues()): | ||
244 | stat_data.add('load', tdata.params.level, tdata.loads) | ||
245 | |||
246 | tasks_by_config = defaultdict(lambda: ScaleData([], [])) | ||
247 | |||
248 | # Add tasks in order of pid to tasks_by_config | ||
249 | # Tasks must be ordered by pid or we can't make 1 to 1 comparisons | ||
250 | # when multiple tasks have the same config in each task set | ||
251 | for tasks, field in ((task_dict, 'reg_tasks'), (base_dict, 'base_tasks')): | ||
252 | for pid in sorted(tasks.keys()): | ||
253 | tdata = tasks[pid] | ||
254 | tlist = getattr(tasks_by_config[tdata.params], field) | ||
255 | tlist += [tdata.execs] | ||
256 | |||
257 | # Write scaling factors | ||
258 | for config, scale_data in tasks_by_config.iteritems(): | ||
259 | if len(scale_data.reg_tasks) != len(scale_data.base_tasks): | ||
260 | # Can't make comparison if different numbers of tasks! | ||
196 | continue | 261 | continue |
197 | result[name] = Measurement(str(name)).from_array(data) | ||
198 | 262 | ||
263 | all_pairs = zip(scale_data.reg_tasks, scale_data.base_tasks) | ||
264 | for reg_execs, base_execs in all_pairs: | ||
265 | if not reg_execs.max or not reg_execs.avg or\ | ||
266 | not base_execs.max or not base_execs.avg: | ||
267 | # This was an issue at some point, not sure if it still is | ||
268 | continue | ||
269 | |||
270 | max_scale = float(base_execs.max) / reg_execs.max | ||
271 | avg_scale = float(base_execs.avg) / reg_execs.avg | ||
272 | |||
273 | if (avg_scale < 1 or max_scale < 1) and config.level == "b": | ||
274 | sys.stderr.write("Task in {} with config {} has <1.0 scale!\n" | ||
275 | .format(data_dir, config)) | ||
276 | continue | ||
277 | |||
278 | stat_data.add('max-scale', config.level, max_scale) | ||
279 | stat_data.add('avg-scale', config.level, avg_scale) | ||
280 | |||
281 | stat_data.write_measurements(result) | ||
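A scaling factor is the ratio of a task's execution time in the base (e.g. unmanaged) system to its execution time in the regular system, computed pairwise for tasks with identical parameters. A worked example of the arithmetic:

```python
# max/avg execution times in ns: base (unmanaged) vs. regular run
base_max, reg_max = 3000000.0, 2000000.0
base_avg, reg_avg = 2400000.0, 2000000.0

max_scale = base_max / reg_max   # 1.5
avg_scale = base_avg / reg_avg   # 1.2
# For a level-b task, values below 1.0 (regular run slower than the
# unmanaged baseline) are flagged on stderr and skipped.
```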
diff --git a/parse_exps.py b/parse_exps.py index c254536..9475cfc 100755 --- a/parse_exps.py +++ b/parse_exps.py | |||
@@ -2,10 +2,12 @@ | |||
2 | from __future__ import print_function | 2 | from __future__ import print_function |
3 | 3 | ||
4 | import config.config as conf | 4 | import config.config as conf |
5 | import copy | ||
5 | import os | 6 | import os |
6 | import parse.ft as ft | 7 | import parse.ft as ft |
7 | import parse.sched as st | 8 | import parse.sched as st |
8 | import pickle | 9 | import pickle |
10 | import re | ||
9 | import shutil as sh | 11 | import shutil as sh |
10 | import sys | 12 | import sys |
11 | import traceback | 13 | import traceback |
@@ -35,6 +37,9 @@ def parse_args(): | |||
35 | parser.add_option('-p', '--processors', default=max(cpu_count() - 1, 1), | 37 | parser.add_option('-p', '--processors', default=max(cpu_count() - 1, 1), |
36 | type='int', dest='processors', | 38 | type='int', dest='processors', |
37 | help='number of threads for processing') | 39 | help='number of threads for processing') |
40 | parser.add_option('-s', '--scale-against', dest='scale_against', | ||
41 | metavar='PARAM=VALUE', default="type=unmanaged", | ||
42 | help='calculate task scaling factors against these configs') | ||
38 | 43 | ||
39 | return parser.parse_args() | 44 | return parser.parse_args() |
40 | 45 | ||
@@ -82,9 +87,9 @@ def load_exps(exp_dirs, cm_builder, force): | |||
82 | 87 | ||
83 | return exps | 88 | return exps |
84 | 89 | ||
85 | def parse_exp(exp_force): | 90 | def parse_exp(exp_force_base): |
86 | # Tupled for multiprocessing | 91 | # Tupled for multiprocessing |
87 | exp, force = exp_force | 92 | exp, force, base_table = exp_force_base |
88 | 93 | ||
89 | result_file = exp.work_dir + "/exp_point.pkl" | 94 | result_file = exp.work_dir + "/exp_point.pkl" |
90 | should_load = not force and os.path.exists(result_file) | 95 | should_load = not force and os.path.exists(result_file) |
@@ -109,6 +114,11 @@ def parse_exp(exp_force): | |||
109 | # Write scheduling statistics into result | 114 | # Write scheduling statistics into result |
110 | st.extract_sched_data(result, exp.path, exp.work_dir) | 115 | st.extract_sched_data(result, exp.path, exp.work_dir) |
111 | 116 | ||
117 | if base_table and exp.params in base_table: | ||
118 | base_exp = base_table[exp.params] | ||
119 | if base_exp != exp: | ||
120 | st.extract_mc_data(result, exp.path, base_exp.path) | ||
121 | |||
112 | with open(result_file, 'wb') as f: | 122 | with open(result_file, 'wb') as f: |
113 | pickle.dump(result, f) | 123 | pickle.dump(result, f) |
114 | except: | 124 | except: |
@@ -116,6 +126,27 @@ def parse_exp(exp_force): | |||
116 | 126 | ||
117 | return (exp, result) | 127 | return (exp, result) |
118 | 128 | ||
129 | def make_base_table(cmd_scale, col_map, exps): | ||
130 | if not cmd_scale: | ||
131 | return None | ||
132 | |||
133 | # Configuration key for task systems used to calculate task | ||
134 | # execution scaling factors | ||
135 | [(param, value)] = re.findall("(.*)=(.*)", cmd_scale) | ||
136 | |||
137 | if param not in col_map: | ||
138 | raise IOError("Base column '%s' not present in any parameters!" % param) | ||
139 | |||
140 | base_map = copy.deepcopy(col_map) | ||
141 | base_table = TupleTable(base_map) | ||
142 | |||
143 | # Fill table with exps who we will scale against | ||
144 | for exp in exps: | ||
145 | if exp.params[param] == value: | ||
146 | base_table[exp.params] = exp | ||
147 | |||
148 | return base_table | ||
149 | |||
119 | def main(): | 150 | def main(): |
120 | opts, args = parse_args() | 151 | opts, args = parse_args() |
121 | 152 | ||
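The regex in `make_base_table` expects exactly one `PARAM=VALUE` pair, which `re.findall` yields as a single-element list of tuples:

```python
import re

[(param, value)] = re.findall("(.*)=(.*)", "type=unmanaged")
# param == 'type', value == 'unmanaged'; any other number of
# matches raises ValueError during unpacking
```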
@@ -135,11 +166,13 @@ def main(): | |||
135 | col_map = builder.build() | 166 | col_map = builder.build() |
136 | result_table = TupleTable(col_map) | 167 | result_table = TupleTable(col_map) |
137 | 168 | ||
169 | base_table = make_base_table(opts.scale_against, col_map, exps) | ||
170 | |||
138 | sys.stderr.write("Parsing data...\n") | 171 | sys.stderr.write("Parsing data...\n") |
139 | 172 | ||
140 | procs = min(len(exps), opts.processors) | 173 | procs = min(len(exps), opts.processors) |
141 | pool = Pool(processes=procs) | 174 | pool = Pool(processes=procs) |
142 | pool_args = zip(exps, [opts.force]*len(exps)) | 175 | pool_args = zip(exps, [opts.force]*len(exps), [base_table]*len(exps)) |
143 | enum = pool.imap_unordered(parse_exp, pool_args, 1) | 176 | enum = pool.imap_unordered(parse_exp, pool_args, 1) |
144 | 177 | ||
145 | try: | 178 | try: |
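`Pool.imap_unordered` hands each worker a single object, so the per-experiment arguments are bundled into `(exp, force, base_table)` tuples by the `zip` above and unpacked at the top of `parse_exp`. A stripped-down, self-contained sketch of the pattern (not the script itself):

```python
from multiprocessing import Pool

def work(args):
    # imap_unordered passes one object per item, so bundle the
    # arguments into a tuple and unpack them in the worker
    exp, force, base_table = args
    return (exp, force)

if __name__ == '__main__':
    exps = ['exp1', 'exp2']
    pool_args = zip(exps, [True] * len(exps), [None] * len(exps))
    pool = Pool(processes=2)
    for res in pool.imap_unordered(work, pool_args, 1):
        print(res)
```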