| author | Bryan Ward <bcw@cs.unc.edu> | 2013-04-17 16:36:58 -0400 |
|---|---|---|
| committer | Bryan Ward <bcw@cs.unc.edu> | 2013-04-17 16:36:58 -0400 |
| commit | b8f3c7a1ccd2fffc54f15e808505a568ce5fa492 | |
| tree | df2a20912c6604d398da508bc818a6bb67b5f90f | /parse |
| parent | b1860fce856c1d579008bc30cbf3513a860c3e69 | |
| parent | 2a4b1c11751632dcc1f47c3c13ab2e2a718b883c | |
Merge branch 'master' of github.com:hermanjl/experiment-scripts
Diffstat (limited to 'parse')
| -rw-r--r-- | parse/sched.py | 61 |
1 file changed, 36 insertions, 25 deletions
```diff
diff --git a/parse/sched.py b/parse/sched.py
index 1213f0d..a38c61b 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -2,7 +2,6 @@ import config.config as conf
 import os
 import re
 import struct
-import sys
 import subprocess
 
 from collections import defaultdict,namedtuple
@@ -27,10 +26,13 @@ class TimeTracker:
         self.begin = 0
         self.job = 0
 
-    def start_time(self, record):
+    def start_time(self, record, time = None):
         '''Start duration of time.'''
-        self.begin = record.when
-        self.job = record.job
+        if not time:
+            self.begin = record.when
+        else:
+            self.begin = time
+        self.job = record.job
 
 # Data stored for each task
 TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu'])
@@ -92,7 +94,12 @@ def make_iterator(fname):
             continue
 
         obj = rdata.clazz(*values)
-        yield (obj, rdata.method)
+
+        if obj.job != 1:
+            yield (obj, rdata.method)
+        else:
+            # Results from the first job are nonsense
+            pass
 
 def read_data(task_dict, fnames):
     '''Read records from @fnames and store per-pid stats in @task_dict.'''
@@ -128,7 +135,8 @@ def process_completion(task_dict, record):
 def process_release(task_dict, record):
     data = task_dict[record.pid]
     data.jobs += 1
-    data.misses.start_time(record)
+    if data.params:
+        data.misses.start_time(record, record.when + data.params.period)
 
 def process_param(task_dict, record):
     params = TaskParams(record.wcet, record.period, record.partition)
```
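The hunks above change how deadline misses are tracked: `TimeTracker.start_time()` now accepts an optional explicit timestamp, and `process_release()` passes the job's absolute deadline (`record.when + data.params.period`) rather than the release time itself. Below is a minimal sketch of that interaction, reusing the `start_time()` code from the diff; the `ReleaseRecord` namedtuple and the numeric values are illustrative assumptions, since the real record classes are generated by `register_record()`.

```python
from collections import namedtuple

# Illustrative stand-in for a parsed release record (the real classes are
# built dynamically by register_record()).
ReleaseRecord = namedtuple('ReleaseRecord', ['when', 'release', 'pid', 'job'])

class TimeTracker:
    def __init__(self):
        self.begin = 0
        self.job = 0

    # start_time() as it appears after this commit: an explicit time overrides
    # the record's own timestamp.
    def start_time(self, record, time = None):
        '''Start duration of time.'''
        if not time:
            self.begin = record.when
        else:
            self.begin = time
        self.job = record.job

period = 10 * 1000 * 1000                      # assumed 10 ms period, in ns
rel = ReleaseRecord(when=5 * 1000 * 1000, release=5 * 1000 * 1000,
                    pid=1234, job=2)

misses = TimeTracker()
# As in process_release(): begin now holds the absolute deadline, so a later
# completion can be compared against the deadline rather than the release.
misses.start_time(rel, rel.when + period)
print(misses.begin)                            # 15000000
```

One side effect of `if not time:` is that an explicit time of 0 falls back to `record.when`; with nanosecond trace timestamps that case should not arise in practice. The diff continues below with the refactor of `extract_sched_data()`.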
```diff
@@ -143,36 +151,39 @@ def process_resume(task_dict, record):
 register_record('ResumeRecord', 9, process_resume, 'Q8x', ['when'])
 register_record('BlockRecord', 8, process_block, 'Q8x', ['when'])
 register_record('CompletionRecord', 7, process_completion, 'Q8x', ['when'])
-register_record('ReleaseRecord', 3, process_release, 'QQ', ['release', 'when'])
+register_record('ReleaseRecord', 3, process_release, 'QQ', ['when', 'release'])
 register_record('ParamRecord', 2, process_param, 'IIIcc2x',
                 ['wcet','period','phase','partition', 'task_class'])
 
-def extract_sched_data(result, data_dir, work_dir):
+def create_task_dict(data_dir, work_dir = None):
+    '''Parse sched trace files'''
     bin_files = conf.FILES['sched_data'].format(".*")
     output_file = "%s/out-st" % work_dir
 
-    bins = ["%s/%s" % (data_dir,f) for f in os.listdir(data_dir) if re.match(bin_files, f)]
-    if not len(bins):
-        return
+    task_dict = defaultdict(lambda :
+                            TaskData(None, 1, TimeTracker(), TimeTracker()))
+
+    bin_names = [f for f in os.listdir(data_dir) if re.match(bin_files, f)]
+    if not len(bin_names):
+        return task_dict
 
     # Save an in-english version of the data for debugging
     # This is optional and will only be done if 'st_show' is in PATH
     if conf.BINS['st_show']:
         cmd_arr = [conf.BINS['st_show']]
-        cmd_arr.extend(bins)
+        cmd_arr.extend(bin_names)
         with open(output_file, "w") as f:
-            print("calling %s" % cmd_arr)
             subprocess.call(cmd_arr, cwd=data_dir, stdout=f)
 
-    task_dict = defaultdict(lambda :
-                            TaskData(0, 0, TimeTracker(), TimeTracker()))
-
     # Gather per-task values
-    read_data(task_dict, bins)
+    bin_paths = ["%s/%s" % (data_dir,f) for f in bin_names]
+    read_data(task_dict, bin_paths)
 
-    stat_data = {"avg-tard" : [], "max-tard" : [],
-                 "avg-block" : [], "max-block" : [],
-                 "miss-ratio" : []}
+    return task_dict
+
+def extract_sched_data(result, data_dir, work_dir):
+    task_dict = create_task_dict(data_dir, work_dir)
+    stat_data = defaultdict(list)
 
     # Group per-task values
     for tdata in task_dict.itervalues():
```
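The hunk above splits trace parsing out of `extract_sched_data()` into `create_task_dict()`, which always returns a (possibly empty) `defaultdict` of per-task data, so the parsed records can be reused without computing summary statistics. A hypothetical caller might look like the following sketch; the directory paths and the printed fields are assumptions for illustration only.

```python
import parse.sched as sched

# Assumed experiment directories: data_dir holds the sched_data binaries,
# work_dir receives the optional human-readable out-st dump.
data_dir = "run-data/exp-1"
work_dir = "run-data/exp-1/tmp"

task_dict = sched.create_task_dict(data_dir, work_dir)

for pid, tdata in task_dict.iteritems():        # Python 2, matching sched.py
    if not tdata.params:                        # params are filled in by ParamRecord
        continue
    print("%d: %d jobs, %d misses, max tardiness %d ns" %
          (pid, tdata.jobs, tdata.misses.num, tdata.misses.max))
```

The final hunk, which reorders the statistics loop, follows.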
```diff
@@ -181,18 +192,18 @@ def extract_sched_data(result, data_dir, work_dir):
             continue
 
         miss_ratio = float(tdata.misses.num) / tdata.jobs
+        stat_data["miss-ratio"].append(float(tdata.misses.num) / tdata.jobs)
+
+        stat_data["max-tard" ].append(tdata.misses.max / tdata.params.wcet)
         # Scale average down to account for jobs with 0 tardiness
         avg_tard = tdata.misses.avg * miss_ratio
-
-        stat_data["miss-ratio"].append(miss_ratio)
         stat_data["avg-tard" ].append(avg_tard / tdata.params.wcet)
-        stat_data["max-tard" ].append(tdata.misses.max / tdata.params.wcet)
+
         stat_data["avg-block" ].append(tdata.blocks.avg / NSEC_PER_MSEC)
         stat_data["max-block" ].append(tdata.blocks.max / NSEC_PER_MSEC)
 
     # Summarize value groups
     for name, data in stat_data.iteritems():
-        if not data:
+        if not data or not sum(data):
             continue
         result[name] = Measurement(str(name)).from_array(data)
-
```
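The reordering in the loop above does not change the statistics themselves. A worked example with made-up numbers shows how the per-task values are normalized: tardiness is averaged over all jobs by scaling the average over missed jobs by the miss ratio, then expressed in multiples of the WCET, while blocking times are reported in milliseconds.

```python
NSEC_PER_MSEC = 1000000.0                # 1 ms = 10**6 ns, as used in sched.py

# Made-up per-task numbers: 100 jobs, 20 of them tardy.
jobs, num_misses = 100, 20
misses_avg, misses_max = 4.0e6, 9.0e6    # ns, over the tardy jobs only
blocks_avg, blocks_max = 0.5e6, 2.0e6    # ns
wcet = 3.0e6                             # ns

miss_ratio = float(num_misses) / jobs    # miss-ratio = 0.2
avg_tard = misses_avg * miss_ratio       # scale down: average over *all* jobs

print(avg_tard / wcet)                   # avg-tard  ~= 0.267
print(misses_max / wcet)                 # max-tard   = 3.0
print(blocks_avg / NSEC_PER_MSEC)        # avg-block  = 0.5 ms
print(blocks_max / NSEC_PER_MSEC)        # max-block  = 2.0 ms
```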

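For reference, the `ReleaseRecord` registration earlier in the diff swaps the field names for the two 64-bit values in the `'QQ'` payload: the first value is now read as `when` and the second as `release`. The sketch below shows what that implies for decoding, assuming the names map to the unpacked values in order (as `rdata.clazz(*values)` suggests); the record header is omitted and the timestamps are made up.

```python
import struct
from collections import namedtuple

# Assumed simplified payload: two unsigned 64-bit timestamps, no header.
ReleaseRecord = namedtuple('ReleaseRecord', ['when', 'release'])

payload = struct.pack('QQ', 5000000, 4000000)
values = struct.unpack('QQ', payload)

rec = ReleaseRecord(*values)
print("when=%d release=%d" % (rec.when, rec.release))   # when=5000000 release=4000000
```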