about summary refs log tree commit diff stats
path: root/parse/sched.py
diff options
context:
space:
mode:
Diffstat (limited to 'parse/sched.py')
-rw-r--r-- parse/sched.py 46
1 files changed, 27 insertions, 19 deletions
diff --git a/parse/sched.py b/parse/sched.py
index 1213f0d..13c7ca2 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -2,7 +2,6 @@ import config.config as conf
2import os 2import os
3import re 3import re
4import struct 4import struct
5import sys
6import subprocess 5import subprocess
7 6
8from collections import defaultdict,namedtuple 7from collections import defaultdict,namedtuple
@@ -92,7 +91,12 @@ def make_iterator(fname):
92 continue 91 continue
93 92
94 obj = rdata.clazz(*values) 93 obj = rdata.clazz(*values)
95 yield (obj, rdata.method) 94
95 if obj.job != 1:
96 yield (obj, rdata.method)
97 else:
98 # Results from the first job are nonsense
99 pass
96 100
97def read_data(task_dict, fnames): 101def read_data(task_dict, fnames):
98 '''Read records from @fnames and store per-pid stats in @task_dict.''' 102 '''Read records from @fnames and store per-pid stats in @task_dict.'''
@@ -147,32 +151,35 @@ register_record('ReleaseRecord', 3, process_release, 'QQ', ['release', 'when'])
147register_record('ParamRecord', 2, process_param, 'IIIcc2x', 151register_record('ParamRecord', 2, process_param, 'IIIcc2x',
148 ['wcet','period','phase','partition', 'task_class']) 152 ['wcet','period','phase','partition', 'task_class'])
149 153
150def extract_sched_data(result, data_dir, work_dir): 154def create_task_dict(data_dir, work_dir = None):
155 '''Parse sched trace files'''
151 bin_files = conf.FILES['sched_data'].format(".*") 156 bin_files = conf.FILES['sched_data'].format(".*")
152 output_file = "%s/out-st" % work_dir 157 output_file = "%s/out-st" % work_dir
153 158
154 bins = ["%s/%s" % (data_dir,f) for f in os.listdir(data_dir) if re.match(bin_files, f)] 159 task_dict = defaultdict(lambda :
155 if not len(bins): 160 TaskData(None, 1, TimeTracker(), TimeTracker()))
156 return 161
162 bin_names = [f for f in os.listdir(data_dir) if re.match(bin_files, f)]
163 if not len(bin_names):
164 return task_dict
157 165
158 # Save an in-english version of the data for debugging 166 # Save an in-english version of the data for debugging
159 # This is optional and will only be done if 'st_show' is in PATH 167 # This is optional and will only be done if 'st_show' is in PATH
160 if conf.BINS['st_show']: 168 if conf.BINS['st_show']:
161 cmd_arr = [conf.BINS['st_show']] 169 cmd_arr = [conf.BINS['st_show']]
162 cmd_arr.extend(bins) 170 cmd_arr.extend(bin_names)
163 with open(output_file, "w") as f: 171 with open(output_file, "w") as f:
164 print("calling %s" % cmd_arr)
165 subprocess.call(cmd_arr, cwd=data_dir, stdout=f) 172 subprocess.call(cmd_arr, cwd=data_dir, stdout=f)
166 173
167 task_dict = defaultdict(lambda :
168 TaskData(0, 0, TimeTracker(), TimeTracker()))
169
170 # Gather per-task values 174 # Gather per-task values
171 read_data(task_dict, bins) 175 bin_paths = ["%s/%s" % (data_dir,f) for f in bin_names]
176 read_data(task_dict, bin_paths)
172 177
173 stat_data = {"avg-tard" : [], "max-tard" : [], 178 return task_dict
174 "avg-block" : [], "max-block" : [], 179
175 "miss-ratio" : []} 180def extract_sched_data(result, data_dir, work_dir):
181 task_dict = create_task_dict(data_dir, work_dir)
182 stat_data = defaultdict(list)
176 183
177 # Group per-task values 184 # Group per-task values
178 for tdata in task_dict.itervalues(): 185 for tdata in task_dict.itervalues():
@@ -181,18 +188,19 @@ def extract_sched_data(result, data_dir, work_dir):
181 continue 188 continue
182 189
183 miss_ratio = float(tdata.misses.num) / tdata.jobs 190 miss_ratio = float(tdata.misses.num) / tdata.jobs
191 stat_data["miss-ratio"].append(float(tdata.misses.num) / tdata.jobs)
192
193 stat_data["max-tard" ].append(tdata.misses.max / tdata.params.wcet)
184 # Scale average down to account for jobs with 0 tardiness 194 # Scale average down to account for jobs with 0 tardiness
185 avg_tard = tdata.misses.avg * miss_ratio 195 avg_tard = tdata.misses.avg * miss_ratio
186
187 stat_data["miss-ratio"].append(miss_ratio)
188 stat_data["avg-tard" ].append(avg_tard / tdata.params.wcet) 196 stat_data["avg-tard" ].append(avg_tard / tdata.params.wcet)
189 stat_data["max-tard" ].append(tdata.misses.max / tdata.params.wcet) 197
190 stat_data["avg-block" ].append(tdata.blocks.avg / NSEC_PER_MSEC) 198 stat_data["avg-block" ].append(tdata.blocks.avg / NSEC_PER_MSEC)
191 stat_data["max-block" ].append(tdata.blocks.max / NSEC_PER_MSEC) 199 stat_data["max-block" ].append(tdata.blocks.max / NSEC_PER_MSEC)
192 200
193 # Summarize value groups 201 # Summarize value groups
194 for name, data in stat_data.iteritems(): 202 for name, data in stat_data.iteritems():
195 if not data: 203 if not data or not sum(data):
196 continue 204 continue
197 result[name] = Measurement(str(name)).from_array(data) 205 result[name] = Measurement(str(name)).from_array(data)
198 206