Diffstat (limited to 'parse/sched.py')
-rw-r--r--  parse/sched.py | 84
1 file changed, 60 insertions(+), 24 deletions(-)
diff --git a/parse/sched.py b/parse/sched.py
index b56324b..4933037 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -5,35 +5,55 @@ import struct
 import subprocess
 
 from collections import defaultdict,namedtuple
-from common import recordtype
+from common import recordtype,log_once
 from point import Measurement
 from ctypes import *
 
 class TimeTracker:
     '''Store stats for durations of time demarcated by sched_trace records.'''
     def __init__(self):
-        self.begin = self.avg = self.max = self.num = self.job = 0
+        self.begin = self.avg = self.max = self.num = self.next_job = 0
 
-    def store_time(self, record):
+        # Count of times the job in start_time matched that in store_time
+        self.matches = 0
+        # And the times it didn't
+        self.disjoints = 0
+
+        # Measurements are recorded in store_time using the previous matching
+        # record which was passed to store_time. This way, the last record for
+        # any task is always skipped
+        self.last_record = None
+
+    def store_time(self, next_record):
         '''End duration of time.'''
-        dur = record.when - self.begin
+        dur = (self.last_record.when - self.begin) if self.last_record else -1
 
-        if self.job == record.job and dur > 0:
-            self.max = max(self.max, dur)
-            self.avg *= float(self.num / (self.num + 1))
-            self.num += 1
-            self.avg += dur / float(self.num)
+        if self.next_job == next_record.job:
+            self.last_record = next_record
 
-        self.begin = 0
-        self.job = 0
+            if self.last_record:
+                self.matches += 1
+
+            if dur > 0:
+                self.max = max(self.max, dur)
+                self.avg *= float(self.num / (self.num + 1))
+                self.num += 1
+                self.avg += dur / float(self.num)
+
+            self.begin = 0
+            self.next_job = 0
+        else:
+            self.disjoints += 1
 
     def start_time(self, record, time = None):
         '''Start duration of time.'''
-        if not time:
-            self.begin = record.when
-        else:
-            self.begin = time
-        self.job = record.job
+        if self.last_record:
+            if not time:
+                self.begin = self.last_record.when
+            else:
+                self.begin = time
+
+        self.next_job = record.job
 
 # Data stored for each task
 TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu'])
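The rewritten TimeTracker defers each measurement by one sched_trace record: store_time only closes a duration when the incoming record's job matches next_job, and the matches/disjoints counters let callers estimate how many records were dropped. One caveat in the committed arithmetic: float(self.num / (self.num + 1)) divides two integers before converting, which is always 0.0 under Python 2. A minimal sketch of the running-average update as presumably intended, with the division parenthesized correctly:

    # Incremental-mean bookkeeping as TimeTracker intends it; 'dur' is the
    # duration just closed, and (avg, num, mx) mirror self.avg/self.num/self.max.
    def update_stats(avg, num, mx, dur):
        avg *= num / float(num + 1)   # scale the old mean of n samples to n+1
        num += 1
        avg += dur / float(num)       # fold the new duration into the mean
        mx = max(mx, dur)
        return avg, num, mx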
@@ -203,6 +223,12 @@ def create_task_dict(data_dir, work_dir = None):
 
     return task_dict
 
+LOSS_MSG = """Found task missing more than %d%% of its scheduling records.
+These won't be included in scheduling statistics!"""%(100*conf.MAX_RECORD_LOSS)
+SKIP_MSG = """Measurement '%s' has no non-zero values.
+Measurements like these are not included in scheduling statistics.
+If a measurement is missing, this is why."""
+
 def extract_sched_data(result, data_dir, work_dir):
     task_dict = create_task_dict(data_dir, work_dir)
     stat_data = defaultdict(list)
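common.log_once itself is not part of this patch; judging from the call sites in the next hunk, log_once(LOSS_MSG) and log_once(SKIP_MSG, SKIP_MSG % name), it takes a deduplication key plus an optional message and emits each key at most once per run. A hypothetical stand-in under that assumption:

    # Hypothetical sketch of common.log_once, inferred from its call sites
    # in this patch; the real helper in common.py may differ.
    _seen = set()
    def log_once(key, msg=None):
        if key not in _seen:
            _seen.add(key)
            print(msg if msg is not None else key)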
@@ -213,19 +239,29 @@ def extract_sched_data(result, data_dir, work_dir):
             # Currently unknown where these invalid tasks come from...
             continue
 
-        miss_ratio = float(tdata.misses.num) / tdata.jobs
-        stat_data["miss-ratio"].append(float(tdata.misses.num) / tdata.jobs)
+        miss = tdata.misses
+
+        record_loss = float(miss.disjoints)/(miss.matches + miss.disjoints)
+        stat_data["record-loss"].append(record_loss)
+
+        if record_loss > conf.MAX_RECORD_LOSS:
+            log_once(LOSS_MSG)
+            continue
+
+        miss_ratio = float(miss.num) / miss.matches
+        avg_tard = miss.avg * miss_ratio
+
+        stat_data["miss-ratio" ].append(miss_ratio)
 
-        stat_data["max-tard" ].append(tdata.misses.max / tdata.params.wcet)
-        # Scale average down to account for jobs with 0 tardiness
-        avg_tard = tdata.misses.avg * miss_ratio
-        stat_data["avg-tard" ].append(avg_tard / tdata.params.wcet)
+        stat_data["max-tard"].append(miss.max / tdata.params.period)
+        stat_data["avg-tard"].append(avg_tard / tdata.params.period)
 
-        stat_data["avg-block" ].append(tdata.blocks.avg / NSEC_PER_MSEC)
-        stat_data["max-block" ].append(tdata.blocks.max / NSEC_PER_MSEC)
+        stat_data["avg-block"].append(tdata.blocks.avg / NSEC_PER_MSEC)
+        stat_data["max-block"].append(tdata.blocks.max / NSEC_PER_MSEC)
 
     # Summarize value groups
     for name, data in stat_data.iteritems():
         if not data or not sum(data):
+            log_once(SKIP_MSG, SKIP_MSG % name)
             continue
         result[name] = Measurement(str(name)).from_array(data)
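Taken together, the loop now gates each task's statistics on its record loss before computing miss ratios, and normalizes tardiness by the task's period rather than its WCET. A worked example of the arithmetic, with invented numbers:

    # Worked example of the per-task statistics above; all values invented.
    matches, disjoints = 95, 5               # from TimeTracker bookkeeping
    record_loss = float(disjoints) / (matches + disjoints)   # 0.05

    if record_loss <= 0.2:                   # stand-in for conf.MAX_RECORD_LOSS
        num, avg, mx = 10, 2e6, 5e6          # tardy jobs; avg/max tardiness (ns)
        period = 10e6                        # task period (ns)
        miss_ratio = float(num) / matches    # ~0.105
        avg_tard = avg * miss_ratio          # scale for jobs with 0 tardiness
        print(miss_ratio, mx / period, avg_tard / period)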