diff options
author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-04-18 17:42:23 -0400 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-04-18 17:42:23 -0400 |
commit | c0405807b7f7f75fa1cf93265e6b2a739e449596 (patch) | |
tree | 6e5d2001850946b46589389b9e48416a667dd19e | |
parent | b8f3c7a1ccd2fffc54f15e808505a568ce5fa492 (diff) |
Switched sched_trace data to verbose ctypes structs.
-rw-r--r-- | parse/sched.py | 136 |
1 file changed, 79 insertions, 57 deletions
diff --git a/parse/sched.py b/parse/sched.py index a38c61b..b56324b 100644 --- a/parse/sched.py +++ b/parse/sched.py | |||
@@ -7,6 +7,7 @@ import subprocess | |||
7 | from collections import defaultdict,namedtuple | 7 | from collections import defaultdict,namedtuple |
8 | from common import recordtype | 8 | from common import recordtype |
9 | from point import Measurement | 9 | from point import Measurement |
10 | from ctypes import * | ||
10 | 11 | ||
11 | class TimeTracker: | 12 | class TimeTracker: |
12 | '''Store stats for durations of time demarcated by sched_trace records.''' | 13 | '''Store stats for durations of time demarcated by sched_trace records.''' |
@@ -38,33 +39,30 @@ class TimeTracker: | |||
38 | TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu']) | 39 | TaskParams = namedtuple('TaskParams', ['wcet', 'period', 'cpu']) |
39 | TaskData = recordtype('TaskData', ['params', 'jobs', 'blocks', 'misses']) | 40 | TaskData = recordtype('TaskData', ['params', 'jobs', 'blocks', 'misses']) |
40 | 41 | ||
41 | # Map of event ids to corresponding class, binary format, and processing methods | 42 | # Map of event ids to corresponding class and format |
42 | RecordInfo = namedtuple('RecordInfo', ['clazz', 'fmt', 'method']) | 43 | record_map = {} |
43 | record_map = [0]*10 | ||
44 | 44 | ||
45 | # Common to all records | ||
46 | HEADER_FORMAT = '<bbhi' | ||
47 | HEADER_FIELDS = ['type', 'cpu', 'pid', 'job'] | ||
48 | RECORD_SIZE = 24 | 45 | RECORD_SIZE = 24 |
49 | |||
50 | NSEC_PER_MSEC = 1000000 | 46 | NSEC_PER_MSEC = 1000000 |
51 | 47 | ||
52 | def register_record(name, id, method, fmt, fields): | 48 | def register_record(id, clazz): |
53 | '''Create record description from @fmt and @fields and map to @id, using | 49 | fields = clazz.FIELDS |
54 | @method to process parsed record.''' | ||
55 | # Format of binary data (see python struct documentation) | ||
56 | rec_fmt = HEADER_FORMAT + fmt | ||
57 | 50 | ||
58 | # Corresponding field data | 51 | fsize = lambda fields : sum([sizeof(list(f)[1]) for f in fields]) |
59 | rec_fields = HEADER_FIELDS + fields | 52 | diff = RECORD_SIZE - fsize(SchedRecord.FIELDS) - fsize(fields) |
60 | if "when" not in rec_fields: # Force a "when" field for everything | ||
61 | rec_fields += ["when"] | ||
62 | 53 | ||
63 | # Create mutable class with the given fields | 54 | # Create extra padding fields to make record the proper size |
64 | field_class = recordtype(name, list(rec_fields)) | 55 | # Creating one big field of c_uint64 and giving it a size of 8*diff |
65 | clazz = type(name, (field_class, object), {}) | 56 | # _should_ work, but doesn't. This is an uglier way of accomplishing |
57 | # the same goal | ||
58 | for d in range(diff): | ||
59 | fields += [("extra%d" % d, c_char)] | ||
66 | 60 | ||
67 | record_map[id] = RecordInfo(clazz, rec_fmt, method) | 61 | # Create structure with fields and methods of clazz |
62 | clazz2 = type("Dummy%d" % id, (LittleEndianStructure,clazz), | ||
63 | {'_fields_': SchedRecord.FIELDS + fields, | ||
64 | '_pack_' : 1}) | ||
65 | record_map[id] = clazz2 | ||
68 | 66 | ||
69 | def make_iterator(fname): | 67 | def make_iterator(fname): |
70 | '''Iterate over (parsed record, processing method) in a | 68 | '''Iterate over (parsed record, processing method) in a |
@@ -74,7 +72,6 @@ def make_iterator(fname): | |||
74 | return | 72 | return |
75 | 73 | ||
76 | f = open(fname, 'rb') | 74 | f = open(fname, 'rb') |
77 | max_type = len(record_map) | ||
78 | 75 | ||
79 | while True: | 76 | while True: |
80 | data = f.read(RECORD_SIZE) | 77 | data = f.read(RECORD_SIZE) |
@@ -84,19 +81,15 @@ def make_iterator(fname): | |||
84 | except struct.error: | 81 | except struct.error: |
85 | break | 82 | break |
86 | 83 | ||
87 | rdata = record_map[type_num] if type_num <= max_type else 0 | 84 | if type_num not in record_map: |
88 | if not rdata: | ||
89 | continue | 85 | continue |
90 | 86 | ||
91 | try: | 87 | clazz = record_map[type_num] |
92 | values = struct.unpack_from(rdata.fmt, data) | 88 | obj = clazz() |
93 | except struct.error: | 89 | obj.fill(data) |
94 | continue | ||
95 | |||
96 | obj = rdata.clazz(*values) | ||
97 | 90 | ||
98 | if obj.job != 1: | 91 | if obj.job != 1: |
99 | yield (obj, rdata.method) | 92 | yield obj |
100 | else: | 93 | else: |
101 | # Results from the first job are nonsense | 94 | # Results from the first job are nonsense |
102 | pass | 95 | pass |
@@ -105,55 +98,84 @@ def read_data(task_dict, fnames): | |||
105 | '''Read records from @fnames and store per-pid stats in @task_dict.''' | 98 | '''Read records from @fnames and store per-pid stats in @task_dict.''' |
106 | buff = [] | 99 | buff = [] |
107 | 100 | ||
101 | def get_time(record): | ||
102 | return record.when if hasattr(record, 'when') else 0 | ||
103 | |||
108 | def add_record(itera): | 104 | def add_record(itera): |
109 | # Ordered insertion into buff | 105 | # Ordered insertion into buff |
110 | try: | 106 | try: |
111 | next_ret = itera.next() | 107 | arecord = itera.next() |
112 | except StopIteration: | 108 | except StopIteration: |
113 | return | 109 | return |
114 | 110 | ||
115 | arecord, method = next_ret | ||
116 | i = 0 | 111 | i = 0 |
117 | for (i, (brecord, m, t)) in enumerate(buff): | 112 | for (i, (brecord, _)) in enumerate(buff): |
118 | if brecord.when > arecord.when: | 113 | if get_time(brecord) > get_time(arecord): |
119 | break | 114 | break |
120 | buff.insert(i, (arecord, method, itera)) | 115 | buff.insert(i, (arecord, itera)) |
121 | 116 | ||
122 | for fname in fnames: | 117 | for fname in fnames: |
123 | itera = make_iterator(fname) | 118 | itera = make_iterator(fname) |
124 | add_record(itera) | 119 | add_record(itera) |
125 | 120 | ||
126 | while buff: | 121 | while buff: |
127 | (record, method, itera) = buff.pop(0) | 122 | record, itera = buff.pop(0) |
128 | 123 | ||
129 | add_record(itera) | 124 | add_record(itera) |
130 | method(task_dict, record) | 125 | record.process(task_dict) |
126 | |||
127 | class SchedRecord(object): | ||
128 | # Subclasses will have their FIELDs merged into this one | ||
129 | FIELDS = [('type', c_uint8), ('cpu', c_uint8), | ||
130 | ('pid', c_uint16), ('job', c_uint32)] | ||
131 | |||
132 | def fill(self, data): | ||
133 | memmove(addressof(self), data, RECORD_SIZE) | ||
134 | |||
135 | def process(self, task_dict): | ||
136 | raise NotImplementedError() | ||
137 | |||
138 | class ParamRecord(SchedRecord): | ||
139 | FIELDS = [('wcet', c_uint32), ('period', c_uint32), | ||
140 | ('phase', c_uint32), ('partition', c_uint8)] | ||
141 | |||
142 | def process(self, task_dict): | ||
143 | params = TaskParams(self.wcet, self.period, self.partition) | ||
144 | task_dict[self.pid].params = params | ||
145 | |||
146 | class ReleaseRecord(SchedRecord): | ||
147 | FIELDS = [('when', c_uint64), ('release', c_uint64)] | ||
148 | |||
149 | def process(self, task_dict): | ||
150 | data = task_dict[self.pid] | ||
151 | data.jobs += 1 | ||
152 | if data.params: | ||
153 | data.misses.start_time(self, self.when + data.params.period) | ||
154 | |||
155 | class CompletionRecord(SchedRecord): | ||
156 | FIELDS = [('when', c_uint64)] | ||
131 | 157 | ||
132 | def process_completion(task_dict, record): | 158 | def process(self, task_dict): |
133 | task_dict[record.pid].misses.store_time(record) | 159 | task_dict[self.pid].misses.store_time(self) |
134 | 160 | ||
135 | def process_release(task_dict, record): | 161 | class BlockRecord(SchedRecord): |
136 | data = task_dict[record.pid] | 162 | FIELDS = [('when', c_uint64)] |
137 | data.jobs += 1 | ||
138 | if data.params: | ||
139 | data.misses.start_time(record, record.when + data.params.period) | ||
140 | 163 | ||
141 | def process_param(task_dict, record): | 164 | def process(self, task_dict): |
142 | params = TaskParams(record.wcet, record.period, record.partition) | 165 | task_dict[self.pid].blocks.start_time(self) |
143 | task_dict[record.pid].params = params | ||
144 | 166 | ||
145 | def process_block(task_dict, record): | 167 | class ResumeRecord(SchedRecord): |
146 | task_dict[record.pid].blocks.start_time(record) | 168 | FIELDS = [('when', c_uint64)] |
147 | 169 | ||
148 | def process_resume(task_dict, record): | 170 | def process(self, task_dict): |
149 | task_dict[record.pid].blocks.store_time(record) | 171 | task_dict[self.pid].blocks.store_time(self) |
150 | 172 | ||
151 | register_record('ResumeRecord', 9, process_resume, 'Q8x', ['when']) | 173 | # Map records to sched_trace ids (see include/litmus/sched_trace.h) |
152 | register_record('BlockRecord', 8, process_block, 'Q8x', ['when']) | 174 | register_record(2, ParamRecord) |
153 | register_record('CompletionRecord', 7, process_completion, 'Q8x', ['when']) | 175 | register_record(3, ReleaseRecord) |
154 | register_record('ReleaseRecord', 3, process_release, 'QQ', ['when', 'release']) | 176 | register_record(7, CompletionRecord) |
155 | register_record('ParamRecord', 2, process_param, 'IIIcc2x', | 177 | register_record(8, BlockRecord) |
156 | ['wcet','period','phase','partition', 'task_class']) | 178 | register_record(9, ResumeRecord) |
157 | 179 | ||
158 | def create_task_dict(data_dir, work_dir = None): | 180 | def create_task_dict(data_dir, work_dir = None): |
159 | '''Parse sched trace files''' | 181 | '''Parse sched trace files''' |