summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMac Mollison <mollison@cs.unc.edu>2010-02-09 02:54:49 -0500
committerMac Mollison <mollison@cs.unc.edu>2010-02-09 02:54:49 -0500
commitd17333bf1b869323bab559169d202d1a0c1cba73 (patch)
treee2f0da9d7ba6464b1397a2b09c17da71c5960a00
parent91bbf2b4af0cf95c6dc4eccc05341f0119e63a00 (diff)
Fixed bug in trace_reader.py and implemented unit test in response.
trace_reader.py was losing records (1 per CPU), so I created naive_trace_reader.py, which reads records in the simplest way possible but does not sort them. The unit test ensures that both readers get the same number of records.
-rw-r--r--naive_trace_reader.py165
-rwxr-xr-xruntests.py13
-rw-r--r--trace_reader.py2
3 files changed, 179 insertions, 1 deletions
diff --git a/naive_trace_reader.py b/naive_trace_reader.py
new file mode 100644
index 0000000..9a59353
--- /dev/null
+++ b/naive_trace_reader.py
@@ -0,0 +1,165 @@
1###############################################################################
2# Description
3###############################################################################
4
5# trace_reader(files) returns an iterator which produces records
6# OUT OF ORDER from the files given. (the param is a list of files.)
7#
8# The non-naive trace_reader has a lot of complex logic which attempts to
9# produce records in order (even though they are being pulled from multiple
10# files which themselves are only approximately ordered). This trace_reader
11# attempts to be as simple as possible and is used in the unit tests to
12# make sure the total number of records read by the normal trace_reader is
13# the same as the number of records read by this one.
14
15###############################################################################
16# Imports
17###############################################################################
18
19import struct
20
21
22###############################################################################
23# Public functions
24###############################################################################
25
# Generator function returning an iterable over records in a trace file.
def trace_reader(files):
    """Yield every record found in *files*, one file at a time.

    Records come out in on-disk order with no cross-file sorting (see the
    module header); the unit tests only rely on the total record count.
    """
    for path in files:
        with open(path, 'rb') as f:
            while True:
                data = f.read(RECORD_HEAD_SIZE)
                # BUGFIX: a short read means end-of-file (possibly with a
                # truncated trailing record).  The old code only stopped on
                # an empty read, so a 1-23 byte tail survived the one-byte
                # 'b' unpack, failed the full unpack, and aborted the whole
                # program via exit() -- losing the file's record count.
                if len(data) < RECORD_HEAD_SIZE:
                    break
                type_num = struct.unpack_from('b', data)[0]
                rec_type = _get_type(type_num)
                try:
                    values = struct.unpack_from(StHeader.format +
                                                rec_type.format, data)
                except struct.error:
                    # The 'with' block owns the file; no explicit close needed.
                    print("Invalid record detected, stopping.")
                    exit()
                record_dict = dict(zip(rec_type.keys, values))

                # Convert the record_dict into an object
                record = _dict2obj(record_dict)

                # Give it a type name (easier to work with than type number)
                record.type_name = _get_type_name(type_num)

                # All records should have a 'record type' field.
                # e.g. these are 'event's as opposed to 'error's
                record.record_type = "event"

                # If there is no timestamp, set the time to 0
                if 'when' not in record.__dict__:
                    record.when = 0

                yield record
61
62###############################################################################
63# Private functions
64###############################################################################
65
66# Convert a dict into an object
67def _dict2obj(d):
68 class Obj: pass
69 o = Obj()
70 for key in d.keys():
71 o.__dict__[key] = d[key]
72 return o
73
74###############################################################################
75# Trace record data types and accessor functions
76###############################################################################
77
78# Each class below represents a type of event record. The format attribute
79# specifies how to decode the binary record and the keys attribute
80# specifies how to name the pieces of information decoded. Note that all
81# event records have a common initial 24 bytes, represented by the StHeader
82# class.
83
# Bytes consumed per record: the reader pulls exactly this many bytes each
# iteration and unpacks header + type-specific payload from that one chunk.
RECORD_HEAD_SIZE = 24
85
class StHeader:
    """Fields common to every record: type, cpu, pid, job."""
    message = 'The header.'
    format = '<bbhi'
    keys = ['type', 'cpu', 'pid', 'job']
    formatStr = struct.Struct(format)
91
class StNameData:
    """Payload fields: 'name' (16-byte string)."""
    message = 'The name of the executable of this process.'
    format = '16s'
    keys = StHeader.keys + ['name']
    formatStr = struct.Struct(StHeader.format + format)
97
class StParamData:
    """Payload fields: 'wcet', 'period', 'phase', 'partition'."""
    message = 'Regular parameters.'
    format = 'IIIc'
    keys = StHeader.keys + ['wcet', 'period', 'phase', 'partition']
    formatStr = struct.Struct(StHeader.format + format)
103
class StReleaseData:
    """Payload fields: 'when', 'deadline'."""
    message = 'A job was/is going to be released.'
    format = 'QQ'
    keys = StHeader.keys + ['when', 'deadline']
    formatStr = struct.Struct(StHeader.format + format)
109
# Not yet used by Sched Trace
class StAssignedData:
    """Payload fields: 'when', 'target'."""
    message = 'A job was assigned to a CPU.'
    format = 'Qc'
    keys = StHeader.keys + ['when', 'target']
    formatStr = struct.Struct(StHeader.format + format)
116
class StSwitchToData:
    """Payload fields: 'when', 'exec_time'."""
    message = 'A process was switched to on a given CPU.'
    format = 'QI'
    keys = StHeader.keys + ['when', 'exec_time']
    formatStr = struct.Struct(StHeader.format + format)
122
class StSwitchAwayData:
    """Payload fields: 'when', 'exec_time'."""
    message = 'A process was switched away on a given CPU.'
    format = 'QI'
    keys = StHeader.keys + ['when', 'exec_time']
    formatStr = struct.Struct(StHeader.format + format)
128
class StCompletionData:
    """Payload fields: 'when', 'forced?', 'flags' (3 pad bytes skipped)."""
    message = 'A job completed.'
    format = 'Q3x?c'
    keys = StHeader.keys + ['when', 'forced?', 'flags']
    formatStr = struct.Struct(StHeader.format + format)
134
class StBlockData:
    """Payload fields: 'when'."""
    message = 'A task blocks.'
    format = 'Q'
    keys = StHeader.keys + ['when']
    formatStr = struct.Struct(StHeader.format + format)
140
class StResumeData:
    """Payload fields: 'when'."""
    message = 'A task resumes.'
    format = 'Q'
    keys = StHeader.keys + ['when']
    formatStr = struct.Struct(StHeader.format + format)
146
class StSysReleaseData:
    """Payload fields: 'when', 'release'."""
    message = 'All tasks have checked in, task system released by user'
    format = 'QQ'
    keys = StHeader.keys + ['when', 'release']
    formatStr = struct.Struct(StHeader.format + format)
152
# Return the binary data type, given the type_num
def _get_type(type_num):
    """Map a record's numeric type to its decoder class (index 0 unused)."""
    return (None, StNameData, StParamData, StReleaseData, StAssignedData,
            StSwitchToData, StSwitchAwayData, StCompletionData, StBlockData,
            StResumeData, StSysReleaseData)[type_num]
159
160# Return the type name, given the type_num (this is simply a convenience to
161# programmers of other modules)
162def _get_type_name(type_num):
163 type_names = [None,"name","params","release","assign","switch_to",
164 "switch_away","completion","block","resume","sys_release"]
165 return type_names[type_num]
diff --git a/runtests.py b/runtests.py
index f4a9274..1ae29f4 100755
--- a/runtests.py
+++ b/runtests.py
@@ -12,6 +12,7 @@
12############################################################################### 12###############################################################################
13 13
14import trace_reader 14import trace_reader
15import naive_trace_reader
15import os 16import os
16 17
17############################################################################### 18###############################################################################
@@ -39,4 +40,16 @@ def test1():
39 last_time = item.when 40 last_time = item.when
40 return "[SUCCESS]" 41 return "[SUCCESS]"
41 42
43# Does our fancy trace reader get the same number of records as our naive one?
44# (See naive_trace_reader.py for further explanation)
def test2():
    """Check that the fancy and naive readers yield the same record count."""
    fancy_count = sum(1 for _ in trace_reader.trace_reader(files))
    naive_count = sum(1 for _ in naive_trace_reader.trace_reader(files))
    return "[SUCCESS]" if fancy_count == naive_count else "[FAIL]"
53
42print("Test 1: {}".format(test1())) 54print("Test 1: {}".format(test1()))
55print("Test 2: {}".format(test2()))
diff --git a/trace_reader.py b/trace_reader.py
index 7b579b6..31f1812 100644
--- a/trace_reader.py
+++ b/trace_reader.py
@@ -40,7 +40,7 @@ def trace_reader(files):
40 file_iter_buff = [] # file iterator buffers 40 file_iter_buff = [] # file iterator buffers
41 for file in files: 41 for file in files:
42 file_iter = _get_file_iter(file) 42 file_iter = _get_file_iter(file)
43 file_iters.append(_get_file_iter(file)) 43 file_iters.append(file_iter)
44 file_iter_buff.append([next(file_iter)]) 44 file_iter_buff.append([next(file_iter)])
45 45
46 # We keep 100 records in each buffer and then keep the buffer sorted 46 # We keep 100 records in each buffer and then keep the buffer sorted