author    Gary Bressler <garybressler@nc.rr.com>  2010-03-20 16:43:26 -0400
committer Gary Bressler <garybressler@nc.rr.com>  2010-03-20 16:43:26 -0400
commit    58573c2a7e7595b952f6f23f22c3b9ddab36748d (patch)
tree      69b0105638c3a50d3a1185da314728ccad21ddfb
parent    a495421200df47243d546be7078618fcbe6d2ea4 (diff)
More cleanup
-rwxr-xr-x  reader/sample_script.py          41
-rw-r--r--  unit_trace/viz/trace_reader.py  259
2 files changed, 0 insertions, 300 deletions
diff --git a/reader/sample_script.py b/reader/sample_script.py
deleted file mode 100755
index 676cfac..0000000
--- a/reader/sample_script.py
+++ /dev/null
@@ -1,41 +0,0 @@
#!/usr/bin/python

# This is a sample script for using the tool. I would recommend copying
# this and modifying it to suit your needs for a particular test. Make
# sure you redirect the output to a file (e.g. ./sample_script.py > output).

# Import the modules we need. You should not need to know about
# their internals.
import trace_reader
import sanitizer
import gedf_test
import stats
import stdout_printer

# Specify your trace files
g6 = [
    '../traces/st-g6-0.bin',
    '../traces/st-g6-1.bin',
    '../traces/st-g6-2.bin',
    '../traces/st-g6-3.bin',
]

# Here is an example of a custom filter function. It removes from the
# error stream all inversion_end records indicating an inversion of less
# than 4000000 time units. You can then grep the output for 'Inversion end'
# and find only the errors for particularly long inversions. This stage is
# commented out in the pipeline (below) since you probably don't want it
# in general.
def my_filter(record):
    if record.record_type == 'error' and record.type_name == 'inversion_end':
        if record.job.inversion_end - record.job.inversion_start < 4000000:
            return False
    return True

# Pipeline
stream = trace_reader.trace_reader(g6)  # Read events from traces
stream = sanitizer.sanitizer(stream)    # Remove garbage events
stream = gedf_test.gedf_test(stream)    # Produce G-EDF error records
stream = stats.stats(stream)            # Produce a statistics record
#stream = filter(my_filter, stream)     # Filter some records before printing
stdout_printer.stdout_printer(stream)   # Print records to stdout
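
Each stage in the pipeline above consumes a record iterator and yields records, so custom stages compose the same way as the built-in ones. A minimal sketch of an additional stage, assuming the record attributes used in my_filter above (the stage name and the long_inversion attribute are hypothetical):

    # Hypothetical stage: tag long inversions rather than filtering records.
    # Like the stages above, it consumes a record stream and yields records.
    def tag_long_inversions(stream, threshold=4000000):
        for record in stream:
            if (record.record_type == 'error'
                    and record.type_name == 'inversion_end'
                    and record.job.inversion_end - record.job.inversion_start >= threshold):
                record.long_inversion = True  # hypothetical attribute
            yield record

    # Usage: insert between the gedf_test and stats stages, e.g.
    # stream = tag_long_inversions(stream)
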
diff --git a/unit_trace/viz/trace_reader.py b/unit_trace/viz/trace_reader.py
deleted file mode 100644
index 831a06e..0000000
--- a/unit_trace/viz/trace_reader.py
+++ /dev/null
@@ -1,259 +0,0 @@
###############################################################################
# Description
###############################################################################

# trace_reader(files) returns an iterator that produces records, in
# timestamp order, from the given files (the parameter is a list of file
# names).
#
# Each record is a plain Python object. It is guaranteed to have the
# following attributes:
# - 'pid': pid of the task
# - 'job': job number for that task
# - 'cpu': the CPU, given by LITMUS
# - 'when': a timestamp, given by LITMUS. LITMUS does not provide a
#   timestamp for all records; in that case, when is set to 0.
# - 'type': a numerical value given by LITMUS
# - 'type_name': a human-readable name defined in this module
# - 'record_type': set to 'event' by this module (to distinguish from,
#   e.g., error records produced elsewhere)
# - possible additional attributes, depending on the type of record
#
# To find out exactly which attributes are set for each record type, see
# the trace-parsing information at the bottom of this file.
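#
# For illustration, a minimal consumer of this iterator might look like
# the following (a sketch; the file name is a placeholder):
#
#   for record in trace_reader(['trace.bin']):
#       if record.record_type == 'event':
#           print record.when, record.type_name, record.pid, record.job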
23
###############################################################################
# Imports
###############################################################################

import struct

###############################################################################
# Class definitions
###############################################################################
class InvalidRecordError(Exception):
    pass

###############################################################################
# Public functions
###############################################################################

# Generator function returning an iterator over records in the given trace files.
def trace_reader(files):

    # Yield a record indicating the number of CPUs, used by the G-EDF test
    class Obj: pass
    record = Obj()
    record.record_type = "meta"
    record.type_name = "num_cpus"
    record.num_cpus = len(files)
    yield record

    # Create iterators for each file and a buffer to store records in
    file_iters = []     # file iterators
    file_iter_buff = [] # file iterator buffers
    for file in files:
        file_iter = _get_file_iter(file)
        file_iters.append(file_iter)
        file_iter_buff.append([])

    # We keep 100 records in each buffer, and keep each buffer sorted,
    # because records may have been recorded slightly out of order. This
    # cannot guarantee that records are produced in order, but it makes it
    # overwhelmingly probable.
    for x in range(0,len(file_iter_buff)):
        for y in range(0,100):
            try:
                file_iter_buff[x].append(file_iters[x].next())
            except StopIteration:
                break

    for x in range(0,len(file_iter_buff)):
        file_iter_buff[x] = sorted(file_iter_buff[x], key=lambda rec: rec.when)

    # Remember the time of the last record. This way, we can make sure records
    # truly are produced in monotonically increasing order by time and
    # terminate fatally if they are not.
    last_time = None

    # Keep pulling records as long as we have a buffer
    while len(file_iter_buff) > 0:

        # Select the earliest record from those at the heads of the buffers
        earliest = -1
        buff_to_refill = -1
        for x in range(0,len(file_iter_buff)):
            if earliest==-1 or file_iter_buff[x][0].when < earliest.when:
                earliest = file_iter_buff[x][0]
                buff_to_refill = x

        # Take it out of the buffer
        del file_iter_buff[buff_to_refill][0]

        # Try to append a new record to the buffer (if there is another) and
        # then keep the buffer sorted
        try:
            file_iter_buff[buff_to_refill].append(file_iters[buff_to_refill].next())
            file_iter_buff[buff_to_refill] = sorted(file_iter_buff[buff_to_refill],
                                                    key=lambda rec: rec.when)
        # If there are no more records, that is fine, unless the buffer is
        # also empty; in that case, delete the buffer and its iterator.
        except StopIteration:
            if len(file_iter_buff[buff_to_refill]) < 1:
                del file_iter_buff[buff_to_refill]
                del file_iters[buff_to_refill]

        # Check for monotonically increasing time
        if last_time is not None and earliest.when < last_time:
            raise InvalidRecordError("out-of-order record produced")
        else:
            last_time = earliest.when

        # Yield the record
        yield earliest

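# Aside, for comparison only (this is not what this module does): if each
# trace file were already sorted by timestamp, the buffering above could be
# replaced by a standard k-way merge. A minimal sketch using heapq,
# decorating each record with its timestamp so the heap can order it:
#
#   import heapq
#
#   def _merged(files):
#       streams = [((rec.when, rec) for rec in _get_file_iter(f))
#                  for f in files]
#       for when, rec in heapq.merge(*streams):
#           yield rec
#
# The 100-record buffers exist precisely because raw traces are only
# approximately sorted, so a sliding window is resorted instead.
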
###############################################################################
# Private functions
###############################################################################

# Returns an iterator to pull records from a file
def _get_file_iter(file):
    f = open(file,'rb')
    while True:
        data = f.read(RECORD_HEAD_SIZE)
        if data == '':
            break
        try:
            type_num = struct.unpack_from('b',data)[0]
        except struct.error:
            raise InvalidRecordError("Invalid record detected, stopping.")
        type = _get_type(type_num)
        try:
            values = struct.unpack_from(StHeader.format + type.format, data)
            record_dict = dict(zip(type.keys,values))
        except struct.error:
            f.close()
            raise InvalidRecordError("Invalid record detected, stopping.")

        # Convert the record_dict into an object
        record = _dict2obj(record_dict)

        # Give it a type name (easier to work with than a type number)
        record.type_name = _get_type_name(type_num)

        # All records should have a 'record_type' field; e.g., these are
        # 'event's as opposed to 'error's
        record.record_type = "event"

        # If there is no timestamp, set the time to 0
        if 'when' not in record.__dict__.keys():
            record.when = 0

        yield record

# Convert a dict into an object
def _dict2obj(d):
    class Obj(object): pass
    o = Obj()
    for key in d.keys():
        o.__dict__[key] = d[key]
    return o

###############################################################################
# Trace record data types and accessor functions
###############################################################################

# Each class below represents one type of event record. The format attribute
# specifies how to decode the binary record and the keys attribute specifies
# how to name the pieces of information decoded. Every record occupies
# RECORD_HEAD_SIZE (24) bytes and begins with a common 8-byte header,
# represented by the StHeader class.

RECORD_HEAD_SIZE = 24

class StHeader:
    format = '<bbhi'
    formatStr = struct.Struct(format)
    keys = ['type','cpu','pid','job']
    message = 'The header.'

class StNameData:
    format = '16s'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['name']
    message = 'The name of the executable of this process.'

class StParamData:
    format = 'IIIc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['wcet','period','phase','partition']
    message = 'Regular parameters.'

class StReleaseData:
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','deadline']
    message = 'A job was/is going to be released.'

# Not yet used by Sched Trace
class StAssignedData:
    format = 'Qc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','target']
    message = 'A job was assigned to a CPU.'

class StSwitchToData:
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched to on a given CPU.'

class StSwitchAwayData:
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched away on a given CPU.'

class StCompletionData:
    #format = 'Q3x?c'
    format = 'Q3xcc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','forced?','flags']
    message = 'A job completed.'

class StBlockData:
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task blocks.'

class StResumeData:
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task resumes.'

class StSysReleaseData:
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','release']
    message = 'All tasks have checked in, task system released by user'

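# Worked example, for illustration only (all values here are hypothetical):
# pack and decode one "release" record. StHeader.format + StReleaseData.format
# is '<bbhiQQ'; with '<' (little-endian, no padding) that spans
# 1+1+2+4+8+8 = 24 bytes, exactly RECORD_HEAD_SIZE.
def _example_decode():
    fmt = StHeader.format + StReleaseData.format
    raw = struct.pack(fmt, 3, 0, 1234, 7, 1000, 5000)
    values = struct.unpack_from(fmt, raw)
    rec = _dict2obj(dict(zip(StReleaseData.keys, values)))
    return rec  # rec.pid == 1234, rec.when == 1000, rec.deadline == 5000
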
# Return the binary data type, given the type_num
def _get_type(type_num):
    types = [None,StNameData,StParamData,StReleaseData,StAssignedData,
             StSwitchToData,StSwitchAwayData,StCompletionData,StBlockData,
             StResumeData,StSysReleaseData]
    if type_num >= len(types) or type_num < 0:
        raise InvalidRecordError("Invalid record detected, stopping.")
    return types[type_num]

# Return the type name, given the type_num (this is simply a convenience to
# programmers of other modules)
def _get_type_name(type_num):
    type_names = [None,"name","params","release","assign","switch_to",
                  "switch_away","completion","block","resume","sys_release"]
    if type_num >= len(type_names) or type_num < 0:
        raise InvalidRecordError("Invalid record detected, stopping.")
    return type_names[type_num]
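
For orientation, the two lookup tables above are parallel: a given type number selects both a decoder class and a short name. For example, type number 3 denotes a job release, so (per the lists above):

    _get_type(3) is StReleaseData    # True
    _get_type_name(3) == "release"   # True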