author     Mac Mollison <mollison@cs.unc.edu>    2010-03-13 12:09:00 -0500
committer  Mac Mollison <mollison@cs.unc.edu>    2010-03-13 12:09:00 -0500
commit     14a40b99735f09f6e70b8e897acbb622f9115ca3 (patch)
tree       e8420397374fd31aa36a66d0dc75872142d339e6 /reader
parent     a643f0465d608552433ba326847eec5d0bef3108 (diff)
Directory restructuring
Reworked the directory layout (and modified some of the code)
to match the intended architecture.
Diffstat (limited to 'reader')
-rw-r--r--   reader/__init__.py              4
-rw-r--r--   reader/gedf_test.py           163
-rw-r--r--   reader/naive_trace_reader.py  165
-rwxr-xr-x   reader/runtests.py             47
-rwxr-xr-x   reader/sample_script.py        41
-rw-r--r--   reader/sample_script.py~       41
-rw-r--r--   reader/sanitizer.py            53
-rw-r--r--   reader/stats.py                39
-rw-r--r--   reader/stdout_printer.py       69
-rwxr-xr-x   reader/test.py                 15
-rw-r--r--   reader/trace_reader.py        245
11 files changed, 0 insertions, 882 deletions
diff --git a/reader/__init__.py b/reader/__init__.py
deleted file mode 100644
index afbfe44..0000000
--- a/reader/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
import trace_reader
import gedf_test
import sanitizer
import stats
diff --git a/reader/gedf_test.py b/reader/gedf_test.py
deleted file mode 100644
index e31fb19..0000000
--- a/reader/gedf_test.py
+++ /dev/null
@@ -1,163 +0,0 @@
###############################################################################
# Description
###############################################################################

# G-EDF Test

###############################################################################
# Imports
###############################################################################

import copy


###############################################################################
# Public Functions
###############################################################################

def gedf_test(stream):

    # Two lists to model the system: tasks occupying a CPU and tasks eligible
    # to do so. Also, m = the number of CPUs.
    eligible = []
    on_cpu = []
    m = None

    # Time of the last record we saw. Only run the G-EDF test when the time
    # is updated.
    last_time = None

    for record in stream:
        if record.record_type != "event":
            if record.record_type == "meta" and record.type_name == "num_cpus":
                m = record.num_cpus
            continue

        # Check for inversion starts and ends and yield them.
        # Only do the check when time has moved forward.
        # (It is common to have records with simultaneous timestamps.)
        if last_time is not None and last_time != record.when:
            errors = _gedf_check(eligible,on_cpu,record.when,m)
            for error in errors:
                yield error

        # Add a newly-released Job to the eligible queue
        if record.type_name == 'release':
            eligible.append(Job(record))

        # Move a Job from the eligible queue to on_cpu
        elif record.type_name == 'switch_to':
            pos = _find_job(record,eligible)
            job = eligible[pos]
            del eligible[pos]
            on_cpu.append(job)

        # Mark a Job as completed.
        # The only time a Job completes when it is not on a
        # CPU is when it is the last job of the task.
        elif record.type_name == 'completion':
            pos = _find_job(record,on_cpu)
            if pos is not None:
                on_cpu[pos].is_complete = True
            else:
                pos = _find_job(record,eligible)
                del eligible[pos]

        # A job is switched away from a CPU. If it has
        # been marked as complete, remove it from the model.
        elif record.type_name == 'switch_away':
            pos = _find_job(record,on_cpu)
            job = on_cpu[pos]
            del on_cpu[pos]
            if job.is_complete == False:
                eligible.append(job)

        last_time = record.when
        yield record

###############################################################################
# Private Functions
###############################################################################

# Internal representation of a Job
class Job(object):
    def __init__(self, record):
        self.pid = record.pid
        self.job = record.job
        self.deadline = record.deadline
        self.is_complete = False
        self.inversion_start = None
        self.inversion_end = None
    def __str__(self):
        return "(%d.%d:%d)" % (self.pid,self.job,self.deadline)

# G-EDF errors: the start or end of an inversion
class Error(object):
    def __init__(self, job, eligible, on_cpu):
        self.job = copy.copy(job)
        self.eligible = copy.copy(eligible)
        self.on_cpu = copy.copy(on_cpu)
        self.record_type = 'error'
        if job.inversion_end is None:
            self.type_name = 'inversion_start'
        else:
            self.type_name = 'inversion_end'

# Returns the position of a Job in a list, or None
def _find_job(record,list):
    for i in range(0,len(list)):
        if list[i].pid == record.pid and list[i].job == record.job:
            return i
    return None

# Return records for any inversion_starts and inversion_ends
def _gedf_check(eligible,on_cpu,when,m):

    # List of error records to be returned
    errors = []

    # List of all jobs that are not complete
    all = []
    for x in on_cpu:
        if x.is_complete is not True:
            all.append(x)
    all += eligible

    # Sort by on_cpu and then by deadline. sort() is guaranteed to be stable.
    # Thus, this gives us jobs ordered by deadline with preference to those
    # actually running.
    all.sort(key=lambda x: 0 if (x in on_cpu) else 1)
    all.sort(key=lambda x: x.deadline)

    # Check those that actually should be running
    for x in range(0,min(m,len(all))):
        job = all[x]

        # It's not running and an inversion_start has not been recorded
        if job not in on_cpu and job.inversion_start is None:
            job.inversion_start = when
            errors.append(Error(job, eligible, on_cpu))

        # It is running and an inversion_start exists (i.e. it is still
        # marked as being inverted)
        elif job in on_cpu and job.inversion_start is not None:
            job.inversion_end = when
            errors.append(Error(job, eligible, on_cpu))
            job.inversion_start = None
            job.inversion_end = None

    # Check those that actually should not be running
    for x in range(m,len(all)):
        job = all[x]

        # It actually is running. We don't care.

        # It isn't running, but an inversion_start exists (i.e. it is still
        # marked as being inverted)
        if job not in on_cpu and job.inversion_start is not None:
            job.inversion_end = when
            errors.append(Error(job, eligible, on_cpu))
            job.inversion_start = None
            job.inversion_end = None

    return errors
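A closer look at the stable-sort trick in _gedf_check above: sorting first by CPU occupancy and then by deadline leaves jobs ordered by deadline, with running jobs ahead of waiters on deadline ties, because sort() preserves the relative order of equal keys. A minimal, self-contained sketch of just that trick (not part of the deleted file; FakeJob and the sample deadlines are invented for illustration):

class FakeJob(object):
    def __init__(self, pid, deadline):
        self.pid = pid
        self.deadline = deadline

running = [FakeJob(1, 30), FakeJob(2, 10)]   # jobs currently on a CPU
waiting = [FakeJob(3, 10), FakeJob(4, 20)]   # eligible but not running

all_jobs = running + waiting
all_jobs.sort(key=lambda j: 0 if j in running else 1)  # running jobs first
all_jobs.sort(key=lambda j: j.deadline)                # stable: ties keep that order

# With m = 2 CPUs, the first two entries are the jobs that should occupy
# a CPU under G-EDF; job 2 wins the deadline-10 tie because it is running.
print([j.pid for j in all_jobs])   # [2, 3, 4, 1]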
diff --git a/reader/naive_trace_reader.py b/reader/naive_trace_reader.py
deleted file mode 100644
index 0f117b8..0000000
--- a/reader/naive_trace_reader.py
+++ /dev/null
@@ -1,165 +0,0 @@
###############################################################################
# Description
###############################################################################

# trace_reader(files) returns an iterator which produces records
# OUT OF ORDER from the files given. (The param is a list of files.)
#
# The non-naive trace_reader has a lot of complex logic which attempts to
# produce records in order (even though they are being pulled from multiple
# files which themselves are only approximately ordered). This trace_reader
# attempts to be as simple as possible and is used in the unit tests to
# make sure the total number of records read by the normal trace_reader is
# the same as the number of records read by this one.

###############################################################################
# Imports
###############################################################################

import struct


###############################################################################
# Public functions
###############################################################################

# Generator function returning an iterable over records in a trace file.
def trace_reader(files):
    for file in files:
        f = open(file,'rb')
        while True:
            data = f.read(RECORD_HEAD_SIZE)
            try:
                type_num = struct.unpack_from('b',data)[0]
            except struct.error:
                break # We read to the end of the file
            type = _get_type(type_num)
            try:
                values = struct.unpack_from(StHeader.format +
                    type.format,data)
                record_dict = dict(zip(type.keys,values))
            except struct.error:
                f.close()
                print "Invalid record detected, stopping."
                exit()

            # Convert the record_dict into an object
            record = _dict2obj(record_dict)

            # Give it a type name (easier to work with than type number)
            record.type_name = _get_type_name(type_num)

            # All records should have a 'record type' field.
            # e.g. these are 'event's as opposed to 'error's
            record.record_type = "event"

            # If there is no timestamp, set the time to 0
            if 'when' not in record.__dict__.keys():
                record.when = 0

            yield record

###############################################################################
# Private functions
###############################################################################

# Convert a dict into an object
def _dict2obj(d):
    class Obj: pass
    o = Obj()
    for key in d.keys():
        o.__dict__[key] = d[key]
    return o

###############################################################################
# Trace record data types and accessor functions
###############################################################################

# Each class below represents a type of event record. The format attribute
# specifies how to decode the binary record and the keys attribute
# specifies how to name the pieces of information decoded. Note that all
# event records have a common initial 24 bytes, represented by the StHeader
# class.

RECORD_HEAD_SIZE = 24

class StHeader(object):
    format = '<bbhi'
    formatStr = struct.Struct(format)
    keys = ['type','cpu','pid','job']
    message = 'The header.'

class StNameData(object):
    format = '16s'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['name']
    message = 'The name of the executable of this process.'

class StParamData(object):
    format = 'IIIc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['wcet','period','phase','partition']
    message = 'Regular parameters.'

class StReleaseData(object):
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','deadline']
    message = 'A job was/is going to be released.'

# Not yet used by Sched Trace
class StAssignedData(object):
    format = 'Qc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','target']
    message = 'A job was assigned to a CPU.'

class StSwitchToData(object):
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched to on a given CPU.'

class StSwitchAwayData(object):
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched away on a given CPU.'

class StCompletionData(object):
    format = 'Q3xcc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','forced?','flags']
    message = 'A job completed.'

class StBlockData(object):
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task blocks.'

class StResumeData(object):
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task resumes.'

class StSysReleaseData(object):
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','release']
    message = 'All tasks have checked in, task system released by user'

# Return the binary data type, given the type_num
def _get_type(type_num):
    types = [None,StNameData,StParamData,StReleaseData,StAssignedData,
        StSwitchToData,StSwitchAwayData,StCompletionData,StBlockData,
        StResumeData,StSysReleaseData]
    return types[type_num]

# Return the type name, given the type_num (this is simply a convenience to
# programmers of other modules)
def _get_type_name(type_num):
    type_names = [None,"name","params","release","assign","switch_to",
        "switch_away","completion","block","resume","sys_release"]
    return type_names[type_num]
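To make the decoding scheme concrete: with the '<' (little-endian, unpadded) prefix, the StHeader fields occupy 8 bytes, so a release record (header plus two 64-bit fields) is exactly the 24 bytes read per record above. A small round-trip sketch, not from the deleted file, using made-up field values:

import struct

header_fmt = '<bbhi'                 # type, cpu, pid, job  (8 bytes unpadded)
release_fmt = header_fmt + 'QQ'      # + when, deadline     (24 bytes total)
keys = ['type', 'cpu', 'pid', 'job', 'when', 'deadline']

# Pack a fake record (type 3 is 'release' in the tables above), then
# decode it the same way the reader does: unpack and zip with the keys.
data = struct.pack(release_fmt, 3, 0, 1234, 2, 1000, 5000)
record = dict(zip(keys, struct.unpack_from(release_fmt, data)))
print("%(pid)d %(when)d %(deadline)d" % record)   # 1234 1000 5000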
diff --git a/reader/runtests.py b/reader/runtests.py
deleted file mode 100755
index 88dddf4..0000000
--- a/reader/runtests.py
+++ /dev/null
@@ -1,47 +0,0 @@
#!/usr/bin/python

###############################################################################
# Description
###############################################################################

# Unit Tests


###############################################################################
# Imports
###############################################################################

import trace_reader
import naive_trace_reader
import os

###############################################################################
# Trace files
###############################################################################

files = [
    './sample_traces/st-g6-0.bin',
    './sample_traces/st-g6-1.bin',
    './sample_traces/st-g6-2.bin',
    './sample_traces/st-g6-3.bin',
]

###############################################################################
# Tests
###############################################################################

# Does our fancy trace reader get the same number of records as our naive one?
# (See naive_trace_reader.py for further explanation)
def test1():
    stream = trace_reader.trace_reader(files)
    num_records = len(list(stream))
    stream = naive_trace_reader.trace_reader(files)
    naive_num_records = len(list(stream))

    # We need a +1 here because the fancy reader produces a 'meta' record
    # indicating the number of CPUs
    if num_records != naive_num_records + 1:
        return "[FAIL]"
    return "[SUCCESS]"

print "Test 1: %s" % (test1())
diff --git a/reader/sample_script.py b/reader/sample_script.py
deleted file mode 100755
index f7e9297..0000000
--- a/reader/sample_script.py
+++ /dev/null
@@ -1,41 +0,0 @@
#!/usr/bin/python

# This is a sample script for using the tool. I would recommend copying
# this and modifying it to suit your needs for a particular test. Make
# sure you redirect the output to a file (e.g. ./sample_script.py > output).

# Import the modules we need. You should not need to know about
# their internals.
import trace_reader
import sanitizer
import gedf_test
import stats
import stdout_printer

# Specify your trace files
g6 = [
    '../sample_traces/st-g6-0.bin',
    '../sample_traces/st-g6-1.bin',
    '../sample_traces/st-g6-2.bin',
    '../sample_traces/st-g6-3.bin',
]

# Here is an example of a custom filter function.
# It will remove from the error stream all inversion_end records indicating
# an inversion of less than 4000000 time units. Thus, you can grep through
# the output looking for 'Inversion end' and find only errors for particularly
# long inversions. This is commented out in the pipeline (below) since you
# probably don't want it in general.
def my_filter(record):
    if record.record_type == 'error' and record.type_name == 'inversion_end':
        if record.job.inversion_end - record.job.inversion_start < 4000000:
            return False
    return True

# Pipeline
stream = trace_reader.trace_reader(g6)   # Read events from traces
stream = sanitizer.sanitizer(stream)     # Remove garbage events
stream = gedf_test.gedf_test(stream)     # Produce G-EDF error records
stream = stats.stats(stream)             # Produce a statistics record
#stream = filter(my_filter, stream)      # Filter some records before printing
stdout_printer.stdout_printer(stream)    # Print records to stdout
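The pipeline above works because each stage is a generator that lazily consumes the one before it; no trace data is read until stdout_printer starts iterating. A toy sketch of the same composition pattern (not from the deleted script; the stage names are invented):

def source():
    for i in range(5):
        yield i

def double(stream):
    for x in stream:
        yield 2 * x

def drop_small(stream):
    for x in stream:
        if x >= 4:
            yield x

# Stages compose exactly like the trace pipeline: nothing runs until
# the final consumer pulls values through the chain.
for value in drop_small(double(source())):
    print(value)   # 4, 6, 8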
diff --git a/reader/sample_script.py~ b/reader/sample_script.py~
deleted file mode 100644
index c3b7843..0000000
--- a/reader/sample_script.py~
+++ /dev/null
@@ -1,41 +0,0 @@
#!/usr/bin/python

# This is a sample script for using the tool. I would recommend copying
# this and modifying it to suit your needs for a particular test. Make
# sure you redirect the output to a file (e.g. ./sample_script.py > output).

# Import the modules we need. You should not need to know about
# their internals.
import trace_reader
import sanitizer
import gedf_test
import stats
import stdout_printer

# Specify your trace files
g6 = [
    './sample_traces/st-g6-0.bin',
    './sample_traces/st-g6-1.bin',
    './sample_traces/st-g6-2.bin',
    './sample_traces/st-g6-3.bin',
]

# Here is an example of a custom filter function.
# It will remove from the error stream all inversion_end records indicating
# an inversion of less than 4000000 time units. Thus, you can grep through
# the output looking for 'Inversion end' and find only errors for particularly
# long inversions. This is commented out in the pipeline (below) since you
# probably don't want it in general.
def my_filter(record):
    if record.record_type == 'error' and record.type_name == 'inversion_end':
        if record.job.inversion_end - record.job.inversion_start < 4000000:
            return False
    return True

# Pipeline
stream = trace_reader.trace_reader(g6)   # Read events from traces
stream = sanitizer.sanitizer(stream)     # Remove garbage events
stream = gedf_test.gedf_test(stream)     # Produce G-EDF error records
stream = stats.stats(stream)             # Produce a statistics record
#stream = filter(my_filter, stream)      # Filter some records before printing
stdout_printer.stdout_printer(stream)    # Print records to stdout
diff --git a/reader/sanitizer.py b/reader/sanitizer.py
deleted file mode 100644
index 79315cc..0000000
--- a/reader/sanitizer.py
+++ /dev/null
@@ -1,53 +0,0 @@
###############################################################################
# Description
###############################################################################

# Sanitize input. (There are a number of goofy issues with the sched_trace
# output.)

###############################################################################
# Public functions
###############################################################################

def sanitizer(stream):

    job_2s_released = [] # list of tasks which have released their job 2s
    jobs_switched_to = []

    for record in stream:

        # Ignore records which are not events (e.g. the num_cpus record)
        if record.record_type != 'event':
            yield record
            continue

        # All records with job < 2 are garbage
        if record.job < 2:
            continue

        # Some records with job == 2 are garbage
        if record.job==2:

            # There is a duplicate release of every job 2
            # This will throw away the second one
            if record.type_name == 'release':
                if record.pid in job_2s_released:
                    continue
                else:
                    job_2s_released.append(record.pid)

            # Job 2 has a resume that is garbage
            if record.type_name == 'resume':
                continue

        # By default, the switch_away for a job (after it has completed)
        # is marked as being for job+1, which has never been switched to.
        # We can correct this if we note which jobs really
        # have been switched to.
        if record.type_name == 'switch_to':
            jobs_switched_to.append((record.pid,record.job))
        if record.type_name == 'switch_away':
            if (record.pid,record.job) not in jobs_switched_to:
                record.job -= 1

        yield record
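The switch_away correction above is an off-by-one repair keyed on which (pid, job) pairs were ever switched to. A reduced sketch of just that fix-up, not from the deleted file (fix_switch_away is an invented helper):

switched_to = set()

def fix_switch_away(pid, job):
    # A switch_away naming a job that never ran really belongs
    # to the previous job of the same task.
    if (pid, job) not in switched_to:
        job -= 1
    return job

switched_to.add((42, 2))        # job 42.2 was switched to
print(fix_switch_away(42, 3))   # 2: job 42.3 never ran, so it was really 42.2
print(fix_switch_away(42, 2))   # 2: job 42.2 did run, left unchanged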
diff --git a/reader/stats.py b/reader/stats.py
deleted file mode 100644
index 34a842f..0000000
--- a/reader/stats.py
+++ /dev/null
@@ -1,39 +0,0 @@
###############################################################################
# Description
###############################################################################
# Compute and produce statistics


###############################################################################
# Public Functions
###############################################################################

def stats(stream):
    min_inversion = -1
    max_inversion = -1
    sum_inversions = 0
    num_inversions = 0
    for record in stream:
        if record.type_name == 'inversion_end':
            length = record.job.inversion_end - record.job.inversion_start
            if length > 0:
                num_inversions += 1
                if length > max_inversion:
                    max_inversion = length
                if length < min_inversion or min_inversion == -1:
                    min_inversion = length
                sum_inversions += length
        yield record
    if num_inversions > 0:
        avg_inversion = int(sum_inversions / num_inversions)
    else:
        avg_inversion = 0
    class Obj(object): pass
    rec = Obj()
    rec.record_type = "meta"
    rec.type_name = "stats"
    rec.num_inversions = num_inversions
    rec.min_inversion = min_inversion
    rec.max_inversion = max_inversion
    rec.avg_inversion = avg_inversion
    yield rec
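stats leans on a generator detail worth spelling out: code after the for loop runs only once the upstream stage is exhausted, so the summary record is guaranteed to be the last thing yielded. A minimal sketch of the pattern (not from the deleted file):

def count(stream):
    n = 0
    for x in stream:
        n += 1
        yield x
    # Reached only after the upstream iterator is exhausted.
    yield "saw %d records" % n

for out in count(iter([10, 20, 30])):
    print(out)   # 10, 20, 30, then 'saw 3 records'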
diff --git a/reader/stdout_printer.py b/reader/stdout_printer.py
deleted file mode 100644
index f8d9a84..0000000
--- a/reader/stdout_printer.py
+++ /dev/null
@@ -1,69 +0,0 @@
###############################################################################
# Description
###############################################################################

# Prints records to standard out

###############################################################################
# Public functions
###############################################################################

def stdout_printer(stream):
    for record in stream:
        if record.record_type == "event":
            _print_event(record)
        elif record.record_type == "meta" and record.type_name == "stats":
            _print_stats(record)
        elif record.record_type == "error" and record.type_name == 'inversion_start':
            _print_inversion_start(record)
        elif record.record_type == "error" and record.type_name == 'inversion_end':
            _print_inversion_end(record)
        else:
            continue
        print ""

###############################################################################
# Private functions
###############################################################################

def _print_event(record):
    print "Job: %d.%d" % (record.pid,record.job)
    print "Type: %s" % (record.type_name)
    print "Time: %d" % (record.when)

def _print_inversion_start(record):
    print "Type: %s" % ("Inversion start")
    print "Time: %d" % (record.job.inversion_start)
    print "Job: %d.%d" % (record.job.pid,record.job.job)
    print "Deadline: %d" % (record.job.deadline)
    print "Eligible: ",
    for job in record.eligible:
        print str(job) + " ",

    print "On CPU: ",
    for job in record.on_cpu:
        print str(job) + " ",
    print #newline

def _print_inversion_end(record):
    print "Type: %s" % ("Inversion end")
    print "Time: %d" % (record.job.inversion_end)
    print "Duration: %d" % (
        record.job.inversion_end - record.job.inversion_start)
    print "Job: %d.%d" % (record.job.pid,record.job.job)
    print "Deadline: %d" % (record.job.deadline)
    print "Eligible: ",
    for job in record.eligible:
        print str(job) + " ",

    print "On CPU: ",
    for job in record.on_cpu:
        print str(job) + " ",
    print #newline

def _print_stats(record):
    print "Inversion statistics"
    print "Num inversions: %d" % (record.num_inversions)
    print "Min inversion: %d" % (record.min_inversion)
    print "Max inversion: %d" % (record.max_inversion)
    print "Avg inversion: %d" % (record.avg_inversion)
diff --git a/reader/test.py b/reader/test.py
deleted file mode 100755
index b260314..0000000
--- a/reader/test.py
+++ /dev/null
@@ -1,15 +0,0 @@
#!/usr/bin/python

import cairo

if __name__ == '__main__':
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 500, 500)
    ctx = cairo.Context(surface)
    ctx.move_to(10, 10)
    ctx.line_to(-100, 10)
    ctx.set_line_width(2)

    ctx.move_to(10, 10)
    ctx.line_to(20, 10)
    ctx.stroke()
    surface.write_to_png('test.png')
diff --git a/reader/trace_reader.py b/reader/trace_reader.py
deleted file mode 100644
index a4ff964..0000000
--- a/reader/trace_reader.py
+++ /dev/null
@@ -1,245 +0,0 @@
###############################################################################
# Description
###############################################################################

# trace_reader(files) returns an iterator which produces records
# in order from the files given. (The param is a list of files.)
#
# Each record is just a Python object. It is guaranteed to have the following
# attributes:
# - 'pid': pid of the task
# - 'job': job number for that task
# - 'cpu', given by LITMUS
# - 'when', given by LITMUS as a timestamp. LITMUS does not provide a
#       timestamp for all records. In this case, when is set to 0.
# - 'type', a numerical value given by LITMUS
# - 'type_name', a human-readable name defined in this module
# - 'record_type', set to 'event' by this module (to distinguish from, e.g.,
#       error records produced elsewhere).
# - Possible additional attributes, depending on the type of record.
#
# To find out exactly what attributes are set for each record type, look at
# the trace-parsing information at the bottom of this file.

###############################################################################
# Imports
###############################################################################

import struct


###############################################################################
# Public functions
###############################################################################

# Generator function returning an iterable over records in a trace file.
def trace_reader(files):

    # Yield a record indicating the number of CPUs, used by the G-EDF test
    class Obj: pass
    record = Obj()
    record.record_type = "meta"
    record.type_name = "num_cpus"
    record.num_cpus = len(files)
    yield record

    # Create iterators for each file and a buffer to store records in
    file_iters = []      # file iterators
    file_iter_buff = []  # file iterator buffers
    for file in files:
        file_iter = _get_file_iter(file)
        file_iters.append(file_iter)
        file_iter_buff.append([file_iter.next()])

    # We keep 100 records in each buffer and then keep the buffer sorted.
    # This is because records may have been recorded slightly out of order.
    # This cannot guarantee records are produced in order, but it makes it
    # overwhelmingly probable.
    for x in range(0,len(file_iter_buff)):
        for y in range(0,100):
            file_iter_buff[x].append(file_iters[x].next())
    for x in range(0,len(file_iter_buff)):
        file_iter_buff[x] = sorted(file_iter_buff[x],key=lambda rec: rec.when)

    # Remember the time of the last record. This way, we can make sure records
    # truly are produced in monotonically increasing order by time and terminate
    # fatally if they are not.
    last_time = None

    # Keep pulling records as long as we have a buffer
    while len(file_iter_buff) > 0:

        # Select the earliest record from those at the heads of the buffers
        earliest = -1
        buff_to_refill = -1
        for x in range(0,len(file_iter_buff)):
            if earliest==-1 or file_iter_buff[x][0].when < earliest.when:
                earliest = file_iter_buff[x][0]
                buff_to_refill = x

        # Take it out of the buffer
        del file_iter_buff[buff_to_refill][0]

        # Try to append a new record to the buffer (if there is another) and
        # then keep the buffer sorted
        try:
            file_iter_buff[buff_to_refill].append(file_iters[buff_to_refill].next())
            file_iter_buff[buff_to_refill] = sorted(file_iter_buff[buff_to_refill],
                key=lambda rec: rec.when)

        # If there aren't any more records, fine. Unless the buffer is also empty.
        # If that is the case, delete the buffer.
        except StopIteration:
            if len(file_iter_buff[buff_to_refill]) < 1:
                del file_iter_buff[buff_to_refill]
                del file_iters[buff_to_refill]

        # Check for monotonically increasing time
        if last_time is not None and earliest.when < last_time:
            exit("FATAL ERROR: trace_reader.py: out-of-order record produced")
        else:
            last_time = earliest.when

        # Yield the record
        yield earliest

###############################################################################
# Private functions
###############################################################################

# Returns an iterator to pull records from a file
def _get_file_iter(file):
    f = open(file,'rb')
    while True:
        data = f.read(RECORD_HEAD_SIZE)
        try:
            type_num = struct.unpack_from('b',data)[0]
        except struct.error:
            break # We read to the end of the file
        type = _get_type(type_num)
        try:
            values = struct.unpack_from(StHeader.format +
                type.format,data)
            record_dict = dict(zip(type.keys,values))
        except struct.error:
            f.close()
            print "Invalid record detected, stopping."
            exit()

        # Convert the record_dict into an object
        record = _dict2obj(record_dict)

        # Give it a type name (easier to work with than type number)
        record.type_name = _get_type_name(type_num)

        # All records should have a 'record type' field.
        # e.g. these are 'event's as opposed to 'error's
        record.record_type = "event"

        # If there is no timestamp, set the time to 0
        if 'when' not in record.__dict__.keys():
            record.when = 0

        yield record

# Convert a dict into an object
def _dict2obj(d):
    class Obj(object): pass
    o = Obj()
    for key in d.keys():
        o.__dict__[key] = d[key]
    return o

###############################################################################
# Trace record data types and accessor functions
###############################################################################

# Each class below represents a type of event record. The format attribute
# specifies how to decode the binary record and the keys attribute
# specifies how to name the pieces of information decoded. Note that all
# event records have a common initial 24 bytes, represented by the StHeader
# class.

RECORD_HEAD_SIZE = 24

class StHeader:
    format = '<bbhi'
    formatStr = struct.Struct(format)
    keys = ['type','cpu','pid','job']
    message = 'The header.'

class StNameData:
    format = '16s'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['name']
    message = 'The name of the executable of this process.'

class StParamData:
    format = 'IIIc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['wcet','period','phase','partition']
    message = 'Regular parameters.'

class StReleaseData:
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','deadline']
    message = 'A job was/is going to be released.'

# Not yet used by Sched Trace
class StAssignedData:
    format = 'Qc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','target']
    message = 'A job was assigned to a CPU.'

class StSwitchToData:
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched to on a given CPU.'

class StSwitchAwayData:
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched away on a given CPU.'

class StCompletionData:
    #format = 'Q3x?c'
    format = 'Q3xcc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','forced?','flags']
    message = 'A job completed.'

class StBlockData:
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task blocks.'

class StResumeData:
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task resumes.'

class StSysReleaseData:
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','release']
    message = 'All tasks have checked in, task system released by user'

# Return the binary data type, given the type_num
def _get_type(type_num):
    types = [None,StNameData,StParamData,StReleaseData,StAssignedData,
        StSwitchToData,StSwitchAwayData,StCompletionData,StBlockData,
        StResumeData,StSysReleaseData]
    return types[type_num]

# Return the type name, given the type_num (this is simply a convenience to
# programmers of other modules)
def _get_type_name(type_num):
    type_names = [None,"name","params","release","assign","switch_to",
        "switch_away","completion","block","resume","sys_release"]
    return type_names[type_num]
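For comparison with the buffered merge in trace_reader above: if each per-CPU stream were already totally ordered by timestamp, the standard library's heapq.merge would perform the same k-way interleaving in one call. The 100-record buffers exist precisely because sched_trace streams are only approximately ordered, which heapq.merge does not tolerate. A sketch, not from the deleted code, using tuples in place of record objects:

import heapq

# Two already-sorted event streams, keyed by timestamp.
stream_a = [(10, 'release'), (40, 'completion')]
stream_b = [(20, 'switch_to'), (30, 'switch_away')]

# heapq.merge lazily yields the globally smallest head among all
# streams, like the buffer-head selection loop in trace_reader.
for when, name in heapq.merge(stream_a, stream_b):
    print("%d %s" % (when, name))   # 10 release, 20 switch_to, 30 switch_away, 40 completion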