summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--README1
-rw-r--r--TODO0
-rw-r--r--convert.py102
-rw-r--r--gedf_test.py163
-rw-r--r--litmus01.pdfbin0 -> 681011 bytes
-rw-r--r--litmus02.pdfbin0 -> 899823 bytes
m---------mac6
-rw-r--r--naive_trace_reader.py165
-rw-r--r--reader/__init__.py4
-rw-r--r--reader/gedf_test.py163
-rw-r--r--reader/naive_trace_reader.py165
-rwxr-xr-xreader/runtests.py47
-rwxr-xr-xreader/sample_script.py41
-rw-r--r--reader/sample_script.py~41
-rw-r--r--reader/sanitizer.py53
-rw-r--r--reader/stats.py39
-rw-r--r--reader/stdout_printer.py69
-rwxr-xr-xreader/test.py15
-rw-r--r--reader/trace_reader.py245
-rwxr-xr-xruntests.py47
-rwxr-xr-xsample_script.py41
-rw-r--r--sample_traces/.runtests.py.swpbin0 -> 12288 bytes
-rw-r--r--sample_traces/st-g6-0.binbin0 -> 46656 bytes
-rw-r--r--sample_traces/st-g6-1.binbin0 -> 53064 bytes
-rw-r--r--sample_traces/st-g6-2.binbin0 -> 52368 bytes
-rw-r--r--sample_traces/st-g6-3.binbin0 -> 54960 bytes
-rw-r--r--sanitizer.py53
-rw-r--r--stats.py39
-rw-r--r--stdout_printer.py69
-rw-r--r--trace_reader.py245
-rw-r--r--traces/g1.pdfbin0 -> 12747 bytes
-rw-r--r--traces/g2.pdfbin0 -> 21284 bytes
-rw-r--r--traces/g3.pdfbin0 -> 18260 bytes
-rw-r--r--traces/g4.pdfbin0 -> 50607 bytes
-rw-r--r--traces/g5.pdfbin0 -> 141031 bytes
-rw-r--r--traces/g6.pdfbin0 -> 142968 bytes
-rw-r--r--traces/heavy.ts9
-rw-r--r--traces/heavy2.ts7
-rw-r--r--traces/light.ts9
-rw-r--r--traces/medium.ts7
-rw-r--r--traces/mixed.ts15
-rw-r--r--traces/st-g1-0.binbin0 -> 42528 bytes
-rw-r--r--traces/st-g1-1.binbin0 -> 42768 bytes
-rw-r--r--traces/st-g1-2.binbin0 -> 47592 bytes
-rw-r--r--traces/st-g1-3.binbin0 -> 50496 bytes
-rw-r--r--traces/st-g3-0.binbin0 -> 2640 bytes
-rw-r--r--traces/st-g3-1.binbin0 -> 1872 bytes
-rw-r--r--traces/st-g3-2.binbin0 -> 2088 bytes
-rw-r--r--traces/st-g3-3.binbin0 -> 2832 bytes
-rw-r--r--traces/st-g4-0.binbin0 -> 11208 bytes
-rw-r--r--traces/st-g4-1.binbin0 -> 10848 bytes
-rw-r--r--traces/st-g4-2.binbin0 -> 10392 bytes
-rw-r--r--traces/st-g4-3.binbin0 -> 10536 bytes
-rw-r--r--traces/st-g5-0.binbin0 -> 44904 bytes
-rw-r--r--traces/st-g5-1.binbin0 -> 47520 bytes
-rw-r--r--traces/st-g5-2.binbin0 -> 51552 bytes
-rw-r--r--traces/st-g5-3.binbin0 -> 56064 bytes
-rw-r--r--traces/st-g6-0.binbin0 -> 46656 bytes
-rw-r--r--traces/st-g6-1.binbin0 -> 53064 bytes
-rw-r--r--traces/st-g6-2.binbin0 -> 52368 bytes
-rw-r--r--traces/st-g6-3.binbin0 -> 54960 bytes
-rw-r--r--traces/st-heavy-0.binbin0 -> 33648 bytes
-rw-r--r--traces/st-heavy-1.binbin0 -> 34224 bytes
-rw-r--r--traces/st-heavy-2.binbin0 -> 35472 bytes
-rw-r--r--traces/st-heavy-3.binbin0 -> 34008 bytes
-rw-r--r--traces/st-heavy2-0.binbin0 -> 28608 bytes
-rw-r--r--traces/st-heavy2-1.binbin0 -> 32352 bytes
-rw-r--r--traces/st-heavy2-2.binbin0 -> 27864 bytes
-rw-r--r--traces/st-heavy2-3.binbin0 -> 29424 bytes
-rw-r--r--traces/st-heavy3-0.binbin0 -> 28896 bytes
-rw-r--r--traces/st-heavy3-1.binbin0 -> 30936 bytes
-rw-r--r--traces/st-heavy3-2.binbin0 -> 28704 bytes
-rw-r--r--traces/st-heavy3-3.binbin0 -> 30144 bytes
-rw-r--r--traces/st-mac-0.binbin0 -> 9288 bytes
-rw-r--r--traces/st-mac-1.binbin0 -> 10152 bytes
-rw-r--r--traces/st-mac-test-0.binbin0 -> 58944 bytes
-rw-r--r--traces/st-mac2-0.binbin0 -> 10536 bytes
-rw-r--r--traces/st-s1-0.binbin0 -> 47424 bytes
-rw-r--r--traces/st-s1-1.binbin0 -> 61488 bytes
-rw-r--r--traces/st-s1-2.binbin0 -> 57336 bytes
-rw-r--r--traces/st-s1-3.binbin0 -> 48984 bytes
-rw-r--r--traces/st-s2-0.binbin0 -> 82320 bytes
-rw-r--r--traces/st-s2-1.binbin0 -> 135288 bytes
-rw-r--r--traces/st-s2-2.binbin0 -> 93264 bytes
-rw-r--r--traces/st-s2-3.binbin0 -> 58560 bytes
-rw-r--r--traces/st-x10-0.binbin0 -> 44448 bytes
-rw-r--r--traces/st-x10-1.binbin0 -> 7536 bytes
-rw-r--r--traces/st-x10-2.binbin0 -> 30216 bytes
-rw-r--r--traces/st-x10-3.binbin0 -> 7440 bytes
-rw-r--r--traces/st-x11-0.binbin0 -> 33408 bytes
-rw-r--r--traces/st-x11-1.binbin0 -> 34656 bytes
-rw-r--r--traces/st-x11-2.binbin0 -> 33216 bytes
-rw-r--r--traces/st-x11-3.binbin0 -> 33240 bytes
-rw-r--r--traces/st-x12-0.binbin0 -> 80352 bytes
-rw-r--r--traces/st-x12-1.binbin0 -> 174744 bytes
-rw-r--r--traces/st-x12-2.binbin0 -> 90552 bytes
-rw-r--r--traces/st-x12-3.binbin0 -> 104088 bytes
-rw-r--r--traces/st-x13-0.binbin0 -> 72456 bytes
-rw-r--r--traces/st-x13-1.binbin0 -> 315360 bytes
-rw-r--r--traces/st-x13-2.binbin0 -> 318888 bytes
-rw-r--r--traces/st-x13-3.binbin0 -> 258192 bytes
-rw-r--r--traces/st-x14-0.binbin0 -> 73272 bytes
-rw-r--r--traces/st-x14-1.binbin0 -> 336192 bytes
-rw-r--r--traces/st-x14-2.binbin0 -> 309768 bytes
-rw-r--r--traces/st-x14-3.binbin0 -> 268992 bytes
-rw-r--r--traces/st-x15-0.binbin0 -> 86592 bytes
-rw-r--r--traces/st-x15-1.binbin0 -> 371160 bytes
-rw-r--r--traces/st-x15-2.binbin0 -> 365496 bytes
-rw-r--r--traces/st-x15-3.binbin0 -> 301728 bytes
-rw-r--r--traces/st-x16-0.binbin0 -> 44016 bytes
-rw-r--r--traces/st-x16-1.binbin0 -> 77568 bytes
-rw-r--r--traces/st-x16-2.binbin0 -> 76296 bytes
-rw-r--r--traces/st-x16-3.binbin0 -> 65040 bytes
-rw-r--r--traces/st-x17-0.binbin0 -> 203472 bytes
-rw-r--r--traces/st-x17-1.binbin0 -> 484368 bytes
-rw-r--r--traces/st-x17-2.binbin0 -> 331704 bytes
-rw-r--r--traces/st-x17-3.binbin0 -> 280800 bytes
-rw-r--r--traces/st-x18-0.binbin0 -> 49632 bytes
-rw-r--r--traces/st-x18-1.binbin0 -> 61488 bytes
-rw-r--r--traces/st-x18-2.binbin0 -> 60264 bytes
-rw-r--r--traces/st-x18-3.binbin0 -> 54048 bytes
-rw-r--r--traces/st-x19-0.binbin0 -> 81888 bytes
-rw-r--r--traces/st-x19-1.binbin0 -> 185208 bytes
-rw-r--r--traces/st-x19-2.binbin0 -> 135936 bytes
-rw-r--r--traces/st-x19-3.binbin0 -> 102552 bytes
-rw-r--r--traces/st-x2-0.binbin0 -> 24480 bytes
-rw-r--r--traces/st-x2-1.binbin0 -> 25176 bytes
-rw-r--r--traces/st-x2-2.binbin0 -> 1632 bytes
-rw-r--r--traces/st-x2-3.binbin0 -> 1056 bytes
-rw-r--r--traces/st-x3-0.binbin0 -> 34752 bytes
-rw-r--r--traces/st-x3-1.binbin0 -> 36936 bytes
-rw-r--r--traces/st-x3-2.binbin0 -> 36576 bytes
-rw-r--r--traces/st-x3-3.binbin0 -> 33888 bytes
-rw-r--r--traces/st-x4-0.binbin0 -> 20640 bytes
-rw-r--r--traces/st-x4-1.binbin0 -> 22632 bytes
-rw-r--r--traces/st-x4-2.binbin0 -> 1800 bytes
-rw-r--r--traces/st-x4-3.binbin0 -> 936 bytes
-rw-r--r--traces/st-x5-0.binbin0 -> 16824 bytes
-rw-r--r--traces/st-x5-1.binbin0 -> 31824 bytes
-rw-r--r--traces/st-x5-2.binbin0 -> 2544 bytes
-rw-r--r--traces/st-x5-3.binbin0 -> 30048 bytes
-rw-r--r--traces/st-x6-0.binbin0 -> 22632 bytes
-rw-r--r--traces/st-x6-1.binbin0 -> 22728 bytes
-rw-r--r--traces/st-x6-2.binbin0 -> 1848 bytes
-rw-r--r--traces/st-x6-3.binbin0 -> 720 bytes
-rw-r--r--traces/st-x7-0.binbin0 -> 22032 bytes
-rw-r--r--traces/st-x7-1.binbin0 -> 22848 bytes
-rw-r--r--traces/st-x7-2.binbin0 -> 960 bytes
-rw-r--r--traces/st-x7-3.binbin0 -> 936 bytes
-rw-r--r--traces/st-x9-0.binbin0 -> 33096 bytes
-rw-r--r--traces/st-x9-1.binbin0 -> 33384 bytes
-rw-r--r--traces/st-x9-2.binbin0 -> 33072 bytes
-rw-r--r--traces/st-x9-3.binbin0 -> 33000 bytes
-rw-r--r--traces/st-xxx-0.binbin0 -> 22392 bytes
-rw-r--r--traces/st-xxx-1.binbin0 -> 24120 bytes
-rw-r--r--traces/st-xxx-2.binbin0 -> 23784 bytes
-rw-r--r--traces/st-xxx-3.binbin0 -> 1008 bytes
-rw-r--r--traces/st0.fgbin0 -> 319488 bytes
-rw-r--r--traces/st1.fgbin0 -> 286720 bytes
-rw-r--r--traces/stg20.binbin0 -> 1416 bytes
-rw-r--r--traces/stg21.binbin0 -> 672 bytes
-rw-r--r--traces/stg22.binbin0 -> 744 bytes
-rw-r--r--traces/stg23.binbin0 -> 888 bytes
-rw-r--r--traces/test.pdfbin0 -> 3893 bytes
-rw-r--r--traces/x11.pdfbin0 -> 53568 bytes
-rw-r--r--traces/x12.pdfbin0 -> 13280 bytes
-rw-r--r--traces/x13.pdfbin0 -> 113497 bytes
-rw-r--r--traces/x14.pdfbin0 -> 113369 bytes
-rw-r--r--traces/x15.pdfbin0 -> 138880 bytes
-rw-r--r--traces/x16.pdfbin0 -> 80694 bytes
-rw-r--r--traces/x17.pdfbin0 -> 374581 bytes
-rw-r--r--traces/x18.pdfbin0 -> 80777 bytes
-rw-r--r--traces/x19.pdfbin0 -> 176625 bytes
-rwxr-xr-xvisualizer.py31
-rw-r--r--viz/__init__.py10
-rw-r--r--viz/draw.py1254
-rw-r--r--viz/format.py92
-rw-r--r--viz/renderer.py40
-rw-r--r--viz/schedule.py571
-rw-r--r--viz/util.py9
-rw-r--r--viz/viewer.py193
182 files changed, 4061 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0d20b64
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
*.pyc
diff --git a/README b/README
new file mode 100644
index 0000000..b2da190
--- /dev/null
+++ b/README
@@ -0,0 +1 @@
See the LITMUS Wiki page for an explanation of this tool.
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/TODO
diff --git a/convert.py b/convert.py
new file mode 100644
index 0000000..1db4ad0
--- /dev/null
+++ b/convert.py
@@ -0,0 +1,102 @@
1#!/usr/bin/env python
2from viz.schedule import *
3from reader.trace_reader import *
4
5"""Class that interprets the raw trace data, outputting it
6to a Python schedule object.
7
8Doesn't do any checking on the logic of the schedule (except to
9check for corrupted data)"""
10
11def get_type(type_num):
12 """Return the binary data type, given the type_num"""
13 return Trace.DATA_TYPES[type_num]
14
15def get_type_num(type):
16 nums = dict(zip(Trace.DATA_TYPES, range(0, 11)))
17 return nums[type]
18
19def _get_job_from_record(sched, record):
20 tname = _pid_to_task_name(record.pid)
21 job_no = record.job
22 if tname not in sched.get_tasks():
23 sched.add_task(Task(tname, []))
24 if job_no not in sched.get_tasks()[tname].get_jobs():
25 sched.get_tasks()[tname].add_job(Job(job_no, []))
26 job = sched.get_tasks()[tname].get_jobs()[job_no]
27 return job
28
29def convert_trace_to_schedule(stream):
30 """The main function of interest in this module. Coverts a stream of records
31 to a Schedule object."""
32 def noop():
33 pass
34
35 num_cpus, stream = _find_num_cpus(stream)
36 sched = Schedule('sched', num_cpus)
37 for record in stream:
38 #if record.record_type == 'meta':
39 # if record.type_name == 'num_cpus':
40 # sched = Schedule('sched', record.num_cpus)
41 # continue
42 if record.record_type == 'event':
43 job = _get_job_from_record(sched, record)
44 cpu = record.cpu
45
46 if not hasattr(record, 'deadline'):
47 record.deadline = None
48
49 actions = {
50 'name' : (noop),
51 'params' : (noop),
52 'release' : (lambda :
53 (job.add_event(ReleaseEvent(record.when, cpu)),
54 job.add_event(DeadlineEvent(record.deadline, cpu)))),
55 'switch_to' : (lambda :
56 job.add_event(SwitchToEvent(record.when, cpu))),
57 'switch_away' : lambda :
58 job.add_event(SwitchAwayEvent(record.when, cpu)),
59 'assign' : (noop),
60 'completion' : (lambda :
61 job.add_event(CompleteEvent(record.when, cpu))),
62 'block' : (lambda :
63 job.add_event(SuspendEvent(record.when, cpu))),
64 'resume' : (lambda :
65 job.add_event(ResumeEvent(record.when, cpu))),
66 'sys_release' : (noop)
67 }
68
69 actions[record.type_name]()
70
71 elif record.record_type == 'error':
72 job = _get_job_from_record(sched, record.job)
73
74 actions = {
75 'inversion_start' : (lambda :
76 job.add_event(InversionStartEvent(record.job.inversion_start))),
77 'inversion_end' : (lambda :
78 job.add_event(InversionEndEvent(record.job.inversion_end)))
79 }
80
81 actions[record.type_name]()
82
83 return sched
84
85def _pid_to_task_name(pid):
86 """Converts a PID to an appropriate name for a task."""
87 return str(pid)
88
89def _find_num_cpus(stream):
90 """Determines the number of CPUs used by scanning the binary format."""
91 max = 0
92 stream_list = []
93 for record in stream:
94 stream_list.append(record)
95 if record.record_type == 'event':
96 if record.cpu > max:
97 max = record.cpu
98
99 def recycle(l):
100 for record in l:
101 yield record
102 return (max + 1, recycle(stream_list))
diff --git a/gedf_test.py b/gedf_test.py
new file mode 100644
index 0000000..8457901
--- /dev/null
+++ b/gedf_test.py
@@ -0,0 +1,163 @@
1###############################################################################
2# Description
3###############################################################################
4
5# G-EDF Test
6
7###############################################################################
8# Imports
9###############################################################################
10
11import copy
12
13
14###############################################################################
15# Public Functions
16###############################################################################
17
18def gedf_test(stream):
19
20 # Two lists to model the system: tasks occupying a CPU and tasks eligible
21 # to do so. Also, m = the number of CPUs.
22 eligible = []
23 on_cpu = []
24 m = None
25
26 # Time of the last record we saw. Only run the G-EDF test when the time
27 # is updated.
28 last_time = None
29
30 for record in stream:
31 if record.record_type != "event":
32 if record.record_type == "meta" and record.type_name == "num_cpus":
33 m = record.num_cpus
34 continue
35
36 # Check for inversion starts and ends and yield them.
37 # Only to the check when time has moved forward.
38 # (It is common to have records with simultaneous timestamps.)
39 if last_time is not None and last_time != record.when:
40 errors = _gedf_check(eligible,on_cpu,record.when,m)
41 for error in errors:
42 yield error
43
44 # Add a newly-released Job to the eligible queue
45 if record.type_name == 'release':
46 eligible.append(Job(record))
47
48 # Move a Job from the eligible queue to on_cpu
49 elif record.type_name == 'switch_to':
50 pos = _find_job(record,eligible)
51 job = eligible[pos]
52 del eligible[pos]
53 on_cpu.append(job)
54
55 # Mark a Job as completed.
56 # The only time a Job completes when it is not on a
57 # CPU is when it is the last job of the task.
58 elif record.type_name == 'completion':
59 pos = _find_job(record,on_cpu)
60 if pos is not None:
61 on_cpu[pos].is_complete = True
62 else:
63 pos = _find_job(record,eligible)
64 del eligible[pos]
65
66 # A job is switched away from a CPU. If it has
67 # been marked as complete, remove it from the model.
68 elif record.type_name == 'switch_away':
69 pos = _find_job(record,on_cpu)
70 job = on_cpu[pos]
71 del on_cpu[pos]
72 if job.is_complete == False:
73 eligible.append(job)
74
75 last_time = record.when
76 yield record
77
78###############################################################################
79# Private Functions
80###############################################################################
81
82# Internal representation of a Job
83class Job(object):
84 def __init__(self, record):
85 self.pid = record.pid
86 self.job = record.job
87 self.deadline = record.deadline
88 self.is_complete = False
89 self.inversion_start = None
90 self.inversion_end = None
91 def __str__(self):
92 return "(%d.%d:%d)" % (self.pid,self.job,self.deadline)
93
94# G-EDF errors: the start or end of an inversion
95class Error(object):
96 def __init__(self, job, eligible, on_cpu):
97 self.job = copy.copy(job)
98 self.eligible = copy.copy(eligible)
99 self.on_cpu = copy.copy(on_cpu)
100 self.record_type = 'error'
101 if job.inversion_end is None:
102 self.type_name = 'inversion_start'
103 else:
104 self.type_name = 'inversion_end'
105
106# Returns the position of a Job in a list, or None
107def _find_job(record,list):
108 for i in range(0,len(list)):
109 if list[i].pid == record.pid and list[i].job == record.job:
110 return i
111 return None
112
113# Return records for any inversion_starts and inversion_ends
114def _gedf_check(eligible,on_cpu,when,m):
115
116 # List of error records to be returned
117 errors = []
118
119 # List of all jobs that are not complete
120 all = []
121 for x in on_cpu:
122 if x.is_complete is not True:
123 all.append(x)
124 all += eligible
125
126 # Sort by on_cpu and then by deadline. sort() is guaranteed to be stable.
127 # Thus, this gives us jobs ordered by deadline with preference to those
128 # actually running.
129 all.sort(key=lambda x: 0 if (x in on_cpu) else 1)
130 all.sort(key=lambda x: x.deadline)
131
132 # Check those that actually should be running
133 for x in range(0,min(m,len(all))):
134 job = all[x]
135
136 # It's not running and an inversion_start has not been recorded
137 if job not in on_cpu and job.inversion_start is None:
138 job.inversion_start = when
139 errors.append(Error(job, eligible, on_cpu))
140
141 # It is running and an inversion_start exists (i.e. it it still
142 # marked as being inverted)
143 elif job in on_cpu and job.inversion_start is not None:
144 job.inversion_end = when
145 errors.append(Error(job, eligible, on_cpu))
146 job.inversion_start = None
147 job.inversion_end = None
148
149 # Check those that actually should not be running
150 for x in range(m,len(all)):
151 job = all[x]
152
153 # It actually is running. We don't care.
154
155 # It isn't running, but an inversion_start exists (i.e. it is still
156 # marked as being inverted)
157 if job not in on_cpu and job.inversion_start is not None:
158 job.inversion_end = when
159 errors.append(Error(job, eligible, on_cpu))
160 job.inversion_start = None
161 job.inversion_end = None
162
163 return errors
diff --git a/litmus01.pdf b/litmus01.pdf
new file mode 100644
index 0000000..4fba2fa
--- /dev/null
+++ b/litmus01.pdf
Binary files differ
diff --git a/litmus02.pdf b/litmus02.pdf
new file mode 100644
index 0000000..bcfdcab
--- /dev/null
+++ b/litmus02.pdf
Binary files differ
diff --git a/mac b/mac
new file mode 160000
Subproject cd6e43f37856f7fe6b60e0e2ae45f864a4bd6d6
diff --git a/naive_trace_reader.py b/naive_trace_reader.py
new file mode 100644
index 0000000..0f117b8
--- /dev/null
+++ b/naive_trace_reader.py
@@ -0,0 +1,165 @@
1###############################################################################
2# Description
3###############################################################################
4
5# trace_reader(files) returns an iterator which produces records
6# OUT OF ORDER from the files given. (the param is a list of files.)
7#
8# The non-naive trace_reader has a lot of complex logic which attempts to
9# produce records in order (even though they are being pulled from multiple
10# files which themselves are only approximately ordered). This trace_reader
11# attempts to be as simple as possible and is used in the unit tests to
12# make sure the total number of records read by the normal trace_reader is
13# the same as the number of records read by this one.
14
15###############################################################################
16# Imports
17###############################################################################
18
19import struct
20
21
22###############################################################################
23# Public functions
24###############################################################################
25
26# Generator function returning an iterable over records in a trace file.
27def trace_reader(files):
28 for file in files:
29 f = open(file,'rb')
30 while True:
31 data = f.read(RECORD_HEAD_SIZE)
32 try:
33 type_num = struct.unpack_from('b',data)[0]
34 except struct.error:
35 break #We read to the end of the file
36 type = _get_type(type_num)
37 try:
38 values = struct.unpack_from(StHeader.format +
39 type.format,data)
40 record_dict = dict(zip(type.keys,values))
41 except struct.error:
42 f.close()
43 print "Invalid record detected, stopping."
44 exit()
45
46 # Convert the record_dict into an object
47 record = _dict2obj(record_dict)
48
49 # Give it a type name (easier to work with than type number)
50 record.type_name = _get_type_name(type_num)
51
52 # All records should have a 'record type' field.
53 # e.g. these are 'event's as opposed to 'error's
54 record.record_type = "event"
55
56 # If there is no timestamp, set the time to 0
57 if 'when' not in record.__dict__.keys():
58 record.when = 0
59
60 yield record
61
62###############################################################################
63# Private functions
64###############################################################################
65
66# Convert a dict into an object
67def _dict2obj(d):
68 class Obj: pass
69 o = Obj()
70 for key in d.keys():
71 o.__dict__[key] = d[key]
72 return o
73
74###############################################################################
75# Trace record data types and accessor functions
76###############################################################################
77
78# Each class below represents a type of event record. The format attribute
79# specifies how to decode the binary record and the keys attribute
80# specifies how to name the pieces of information decoded. Note that all
81# event records have a common initial 24 bytes, represented by the StHeader
82# class.
83
84RECORD_HEAD_SIZE = 24
85
86class StHeader(object):
87 format = '<bbhi'
88 formatStr = struct.Struct(format)
89 keys = ['type','cpu','pid','job']
90 message = 'The header.'
91
92class StNameData(object):
93 format = '16s'
94 formatStr = struct.Struct(StHeader.format + format)
95 keys = StHeader.keys + ['name']
96 message = 'The name of the executable of this process.'
97
98class StParamData(object):
99 format = 'IIIc'
100 formatStr = struct.Struct(StHeader.format + format)
101 keys = StHeader.keys + ['wcet','period','phase','partition']
102 message = 'Regular parameters.'
103
104class StReleaseData(object):
105 format = 'QQ'
106 formatStr = struct.Struct(StHeader.format + format)
107 keys = StHeader.keys + ['when','deadline']
108 message = 'A job was/is going to be released.'
109
110#Not yet used by Sched Trace
111class StAssignedData(object):
112 format = 'Qc'
113 formatStr = struct.Struct(StHeader.format + format)
114 keys = StHeader.keys + ['when','target']
115 message = 'A job was assigned to a CPU.'
116
117class StSwitchToData(object):
118 format = 'QI'
119 formatStr = struct.Struct(StHeader.format + format)
120 keys = StHeader.keys + ['when','exec_time']
121 message = 'A process was switched to on a given CPU.'
122
123class StSwitchAwayData(object):
124 format = 'QI'
125 formatStr = struct.Struct(StHeader.format + format)
126 keys = StHeader.keys + ['when','exec_time']
127 message = 'A process was switched away on a given CPU.'
128
129class StCompletionData(object):
130 format = 'Q3xcc'
131 formatStr = struct.Struct(StHeader.format + format)
132 keys = StHeader.keys + ['when','forced?','flags']
133 message = 'A job completed.'
134
135class StBlockData(object):
136 format = 'Q'
137 formatStr = struct.Struct(StHeader.format + format)
138 keys = StHeader.keys + ['when']
139 message = 'A task blocks.'
140
141class StResumeData(object):
142 format = 'Q'
143 formatStr = struct.Struct(StHeader.format + format)
144 keys = StHeader.keys + ['when']
145 message = 'A task resumes.'
146
147class StSysReleaseData(object):
148 format = 'QQ'
149 formatStr = struct.Struct(StHeader.format + format)
150 keys = StHeader.keys + ['when','release']
151 message = 'All tasks have checked in, task system released by user'
152
153# Return the binary data type, given the type_num
154def _get_type(type_num):
155 types = [None,StNameData,StParamData,StReleaseData,StAssignedData,
156 StSwitchToData,StSwitchAwayData,StCompletionData,StBlockData,
157 StResumeData,StSysReleaseData]
158 return types[type_num]
159
160# Return the type name, given the type_num (this is simply a convenience to
161# programmers of other modules)
162def _get_type_name(type_num):
163 type_names = [None,"name","params","release","assign","switch_to",
164 "switch_away","completion","block","resume","sys_release"]
165 return type_names[type_num]
diff --git a/reader/__init__.py b/reader/__init__.py
new file mode 100644
index 0000000..afbfe44
--- /dev/null
+++ b/reader/__init__.py
@@ -0,0 +1,4 @@
1import trace_reader
2import gedf_test
3import sanitizer
4import stats
diff --git a/reader/gedf_test.py b/reader/gedf_test.py
new file mode 100644
index 0000000..e31fb19
--- /dev/null
+++ b/reader/gedf_test.py
@@ -0,0 +1,163 @@
1###############################################################################
2# Description
3###############################################################################
4
5# G-EDF Test
6
7###############################################################################
8# Imports
9###############################################################################
10
11import copy
12
13
14##############################################################################
15# Public Functions
16##############################################################################
17
18def gedf_test(stream):
19
20 # Two lists to model the system: tasks occupying a CPU and tasks eligible
21 # to do so. Also, m = the number of CPUs.
22 eligible = []
23 on_cpu = []
24 m = None
25
26 # Time of the last record we saw. Only run the G-EDF test when the time
27 # is updated.
28 last_time = None
29
30 for record in stream:
31 if record.record_type != "event":
32 if record.record_type == "meta" and record.type_name == "num_cpus":
33 m = record.num_cpus
34 continue
35
36 # Check for inversion starts and ends and yield them.
37 # Only to the check when time has moved forward.
38 # (It is common to have records with simultaneous timestamps.)
39 if last_time is not None and last_time != record.when:
40 errors = _gedf_check(eligible,on_cpu,record.when,m)
41 for error in errors:
42 yield error
43
44 # Add a newly-released Job to the eligible queue
45 if record.type_name == 'release':
46 eligible.append(Job(record))
47
48 # Move a Job from the eligible queue to on_cpu
49 elif record.type_name == 'switch_to':
50 pos = _find_job(record,eligible)
51 job = eligible[pos]
52 del eligible[pos]
53 on_cpu.append(job)
54
55 # Mark a Job as completed.
56 # The only time a Job completes when it is not on a
57 # CPU is when it is the last job of the task.
58 elif record.type_name == 'completion':
59 pos = _find_job(record,on_cpu)
60 if pos is not None:
61 on_cpu[pos].is_complete = True
62 else:
63 pos = _find_job(record,eligible)
64 del eligible[pos]
65
66 # A job is switched away from a CPU. If it has
67 # been marked as complete, remove it from the model.
68 elif record.type_name == 'switch_away':
69 pos = _find_job(record,on_cpu)
70 job = on_cpu[pos]
71 del on_cpu[pos]
72 if job.is_complete == False:
73 eligible.append(job)
74
75 last_time = record.when
76 yield record
77
78###############################################################################
79# Private Functions
80###############################################################################
81
82# Internal representation of a Job
83class Job(object):
84 def __init__(self, record):
85 self.pid = record.pid
86 self.job = record.job
87 self.deadline = record.deadline
88 self.is_complete = False
89 self.inversion_start = None
90 self.inversion_end = None
91 def __str__(self):
92 return "(%d.%d:%d)" % (self.pid,self.job,self.deadline)
93
94# G-EDF errors: the start or end of an inversion
95class Error(object):
96 def __init__(self, job, eligible, on_cpu):
97 self.job = copy.copy(job)
98 self.eligible = copy.copy(eligible)
99 self.on_cpu = copy.copy(on_cpu)
100 self.record_type = 'error'
101 if job.inversion_end is None:
102 self.type_name = 'inversion_start'
103 else:
104 self.type_name = 'inversion_end'
105
106# Returns the position of a Job in a list, or None
107def _find_job(record,list):
108 for i in range(0,len(list)):
109 if list[i].pid == record.pid and list[i].job == record.job:
110 return i
111 return None
112
113# Return records for any inversion_starts and inversion_ends
114def _gedf_check(eligible,on_cpu,when,m):
115
116 # List of error records to be returned
117 errors = []
118
119 # List of all jobs that are not complete
120 all = []
121 for x in on_cpu:
122 if x.is_complete is not True:
123 all.append(x)
124 all += eligible
125
126 # Sort by on_cpu and then by deadline. sort() is guaranteed to be stable.
127 # Thus, this gives us jobs ordered by deadline with preference to those
128 # actually running.
129 all.sort(key=lambda x: 0 if (x in on_cpu) else 1)
130 all.sort(key=lambda x: x.deadline)
131
132 # Check those that actually should be running
133 for x in range(0,min(m,len(all))):
134 job = all[x]
135
136 # It's not running and an inversion_start has not been recorded
137 if job not in on_cpu and job.inversion_start is None:
138 job.inversion_start = when
139 errors.append(Error(job, eligible, on_cpu))
140
141 # It is running and an inversion_start exists (i.e. it it still
142 # marked as being inverted)
143 elif job in on_cpu and job.inversion_start is not None:
144 job.inversion_end = when
145 errors.append(Error(job, eligible, on_cpu))
146 job.inversion_start = None
147 job.inversion_end = None
148
149 # Check those that actually should not be running
150 for x in range(m,len(all)):
151 job = all[x]
152
153 # It actually is running. We don't care.
154
155 # It isn't running, but an inversion_start exists (i.e. it is still
156 # marked as being inverted)
157 if job not in on_cpu and job.inversion_start is not None:
158 job.inversion_end = when
159 errors.append(Error(job, eligible, on_cpu))
160 job.inversion_start = None
161 job.inversion_end = None
162
163 return errors
diff --git a/reader/naive_trace_reader.py b/reader/naive_trace_reader.py
new file mode 100644
index 0000000..0f117b8
--- /dev/null
+++ b/reader/naive_trace_reader.py
@@ -0,0 +1,165 @@
1###############################################################################
2# Description
3###############################################################################
4
5# trace_reader(files) returns an iterator which produces records
6# OUT OF ORDER from the files given. (the param is a list of files.)
7#
8# The non-naive trace_reader has a lot of complex logic which attempts to
9# produce records in order (even though they are being pulled from multiple
10# files which themselves are only approximately ordered). This trace_reader
11# attempts to be as simple as possible and is used in the unit tests to
12# make sure the total number of records read by the normal trace_reader is
13# the same as the number of records read by this one.
14
15###############################################################################
16# Imports
17###############################################################################
18
19import struct
20
21
22###############################################################################
23# Public functions
24###############################################################################
25
26# Generator function returning an iterable over records in a trace file.
27def trace_reader(files):
28 for file in files:
29 f = open(file,'rb')
30 while True:
31 data = f.read(RECORD_HEAD_SIZE)
32 try:
33 type_num = struct.unpack_from('b',data)[0]
34 except struct.error:
35 break #We read to the end of the file
36 type = _get_type(type_num)
37 try:
38 values = struct.unpack_from(StHeader.format +
39 type.format,data)
40 record_dict = dict(zip(type.keys,values))
41 except struct.error:
42 f.close()
43 print "Invalid record detected, stopping."
44 exit()
45
46 # Convert the record_dict into an object
47 record = _dict2obj(record_dict)
48
49 # Give it a type name (easier to work with than type number)
50 record.type_name = _get_type_name(type_num)
51
52 # All records should have a 'record type' field.
53 # e.g. these are 'event's as opposed to 'error's
54 record.record_type = "event"
55
56 # If there is no timestamp, set the time to 0
57 if 'when' not in record.__dict__.keys():
58 record.when = 0
59
60 yield record
61
62###############################################################################
63# Private functions
64###############################################################################
65
66# Convert a dict into an object
67def _dict2obj(d):
68 class Obj: pass
69 o = Obj()
70 for key in d.keys():
71 o.__dict__[key] = d[key]
72 return o
73
74###############################################################################
75# Trace record data types and accessor functions
76###############################################################################
77
78# Each class below represents a type of event record. The format attribute
79# specifies how to decode the binary record and the keys attribute
80# specifies how to name the pieces of information decoded. Note that all
81# event records have a common initial 24 bytes, represented by the StHeader
82# class.
83
# Total size in bytes of one on-disk record: the common header plus the
# type-specific payload, padded out to 24 bytes.
RECORD_HEAD_SIZE = 24

class StHeader(object):
    # '<' selects little-endian with standard sizes and no implicit padding.
    format = '<bbhi'
    formatStr = struct.Struct(format)
    keys = ['type','cpu','pid','job']
    message = 'The header.'

class StNameData(object):
    # 16-byte fixed-width string.
    format = '16s'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['name']
    message = 'The name of the executable of this process.'

class StParamData(object):
    format = 'IIIc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['wcet','period','phase','partition']
    message = 'Regular parameters.'

class StReleaseData(object):
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','deadline']
    message = 'A job was/is going to be released.'

#Not yet used by Sched Trace
class StAssignedData(object):
    format = 'Qc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','target']
    message = 'A job was assigned to a CPU.'

class StSwitchToData(object):
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched to on a given CPU.'

class StSwitchAwayData(object):
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched away on a given CPU.'

class StCompletionData(object):
    # 'Q3xcc': 8-byte timestamp, 3 pad bytes, then two single-byte fields.
    format = 'Q3xcc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','forced?','flags']
    message = 'A job completed.'

class StBlockData(object):
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task blocks.'

class StResumeData(object):
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task resumes.'

class StSysReleaseData(object):
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','release']
    message = 'All tasks have checked in, task system released by user'
152
153# Return the binary data type, given the type_num
154def _get_type(type_num):
155 types = [None,StNameData,StParamData,StReleaseData,StAssignedData,
156 StSwitchToData,StSwitchAwayData,StCompletionData,StBlockData,
157 StResumeData,StSysReleaseData]
158 return types[type_num]
159
160# Return the type name, given the type_num (this is simply a convenience to
161# programmers of other modules)
162def _get_type_name(type_num):
163 type_names = [None,"name","params","release","assign","switch_to",
164 "switch_away","completion","block","resume","sys_release"]
165 return type_names[type_num]
diff --git a/reader/runtests.py b/reader/runtests.py
new file mode 100755
index 0000000..88dddf4
--- /dev/null
+++ b/reader/runtests.py
@@ -0,0 +1,47 @@
1#!/usr/bin/python
2
3###############################################################################
4# Description
5###############################################################################
6
7# Unit Tests
8
9
10###############################################################################
11# Imports
12###############################################################################
13
14import trace_reader
15import naive_trace_reader
16import os
17
18###############################################################################
19# Trace files
20###############################################################################
21
22files = [
23'./sample_traces/st-g6-0.bin',
24'./sample_traces/st-g6-1.bin',
25'./sample_traces/st-g6-2.bin',
26'./sample_traces/st-g6-3.bin',
27]
28
29###############################################################################
30# Tests
31###############################################################################
32
33# Does our fancy trace reader get the same number of files as our naive one?
34# (See naive_trace_reader.py for further explanation)
35def test1():
36 stream = trace_reader.trace_reader(files)
37 num_records = len(list(stream))
38 stream = naive_trace_reader.trace_reader(files)
39 naive_num_records = len(list(stream))
40
41 # We need a +1 here because the fancy reader produces a 'meta' record
42 # indicating the number of CPUs
43 if num_records != naive_num_records + 1:
44 return "[FAIL]"
45 return "[SUCCESS]"
46
47print "Test 1: %s" % (test1())
diff --git a/reader/sample_script.py b/reader/sample_script.py
new file mode 100755
index 0000000..f7e9297
--- /dev/null
+++ b/reader/sample_script.py
@@ -0,0 +1,41 @@
1#!/usr/bin/python
2
3# This is a sample script for using the tool. I would recommend copying
4# this and modifying it to suit your needs for a particular test. Make
5# sure you redirect the output to a file (e.g. ./sample_script.py > output).
6
7# Import the modules we need. You should not need to know about
8# their internals.
9import trace_reader
10import sanitizer
11import gedf_test
12import stats
13import stdout_printer
14
15# Specify your trace files
16g6 = [
17'../sample_traces/st-g6-0.bin',
18'../sample_traces/st-g6-1.bin',
19'../sample_traces/st-g6-2.bin',
20'../sample_traces/st-g6-3.bin',
21]
22
# Example of a custom filter function: it drops from the error stream all
# inversion_end records whose inversion lasted under 4000000 time units,
# so grepping the output for 'Inversion end' shows only the long
# inversions. It is commented out in the pipeline (below) since you
# probably don't want it in general.
def my_filter(record):
    is_short_inversion_end = (
        record.record_type == 'error'
        and record.type_name == 'inversion_end'
        and record.job.inversion_end - record.job.inversion_start < 4000000)
    return not is_short_inversion_end
34
35# Pipeline
36stream = trace_reader.trace_reader(g6) # Read events from traces
37stream = sanitizer.sanitizer(stream) # Remove garbage events
38stream = gedf_test.gedf_test(stream) # Produce G-EDF error records
39stream = stats.stats(stream) # Produce a statistics record
40#stream = filter(my_filter, stream) # Filter some records before printing
41stdout_printer.stdout_printer(stream) # Print records to stdout
diff --git a/reader/sample_script.py~ b/reader/sample_script.py~
new file mode 100644
index 0000000..c3b7843
--- /dev/null
+++ b/reader/sample_script.py~
@@ -0,0 +1,41 @@
1#!/usr/bin/python
2
3# This is a sample script for using the tool. I would recommend copying
4# this and modifying it to suit your needs for a particular test. Make
5# sure you redirect the output to a file (e.g. ./sample_script.py > output).
6
7# Import the modules we need. You should not need to know about
8# their internals.
9import trace_reader
10import sanitizer
11import gedf_test
12import stats
13import stdout_printer
14
15# Specify your trace files
16g6 = [
17'./sample_traces/st-g6-0.bin',
18'./sample_traces/st-g6-1.bin',
19'./sample_traces/st-g6-2.bin',
20'./sample_traces/st-g6-3.bin',
21]
22
# Example of a custom filter function: it drops from the error stream all
# inversion_end records whose inversion lasted under 4000000 time units,
# so grepping the output for 'Inversion end' shows only the long
# inversions. It is commented out in the pipeline (below) since you
# probably don't want it in general.
def my_filter(record):
    is_short_inversion_end = (
        record.record_type == 'error'
        and record.type_name == 'inversion_end'
        and record.job.inversion_end - record.job.inversion_start < 4000000)
    return not is_short_inversion_end
34
35# Pipeline
36stream = trace_reader.trace_reader(g6) # Read events from traces
37stream = sanitizer.sanitizer(stream) # Remove garbage events
38stream = gedf_test.gedf_test(stream) # Produce G-EDF error records
39stream = stats.stats(stream) # Produce a statistics record
40#stream = filter(my_filter, stream) # Filter some records before printing
41stdout_printer.stdout_printer(stream) # Print records to stdout
diff --git a/reader/sanitizer.py b/reader/sanitizer.py
new file mode 100644
index 0000000..79315cc
--- /dev/null
+++ b/reader/sanitizer.py
@@ -0,0 +1,53 @@
1###############################################################################
2# Description
3###############################################################################
4
5# Sanitize input. (There are a number of goofy issues with the sched_trace
6# output.)
7
8###############################################################################
9# Public functions
10###############################################################################
11
def sanitizer(stream):
    """Filter and repair a stream of sched_trace event records.

    The raw sched_trace output has several known glitches; this generator
    drops the garbage records, patches the off-by-one switch_away job
    numbers, and yields everything else unchanged.
    """

    job_2s_released = [] # list of tasks which have released their job 2s
    jobs_switched_to = [] # (pid, job) pairs actually seen in a switch_to

    for record in stream:

        # Ignore records which are not events (e.g. the num_cpus record)
        if record.record_type != 'event':
            yield record
            continue

        # All records with job < 2 are garbage
        if record.job < 2:
            continue

        # Some records with job == 2 are garbage
        if record.job==2:

            # There is a duplicate release of every job 2
            # This will throw away the second one
            if record.type_name == 'release':
                if record.pid in job_2s_released:
                    continue
                else:
                    job_2s_released.append(record.pid)

            # Job 2 has a resume that is garbage
            if record.type_name == 'resume':
                continue

        # By default, the switch_away for a job (after it has completed)
        # is marked as being for job+1, which has never been switched to.
        # We can correct this if we note which jobs really
        # have been switched to.
        # NOTE(review): jobs_switched_to grows for the whole trace; fine
        # for offline analysis, but worth bounding for very large traces.
        if record.type_name == 'switch_to':
            jobs_switched_to.append((record.pid,record.job))
        if record.type_name == 'switch_away':
            if (record.pid,record.job) not in jobs_switched_to:
                record.job -= 1

        yield record
diff --git a/reader/stats.py b/reader/stats.py
new file mode 100644
index 0000000..34a842f
--- /dev/null
+++ b/reader/stats.py
@@ -0,0 +1,39 @@
1###############################################################################
2# Description
3###############################################################################
4# Compute and produce statistics
5
6
7###############################################################################
8# Public Functions
9###############################################################################
10
def stats(stream):
    """Pass records through unchanged, then append one summary record.

    Watches for 'inversion_end' records and tracks the count, minimum,
    maximum and mean of all positive inversion lengths. After the input
    stream is exhausted, yields a final meta/stats record carrying those
    figures (min/max stay -1 when no inversion was seen; mean is 0).
    """
    shortest = -1
    longest = -1
    total = 0
    count = 0
    for record in stream:
        if record.type_name == 'inversion_end':
            length = record.job.inversion_end - record.job.inversion_start
            # Zero-length (or negative) inversions are ignored entirely.
            if length > 0:
                count += 1
                longest = max(longest, length)
                shortest = length if shortest == -1 else min(shortest, length)
                total += length
        yield record
    class Obj(object): pass
    rec = Obj()
    rec.record_type = "meta"
    rec.type_name = "stats"
    rec.num_inversions = count
    rec.min_inversion = shortest
    rec.max_inversion = longest
    rec.avg_inversion = int(total / count) if count > 0 else 0
    yield rec
diff --git a/reader/stdout_printer.py b/reader/stdout_printer.py
new file mode 100644
index 0000000..f8d9a84
--- /dev/null
+++ b/reader/stdout_printer.py
@@ -0,0 +1,69 @@
1###############################################################################
2# Description
3###############################################################################
4
5# Prints records to standard out
6
7###############################################################################
8# Public functions
9###############################################################################
10
def stdout_printer(stream):
    """Dispatch each record to the matching pretty-printer.

    Unrecognized records are silently dropped; a blank line is printed
    after every record that is handled.
    """
    for record in stream:
        if record.record_type == "event":
            _print_event(record)
        elif record.record_type == "meta" and record.type_name == "stats":
            _print_stats(record)
        elif record.record_type == "error" and record.type_name == 'inversion_start':
            _print_inversion_start(record)
        elif record.record_type == "error" and record.type_name == 'inversion_end':
            _print_inversion_end(record)
        else:
            continue
        print ""

###############################################################################
# Private functions
###############################################################################

def _print_event(record):
    # Basic event record: job id, event type and timestamp.
    print "Job: %d.%d" % (record.pid,record.job)
    print "Type: %s" % (record.type_name)
    print "Time: %d" % (record.when)

def _print_inversion_start(record):
    # A priority inversion began: show the affected job plus the jobs
    # that were eligible and on a CPU at that instant.
    print "Type: %s" % ("Inversion start")
    print "Time: %d" % (record.job.inversion_start)
    print "Job: %d.%d" % (record.job.pid,record.job.job)
    print "Deadline: %d" % (record.job.deadline)
    print "Eligible: ",
    for job in record.eligible:
        print str(job) + " ",
    print
    print "On CPU: ",
    for job in record.on_cpu:
        print str(job) + " ",
    print #newline

def _print_inversion_end(record):
    # A priority inversion ended: as above, plus its total duration.
    print "Type: %s" % ("Inversion end")
    print "Time: %d" % (record.job.inversion_end)
    print "Duration: %d" % (
        record.job.inversion_end - record.job.inversion_start)
    print "Job: %d.%d" % (record.job.pid,record.job.job)
    print "Deadline: %d" % (record.job.deadline)
    print "Eligible: ",
    for job in record.eligible:
        print str(job) + " ",
    print
    print "On CPU: ",
    for job in record.on_cpu:
        print str(job) + " ",
    print #newline

def _print_stats(record):
    # Summary record produced by stats.stats().
    print "Inversion statistics"
    print "Num inversions: %d" % (record.num_inversions)
    print "Min inversion: %d" % (record.min_inversion)
    print "Max inversion: %d" % (record.max_inversion)
    print "Avg inversion: %d" % (record.avg_inversion)
diff --git a/reader/test.py b/reader/test.py
new file mode 100755
index 0000000..b260314
--- /dev/null
+++ b/reader/test.py
@@ -0,0 +1,15 @@
1#!/usr/bin/python
2
3import cairo
4
if __name__ == '__main__':
    # Scratch/demo script for pycairo: build a small path on a 500x500
    # ARGB32 surface, stroke it, and save the result as 'test.png'.
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 500, 500)
    ctx = cairo.Context(surface)
    ctx.move_to(10, 10)
    ctx.line_to(-100, 10)   # segment extends past the surface's left edge
    ctx.set_line_width(2)

    # Second subpath; stroke() then draws everything added so far.
    ctx.move_to(10, 10)
    ctx.line_to(20, 10)
    ctx.stroke()
    surface.write_to_png('test.png')
diff --git a/reader/trace_reader.py b/reader/trace_reader.py
new file mode 100644
index 0000000..a4ff964
--- /dev/null
+++ b/reader/trace_reader.py
@@ -0,0 +1,245 @@
1###############################################################################
2# Description
3###############################################################################
4
5# trace_reader(files) returns an iterator which produces records
6# in order from the files given. (the param is a list of files.)
7#
8# Each record is just a Python object. It is guaranteed to have the following
9# attributes:
10# - 'pid': pid of the task
11# - 'job': job number for that task
12# - 'cpu', given by LITMUS
13# - 'when', given by LITMUS as a timestamp. LITMUS does not provide a
14# timestamp for all records. In this case, when is set to 0.
15# - 'type', a numerical value given by LITMUS
16# - 'type_name', a human-readable name defined in this module
17# - 'record_type', set to 'event' by this module (to distinguish from, e.g.,
18# error records produced elsewhere).
19# - Possible additional attributes, depending on the type of record.
20#
21# To find out exactly what attributes are set for each record type, look at
22# the trace-parsing information at the bottom of this file.
23
24###############################################################################
25# Imports
26###############################################################################
27
28import struct
29
30
31###############################################################################
32# Public functions
33###############################################################################
34
35# Generator function returning an iterable over records in a trace file.
def trace_reader(files):
    """Yield records from the given trace files, merged in time order.

    First yields a meta record giving the CPU count (== number of files),
    then merges the per-file streams by their 'when' timestamps using a
    sorted look-ahead buffer of up to 101 records per file. Terminates
    fatally if a record would be produced out of order despite buffering.
    """

    # Yield a record indicating the number of CPUs, used by the G-EDF test
    class Obj: pass
    record = Obj()
    record.record_type = "meta"
    record.type_name = "num_cpus"
    record.num_cpus = len(files)
    yield record

    # Create iterators for each file and a buffer to store records in
    file_iters = [] # file iterators
    file_iter_buff = [] # file iterator buffers
    for file in files:
        file_iter = _get_file_iter(file)
        try:
            first_record = file_iter.next()
        except StopIteration:
            # Empty trace file: nothing to merge from it. (Previously an
            # empty file ended the whole generator prematurely.)
            continue
        file_iters.append(file_iter)
        file_iter_buff.append([first_record])

    # We keep up to 100 extra records in each buffer and keep the buffer
    # sorted, because records may have been logged slightly out of order.
    # This cannot guarantee records are produced in order, but it makes it
    # overwhelmingly probable. The prefill is guarded: the original version
    # let StopIteration escape here, silently truncating the entire stream
    # whenever a file held fewer than 101 records.
    for x in range(0,len(file_iter_buff)):
        try:
            for y in range(0,100):
                file_iter_buff[x].append(file_iters[x].next())
        except StopIteration:
            pass # short file: its buffer simply holds fewer records
    for x in range(0,len(file_iter_buff)):
        file_iter_buff[x] = sorted(file_iter_buff[x],key=lambda rec: rec.when)

    # Remember the time of the last record. This way, we can make sure
    # records truly are produced in monotonically increasing order by time
    # and terminate fatally if they are not.
    last_time = None

    # Keep pulling records as long as we have a buffer
    while len(file_iter_buff) > 0:

        # Select the earliest record from those at the heads of the buffers
        earliest = -1
        buff_to_refill = -1
        for x in range(0,len(file_iter_buff)):
            if earliest==-1 or file_iter_buff[x][0].when < earliest.when:
                earliest = file_iter_buff[x][0]
                buff_to_refill = x

        # Take it out of the buffer
        del file_iter_buff[buff_to_refill][0]

        # Try to append a new record to the buffer (if there is another) and
        # then keep the buffer sorted
        try:
            file_iter_buff[buff_to_refill].append(file_iters[buff_to_refill].next())
            file_iter_buff[buff_to_refill] = sorted(file_iter_buff[buff_to_refill],
                key=lambda rec: rec.when)

        # If there aren't any more records, fine. Unless the buffer is also
        # empty. If that is the case, delete the buffer.
        except StopIteration:
            if len(file_iter_buff[buff_to_refill]) < 1:
                del file_iter_buff[buff_to_refill]
                del file_iters[buff_to_refill]

        # Check for monotonically increasing time
        if last_time is not None and earliest.when < last_time:
            exit("FATAL ERROR: trace_reader.py: out-of-order record produced")
        else:
            last_time = earliest.when

        # Yield the record
        yield earliest
105
106###############################################################################
107# Private functions
108###############################################################################
109
110# Returns an iterator to pull records from a file
111def _get_file_iter(file):
112 f = open(file,'rb')
113 while True:
114 data = f.read(RECORD_HEAD_SIZE)
115 try:
116 type_num = struct.unpack_from('b',data)[0]
117 except struct.error:
118 break #We read to the end of the file
119 type = _get_type(type_num)
120 try:
121 values = struct.unpack_from(StHeader.format +
122 type.format,data)
123 record_dict = dict(zip(type.keys,values))
124 except struct.error:
125 f.close()
126 print "Invalid record detected, stopping."
127 exit()
128
129 # Convert the record_dict into an object
130 record = _dict2obj(record_dict)
131
132 # Give it a type name (easier to work with than type number)
133 record.type_name = _get_type_name(type_num)
134
135 # All records should have a 'record type' field.
136 # e.g. these are 'event's as opposed to 'error's
137 record.record_type = "event"
138
139 # If there is no timestamp, set the time to 0
140 if 'when' not in record.__dict__.keys():
141 record.when = 0
142
143 yield record
144
145# Convert a dict into an object
146def _dict2obj(d):
147 class Obj(object): pass
148 o = Obj()
149 for key in d.keys():
150 o.__dict__[key] = d[key]
151 return o
152
153###############################################################################
154# Trace record data types and accessor functions
155###############################################################################
156
157# Each class below represents a type of event record. The format attribute
158# specifies how to decode the binary record and the keys attribute
159# specifies how to name the pieces of information decoded. Note that all
160# event records have a common initial 24 bytes, represented by the StHeader
161# class.
162
# Total size in bytes of one on-disk record: the common header plus the
# type-specific payload, padded out to 24 bytes.
RECORD_HEAD_SIZE = 24

class StHeader:
    # '<' selects little-endian with standard sizes and no implicit padding.
    format = '<bbhi'
    formatStr = struct.Struct(format)
    keys = ['type','cpu','pid','job']
    message = 'The header.'

class StNameData:
    # 16-byte fixed-width string.
    format = '16s'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['name']
    message = 'The name of the executable of this process.'

class StParamData:
    format = 'IIIc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['wcet','period','phase','partition']
    message = 'Regular parameters.'

class StReleaseData:
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','deadline']
    message = 'A job was/is going to be released.'

#Not yet used by Sched Trace
class StAssignedData:
    format = 'Qc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','target']
    message = 'A job was assigned to a CPU.'

class StSwitchToData:
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched to on a given CPU.'

class StSwitchAwayData:
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched away on a given CPU.'

class StCompletionData:
    # 'Q3xcc': 8-byte timestamp, 3 pad bytes, then two single-byte fields.
    format = 'Q3xcc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','forced?','flags']
    message = 'A job completed.'

class StBlockData:
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task blocks.'

class StResumeData:
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task resumes.'

class StSysReleaseData:
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','release']
    message = 'All tasks have checked in, task system released by user'
232
233# Return the binary data type, given the type_num
234def _get_type(type_num):
235 types = [None,StNameData,StParamData,StReleaseData,StAssignedData,
236 StSwitchToData,StSwitchAwayData,StCompletionData,StBlockData,
237 StResumeData,StSysReleaseData]
238 return types[type_num]
239
240# Return the type name, given the type_num (this is simply a convenience to
241# programmers of other modules)
242def _get_type_name(type_num):
243 type_names = [None,"name","params","release","assign","switch_to",
244 "switch_away","completion","block","resume","sys_release"]
245 return type_names[type_num]
diff --git a/runtests.py b/runtests.py
new file mode 100755
index 0000000..88dddf4
--- /dev/null
+++ b/runtests.py
@@ -0,0 +1,47 @@
1#!/usr/bin/python
2
3###############################################################################
4# Description
5###############################################################################
6
7# Unit Tests
8
9
10###############################################################################
11# Imports
12###############################################################################
13
14import trace_reader
15import naive_trace_reader
16import os
17
18###############################################################################
19# Trace files
20###############################################################################
21
22files = [
23'./sample_traces/st-g6-0.bin',
24'./sample_traces/st-g6-1.bin',
25'./sample_traces/st-g6-2.bin',
26'./sample_traces/st-g6-3.bin',
27]
28
29###############################################################################
30# Tests
31###############################################################################
32
33# Does our fancy trace reader get the same number of files as our naive one?
34# (See naive_trace_reader.py for further explanation)
35def test1():
36 stream = trace_reader.trace_reader(files)
37 num_records = len(list(stream))
38 stream = naive_trace_reader.trace_reader(files)
39 naive_num_records = len(list(stream))
40
41 # We need a +1 here because the fancy reader produces a 'meta' record
42 # indicating the number of CPUs
43 if num_records != naive_num_records + 1:
44 return "[FAIL]"
45 return "[SUCCESS]"
46
47print "Test 1: %s" % (test1())
diff --git a/sample_script.py b/sample_script.py
new file mode 100755
index 0000000..c3b7843
--- /dev/null
+++ b/sample_script.py
@@ -0,0 +1,41 @@
1#!/usr/bin/python
2
3# This is a sample script for using the tool. I would recommend copying
4# this and modifying it to suit your needs for a particular test. Make
5# sure you redirect the output to a file (e.g. ./sample_script.py > output).
6
7# Import the modules we need. You should not need to know about
8# their internals.
9import trace_reader
10import sanitizer
11import gedf_test
12import stats
13import stdout_printer
14
15# Specify your trace files
16g6 = [
17'./sample_traces/st-g6-0.bin',
18'./sample_traces/st-g6-1.bin',
19'./sample_traces/st-g6-2.bin',
20'./sample_traces/st-g6-3.bin',
21]
22
# Example of a custom filter function: it drops from the error stream all
# inversion_end records whose inversion lasted under 4000000 time units,
# so grepping the output for 'Inversion end' shows only the long
# inversions. It is commented out in the pipeline (below) since you
# probably don't want it in general.
def my_filter(record):
    is_short_inversion_end = (
        record.record_type == 'error'
        and record.type_name == 'inversion_end'
        and record.job.inversion_end - record.job.inversion_start < 4000000)
    return not is_short_inversion_end
34
35# Pipeline
36stream = trace_reader.trace_reader(g6) # Read events from traces
37stream = sanitizer.sanitizer(stream) # Remove garbage events
38stream = gedf_test.gedf_test(stream) # Produce G-EDF error records
39stream = stats.stats(stream) # Produce a statistics record
40#stream = filter(my_filter, stream) # Filter some records before printing
41stdout_printer.stdout_printer(stream) # Print records to stdout
diff --git a/sample_traces/.runtests.py.swp b/sample_traces/.runtests.py.swp
new file mode 100644
index 0000000..d9a9acd
--- /dev/null
+++ b/sample_traces/.runtests.py.swp
Binary files differ
diff --git a/sample_traces/st-g6-0.bin b/sample_traces/st-g6-0.bin
new file mode 100644
index 0000000..cebc7fd
--- /dev/null
+++ b/sample_traces/st-g6-0.bin
Binary files differ
diff --git a/sample_traces/st-g6-1.bin b/sample_traces/st-g6-1.bin
new file mode 100644
index 0000000..a51cce9
--- /dev/null
+++ b/sample_traces/st-g6-1.bin
Binary files differ
diff --git a/sample_traces/st-g6-2.bin b/sample_traces/st-g6-2.bin
new file mode 100644
index 0000000..5d76010
--- /dev/null
+++ b/sample_traces/st-g6-2.bin
Binary files differ
diff --git a/sample_traces/st-g6-3.bin b/sample_traces/st-g6-3.bin
new file mode 100644
index 0000000..471fc8d
--- /dev/null
+++ b/sample_traces/st-g6-3.bin
Binary files differ
diff --git a/sanitizer.py b/sanitizer.py
new file mode 100644
index 0000000..79315cc
--- /dev/null
+++ b/sanitizer.py
@@ -0,0 +1,53 @@
1###############################################################################
2# Description
3###############################################################################
4
5# Sanitize input. (There are a number of goofy issues with the sched_trace
6# output.)
7
8###############################################################################
9# Public functions
10###############################################################################
11
def sanitizer(stream):
    """Filter and repair a stream of sched_trace event records.

    The raw sched_trace output has several known glitches; this generator
    drops the garbage records, patches the off-by-one switch_away job
    numbers, and yields everything else unchanged.
    """

    job_2s_released = [] # list of tasks which have released their job 2s
    jobs_switched_to = [] # (pid, job) pairs actually seen in a switch_to

    for record in stream:

        # Ignore records which are not events (e.g. the num_cpus record)
        if record.record_type != 'event':
            yield record
            continue

        # All records with job < 2 are garbage
        if record.job < 2:
            continue

        # Some records with job == 2 are garbage
        if record.job==2:

            # There is a duplicate release of every job 2
            # This will throw away the second one
            if record.type_name == 'release':
                if record.pid in job_2s_released:
                    continue
                else:
                    job_2s_released.append(record.pid)

            # Job 2 has a resume that is garbage
            if record.type_name == 'resume':
                continue

        # By default, the switch_away for a job (after it has completed)
        # is marked as being for job+1, which has never been switched to.
        # We can correct this if we note which jobs really
        # have been switched to.
        # NOTE(review): jobs_switched_to grows for the whole trace; fine
        # for offline analysis, but worth bounding for very large traces.
        if record.type_name == 'switch_to':
            jobs_switched_to.append((record.pid,record.job))
        if record.type_name == 'switch_away':
            if (record.pid,record.job) not in jobs_switched_to:
                record.job -= 1

        yield record
diff --git a/stats.py b/stats.py
new file mode 100644
index 0000000..34a842f
--- /dev/null
+++ b/stats.py
@@ -0,0 +1,39 @@
1###############################################################################
2# Description
3###############################################################################
4# Compute and produce statistics
5
6
7###############################################################################
8# Public Functions
9###############################################################################
10
def stats(stream):
    """Pass-through generator that tallies priority-inversion lengths.

    Every record is forwarded unchanged; 'inversion_end' records contribute
    their (end - start) span to the running tally.  After the input stream
    is exhausted, one extra meta record of type_name 'stats' is yielded
    carrying num/min/max/avg inversion lengths (min and max are -1 when no
    positive-length inversion was seen).
    """
    shortest = -1
    longest = -1
    total = 0
    count = 0
    for record in stream:
        if record.type_name == 'inversion_end':
            span = record.job.inversion_end - record.job.inversion_start
            # Only positive-length inversions are counted.
            if span > 0:
                count += 1
                longest = max(longest, span)
                shortest = span if shortest == -1 else min(shortest, span)
                total += span
        yield record
    class Obj(object): pass
    summary = Obj()
    summary.record_type = "meta"
    summary.type_name = "stats"
    summary.num_inversions = count
    summary.min_inversion = shortest
    summary.max_inversion = longest
    summary.avg_inversion = int(total / count) if count > 0 else 0
    yield summary
diff --git a/stdout_printer.py b/stdout_printer.py
new file mode 100644
index 0000000..f8d9a84
--- /dev/null
+++ b/stdout_printer.py
@@ -0,0 +1,69 @@
1###############################################################################
2# Description
3###############################################################################
4
5# Prints records to standard out
6
7###############################################################################
8# Public functions
9###############################################################################
10
def stdout_printer(stream):
    """Consume a record stream and print each recognized record to stdout.

    Dispatches on (record_type, type_name); unrecognized records are
    silently skipped.  A blank line is printed after each record that was
    actually printed (the 'continue' bypasses it for skipped records).
    """
    for record in stream:
        if record.record_type == "event":
            _print_event(record)
        elif record.record_type == "meta" and record.type_name == "stats":
            _print_stats(record)
        elif record.record_type == "error" and record.type_name == 'inversion_start':
            _print_inversion_start(record)
        elif record.record_type == "error" and record.type_name == 'inversion_end':
            _print_inversion_end(record)
        else:
            continue
        print ""
24
25###############################################################################
26# Private functions
27###############################################################################
28
def _print_event(record):
    """Print a three-line summary of a scheduling event: job id, type, time."""
    print "Job: %d.%d" % (record.pid,record.job)
    print "Type: %s" % (record.type_name)
    print "Time: %d" % (record.when)
33
def _print_inversion_start(record):
    """Print the start of a priority inversion: the inverted job, its
    deadline, then the eligible jobs and the jobs currently on a CPU."""
    print "Type: %s" % ("Inversion start")
    print "Time: %d" % (record.job.inversion_start)
    print "Job: %d.%d" % (record.job.pid,record.job.job)
    print "Deadline: %d" % (record.job.deadline)
    print "Eligible: ",
    # Trailing commas keep the job list on one line; the bare print ends it.
    for job in record.eligible:
        print str(job) + " ",
    print
    print "On CPU: ",
    for job in record.on_cpu:
        print str(job) + " ",
    print #newline
47
def _print_inversion_end(record):
    """Print the end of a priority inversion: like the start record, plus
    the inversion's duration (end - start)."""
    print "Type: %s" % ("Inversion end")
    print "Time: %d" % (record.job.inversion_end)
    print "Duration: %d" % (
        record.job.inversion_end - record.job.inversion_start)
    print "Job: %d.%d" % (record.job.pid,record.job.job)
    print "Deadline: %d" % (record.job.deadline)
    print "Eligible: ",
    # Trailing commas keep the job list on one line; the bare print ends it.
    for job in record.eligible:
        print str(job) + " ",
    print
    print "On CPU: ",
    for job in record.on_cpu:
        print str(job) + " ",
    print #newline
63
def _print_stats(record):
    """Print the summary produced from the 'stats' meta record."""
    print "Inversion statistics"
    print "Num inversions: %d" % (record.num_inversions)
    print "Min inversion: %d" % (record.min_inversion)
    print "Max inversion: %d" % (record.max_inversion)
    print "Avg inversion: %d" % (record.avg_inversion)
diff --git a/trace_reader.py b/trace_reader.py
new file mode 100644
index 0000000..a4ff964
--- /dev/null
+++ b/trace_reader.py
@@ -0,0 +1,245 @@
1###############################################################################
2# Description
3###############################################################################
4
5# trace_reader(files) returns an iterator which produces records
6# in order from the files given. (the param is a list of files.)
7#
8# Each record is just a Python object. It is guaranteed to have the following
9# attributes:
10# - 'pid': pid of the task
11# - 'job': job number for that task
12# - 'cpu', given by LITMUS
13# - 'when', given by LITMUS as a timestamp. LITMUS does not provide a
14# timestamp for all records. In this case, when is set to 0.
15# - 'type', a numerical value given by LITMUS
16# - 'type_name', a human-readable name defined in this module
17# - 'record_type', set to 'event' by this module (to distinguish from, e.g.,
18# error records produced elsewhere).
19# - Possible additional attributes, depending on the type of record.
20#
21# To find out exactly what attributes are set for each record type, look at
22# the trace-parsing information at the bottom of this file.
23
24###############################################################################
25# Imports
26###############################################################################
27
28import struct
29
30
31###############################################################################
32# Public functions
33###############################################################################
34
35# Generator function returning an iterable over records in a trace file.
def trace_reader(files):
    """Generator merging per-CPU sched_trace files into one record stream.

    files: list of trace file names (one per CPU).

    First yields a meta record carrying the CPU count (used by the G-EDF
    test), then yields event records in (checked) nondecreasing 'when'
    order.  Terminates the program fatally if a record would go backwards
    in time.
    """

    # Yield a record indicating the number of CPUs, used by the G-EDF test
    class Obj: pass
    record = Obj()
    record.record_type = "meta"
    record.type_name = "num_cpus"
    record.num_cpus = len(files)
    yield record

    # Create an iterator for each file and a sorted buffer of up to 101
    # records per file.  Records may have been recorded slightly out of
    # order, so keeping ~100 records buffered and sorted makes in-order
    # production overwhelmingly probable (though it cannot guarantee it).
    file_iters = []     # file iterators
    file_iter_buff = [] # file iterator buffers
    for f in files:
        file_iter = _get_file_iter(f)
        buff = []
        # BUGFIX: the prefill used to call .next() unguarded, so any trace
        # with fewer than 101 records raised StopIteration inside this
        # generator, silently terminating the entire stream (a RuntimeError
        # under PEP 479).  Catch it and keep whatever was read.
        try:
            for _ in range(0, 101):
                buff.append(next(file_iter))
        except StopIteration:
            pass
        if buff:  # skip files that contained no records at all
            file_iters.append(file_iter)
            file_iter_buff.append(sorted(buff, key=lambda rec: rec.when))

    # Remember the time of the last record.  This way, we can make sure
    # records truly are produced in monotonically increasing order by time
    # and terminate fatally if they are not.
    last_time = None

    # Keep pulling records as long as at least one buffer remains
    while len(file_iter_buff) > 0:

        # Select the earliest record from those at the heads of the buffers
        earliest = -1
        buff_to_refill = -1
        for x in range(0, len(file_iter_buff)):
            if earliest == -1 or file_iter_buff[x][0].when < earliest.when:
                earliest = file_iter_buff[x][0]
                buff_to_refill = x

        # Take it out of the buffer
        del file_iter_buff[buff_to_refill][0]

        # Try to append a new record to the buffer (if there is another) and
        # then keep the buffer sorted
        try:
            file_iter_buff[buff_to_refill].append(next(file_iters[buff_to_refill]))
            file_iter_buff[buff_to_refill] = sorted(file_iter_buff[buff_to_refill],
                                                    key=lambda rec: rec.when)

        # If there aren't any more records, fine.  Unless the buffer is also
        # empty; in that case, delete the buffer and its iterator.
        except StopIteration:
            if len(file_iter_buff[buff_to_refill]) < 1:
                del file_iter_buff[buff_to_refill]
                del file_iters[buff_to_refill]

        # Check for monotonically increasing time
        if last_time is not None and earliest.when < last_time:
            exit("FATAL ERROR: trace_reader.py: out-of-order record produced")
        else:
            last_time = earliest.when

        # Yield the record
        yield earliest
105
106###############################################################################
107# Private functions
108###############################################################################
109
110# Returns an iterator to pull records from a file
111def _get_file_iter(file):
112 f = open(file,'rb')
113 while True:
114 data = f.read(RECORD_HEAD_SIZE)
115 try:
116 type_num = struct.unpack_from('b',data)[0]
117 except struct.error:
118 break #We read to the end of the file
119 type = _get_type(type_num)
120 try:
121 values = struct.unpack_from(StHeader.format +
122 type.format,data)
123 record_dict = dict(zip(type.keys,values))
124 except struct.error:
125 f.close()
126 print "Invalid record detected, stopping."
127 exit()
128
129 # Convert the record_dict into an object
130 record = _dict2obj(record_dict)
131
132 # Give it a type name (easier to work with than type number)
133 record.type_name = _get_type_name(type_num)
134
135 # All records should have a 'record type' field.
136 # e.g. these are 'event's as opposed to 'error's
137 record.record_type = "event"
138
139 # If there is no timestamp, set the time to 0
140 if 'when' not in record.__dict__.keys():
141 record.when = 0
142
143 yield record
144
145# Convert a dict into an object
146def _dict2obj(d):
147 class Obj(object): pass
148 o = Obj()
149 for key in d.keys():
150 o.__dict__[key] = d[key]
151 return o
152
153###############################################################################
154# Trace record data types and accessor functions
155###############################################################################
156
157# Each class below represents a type of event record. The format attribute
158# specifies how to decode the binary record and the keys attribute
159# specifies how to name the pieces of information decoded. Note that all
160# event records have a common initial 24 bytes, represented by the StHeader
161# class.
162
163RECORD_HEAD_SIZE = 24
164
class StHeader:
    # First 8 bytes of every record: little-endian, unpadded
    # (int8 type, int8 cpu, int16 pid, int32 job).
    format = '<bbhi'
    formatStr = struct.Struct(format)
    keys = ['type','cpu','pid','job']
    message = 'The header.'
170
class StNameData:
    # Payload: 16-byte fixed-width executable name.
    format = '16s'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['name']
    message = 'The name of the executable of this process.'
176
class StParamData:
    # Payload: three 32-bit unsigned ints (wcet, period, phase) plus one
    # byte for the partition.
    format = 'IIIc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['wcet','period','phase','partition']
    message = 'Regular parameters.'
182
class StReleaseData:
    # Payload: two 64-bit unsigned ints (release time, deadline).
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','deadline']
    message = 'A job was/is going to be released.'
188
189#Not yet used by Sched Trace
class StAssignedData:
    # Payload: 64-bit timestamp plus one byte naming the target CPU.
    format = 'Qc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','target']
    message = 'A job was assigned to a CPU.'
195
class StSwitchToData:
    # Payload: 64-bit timestamp plus 32-bit accumulated execution time.
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched to on a given CPU.'
201
class StSwitchAwayData:
    # Payload: 64-bit timestamp plus 32-bit accumulated execution time.
    format = 'QI'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','exec_time']
    message = 'A process was switched away on a given CPU.'
207
class StCompletionData:
    # Payload: 64-bit timestamp, 3 pad bytes (no value produced), then two
    # single bytes ('forced?' and 'flags').  The commented-out line is an
    # older variant that decoded 'forced?' as a bool.
    #format = 'Q3x?c'
    format = 'Q3xcc'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','forced?','flags']
    message = 'A job completed.'
214
class StBlockData:
    # Payload: 64-bit timestamp only.
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task blocks.'
220
class StResumeData:
    # Payload: 64-bit timestamp only.
    format = 'Q'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when']
    message = 'A task resumes.'
226
class StSysReleaseData:
    # Payload: two 64-bit unsigned ints (timestamp, system release time).
    format = 'QQ'
    formatStr = struct.Struct(StHeader.format + format)
    keys = StHeader.keys + ['when','release']
    message = 'All tasks have checked in, task system released by user'
232
233# Return the binary data type, given the type_num
def _get_type(type_num):
    """Return the binary decoder class for a numeric record type.

    Index 0 is unused (there is no record type 0).
    """
    return (None, StNameData, StParamData, StReleaseData, StAssignedData,
            StSwitchToData, StSwitchAwayData, StCompletionData, StBlockData,
            StResumeData, StSysReleaseData)[type_num]
239
240# Return the type name, given the type_num (this is simply a convenience to
241# programmers of other modules)
242def _get_type_name(type_num):
243 type_names = [None,"name","params","release","assign","switch_to",
244 "switch_away","completion","block","resume","sys_release"]
245 return type_names[type_num]
diff --git a/traces/g1.pdf b/traces/g1.pdf
new file mode 100644
index 0000000..c9ee920
--- /dev/null
+++ b/traces/g1.pdf
Binary files differ
diff --git a/traces/g2.pdf b/traces/g2.pdf
new file mode 100644
index 0000000..a807c9f
--- /dev/null
+++ b/traces/g2.pdf
Binary files differ
diff --git a/traces/g3.pdf b/traces/g3.pdf
new file mode 100644
index 0000000..3bb4e78
--- /dev/null
+++ b/traces/g3.pdf
Binary files differ
diff --git a/traces/g4.pdf b/traces/g4.pdf
new file mode 100644
index 0000000..867f413
--- /dev/null
+++ b/traces/g4.pdf
Binary files differ
diff --git a/traces/g5.pdf b/traces/g5.pdf
new file mode 100644
index 0000000..c12ebde
--- /dev/null
+++ b/traces/g5.pdf
Binary files differ
diff --git a/traces/g6.pdf b/traces/g6.pdf
new file mode 100644
index 0000000..3c8d667
--- /dev/null
+++ b/traces/g6.pdf
Binary files differ
diff --git a/traces/heavy.ts b/traces/heavy.ts
new file mode 100644
index 0000000..8df12a0
--- /dev/null
+++ b/traces/heavy.ts
@@ -0,0 +1,9 @@
1#EXE=200
2#PER=300
3EXE=17
4PER=30
5
6for i in `seq 1 6`
7do
8 rt_launch -w $EXE $PER run
9done
diff --git a/traces/heavy2.ts b/traces/heavy2.ts
new file mode 100644
index 0000000..23d5e0f
--- /dev/null
+++ b/traces/heavy2.ts
@@ -0,0 +1,7 @@
1EXE=18
2PER=30
3
4for i in `seq 1 6`
5do
6 rtspin -w $EXE $PER 3 &
7done
diff --git a/traces/light.ts b/traces/light.ts
new file mode 100644
index 0000000..db961b2
--- /dev/null
+++ b/traces/light.ts
@@ -0,0 +1,9 @@
1#EXE=200
2#PER=300
3EXE=2
4PER=11
5
6for i in `seq 1 18`
7do
8 rt_launch -w $EXE $PER run
9done
diff --git a/traces/medium.ts b/traces/medium.ts
new file mode 100644
index 0000000..a800180
--- /dev/null
+++ b/traces/medium.ts
@@ -0,0 +1,7 @@
1EXE=2
2PER=12
3
4for i in `seq 1 6`
5do
6 rt_launch -w $EXE $PER run
7done
diff --git a/traces/mixed.ts b/traces/mixed.ts
new file mode 100644
index 0000000..1258d1a
--- /dev/null
+++ b/traces/mixed.ts
@@ -0,0 +1,15 @@
1#EXE=200
2#PER=300
3EXE=2
4PER=11
5
6for i in `seq 10 20`
7do
8 rtspin -w $EXE $i 2 &
9done
10
11EXE=17
12for i in 31 32 33
13do
14 rtspin -w $EXE $i 2 &
15done
diff --git a/traces/st-g1-0.bin b/traces/st-g1-0.bin
new file mode 100644
index 0000000..a88bd3e
--- /dev/null
+++ b/traces/st-g1-0.bin
Binary files differ
diff --git a/traces/st-g1-1.bin b/traces/st-g1-1.bin
new file mode 100644
index 0000000..8439bb8
--- /dev/null
+++ b/traces/st-g1-1.bin
Binary files differ
diff --git a/traces/st-g1-2.bin b/traces/st-g1-2.bin
new file mode 100644
index 0000000..371bf49
--- /dev/null
+++ b/traces/st-g1-2.bin
Binary files differ
diff --git a/traces/st-g1-3.bin b/traces/st-g1-3.bin
new file mode 100644
index 0000000..fa59c60
--- /dev/null
+++ b/traces/st-g1-3.bin
Binary files differ
diff --git a/traces/st-g3-0.bin b/traces/st-g3-0.bin
new file mode 100644
index 0000000..37fdbb3
--- /dev/null
+++ b/traces/st-g3-0.bin
Binary files differ
diff --git a/traces/st-g3-1.bin b/traces/st-g3-1.bin
new file mode 100644
index 0000000..583e1ec
--- /dev/null
+++ b/traces/st-g3-1.bin
Binary files differ
diff --git a/traces/st-g3-2.bin b/traces/st-g3-2.bin
new file mode 100644
index 0000000..d93c7ee
--- /dev/null
+++ b/traces/st-g3-2.bin
Binary files differ
diff --git a/traces/st-g3-3.bin b/traces/st-g3-3.bin
new file mode 100644
index 0000000..21e0de7
--- /dev/null
+++ b/traces/st-g3-3.bin
Binary files differ
diff --git a/traces/st-g4-0.bin b/traces/st-g4-0.bin
new file mode 100644
index 0000000..c52ed7b
--- /dev/null
+++ b/traces/st-g4-0.bin
Binary files differ
diff --git a/traces/st-g4-1.bin b/traces/st-g4-1.bin
new file mode 100644
index 0000000..77fea28
--- /dev/null
+++ b/traces/st-g4-1.bin
Binary files differ
diff --git a/traces/st-g4-2.bin b/traces/st-g4-2.bin
new file mode 100644
index 0000000..4d5d3cd
--- /dev/null
+++ b/traces/st-g4-2.bin
Binary files differ
diff --git a/traces/st-g4-3.bin b/traces/st-g4-3.bin
new file mode 100644
index 0000000..1cd7ee4
--- /dev/null
+++ b/traces/st-g4-3.bin
Binary files differ
diff --git a/traces/st-g5-0.bin b/traces/st-g5-0.bin
new file mode 100644
index 0000000..c576250
--- /dev/null
+++ b/traces/st-g5-0.bin
Binary files differ
diff --git a/traces/st-g5-1.bin b/traces/st-g5-1.bin
new file mode 100644
index 0000000..90c4960
--- /dev/null
+++ b/traces/st-g5-1.bin
Binary files differ
diff --git a/traces/st-g5-2.bin b/traces/st-g5-2.bin
new file mode 100644
index 0000000..a06aa10
--- /dev/null
+++ b/traces/st-g5-2.bin
Binary files differ
diff --git a/traces/st-g5-3.bin b/traces/st-g5-3.bin
new file mode 100644
index 0000000..561b776
--- /dev/null
+++ b/traces/st-g5-3.bin
Binary files differ
diff --git a/traces/st-g6-0.bin b/traces/st-g6-0.bin
new file mode 100644
index 0000000..cebc7fd
--- /dev/null
+++ b/traces/st-g6-0.bin
Binary files differ
diff --git a/traces/st-g6-1.bin b/traces/st-g6-1.bin
new file mode 100644
index 0000000..a51cce9
--- /dev/null
+++ b/traces/st-g6-1.bin
Binary files differ
diff --git a/traces/st-g6-2.bin b/traces/st-g6-2.bin
new file mode 100644
index 0000000..5d76010
--- /dev/null
+++ b/traces/st-g6-2.bin
Binary files differ
diff --git a/traces/st-g6-3.bin b/traces/st-g6-3.bin
new file mode 100644
index 0000000..471fc8d
--- /dev/null
+++ b/traces/st-g6-3.bin
Binary files differ
diff --git a/traces/st-heavy-0.bin b/traces/st-heavy-0.bin
new file mode 100644
index 0000000..7a86776
--- /dev/null
+++ b/traces/st-heavy-0.bin
Binary files differ
diff --git a/traces/st-heavy-1.bin b/traces/st-heavy-1.bin
new file mode 100644
index 0000000..9b8d976
--- /dev/null
+++ b/traces/st-heavy-1.bin
Binary files differ
diff --git a/traces/st-heavy-2.bin b/traces/st-heavy-2.bin
new file mode 100644
index 0000000..eb59023
--- /dev/null
+++ b/traces/st-heavy-2.bin
Binary files differ
diff --git a/traces/st-heavy-3.bin b/traces/st-heavy-3.bin
new file mode 100644
index 0000000..06f73b5
--- /dev/null
+++ b/traces/st-heavy-3.bin
Binary files differ
diff --git a/traces/st-heavy2-0.bin b/traces/st-heavy2-0.bin
new file mode 100644
index 0000000..4931d70
--- /dev/null
+++ b/traces/st-heavy2-0.bin
Binary files differ
diff --git a/traces/st-heavy2-1.bin b/traces/st-heavy2-1.bin
new file mode 100644
index 0000000..fbe14a0
--- /dev/null
+++ b/traces/st-heavy2-1.bin
Binary files differ
diff --git a/traces/st-heavy2-2.bin b/traces/st-heavy2-2.bin
new file mode 100644
index 0000000..d8edc0a
--- /dev/null
+++ b/traces/st-heavy2-2.bin
Binary files differ
diff --git a/traces/st-heavy2-3.bin b/traces/st-heavy2-3.bin
new file mode 100644
index 0000000..c173014
--- /dev/null
+++ b/traces/st-heavy2-3.bin
Binary files differ
diff --git a/traces/st-heavy3-0.bin b/traces/st-heavy3-0.bin
new file mode 100644
index 0000000..4af21ed
--- /dev/null
+++ b/traces/st-heavy3-0.bin
Binary files differ
diff --git a/traces/st-heavy3-1.bin b/traces/st-heavy3-1.bin
new file mode 100644
index 0000000..b62ffac
--- /dev/null
+++ b/traces/st-heavy3-1.bin
Binary files differ
diff --git a/traces/st-heavy3-2.bin b/traces/st-heavy3-2.bin
new file mode 100644
index 0000000..39a12e6
--- /dev/null
+++ b/traces/st-heavy3-2.bin
Binary files differ
diff --git a/traces/st-heavy3-3.bin b/traces/st-heavy3-3.bin
new file mode 100644
index 0000000..ae4a6a4
--- /dev/null
+++ b/traces/st-heavy3-3.bin
Binary files differ
diff --git a/traces/st-mac-0.bin b/traces/st-mac-0.bin
new file mode 100644
index 0000000..7bac67d
--- /dev/null
+++ b/traces/st-mac-0.bin
Binary files differ
diff --git a/traces/st-mac-1.bin b/traces/st-mac-1.bin
new file mode 100644
index 0000000..a8a2f7e
--- /dev/null
+++ b/traces/st-mac-1.bin
Binary files differ
diff --git a/traces/st-mac-test-0.bin b/traces/st-mac-test-0.bin
new file mode 100644
index 0000000..06e8403
--- /dev/null
+++ b/traces/st-mac-test-0.bin
Binary files differ
diff --git a/traces/st-mac2-0.bin b/traces/st-mac2-0.bin
new file mode 100644
index 0000000..5afe202
--- /dev/null
+++ b/traces/st-mac2-0.bin
Binary files differ
diff --git a/traces/st-s1-0.bin b/traces/st-s1-0.bin
new file mode 100644
index 0000000..5ad50f9
--- /dev/null
+++ b/traces/st-s1-0.bin
Binary files differ
diff --git a/traces/st-s1-1.bin b/traces/st-s1-1.bin
new file mode 100644
index 0000000..7554c44
--- /dev/null
+++ b/traces/st-s1-1.bin
Binary files differ
diff --git a/traces/st-s1-2.bin b/traces/st-s1-2.bin
new file mode 100644
index 0000000..047248c
--- /dev/null
+++ b/traces/st-s1-2.bin
Binary files differ
diff --git a/traces/st-s1-3.bin b/traces/st-s1-3.bin
new file mode 100644
index 0000000..039e1a9
--- /dev/null
+++ b/traces/st-s1-3.bin
Binary files differ
diff --git a/traces/st-s2-0.bin b/traces/st-s2-0.bin
new file mode 100644
index 0000000..639c561
--- /dev/null
+++ b/traces/st-s2-0.bin
Binary files differ
diff --git a/traces/st-s2-1.bin b/traces/st-s2-1.bin
new file mode 100644
index 0000000..36f0254
--- /dev/null
+++ b/traces/st-s2-1.bin
Binary files differ
diff --git a/traces/st-s2-2.bin b/traces/st-s2-2.bin
new file mode 100644
index 0000000..777c1f4
--- /dev/null
+++ b/traces/st-s2-2.bin
Binary files differ
diff --git a/traces/st-s2-3.bin b/traces/st-s2-3.bin
new file mode 100644
index 0000000..917cb86
--- /dev/null
+++ b/traces/st-s2-3.bin
Binary files differ
diff --git a/traces/st-x10-0.bin b/traces/st-x10-0.bin
new file mode 100644
index 0000000..a372e75
--- /dev/null
+++ b/traces/st-x10-0.bin
Binary files differ
diff --git a/traces/st-x10-1.bin b/traces/st-x10-1.bin
new file mode 100644
index 0000000..bcf8d01
--- /dev/null
+++ b/traces/st-x10-1.bin
Binary files differ
diff --git a/traces/st-x10-2.bin b/traces/st-x10-2.bin
new file mode 100644
index 0000000..3149ca8
--- /dev/null
+++ b/traces/st-x10-2.bin
Binary files differ
diff --git a/traces/st-x10-3.bin b/traces/st-x10-3.bin
new file mode 100644
index 0000000..cf899fd
--- /dev/null
+++ b/traces/st-x10-3.bin
Binary files differ
diff --git a/traces/st-x11-0.bin b/traces/st-x11-0.bin
new file mode 100644
index 0000000..8404d49
--- /dev/null
+++ b/traces/st-x11-0.bin
Binary files differ
diff --git a/traces/st-x11-1.bin b/traces/st-x11-1.bin
new file mode 100644
index 0000000..67e9063
--- /dev/null
+++ b/traces/st-x11-1.bin
Binary files differ
diff --git a/traces/st-x11-2.bin b/traces/st-x11-2.bin
new file mode 100644
index 0000000..a16c740
--- /dev/null
+++ b/traces/st-x11-2.bin
Binary files differ
diff --git a/traces/st-x11-3.bin b/traces/st-x11-3.bin
new file mode 100644
index 0000000..cbc4c54
--- /dev/null
+++ b/traces/st-x11-3.bin
Binary files differ
diff --git a/traces/st-x12-0.bin b/traces/st-x12-0.bin
new file mode 100644
index 0000000..66a9995
--- /dev/null
+++ b/traces/st-x12-0.bin
Binary files differ
diff --git a/traces/st-x12-1.bin b/traces/st-x12-1.bin
new file mode 100644
index 0000000..515f953
--- /dev/null
+++ b/traces/st-x12-1.bin
Binary files differ
diff --git a/traces/st-x12-2.bin b/traces/st-x12-2.bin
new file mode 100644
index 0000000..c21fce8
--- /dev/null
+++ b/traces/st-x12-2.bin
Binary files differ
diff --git a/traces/st-x12-3.bin b/traces/st-x12-3.bin
new file mode 100644
index 0000000..bc7be12
--- /dev/null
+++ b/traces/st-x12-3.bin
Binary files differ
diff --git a/traces/st-x13-0.bin b/traces/st-x13-0.bin
new file mode 100644
index 0000000..023e917
--- /dev/null
+++ b/traces/st-x13-0.bin
Binary files differ
diff --git a/traces/st-x13-1.bin b/traces/st-x13-1.bin
new file mode 100644
index 0000000..6072f16
--- /dev/null
+++ b/traces/st-x13-1.bin
Binary files differ
diff --git a/traces/st-x13-2.bin b/traces/st-x13-2.bin
new file mode 100644
index 0000000..c59a852
--- /dev/null
+++ b/traces/st-x13-2.bin
Binary files differ
diff --git a/traces/st-x13-3.bin b/traces/st-x13-3.bin
new file mode 100644
index 0000000..295de3a
--- /dev/null
+++ b/traces/st-x13-3.bin
Binary files differ
diff --git a/traces/st-x14-0.bin b/traces/st-x14-0.bin
new file mode 100644
index 0000000..19808b3
--- /dev/null
+++ b/traces/st-x14-0.bin
Binary files differ
diff --git a/traces/st-x14-1.bin b/traces/st-x14-1.bin
new file mode 100644
index 0000000..34b3e63
--- /dev/null
+++ b/traces/st-x14-1.bin
Binary files differ
diff --git a/traces/st-x14-2.bin b/traces/st-x14-2.bin
new file mode 100644
index 0000000..055851c
--- /dev/null
+++ b/traces/st-x14-2.bin
Binary files differ
diff --git a/traces/st-x14-3.bin b/traces/st-x14-3.bin
new file mode 100644
index 0000000..9ade092
--- /dev/null
+++ b/traces/st-x14-3.bin
Binary files differ
diff --git a/traces/st-x15-0.bin b/traces/st-x15-0.bin
new file mode 100644
index 0000000..ce0b025
--- /dev/null
+++ b/traces/st-x15-0.bin
Binary files differ
diff --git a/traces/st-x15-1.bin b/traces/st-x15-1.bin
new file mode 100644
index 0000000..1b2c23e
--- /dev/null
+++ b/traces/st-x15-1.bin
Binary files differ
diff --git a/traces/st-x15-2.bin b/traces/st-x15-2.bin
new file mode 100644
index 0000000..9da74e5
--- /dev/null
+++ b/traces/st-x15-2.bin
Binary files differ
diff --git a/traces/st-x15-3.bin b/traces/st-x15-3.bin
new file mode 100644
index 0000000..9645cd9
--- /dev/null
+++ b/traces/st-x15-3.bin
Binary files differ
diff --git a/traces/st-x16-0.bin b/traces/st-x16-0.bin
new file mode 100644
index 0000000..175e14a
--- /dev/null
+++ b/traces/st-x16-0.bin
Binary files differ
diff --git a/traces/st-x16-1.bin b/traces/st-x16-1.bin
new file mode 100644
index 0000000..73a1ae5
--- /dev/null
+++ b/traces/st-x16-1.bin
Binary files differ
diff --git a/traces/st-x16-2.bin b/traces/st-x16-2.bin
new file mode 100644
index 0000000..8fcb27e
--- /dev/null
+++ b/traces/st-x16-2.bin
Binary files differ
diff --git a/traces/st-x16-3.bin b/traces/st-x16-3.bin
new file mode 100644
index 0000000..6f1d246
--- /dev/null
+++ b/traces/st-x16-3.bin
Binary files differ
diff --git a/traces/st-x17-0.bin b/traces/st-x17-0.bin
new file mode 100644
index 0000000..c03c249
--- /dev/null
+++ b/traces/st-x17-0.bin
Binary files differ
diff --git a/traces/st-x17-1.bin b/traces/st-x17-1.bin
new file mode 100644
index 0000000..c0ea44d
--- /dev/null
+++ b/traces/st-x17-1.bin
Binary files differ
diff --git a/traces/st-x17-2.bin b/traces/st-x17-2.bin
new file mode 100644
index 0000000..2da0356
--- /dev/null
+++ b/traces/st-x17-2.bin
Binary files differ
diff --git a/traces/st-x17-3.bin b/traces/st-x17-3.bin
new file mode 100644
index 0000000..207d1a7
--- /dev/null
+++ b/traces/st-x17-3.bin
Binary files differ
diff --git a/traces/st-x18-0.bin b/traces/st-x18-0.bin
new file mode 100644
index 0000000..2ed81f1
--- /dev/null
+++ b/traces/st-x18-0.bin
Binary files differ
diff --git a/traces/st-x18-1.bin b/traces/st-x18-1.bin
new file mode 100644
index 0000000..dd791c1
--- /dev/null
+++ b/traces/st-x18-1.bin
Binary files differ
diff --git a/traces/st-x18-2.bin b/traces/st-x18-2.bin
new file mode 100644
index 0000000..f90d9ab
--- /dev/null
+++ b/traces/st-x18-2.bin
Binary files differ
diff --git a/traces/st-x18-3.bin b/traces/st-x18-3.bin
new file mode 100644
index 0000000..944156a
--- /dev/null
+++ b/traces/st-x18-3.bin
Binary files differ
diff --git a/traces/st-x19-0.bin b/traces/st-x19-0.bin
new file mode 100644
index 0000000..1aeea29
--- /dev/null
+++ b/traces/st-x19-0.bin
Binary files differ
diff --git a/traces/st-x19-1.bin b/traces/st-x19-1.bin
new file mode 100644
index 0000000..9e2129a
--- /dev/null
+++ b/traces/st-x19-1.bin
Binary files differ
diff --git a/traces/st-x19-2.bin b/traces/st-x19-2.bin
new file mode 100644
index 0000000..e9cd71b
--- /dev/null
+++ b/traces/st-x19-2.bin
Binary files differ
diff --git a/traces/st-x19-3.bin b/traces/st-x19-3.bin
new file mode 100644
index 0000000..530523d
--- /dev/null
+++ b/traces/st-x19-3.bin
Binary files differ
diff --git a/traces/st-x2-0.bin b/traces/st-x2-0.bin
new file mode 100644
index 0000000..e24dcab
--- /dev/null
+++ b/traces/st-x2-0.bin
Binary files differ
diff --git a/traces/st-x2-1.bin b/traces/st-x2-1.bin
new file mode 100644
index 0000000..4a66180
--- /dev/null
+++ b/traces/st-x2-1.bin
Binary files differ
diff --git a/traces/st-x2-2.bin b/traces/st-x2-2.bin
new file mode 100644
index 0000000..76cde24
--- /dev/null
+++ b/traces/st-x2-2.bin
Binary files differ
diff --git a/traces/st-x2-3.bin b/traces/st-x2-3.bin
new file mode 100644
index 0000000..faa0b90
--- /dev/null
+++ b/traces/st-x2-3.bin
Binary files differ
diff --git a/traces/st-x3-0.bin b/traces/st-x3-0.bin
new file mode 100644
index 0000000..60107c7
--- /dev/null
+++ b/traces/st-x3-0.bin
Binary files differ
diff --git a/traces/st-x3-1.bin b/traces/st-x3-1.bin
new file mode 100644
index 0000000..7a5fe39
--- /dev/null
+++ b/traces/st-x3-1.bin
Binary files differ
diff --git a/traces/st-x3-2.bin b/traces/st-x3-2.bin
new file mode 100644
index 0000000..1d40df6
--- /dev/null
+++ b/traces/st-x3-2.bin
Binary files differ
diff --git a/traces/st-x3-3.bin b/traces/st-x3-3.bin
new file mode 100644
index 0000000..afba2e1
--- /dev/null
+++ b/traces/st-x3-3.bin
Binary files differ
diff --git a/traces/st-x4-0.bin b/traces/st-x4-0.bin
new file mode 100644
index 0000000..260bec7
--- /dev/null
+++ b/traces/st-x4-0.bin
Binary files differ
diff --git a/traces/st-x4-1.bin b/traces/st-x4-1.bin
new file mode 100644
index 0000000..17669ae
--- /dev/null
+++ b/traces/st-x4-1.bin
Binary files differ
diff --git a/traces/st-x4-2.bin b/traces/st-x4-2.bin
new file mode 100644
index 0000000..a02713f
--- /dev/null
+++ b/traces/st-x4-2.bin
Binary files differ
diff --git a/traces/st-x4-3.bin b/traces/st-x4-3.bin
new file mode 100644
index 0000000..db5e880
--- /dev/null
+++ b/traces/st-x4-3.bin
Binary files differ
diff --git a/traces/st-x5-0.bin b/traces/st-x5-0.bin
new file mode 100644
index 0000000..6fb2ac3
--- /dev/null
+++ b/traces/st-x5-0.bin
Binary files differ
diff --git a/traces/st-x5-1.bin b/traces/st-x5-1.bin
new file mode 100644
index 0000000..6466f25
--- /dev/null
+++ b/traces/st-x5-1.bin
Binary files differ
diff --git a/traces/st-x5-2.bin b/traces/st-x5-2.bin
new file mode 100644
index 0000000..93915e9
--- /dev/null
+++ b/traces/st-x5-2.bin
Binary files differ
diff --git a/traces/st-x5-3.bin b/traces/st-x5-3.bin
new file mode 100644
index 0000000..69fb611
--- /dev/null
+++ b/traces/st-x5-3.bin
Binary files differ
diff --git a/traces/st-x6-0.bin b/traces/st-x6-0.bin
new file mode 100644
index 0000000..ed18dae
--- /dev/null
+++ b/traces/st-x6-0.bin
Binary files differ
diff --git a/traces/st-x6-1.bin b/traces/st-x6-1.bin
new file mode 100644
index 0000000..4689df1
--- /dev/null
+++ b/traces/st-x6-1.bin
Binary files differ
diff --git a/traces/st-x6-2.bin b/traces/st-x6-2.bin
new file mode 100644
index 0000000..0977d93
--- /dev/null
+++ b/traces/st-x6-2.bin
Binary files differ
diff --git a/traces/st-x6-3.bin b/traces/st-x6-3.bin
new file mode 100644
index 0000000..3b2cdc9
--- /dev/null
+++ b/traces/st-x6-3.bin
Binary files differ
diff --git a/traces/st-x7-0.bin b/traces/st-x7-0.bin
new file mode 100644
index 0000000..2d11122
--- /dev/null
+++ b/traces/st-x7-0.bin
Binary files differ
diff --git a/traces/st-x7-1.bin b/traces/st-x7-1.bin
new file mode 100644
index 0000000..4e13060
--- /dev/null
+++ b/traces/st-x7-1.bin
Binary files differ
diff --git a/traces/st-x7-2.bin b/traces/st-x7-2.bin
new file mode 100644
index 0000000..07768b8
--- /dev/null
+++ b/traces/st-x7-2.bin
Binary files differ
diff --git a/traces/st-x7-3.bin b/traces/st-x7-3.bin
new file mode 100644
index 0000000..b3e71da
--- /dev/null
+++ b/traces/st-x7-3.bin
Binary files differ
diff --git a/traces/st-x9-0.bin b/traces/st-x9-0.bin
new file mode 100644
index 0000000..be09f48
--- /dev/null
+++ b/traces/st-x9-0.bin
Binary files differ
diff --git a/traces/st-x9-1.bin b/traces/st-x9-1.bin
new file mode 100644
index 0000000..19b14e8
--- /dev/null
+++ b/traces/st-x9-1.bin
Binary files differ
diff --git a/traces/st-x9-2.bin b/traces/st-x9-2.bin
new file mode 100644
index 0000000..dcbb59c
--- /dev/null
+++ b/traces/st-x9-2.bin
Binary files differ
diff --git a/traces/st-x9-3.bin b/traces/st-x9-3.bin
new file mode 100644
index 0000000..1aea9a2
--- /dev/null
+++ b/traces/st-x9-3.bin
Binary files differ
diff --git a/traces/st-xxx-0.bin b/traces/st-xxx-0.bin
new file mode 100644
index 0000000..1b661a5
--- /dev/null
+++ b/traces/st-xxx-0.bin
Binary files differ
diff --git a/traces/st-xxx-1.bin b/traces/st-xxx-1.bin
new file mode 100644
index 0000000..d82d027
--- /dev/null
+++ b/traces/st-xxx-1.bin
Binary files differ
diff --git a/traces/st-xxx-2.bin b/traces/st-xxx-2.bin
new file mode 100644
index 0000000..d3586e9
--- /dev/null
+++ b/traces/st-xxx-2.bin
Binary files differ
diff --git a/traces/st-xxx-3.bin b/traces/st-xxx-3.bin
new file mode 100644
index 0000000..5d9168c
--- /dev/null
+++ b/traces/st-xxx-3.bin
Binary files differ
diff --git a/traces/st0.fg b/traces/st0.fg
new file mode 100644
index 0000000..4008086
--- /dev/null
+++ b/traces/st0.fg
Binary files differ
diff --git a/traces/st1.fg b/traces/st1.fg
new file mode 100644
index 0000000..5d5c5ae
--- /dev/null
+++ b/traces/st1.fg
Binary files differ
diff --git a/traces/stg20.bin b/traces/stg20.bin
new file mode 100644
index 0000000..a12ca3e
--- /dev/null
+++ b/traces/stg20.bin
Binary files differ
diff --git a/traces/stg21.bin b/traces/stg21.bin
new file mode 100644
index 0000000..b0ab240
--- /dev/null
+++ b/traces/stg21.bin
Binary files differ
diff --git a/traces/stg22.bin b/traces/stg22.bin
new file mode 100644
index 0000000..1d1a0a5
--- /dev/null
+++ b/traces/stg22.bin
Binary files differ
diff --git a/traces/stg23.bin b/traces/stg23.bin
new file mode 100644
index 0000000..fed856f
--- /dev/null
+++ b/traces/stg23.bin
Binary files differ
diff --git a/traces/test.pdf b/traces/test.pdf
new file mode 100644
index 0000000..e0c2850
--- /dev/null
+++ b/traces/test.pdf
Binary files differ
diff --git a/traces/x11.pdf b/traces/x11.pdf
new file mode 100644
index 0000000..6c909a9
--- /dev/null
+++ b/traces/x11.pdf
Binary files differ
diff --git a/traces/x12.pdf b/traces/x12.pdf
new file mode 100644
index 0000000..0d16db3
--- /dev/null
+++ b/traces/x12.pdf
Binary files differ
diff --git a/traces/x13.pdf b/traces/x13.pdf
new file mode 100644
index 0000000..fb527ce
--- /dev/null
+++ b/traces/x13.pdf
Binary files differ
diff --git a/traces/x14.pdf b/traces/x14.pdf
new file mode 100644
index 0000000..f22df22
--- /dev/null
+++ b/traces/x14.pdf
Binary files differ
diff --git a/traces/x15.pdf b/traces/x15.pdf
new file mode 100644
index 0000000..a2da624
--- /dev/null
+++ b/traces/x15.pdf
Binary files differ
diff --git a/traces/x16.pdf b/traces/x16.pdf
new file mode 100644
index 0000000..bc19a32
--- /dev/null
+++ b/traces/x16.pdf
Binary files differ
diff --git a/traces/x17.pdf b/traces/x17.pdf
new file mode 100644
index 0000000..b74b90f
--- /dev/null
+++ b/traces/x17.pdf
Binary files differ
diff --git a/traces/x18.pdf b/traces/x18.pdf
new file mode 100644
index 0000000..6d8feb0
--- /dev/null
+++ b/traces/x18.pdf
Binary files differ
diff --git a/traces/x19.pdf b/traces/x19.pdf
new file mode 100644
index 0000000..60c0af0
--- /dev/null
+++ b/traces/x19.pdf
Binary files differ
diff --git a/visualizer.py b/visualizer.py
new file mode 100755
index 0000000..80c2af9
--- /dev/null
+++ b/visualizer.py
@@ -0,0 +1,31 @@
#!/usr/bin/python

"""Runs the visualizer."""

import convert
import reader
import viz

import gtk

path = 'sample_traces/'

# The four per-CPU sample trace files that make up the demo schedule.
trace_list = [path + 'st-g6-%d.bin' % cpu for cpu in range(4)]

if __name__ == '__main__':
    # Build the event pipeline: raw trace records -> sanitized stream ->
    # G-EDF-checked stream, then convert the result into a schedule object.
    stream = reader.trace_reader.trace_reader(trace_list)
    stream = reader.sanitizer.sanitizer(stream)
    stream = reader.gedf_test.gedf_test(stream)
    sched = convert.convert_trace_to_schedule(stream)
    sched.scan(10000000)

    # Render the schedule and hand it to the GTK main window.
    renderer = viz.renderer.Renderer(sched)
    renderer.prepare_task_graph(attrs=viz.format.GraphFormat(time_per_maj=10000000))

    viz.viewer.MainWindow(renderer)
    gtk.main()
diff --git a/viz/__init__.py b/viz/__init__.py
new file mode 100644
index 0000000..ef409f6
--- /dev/null
+++ b/viz/__init__.py
@@ -0,0 +1,10 @@
# Package initializer for the ``viz'' visualization package: pulls in the
# submodules and registers the custom GTK signals emitted by GraphArea.
import viewer
import renderer
import format
import gobject
import gtk

# 'set-scroll-adjustments' carries the horizontal and vertical gtk.Adjustment
# objects a scrolled container hands to the GraphArea widget.
gobject.signal_new('set-scroll-adjustments', viewer.GraphArea, gobject.SIGNAL_RUN_FIRST,
                   None, (gtk.Adjustment, gtk.Adjustment))
# 'update-event-description' carries two Python objects — presumably the
# selected event(s) and associated message for the description pane; confirm
# against the handler in viewer.
gobject.signal_new('update-event-description', viewer.GraphArea, gobject.SIGNAL_RUN_FIRST,
                   None, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT))
diff --git a/viz/draw.py b/viz/draw.py
new file mode 100644
index 0000000..c3ab756
--- /dev/null
+++ b/viz/draw.py
@@ -0,0 +1,1254 @@
1#!/usr/bin/python
2
3import math
4import cairo
5import os
6
7import util
8import schedule
9from format import *
10
def snap(pos):
    """Align an x- or y-coordinate ``pos'' with the pixel grid.

    Integer coordinates in Cairo address the boundaries *between* pixels,
    not the pixels themselves, so a width-1 line drawn on an integer
    coordinate is smeared over two pixels and looks blurry.  Shifting every
    coordinate by half a pixel keeps strokes crisp and mutually aligned."""
    half_pixel = 0.5
    return pos - half_pixel
19
class Surface(object):
    """Abstract wrapper around a drawing target.

    Holds a drawing context, an output filename, and a movable virtual
    ``window'' (virt_x, virt_y, width, height) onto the larger plane being
    drawn — e.g. the visible portion of a scrolled graph.  Subclasses
    supply the concrete backing store via renew()/write_out()."""

    def __init__(self, fname='temp', ctx=None):
        self.virt_x = 0
        self.virt_y = 0
        self.surface = None
        self.width = 0
        self.height = 0
        self.fname = fname
        self.ctx = ctx

    def renew(self, width, height):
        """(Re)allocate the backing surface; subclasses must override."""
        raise NotImplementedError

    def change_ctx(self, ctx):
        """Swap in a new drawing context."""
        self.ctx = ctx

    def get_fname(self):
        """Return the filename associated with this surface."""
        return self.fname

    def write_out(self, fname):
        """Persist the surface to ``fname''; subclasses must override."""
        raise NotImplementedError

    def pan(self, x, y, width, height):
        """Move the view window's upper-left corner to (x, y) and resize it
        to (width, height).  The surface may represent only a window into
        what is being drawn, so panning changes which region is visible."""
        self.virt_x, self.virt_y = x, y
        self.width, self.height = width, height

    def get_real_coor(self, x, y):
        """Map (x, y) from the virtual plane to surface-local coordinates.
        The result may lie outside the surface bounds when the point is
        outside the current view window."""
        return (x - self.virt_x, y - self.virt_y)
61
class SVGSurface(Surface):
    """A Surface backed by a cairo SVG surface written to ``self.fname''."""

    def renew(self, width, height):
        """Allocate a fresh SVG surface of at least (width, height) units."""
        iwidth = int(math.ceil(width))
        iheight = int(math.ceil(height))
        self.surface = cairo.SVGSurface(self.fname, iwidth, iheight)
        self.ctx = cairo.Context(self.surface)

    def write_out(self, fname):
        """Copy the rendered SVG file to ``fname''.

        BUG FIX: the original called os.execl('cp', self.fname, fname),
        which *replaces* the current process image instead of spawning a
        copy (and would fail regardless, since execl expects an absolute
        executable path plus an argv[0]).  Copy within Python instead."""
        import shutil
        shutil.copyfile(self.fname, fname)
71
class ImageSurface(Surface):
    """A Surface backed by an in-memory ARGB32 cairo image surface."""

    def renew(self, width, height):
        """Allocate a fresh image surface of at least (width, height) pixels."""
        iwidth = int(math.ceil(width))
        iheight = int(math.ceil(height))
        self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, iwidth, iheight)
        self.ctx = cairo.Context(self.surface)

    def write_out(self, fname):
        """Write the surface out as a PNG file.

        Raises ValueError if this object does not own a surface (i.e.
        renew() was never called)."""
        if self.surface is None:
            # Fixed duplicated word in the message (was "write to to file").
            raise ValueError('Don\'t own surface, can\'t write to file')

        self.surface.write_to_png(fname)
84
class Pattern(object):
    """A fill style: a single solid color, or repeating horizontal stripes
    cycling through ``color_list'' every ``stripe_size'' pixels."""

    DEF_STRIPE_SIZE = 10

    def __init__(self, color_list, stripe_size=DEF_STRIPE_SIZE):
        self.color_list = color_list
        self.stripe_size = stripe_size

    def render_on_canvas(self, canvas, x, y, width, height, fade=False):
        """Fill the rectangle (x, y, width, height) on ``canvas'' with this
        pattern.  When ``fade'' is set, each region is filled with a fade
        from white into its color instead of a flat fill."""
        if len(self.color_list) == 1:
            if fade:
                canvas.fill_rect_fade(x, y, width, height, (1.0, 1.0, 1.0), \
                                      self.color_list[0])
            else:
                canvas.fill_rect(x, y, width, height, self.color_list[0])

        else:
            # Striped: walk down the rectangle one stripe at a time, cycling
            # through the color list; the last stripe is clipped to ``bottom''.
            n = 0
            bottom = y + height
            while y < bottom:
                # BUG FIX: removed a dead statement that built an unused
                # cairo.LinearGradient via ``math.min(...)'' — math has no
                # ``min'' (the built-in is min()), so it raised
                # AttributeError as soon as a multi-color pattern rendered.
                stripe_height = min(self.stripe_size, bottom - y)
                color = self.color_list[n % len(self.color_list)]
                if fade:
                    canvas.fill_rect_fade(x, y, width, stripe_height, \
                                          (1.0, 1.0, 1.0), color)
                else:
                    canvas.fill_rect(x, y, width, stripe_height, color)

                y += self.stripe_size
                n += 1
114
class Canvas(object):
    """This is a basic class that stores and draws on a Cairo surface,
    using various primitives related to drawing a real-time graph (up-arrows,
    down-arrows, bars, ...).

    This is the lowest-level representation (aside perhaps from the Cairo
    surface itself) of a real-time graph. It allows the user to draw
    primitives at certain locations, but for the most part does not know
    anything about real-time scheduling, just how to draw the basic parts
    that make up a schedule graph. For that, see Graph or its descendants."""

    # Z-order constants for stacking drawing operations.
    BOTTOM_LAYER = 0
    MIDDLE_LAYER = 1
    TOP_LAYER = 2

    LAYERS = (BOTTOM_LAYER, MIDDLE_LAYER, TOP_LAYER)

    # Used to size equilateral arrowheads (height / (width/2) = sqrt(3)).
    SQRT3 = math.sqrt(3.0)

    def __init__(self, width, height, item_clist, bar_plist, surface):
        """Creates a new Canvas of dimensions (width, height). The
        parameters ``item_clist'' and ``bar_plist'' each specify a list
        of colors/patterns to choose from when drawing the items on the
        y-axis or filling in bars, respectively."""

        self.surface = surface

        self.width = int(math.ceil(width))
        self.height = int(math.ceil(height))
        self.item_clist = item_clist
        self.bar_plist = bar_plist

        # Maps an event to the SelectableRegion covering its symbol; used
        # for hit-testing mouse clicks (see get_selected_regions).
        self.selectable_regions = {}

        self.scale = 1.0

    # clears the canvas.
    def clear(self):
        raise NotImplementedError

    def scaled(self, *coors):
        """Multiply each coordinate by the current zoom scale."""
        return [coor * self.scale for coor in coors]

    def draw_rect(self, x, y, width, height, color, thickness):
        """Draws a rectangle somewhere (border only)."""
        raise NotImplementedError

    def fill_rect(self, x, y, width, height, color):
        """Draws a filled rectangle somewhere. ``color'' is a 3-tuple."""
        raise NotImplementedError

    def fill_rect_fade(self, x, y, width, height, lcolor, rcolor):
        """Draws a rectangle somewhere, filled in with the fade."""
        raise NotImplementedError

    def draw_line(self, p0, p1, color, thickness):
        """Draws a line from p0 to p1 with a certain color and thickness."""
        raise NotImplementedError

    def draw_polyline(self, coor_list, color, thickness):
        """Draws a polyline, where coor_list = [(x_0, y_0), (x_1, y_1), ... (x_m, y_m)]
        specifies a polyline from (x_0, y_0) to (x_1, y_1), etc."""
        raise NotImplementedError

    def fill_polyline(self, coor_list, color, thickness):
        """Draws a polyline (probably a polygon) and fills it."""
        raise NotImplementedError

    def draw_label(self, text, x, y, fopts=GraphFormat.DEF_FOPTS_LABEL, halign=AlignMode.LEFT, valign=AlignMode.BOTTOM):
        """Draws text at a position with a certain alignment."""
        raise NotImplementedError

    def draw_label_with_sscripts(self, text, supscript, subscript, x, y, \
                                 textfopts=GraphFormat.DEF_FOPTS_LABEL,
                                 sscriptfopts=GraphFormat.DEF_FOPTS_LABEL_SSCRIPT, \
                                 halign=AlignMode.LEFT, valign=AlignMode.BOTTOM):
        """Draws text at a position with a certain alignment, along with optionally a superscript and
        subscript (which are None if either is not used.)"""
        raise NotImplementedError

    def draw_y_axis(self, x, y, height):
        """Draws the y-axis, starting from the bottom at the point x, y."""
        self.surface.ctx.set_source_rgb(0.0, 0.0, 0.0)

        self.draw_line((x, y), (x, y - height), (0.0, 0.0, 0.0), GraphFormat.AXIS_THICKNESS)

    def draw_y_axis_labels(self, x, y, height, item_list, item_size, fopts=None):
        """Draws the item labels on the y-axis. ``item_list'' is the list
        of strings to print, while item_size gives the vertical amount of
        space that each item shall take up, in pixels."""
        if fopts is None:
            fopts = GraphFormat.DEF_FOPTS_ITEM

        x -= GraphFormat.Y_AXIS_ITEM_GAP
        # Equivalent to y = y - height + item_size / 2.0: start at the
        # vertical center of the topmost item's band.
        y -= height - item_size / 2.0

        # Temporarily recolor ``fopts'' per item, restoring it afterwards
        # since the caller may share the fopts object.
        orig_color = fopts.color
        for ctr, item in enumerate(item_list):
            fopts.color = self.get_item_color(ctr)
            self.draw_label(item, x, y, fopts, AlignMode.RIGHT, AlignMode.CENTER)
            y += item_size

        fopts.color = orig_color

    def draw_x_axis(self, x, y, start_tick, end_tick, maj_sep, min_per_maj):
        """Draws the x-axis, including all the major and minor ticks (but not the labels).
        ``start_tick''/``end_tick'' give the major ticks to draw between, ``maj_sep''
        the number of pixels between major ticks, and ``min_per_maj'' the number of
        minor ticks between two major ticks (including the first major tick)."""
        # Short leading segment before the first measured tick.
        self.draw_line((x, y), (x + GraphFormat.X_AXIS_MEASURE_OFS, y),
                       (0.0, 0.0, 0.0), GraphFormat.AXIS_THICKNESS)
        x += GraphFormat.X_AXIS_MEASURE_OFS + start_tick * maj_sep

        for i in range(start_tick, end_tick + 1):
            # Major tick mark at the current position.
            self.draw_line((x, y), (x, y + GraphFormat.MAJ_TICK_SIZE),
                           (0.0, 0.0, 0.0), GraphFormat.AXIS_THICKNESS)

            if (i < end_tick):
                # Axis segments and minor tick marks up to (but excluding)
                # the next major tick, which the outer loop draws.
                for j in range(0, min_per_maj):
                    self.draw_line((x, y), (x + maj_sep / min_per_maj, y),
                                   (0.0, 0.0, 0.0), GraphFormat.AXIS_THICKNESS)

                    x += 1.0 * maj_sep / min_per_maj
                    if j < min_per_maj - 1:
                        self.draw_line((x, y), (x, y + GraphFormat.MIN_TICK_SIZE),
                                       (0.0, 0.0, 0.0), GraphFormat.AXIS_THICKNESS)

    def draw_x_axis_labels(self, x, y, start_tick, end_tick, maj_sep, min_per_maj, start=0, incr=1, show_min=False, \
                           majfopts=GraphFormat.DEF_FOPTS_MAJ, minfopts=GraphFormat.DEF_FOPTS_MIN):
        """Draws the labels for the x-axis. (x, y) should give the origin.
        how far down you want the text. ``incr'' gives the increment per major
        tick. ``start'' gives the value of the first tick. ``show_min'' specifies
        whether to draw labels at minor ticks."""

        x += GraphFormat.X_AXIS_MEASURE_OFS + start_tick * maj_sep
        y += GraphFormat.X_AXIS_LABEL_GAP + GraphFormat.MAJ_TICK_SIZE

        # Value increment per minor tick.
        minincr = incr / (min_per_maj * 1.0)

        cur = start * 1.0

        for i in range(start_tick, end_tick + 1):
            text = util.format_float(cur, 2)
            self.draw_label(text, x, y, majfopts, AlignMode.CENTER, AlignMode.TOP)

            if (i < end_tick):
                if show_min:
                    # Label every minor tick except the one coinciding with
                    # the next major tick (the outer loop labels that one).
                    for j in range(0, min_per_maj):
                        x += 1.0 * maj_sep / min_per_maj
                        cur += minincr
                        text = util.format_float(cur, 2)

                        if j < min_per_maj - 1:
                            self.draw_label(text, x, y, minfopts, AlignMode.CENTER, AlignMode.TOP)
                else:
                    x += maj_sep
                    cur += incr

    def draw_grid(self, x, y, height, start_tick, end_tick, start_item, end_item, maj_sep, item_size, \
                  min_per_maj=None, show_min=False):
        """Draws a grid dividing along the item boundaries and the major ticks.
        (x, y) gives the origin. ``show_min'' specifies whether to draw vertical grid lines at minor ticks.
        ``start_tick'' and ``end_tick'' give the major ticks to start and end at for drawing vertical lines.
        ``start_item'' and ``end_item'' give the item boundaries to start and end drawing horizontal lines."""
        if start_tick > end_tick or start_item > end_item:
            raise ValueError('start must be less than end')

        line_width = (end_tick - start_tick) * maj_sep
        line_height = (end_item - start_item) * item_size

        origin = (x, y)

        # draw horizontal lines first
        x = origin[0] + GraphFormat.X_AXIS_MEASURE_OFS + start_tick * maj_sep
        y = origin[1] - height + start_item * item_size
        for i in range(start_item, end_item + 1):
            self.draw_line((x, y), (x + line_width, y), GraphFormat.GRID_COLOR, GraphFormat.GRID_THICKNESS)
            y += item_size

        # then vertical lines, either at every minor tick or only at majors
        x = origin[0] + GraphFormat.X_AXIS_MEASURE_OFS + start_tick * maj_sep
        y = origin[1] - height + start_item * item_size

        if show_min:
            for i in range(0, (end_tick - start_tick) * min_per_maj + 1):
                self.draw_line((x, y), (x, y + line_height), GraphFormat.GRID_COLOR, GraphFormat.GRID_THICKNESS)
                x += maj_sep * 1.0 / min_per_maj
        else:
            for i in range(start_tick, end_tick + 1):
                self.draw_line((x, y), (x, y + line_height), GraphFormat.GRID_COLOR, GraphFormat.GRID_THICKNESS)
                x += maj_sep

    def draw_bar(self, x, y, width, height, n, selected):
        """Draws a bar with a certain set of dimensions, using pattern ``n'' from the
        bar pattern list."""

        # Selected bars get a highlight color and a thicker border.
        color, thickness = {False : (GraphFormat.BORDER_COLOR, GraphFormat.BORDER_THICKNESS),
                            True : (GraphFormat.HIGHLIGHT_COLOR, GraphFormat.BORDER_THICKNESS * 2.0)}[selected]

        # use a pattern to be pretty
        self.get_bar_pattern(n).render_on_canvas(self, x, y, width, height, True)
        self.draw_rect(x, y, width, height, color, thickness)

    def add_sel_bar(self, x, y, width, height, event):
        """Registers the bar's rectangle as a clickable region for ``event''."""
        self.add_sel_region(SelectableRegion(x, y, width, height, event))

    def draw_mini_bar(self, x, y, width, height, n, selected):
        """Like the above, except it draws a miniature version. This is usually used for
        secondary purposes (i.e. to show jobs that _should_ have been running at a certain time).

        Of course we don't enforce the fact that this is mini, since the user can pass in width
        and height (but the mini bars do look slightly different: namely the borders are a different
        color)"""

        color, thickness = {False : (GraphFormat.LITE_BORDER_COLOR, GraphFormat.BORDER_THICKNESS),
                            True : (GraphFormat.HIGHLIGHT_COLOR, GraphFormat.BORDER_THICKNESS * 1.5)}[selected]

        self.get_bar_pattern(n).render_on_canvas(self, x, y, width, height, True)
        self.draw_rect(x, y, width, height, color, thickness)

    def add_sel_mini_bar(self, x, y, width, height, event):
        """Registers the mini bar's rectangle as a clickable region for ``event''."""
        self.add_sel_region(SelectableRegion(x, y, width, height, event))

    def draw_completion_marker(self, x, y, height, selected):
        """Draws the symbol that represents a job completion, using a certain height."""

        color = {False : GraphFormat.BORDER_COLOR, True : GraphFormat.HIGHLIGHT_COLOR}[selected]
        # A "T" shape: horizontal cap centered on x, plus a vertical stem.
        self.draw_line((x - height * GraphFormat.TEE_FACTOR / 2.0, y),
                       (x + height * GraphFormat.TEE_FACTOR / 2.0, y),
                       color, GraphFormat.BORDER_THICKNESS)
        self.draw_line((x, y), (x, y + height), color, GraphFormat.BORDER_THICKNESS)

    def add_sel_completion_marker(self, x, y, height, event):
        """Registers the completion marker's bounding box for ``event''."""
        self.add_sel_region(SelectableRegion(x - height * GraphFormat.TEE_FACTOR / 2.0, y,
                            height * GraphFormat.TEE_FACTOR, height, event))

    def draw_release_arrow_big(self, x, y, height, selected):
        """Draws a release arrow of a certain height: (x, y) should give the top
        (northernmost point) of the arrow. The height includes the arrowhead."""
        big_arrowhead_height = GraphFormat.BIG_ARROWHEAD_FACTOR * height

        color = {False : GraphFormat.BORDER_COLOR, True : GraphFormat.HIGHLIGHT_COLOR}[selected]
        # Two passes: fill the arrowhead white first, then stroke its outline.
        colors = [(1.0, 1.0, 1.0), color]
        draw_funcs = [self.__class__.fill_polyline, self.__class__.draw_polyline]
        for i in range(0, 2):
            color = colors[i]
            draw_func = draw_funcs[i]

            draw_func(self, [(x, y), (x - big_arrowhead_height / Canvas.SQRT3, y + big_arrowhead_height), \
                             (x + big_arrowhead_height / Canvas.SQRT3, y + big_arrowhead_height), (x, y)], \
                      color, GraphFormat.BORDER_THICKNESS)

        self.draw_line((x, y + big_arrowhead_height), (x, y + height), color, GraphFormat.BORDER_THICKNESS)

    def add_sel_release_arrow_big(self, x, y, height, event):
        """Registers the big release arrow's bounding box for ``event''."""
        self.add_sel_arrow_big(x, y, height, event)

    def draw_deadline_arrow_big(self, x, y, height, selected):
        """Draws a deadline arrow: x, y should give the top (northernmost
        point) of the arrow. The height includes the arrowhead."""
        big_arrowhead_height = GraphFormat.BIG_ARROWHEAD_FACTOR * height

        color = {False : GraphFormat.BORDER_COLOR, True : GraphFormat.HIGHLIGHT_COLOR}[selected]
        # Two passes: fill the arrowhead white first, then stroke its outline.
        colors = [(1.0, 1.0, 1.0), color]
        draw_funcs = [self.__class__.fill_polyline, self.__class__.draw_polyline]
        for i in range(0, 2):
            color = colors[i]
            draw_func = draw_funcs[i]

            draw_func(self, [(x, y + height), (x - big_arrowhead_height / Canvas.SQRT3, \
                             y + height - big_arrowhead_height), \
                             (x + big_arrowhead_height / Canvas.SQRT3, \
                              y + height - big_arrowhead_height), \
                             (x, y + height)], color, GraphFormat.BORDER_THICKNESS)

        self.draw_line((x, y), (x, y + height - big_arrowhead_height),
                       color, GraphFormat.BORDER_THICKNESS)

    def add_sel_deadline_arrow_big(self, x, y, height, event):
        """Registers the big deadline arrow's bounding box for ``event''."""
        self.add_sel_arrow_big(x, y, height, event)

    def add_sel_arrow_big(self, x, y, height, event):
        # Bounding box is the full arrow height by the arrowhead's width.
        big_arrowhead_height = GraphFormat.BIG_ARROWHEAD_FACTOR * height

        self.add_sel_region(SelectableRegion(x - big_arrowhead_height / Canvas.SQRT3,
                            y, 2.0 * big_arrowhead_height / Canvas.SQRT3, height, event))

    def draw_release_arrow_small(self, x, y, height, selected):
        """Draws a small release arrow (most likely coming off the x-axis, although
        this method doesn't enforce this): x, y should give the top of the arrow"""
        small_arrowhead_height = GraphFormat.SMALL_ARROWHEAD_FACTOR * height

        color = {False : GraphFormat.BORDER_COLOR, True : GraphFormat.HIGHLIGHT_COLOR}[selected]

        # Open (unfilled) arrowhead drawn as two slanted strokes plus a stem.
        self.draw_line((x, y), (x - small_arrowhead_height, y + small_arrowhead_height), \
                       color, GraphFormat.BORDER_THICKNESS)
        self.draw_line((x, y), (x + small_arrowhead_height, y + small_arrowhead_height), \
                       color, GraphFormat.BORDER_THICKNESS)
        self.draw_line((x, y), (x, y + height), color, GraphFormat.BORDER_THICKNESS)

    def add_sel_release_arrow_small(self, x, y, height, event):
        """Registers the small release arrow's bounding box for ``event''."""
        self.add_sel_arrow_small(x, y, height, event)

    def draw_deadline_arrow_small(self, x, y, height, selected):
        """Draws a small deadline arrow (most likely coming off the x-axis, although
        this method doesn't enforce this): x, y should give the top of the arrow"""
        small_arrowhead_height = GraphFormat.SMALL_ARROWHEAD_FACTOR * height

        color = {False : GraphFormat.BORDER_COLOR, True : GraphFormat.HIGHLIGHT_COLOR}[selected]

        # Stem first, then the two slanted strokes forming a downward head.
        self.draw_line((x, y), (x, y + height), color, GraphFormat.BORDER_THICKNESS)
        self.draw_line((x - small_arrowhead_height, y + height - small_arrowhead_height), \
                       (x, y + height), color, GraphFormat.BORDER_THICKNESS)
        self.draw_line((x + small_arrowhead_height, y + height - small_arrowhead_height), \
                       (x, y + height), color, GraphFormat.BORDER_THICKNESS)

    def add_sel_deadline_arrow_small(self, x, y, height, event):
        """Registers the small deadline arrow's bounding box for ``event''."""
        self.add_sel_arrow_small(x, y, height, event)

    def add_sel_arrow_small(self, x, y, height, event):
        # Bounding box is the full arrow height by the arrowhead's width.
        small_arrowhead_height = GraphFormat.SMALL_ARROWHEAD_FACTOR * height

        self.add_sel_region(SelectableRegion(x - small_arrowhead_height, y,
                            small_arrowhead_height * 2.0, height, event))

    def draw_suspend_triangle(self, x, y, height, selected):
        """Draws the triangle that marks a suspension. (x, y) gives the topmost (northernmost) point
        of the symbol."""

        color = {False : GraphFormat.BORDER_COLOR, True : GraphFormat.HIGHLIGHT_COLOR}[selected]
        # Filled black, then outlined; the triangle points rightward.
        colors = [(0.0, 0.0, 0.0), color]

        draw_funcs = [self.__class__.fill_polyline, self.__class__.draw_polyline]
        for i in range(0, 2):
            color = colors[i]
            draw_func = draw_funcs[i]
            draw_func(self, [(x, y), (x + height / 2.0, y + height / 2.0), (x, y + height), (x, y)], \
                      color, GraphFormat.BORDER_THICKNESS)

    def add_sel_suspend_triangle(self, x, y, height, event):
        """Registers the suspension triangle's bounding box for ``event''."""
        self.add_sel_region(SelectableRegion(x, y, height / 2.0, height, event))

    def draw_resume_triangle(self, x, y, height, selected):
        """Draws the triangle that marks a resumption. (x, y) gives the topmost (northernmost) point
        of the symbol."""

        color = {False : GraphFormat.BORDER_COLOR, True : GraphFormat.HIGHLIGHT_COLOR}[selected]
        # Filled white, then outlined; the triangle points leftward.
        colors = [(1.0, 1.0, 1.0), color]

        draw_funcs = [self.__class__.fill_polyline, self.__class__.draw_polyline]
        for i in range(0, 2):
            color = colors[i]
            draw_func = draw_funcs[i]
            draw_func(self, [(x, y), (x - height / 2.0, y + height / 2.0), (x, y + height), (x, y)], \
                      color, GraphFormat.BORDER_THICKNESS)

    def add_sel_resume_triangle(self, x, y, height, event):
        """Registers the resumption triangle's bounding box for ``event''."""
        self.add_sel_region(SelectableRegion(x - height / 2.0, y, height / 2.0, height, event))

    def clear_selectable_regions(self):
        """Forgets all previously registered clickable regions."""
        self.selectable_regions = {}

    def add_sel_region(self, region):
        """Registers a SelectableRegion, keyed by its event."""
        self.selectable_regions[region.get_event()] = region

    def get_selected_regions(self, real_x, real_y):
        """Returns {event : region} for every registered region containing the
        given point, which is expressed in surface-window coordinates and is
        first translated back into the virtual plane."""
        x = real_x + self.surface.virt_x
        y = real_y + self.surface.virt_y

        selected = {}
        for event in self.selectable_regions:
            region = self.selectable_regions[event]
            if region.contains(x, y):
                selected[event] = region

        return selected

    def whiteout(self):
        """Overwrites the surface completely white, but technically doesn't delete anything"""
        self.fill_rect(self.surface.virt_x, self.surface.virt_y, self.surface.width,
                       self.surface.height, (1.0, 1.0, 1.0))

    def get_item_color(self, n):
        """Gets the nth color in the item color list, which are the colors used to draw the items
        on the y-axis. Note that there are conceptually infinitely
        many patterns because the patterns repeat -- that is, we just mod out by the size of the pattern
        list when indexing."""
        return self.item_clist[n % len(self.item_clist)]

    def get_bar_pattern(self, n):
        """Gets the nth pattern in the bar pattern list, which is a list of surfaces that are used to
        fill in the bars. Note that there are conceptually infinitely
        many patterns because the patterns repeat -- that is, we just mod out by the size of the pattern
        list when indexing."""
        return self.bar_plist[n % len(self.bar_plist)]
509
class CairoCanvas(Canvas):
    """This is a basic class that stores and draws on a Cairo surface,
    using various primitives related to drawing a real-time graph (up-arrows,
    down-arrows, bars, ...).

    This is the lowest-level non-abstract representation
    (aside perhaps from the Cairo surface itself) of a real-time graph.
    It allows the user to draw primitives at certain locations, but for
    the most part does not know anything about real-time scheduling,
    just how to draw the basic parts that make up a schedule graph.
    For that, see Graph or its descendants."""
    # Cleanup: removed a commented-out __init__/clear pair that merely
    # delegated to Canvas and was dead code.

    def get_surface(self):
        """Gets the Surface that we are drawing on in its current state."""
        return self.surface

    def _rect_common(self, x, y, width, height, color, thickness):
        """Builds the rectangle path and sets line width/color; the caller
        then strokes (outline) or fills it."""
        x, y, width, height = self.scaled(x, y, width, height)
        x, y = self.surface.get_real_coor(x, y)
        self.surface.ctx.rectangle(snap(x), snap(y), width, height)
        self.surface.ctx.set_line_width(thickness * self.scale)
        self.surface.ctx.set_source_rgb(color[0], color[1], color[2])

    def draw_rect(self, x, y, width, height, color, thickness):
        """Draws a rectangle outline with the given color and thickness."""
        self._rect_common(x, y, width, height, color, thickness)
        self.surface.ctx.stroke()

    def fill_rect(self, x, y, width, height, color):
        """Draws a filled rectangle. ``color'' is an RGB 3-tuple."""
        self._rect_common(x, y, width, height, color, 1)
        self.surface.ctx.fill()

    def fill_rect_fade(self, x, y, width, height, lcolor, rcolor):
        """Draws a rectangle somewhere, filled in with a linear fade from
        ``lcolor'' at the top-left to ``rcolor'' at the bottom-right."""
        x, y, width, height = self.scaled(x, y, width, height)
        x, y = self.surface.get_real_coor(x, y)

        linear = cairo.LinearGradient(snap(x), snap(y), \
                                      snap(x + width), snap(y + height))
        linear.add_color_stop_rgb(0.0, lcolor[0], lcolor[1], lcolor[2])
        linear.add_color_stop_rgb(1.0, rcolor[0], rcolor[1], rcolor[2])
        self.surface.ctx.set_source(linear)
        self.surface.ctx.rectangle(snap(x), snap(y), width, height)
        self.surface.ctx.fill()

    def draw_line(self, p0, p1, color, thickness):
        """Draws a line from p0 to p1 with a certain color and thickness."""
        p0 = self.scaled(p0[0], p0[1])
        p0 = self.surface.get_real_coor(p0[0], p0[1])
        p1 = self.scaled(p1[0], p1[1])
        p1 = self.surface.get_real_coor(p1[0], p1[1])
        self.surface.ctx.move_to(p0[0], p0[1])
        self.surface.ctx.line_to(p1[0], p1[1])
        self.surface.ctx.set_source_rgb(color[0], color[1], color[2])
        self.surface.ctx.set_line_width(thickness * self.scale)
        self.surface.ctx.stroke()

    def _polyline_common(self, coor_list, color, thickness):
        """Builds the polyline path and sets width/color; caller strokes or fills.

        NOTE(review): unlike draw_line/_rect_common, neither the coordinates
        nor the thickness are multiplied by self.scale here — harmless while
        scale == 1.0 (its initial value), but confirm before enabling zoom."""
        real_coor_list = [self.surface.get_real_coor(coor[0], coor[1]) for coor in coor_list]
        self.surface.ctx.move_to(real_coor_list[0][0], real_coor_list[0][1])
        for coor in real_coor_list[1:]:
            self.surface.ctx.line_to(coor[0], coor[1])

        self.surface.ctx.set_line_width(thickness)
        self.surface.ctx.set_source_rgb(color[0], color[1], color[2])

    def draw_polyline(self, coor_list, color, thickness):
        """Strokes the polyline through ``coor_list''."""
        self._polyline_common(coor_list, color, thickness)
        self.surface.ctx.stroke()

    def fill_polyline(self, coor_list, color, thickness):
        """Fills the (presumably closed) polyline through ``coor_list''."""
        self._polyline_common(coor_list, color, thickness)
        self.surface.ctx.fill()

    def _draw_label_common(self, text, x, y, fopts, x_bearing_factor, \
                           f_descent_factor, width_factor, f_height_factor):
        """Helper function for drawing a label with some alignment. Instead of taking in an alignment,
        it takes in the scale factor for the font extent parameters, which give the raw data of how much to adjust
        the x and y parameters. Only should be used internally."""
        x, y = self.scaled(x, y)
        x, y = self.surface.get_real_coor(x, y)

        self.surface.ctx.set_source_rgb(0.0, 0.0, 0.0)

        self.surface.ctx.select_font_face(fopts.name, cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
        self.surface.ctx.set_font_size(fopts.size)

        fe = self.surface.ctx.font_extents()
        f_ascent, f_descent, f_height = fe[:3]

        te = self.surface.ctx.text_extents(text)
        x_bearing, y_bearing, width, height = te[:4]

        # Shift the anchor point by the requested fractions of the text and
        # font extents to realize the desired alignment.
        actual_x = x - x_bearing * x_bearing_factor - width * width_factor
        actual_y = y - f_descent * f_descent_factor + f_height * f_height_factor

        self.surface.ctx.set_source_rgb(fopts.color[0], fopts.color[1], fopts.color[2])

        self.surface.ctx.move_to(snap(actual_x), snap(actual_y))

        self.surface.ctx.show_text(text)

    def draw_label(self, text, x, y, fopts=GraphFormat.DEF_FOPTS_LABEL, halign=AlignMode.LEFT, valign=AlignMode.BOTTOM):
        """Draws a label with the given parameters, with the given horizontal and vertical justification.

        Raises ValueError if ``halign'' or ``valign'' is not a recognized
        AlignMode value."""
        x_bearing_factor, f_descent_factor, width_factor, f_height_factor = 0.0, 0.0, 0.0, 0.0
        # Map each alignment mode to its (bearing, width) / (descent, height)
        # scale factors for _draw_label_common.
        halign_factors = {AlignMode.LEFT : (0.0, 0.0), AlignMode.CENTER : (1.0, 0.5), AlignMode.RIGHT : (1.0, 1.0)}
        if halign not in halign_factors:
            raise ValueError('Invalid alignment value')
        x_bearing_factor, width_factor = halign_factors[halign]

        valign_factors = {AlignMode.BOTTOM : (0.0, 0.0), AlignMode.CENTER : (1.0, 0.5), AlignMode.TOP : (1.0, 1.0)}
        if valign not in valign_factors:
            raise ValueError('Invalid alignment value')
        f_descent_factor, f_height_factor = valign_factors[valign]

        self._draw_label_common(text, x, y, fopts, x_bearing_factor, \
                                f_descent_factor, width_factor, f_height_factor)

    def draw_label_with_sscripts(self, text, supscript, subscript, x, y, \
                                 textfopts=GraphFormat.DEF_FOPTS_LABEL, sscriptfopts=GraphFormat.DEF_FOPTS_LABEL_SSCRIPT, \
                                 halign=AlignMode.LEFT, valign=AlignMode.BOTTOM):
        """Draws a label, but also optionally allows a superscript and subscript to be rendered
        (pass None to skip either)."""
        self.draw_label(text, x, y, textfopts, halign, valign)

        # Re-select the main font so the extents below describe ``text''.
        self.surface.ctx.set_source_rgb(0.0, 0.0, 0.0)
        self.surface.ctx.select_font_face(textfopts.name, cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
        self.surface.ctx.set_font_size(textfopts.size)
        te = self.surface.ctx.text_extents(text)
        fe = self.surface.ctx.font_extents()
        if supscript is not None:
            # Place just past the main text, raised a quarter font height.
            f_height = fe[2]
            x_advance = te[4]
            xtmp = x + x_advance
            ytmp = y
            ytmp = y - f_height / 4.0
            self.draw_label(supscript, xtmp, ytmp, sscriptfopts, halign, valign)
        if subscript is not None:
            # Place just past the main text, lowered a quarter font height.
            f_height = fe[2]
            x_advance = te[4]
            xtmp = x + x_advance
            ytmp = y
            ytmp = y + f_height / 4.0
            self.draw_label(subscript, xtmp, ytmp, sscriptfopts, halign, valign)
666
class SelectableRegion(object):
    """An axis-aligned rectangle on the graph, tied to the schedule event
    whose symbol it covers; used for hit-testing mouse clicks."""

    def __init__(self, x, y, width, height, event):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.event = event

    def get_dimensions(self):
        """Return (x, y, width, height)."""
        return (self.x, self.y, self.width, self.height)

    def get_event(self):
        """Return the event this region is attached to."""
        return self.event

    def contains(self, x, y):
        """True iff (x, y) lies within the rectangle, borders included."""
        within_x = self.x <= x <= self.x + self.width
        within_y = self.y <= y <= self.y + self.height
        return within_x and within_y
684
class Graph(object):
    """Abstract base class for a rendered schedule graph.

    Converts between schedule coordinates (time, y-axis item number) and
    canvas pixel coordinates, and drives a Canvas object to do the actual
    drawing.  Subclasses (TaskGraph, CpuGraph) decide what the y-axis items
    mean and implement the draw_* / add_sel_* event primitives.
    """

    # Default bar fill patterns and item label colors, cycled by the canvas.
    DEF_BAR_PLIST = [Pattern([(0.0, 0.9, 0.9)]), Pattern([(0.9, 0.3, 0.0)]), Pattern([(0.9, 0.7, 0.0)]),
                     Pattern([(0.0, 0.0, 0.8)]), Pattern([(0.0, 0.2, 0.9)]), Pattern([(0.0, 0.6, 0.6)]),
                     Pattern([(0.75, 0.75, 0.75)])]
    DEF_ITEM_CLIST = [(0.3, 0.0, 0.0), (0.0, 0.3, 0.0), (0.0, 0.0, 0.3), (0.3, 0.3, 0.0), (0.0, 0.3, 0.3),
                      (0.3, 0.0, 0.3)]

    def __init__(self, CanvasType, surface, start_time, end_time, y_item_list, attrs=GraphFormat(),
                 item_clist=DEF_ITEM_CLIST, bar_plist=DEF_BAR_PLIST):
        """Set up the coordinate system and the canvas.

        CanvasType  -- class of the canvas to instantiate (e.g. CairoCanvas).
        surface     -- drawing surface handed to the canvas.
        start_time, end_time -- time interval the graph spans.
        y_item_list -- labels for the y-axis items (tasks or CPUs).
        attrs       -- GraphFormat bundle of appearance parameters.
                       NOTE(review): the default instance is shared across
                       calls (evaluated once); fine while nobody mutates it.
        item_clist, bar_plist -- color/pattern palettes for the canvas.

        Raises ValueError if the time interval is reversed.
        """
        if start_time > end_time:
            raise ValueError("Litmus is not a time machine")

        self.attrs = attrs
        self.start_time = start_time
        self.end_time = end_time
        self.y_item_list = y_item_list
        # Number of major ticks needed to span the whole time range.
        self.num_maj = int(math.ceil((self.end_time - self.start_time) * 1.0 / self.attrs.time_per_maj)) + 1

        width = self.num_maj * self.attrs.maj_sep + GraphFormat.X_AXIS_MEASURE_OFS + GraphFormat.WIDTH_PAD
        height = (len(self.y_item_list) + 1) * self.attrs.y_item_size + GraphFormat.HEIGHT_PAD

        # We need to stretch the width in order to fit the y-axis labels. To do this we need
        # the extents information, but we haven't set up a surface yet, so we just use a
        # temporary one.
        extra_width = 0.0
        dummy_surface = surface.__class__()
        dummy_surface.renew(10, 10)

        dummy_surface.ctx.select_font_face(self.attrs.item_fopts.name, cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
        dummy_surface.ctx.set_font_size(self.attrs.item_fopts.size)
        # Source color does not affect text_extents; set once rather than per item.
        dummy_surface.ctx.set_source_rgb(0.0, 0.0, 0.0)
        for item in self.y_item_list:
            # Track the widest label: the whole plot shifts right by this much.
            te = dummy_surface.ctx.text_extents(item)
            cur_width = te[2]
            if cur_width > extra_width:
                extra_width = cur_width

        width += extra_width
        # Origin is the intersection of the axes (bottom-left of the plot area).
        self.origin = (extra_width + GraphFormat.WIDTH_PAD / 2.0, height - GraphFormat.HEIGHT_PAD / 2.0)

        self.width = width
        self.height = height

        self.canvas = CanvasType(width, height, item_clist, bar_plist, surface)

    def get_selected_regions(self, real_x, real_y):
        """Proxy: return the canvas's selectable regions hit by (real_x, real_y)."""
        return self.canvas.get_selected_regions(real_x, real_y)

    def get_width(self):
        """Total width of the graph in pixels, including padding and labels."""
        return self.width

    def get_height(self):
        """Total height of the graph in pixels, including padding."""
        return self.height

    def get_attrs(self):
        """The GraphFormat this graph was configured with."""
        return self.attrs

    def update_view(self, x, y, width, height, ctx):
        """Proxy into the surface's pan."""
        self.canvas.surface.pan(x, y, width, height)
        self.canvas.surface.change_ctx(ctx)

    def _get_time_xpos(self, time):
        """get x so that x is at instant ``time'' on the graph"""
        return self.origin[0] + GraphFormat.X_AXIS_MEASURE_OFS + 1.0 * (time - self.start_time) / self.attrs.time_per_maj * self.attrs.maj_sep

    def _get_item_ypos(self, item_no):
        """get y so that y is where the top of a bar would be in item #n's area"""
        return self.origin[1] - self._get_y_axis_height() + self.attrs.y_item_size * (item_no + 0.5 - GraphFormat.BAR_SIZE_FACTOR / 2.0)

    def _get_bar_width(self, start_time, end_time):
        """Pixel width of a bar spanning [start_time, end_time]."""
        return 1.0 * (end_time - start_time) / self.attrs.time_per_maj * self.attrs.maj_sep

    def _get_bar_height(self):
        """Pixel height of a full-size bar."""
        return self.attrs.y_item_size * GraphFormat.BAR_SIZE_FACTOR

    def _get_mini_bar_height(self):
        """Pixel height of a mini bar (drawn above the main bar)."""
        return self.attrs.y_item_size * GraphFormat.MINI_BAR_SIZE_FACTOR

    def _get_mini_bar_ofs(self):
        """Vertical offset from a bar's top to its mini bar's top."""
        return self.attrs.y_item_size * (GraphFormat.MINI_BAR_SIZE_FACTOR + GraphFormat.BAR_MINI_BAR_GAP_FACTOR)

    def _get_y_axis_height(self):
        """Pixel length of the y-axis (one slot per item plus one spare)."""
        return (len(self.y_item_list) + 1) * self.attrs.y_item_size

    def _get_bottom_tick(self, time):
        """Index of the major tick at or before ``time``."""
        return int(math.floor((time - self.start_time) / self.attrs.time_per_maj))

    def _get_top_tick(self, time):
        """Index of the major tick at or after ``time``."""
        return int(math.ceil((time - self.start_time) / self.attrs.time_per_maj))

    def get_surface(self):
        """Gets the underlying surface."""
        return self.canvas.get_surface()

    def xcoor_to_time(self, x):
        """Inverse of _get_time_xpos: map a graph x-coordinate to an instant."""
        return (x - self.origin[0] - GraphFormat.X_AXIS_MEASURE_OFS) / self.attrs.maj_sep \
               * self.attrs.time_per_maj + self.start_time

    def ycoor_to_item_no(self, y):
        """Inverse of _get_item_ypos (approximately): y-coordinate -> item index."""
        return int((y - self.origin[1] + self._get_y_axis_height()) // self.attrs.y_item_size)

    def get_offset_params(self):
        """Visible window of the surface as (start_time, end_time, start_item, end_item)."""
        start_time = self.xcoor_to_time(self.canvas.surface.virt_x)
        end_time = self.xcoor_to_time(self.canvas.surface.virt_x + self.canvas.surface.width)

        start_item = self.ycoor_to_item_no(self.canvas.surface.virt_y)
        # +1 so the partially-visible bottom item is included.
        end_item = 1 + self.ycoor_to_item_no(self.canvas.surface.virt_y + self.canvas.surface.height)

        return (start_time, end_time, start_item, end_item)

    def draw_skeleton(self, start_time, end_time, start_item, end_item):
        """Draw the grid and both labelled axes for the given window."""
        self.draw_grid_at_time(start_time, end_time, start_item, end_item)
        self.draw_x_axis_with_labels_at_time(start_time, end_time)
        self.draw_y_axis_with_labels()

    def render_surface(self, sched, list_type):
        """Render the visible window of schedule ``sched``; subclass responsibility."""
        raise NotImplementedError

    def render_all(self, schedule):
        """Render the entire schedule; subclass responsibility."""
        raise NotImplementedError

    def render_events(self, event_list):
        """Render ``event_list`` layer by layer (so e.g. arrows overlay bars)."""
        for layer in Canvas.LAYERS:
            prev_events = {}
            for event in event_list:
                event.render(self, layer, prev_events)

    def draw_axes(self, x_axis_label, y_axis_label):
        """Draws and labels the axes according to the parameters that we were initialized
        with."""
        # BUG FIX: was len(self.attrs.y_item_list) - GraphFormat has no
        # y_item_list attribute; the item list lives on the graph itself.
        self.draw_grid_at_time(self.start_time, self.end_time, 0, len(self.y_item_list) - 1)

        # BUG FIX: draw_x_axis takes an explicit (start_tick, end_tick) range,
        # as in draw_x_axis_with_labels_at_time; the old call dropped one
        # argument.  TODO(review): confirm against the canvas's signature.
        self.canvas.draw_x_axis(self.origin[0], self.origin[1], 0, self.num_maj - 1,
                                self.attrs.maj_sep, self.attrs.min_per_maj)
        self.canvas.draw_y_axis(self.origin[0], self.origin[1], self._get_y_axis_height())
        self.canvas.draw_x_axis_labels(self.origin[0], self.origin[1], 0, self.num_maj - 1,
                                       self.attrs.maj_sep, self.attrs.min_per_maj, self.start_time,
                                       self.attrs.time_per_maj, self.attrs.show_min,
                                       self.attrs.majfopts, self.attrs.minfopts)
        self.canvas.draw_y_axis_labels(self.origin[0], self.origin[1], self._get_y_axis_height(),
                                       self.y_item_list, self.attrs.y_item_size, self.attrs.item_fopts)

    def draw_grid_at_time(self, start_time, end_time, start_item, end_item):
        """Draws the grid, but only in a certain time and item range."""
        # Clamp the requested window to what actually exists on the graph.
        start_tick = max(0, self._get_bottom_tick(start_time))
        end_tick = min(self.num_maj - 1, self._get_top_tick(end_time))

        start_item = max(0, start_item)
        end_item = min(len(self.y_item_list), end_item)

        self.canvas.draw_grid(self.origin[0], self.origin[1], self._get_y_axis_height(),
                              start_tick, end_tick, start_item, end_item, self.attrs.maj_sep,
                              self.attrs.y_item_size, self.attrs.min_per_maj, True)

    def draw_x_axis_with_labels_at_time(self, start_time, end_time):
        """Draw the portion of the x-axis (with tick labels) covering the window."""
        start_tick = max(0, self._get_bottom_tick(start_time))
        end_tick = min(self.num_maj - 1, self._get_top_tick(end_time))

        self.canvas.draw_x_axis(self.origin[0], self.origin[1], start_tick, end_tick,
                                self.attrs.maj_sep, self.attrs.min_per_maj)
        self.canvas.draw_x_axis_labels(self.origin[0], self.origin[1], start_tick,
                                       end_tick, self.attrs.maj_sep, self.attrs.min_per_maj,
                                       self.start_time + start_tick * self.attrs.time_per_maj,
                                       self.attrs.time_per_maj, False)

    def draw_y_axis_with_labels(self):
        """Draw the full y-axis and one label per item."""
        self.canvas.draw_y_axis(self.origin[0], self.origin[1], self._get_y_axis_height())
        self.canvas.draw_y_axis_labels(self.origin[0], self.origin[1], self._get_y_axis_height(),
                                       self.y_item_list, self.attrs.y_item_size)

    def draw_suspend_triangle_at_time(self, time, task_no, cpu_no, selected=False):
        """Draws a suspension symbol for a certain task at an instant in time."""
        raise NotImplementedError

    def add_sel_suspend_triangle_at_time(self, time, task_no, cpu_no, event):
        """Same as above, except instead of drawing adds a selectable region at
        a certain time."""
        raise NotImplementedError

    def draw_resume_triangle_at_time(self, time, task_no, cpu_no, selected=False):
        """Draws a resumption symbol for a certain task at an instant in time."""
        raise NotImplementedError

    def add_sel_resume_triangle_at_time(self, time, task_no, cpu_no, event):
        """Same as above, except instead of drawing adds a selectable region at
        a certain time."""
        raise NotImplementedError

    def draw_completion_marker_at_time(self, time, task_no, cpu_no, selected=False):
        """Draws a completion marker for a certain task at an instant in time."""
        raise NotImplementedError

    def add_sel_completion_marker_at_time(self, time, task_no, cpu_no, event):
        """Same as above, except instead of drawing adds a selectable region at
        a certain time."""
        raise NotImplementedError

    def draw_release_arrow_at_time(self, time, task_no, job_no, selected=False):
        """Draws a release arrow at a certain time for some task and job"""
        raise NotImplementedError

    def add_sel_release_arrow_at_time(self, time, task_no, event):
        """Same as above, except instead of drawing adds a selectable region at
        a certain time."""
        raise NotImplementedError

    def draw_deadline_arrow_at_time(self, time, task_no, job_no, selected=False):
        """Draws a deadline arrow at a certain time for some task and job"""
        raise NotImplementedError

    def add_sel_deadline_arrow_at_time(self, time, task_no, event):
        """Same as above, except instead of drawing adds a selectable region at
        a certain time."""
        raise NotImplementedError

    def draw_bar_at_time(self, start_time, end_time, task_no, cpu_no, job_no=None):
        """Draws a bar over a certain time period for some task, optionally labelling it."""
        raise NotImplementedError

    def add_sel_bar_at_time(self, start_time, end_time, task_no, cpu_no, event):
        """Same as above, except instead of drawing adds a selectable region at
        a certain time."""
        raise NotImplementedError

    def draw_mini_bar_at_time(self, start_time, end_time, task_no, cpu_no, job_no=None):
        """Draws a mini bar over a certain time period for some task, optionally labelling it."""
        raise NotImplementedError

    def add_sel_mini_bar_at_time(self, start_time, end_time, task_no, cpu_no, event):
        """Same as above, except instead of drawing adds a selectable region at
        a certain time."""
        raise NotImplementedError
920
class TaskGraph(Graph):
    """Graph whose y-axis items are tasks; the CPU is conveyed by bar color."""

    def render_surface(self, sched):
        """Redraw the currently visible window of schedule ``sched`` from scratch."""
        self.canvas.whiteout()
        self.canvas.clear_selectable_regions()

        start_time, end_time, start_item, end_item = self.get_offset_params()

        self.draw_skeleton(start_time, end_time, start_item, end_item)

        for layer in Canvas.LAYERS:
            prev_events = {}
            for event in sched.get_time_slot_array().iter_over_period(
                    start_time, end_time, start_item, end_item,
                    schedule.TimeSlotArray.TASK_LIST, schedule.EVENT_LIST):
                event.render(self, layer, prev_events)

    def draw_suspend_triangle_at_time(self, time, task_no, cpu_no, selected=False):
        """Draws a suspension symbol for a certain task at an instant in time."""
        height = self._get_bar_height() * GraphFormat.BLOCK_TRIANGLE_FACTOR
        x = self._get_time_xpos(time)
        # Center the triangle vertically within the task's bar area.
        y = self._get_item_ypos(task_no) + self._get_bar_height() / 2.0 - height / 2.0
        self.canvas.draw_suspend_triangle(x, y, height, selected)

    def add_sel_suspend_triangle_at_time(self, time, task_no, cpu_no, event):
        """Selectable-region counterpart of draw_suspend_triangle_at_time."""
        height = self._get_bar_height() * GraphFormat.BLOCK_TRIANGLE_FACTOR
        x = self._get_time_xpos(time)
        y = self._get_item_ypos(task_no) + self._get_bar_height() / 2.0 - height / 2.0

        self.canvas.add_sel_suspend_triangle(x, y, height, event)

    def draw_resume_triangle_at_time(self, time, task_no, cpu_no, selected=False):
        """Draws a resumption symbol for a certain task at an instant in time."""
        height = self._get_bar_height() * GraphFormat.BLOCK_TRIANGLE_FACTOR
        x = self._get_time_xpos(time)
        y = self._get_item_ypos(task_no) + self._get_bar_height() / 2.0 - height / 2.0

        self.canvas.draw_resume_triangle(x, y, height, selected)

    def add_sel_resume_triangle_at_time(self, time, task_no, cpu_no, event):
        """Selectable-region counterpart of draw_resume_triangle_at_time."""
        height = self._get_bar_height() * GraphFormat.BLOCK_TRIANGLE_FACTOR
        x = self._get_time_xpos(time)
        y = self._get_item_ypos(task_no) + self._get_bar_height() / 2.0 - height / 2.0

        self.canvas.add_sel_resume_triangle(x, y, height, event)

    def draw_completion_marker_at_time(self, time, task_no, cpu_no, selected=False):
        """Draws a completion marker for a certain task at an instant in time."""
        height = self._get_bar_height() * GraphFormat.COMPLETION_MARKER_FACTOR
        x = self._get_time_xpos(time)
        # Anchor the marker so its base sits on the bar's top edge.
        y = self._get_item_ypos(task_no) + self._get_bar_height() - height

        self.canvas.draw_completion_marker(x, y, height, selected)

    def add_sel_completion_marker_at_time(self, time, task_no, cpu_no, event):
        """Selectable-region counterpart of draw_completion_marker_at_time."""
        height = self._get_bar_height() * GraphFormat.COMPLETION_MARKER_FACTOR

        x = self._get_time_xpos(time)
        y = self._get_item_ypos(task_no) + self._get_bar_height() - height

        self.canvas.add_sel_completion_marker(x, y, height, event)

    def draw_release_arrow_at_time(self, time, task_no, job_no=None, selected=False):
        """Draws a (big) release arrow at a certain time for some task and job."""
        height = self._get_bar_height() * GraphFormat.BIG_ARROW_FACTOR

        x = self._get_time_xpos(time)
        y = self._get_item_ypos(task_no) + self._get_bar_height() - height

        self.canvas.draw_release_arrow_big(x, y, height, selected)

    def add_sel_release_arrow_at_time(self, time, task_no, event):
        """Selectable-region counterpart of draw_release_arrow_at_time."""
        height = self._get_bar_height() * GraphFormat.BIG_ARROW_FACTOR

        x = self._get_time_xpos(time)
        y = self._get_item_ypos(task_no) + self._get_bar_height() - height

        self.canvas.add_sel_release_arrow_big(x, y, height, event)

    def draw_deadline_arrow_at_time(self, time, task_no, job_no=None, selected=False):
        """Draws a (big) deadline arrow at a certain time for some task and job."""
        height = self._get_bar_height() * GraphFormat.BIG_ARROW_FACTOR

        x = self._get_time_xpos(time)
        y = self._get_item_ypos(task_no)

        self.canvas.draw_deadline_arrow_big(x, y, height, selected)

    def add_sel_deadline_arrow_at_time(self, time, task_no, event):
        """Selectable-region counterpart of draw_deadline_arrow_at_time."""
        height = self._get_bar_height() * GraphFormat.BIG_ARROW_FACTOR

        x = self._get_time_xpos(time)
        y = self._get_item_ypos(task_no)

        self.canvas.add_sel_deadline_arrow_big(x, y, height, event)

    def draw_bar_at_time(self, start_time, end_time, task_no, cpu_no, job_no=None, selected=False):
        """Draws a bar over [start_time, end_time] for a task, colored by CPU,
        optionally labelling it with T_task^job."""
        if start_time > end_time:
            raise ValueError("Litmus is not a time machine")

        x = self._get_time_xpos(start_time)
        y = self._get_item_ypos(task_no)
        width = self._get_bar_width(start_time, end_time)
        height = self._get_bar_height()

        self.canvas.draw_bar(x, y, width, height, cpu_no, selected)

        # if a job number is specified, we want to draw a superscript and subscript
        # for the task and job number, respectively
        if job_no is not None:
            x += GraphFormat.BAR_LABEL_OFS
            y += self.attrs.y_item_size * GraphFormat.BAR_SIZE_FACTOR / 2.0
            self.canvas.draw_label_with_sscripts('T', str(task_no), str(job_no), x, y,
                                                 GraphFormat.DEF_FOPTS_BAR, GraphFormat.DEF_FOPTS_BAR_SSCRIPT,
                                                 AlignMode.LEFT, AlignMode.CENTER)

    def add_sel_bar_at_time(self, start_time, end_time, task_no, cpu_no, event):
        """Selectable-region counterpart of draw_bar_at_time."""
        if start_time > end_time:
            raise ValueError("Litmus is not a time machine")

        x = self._get_time_xpos(start_time)
        y = self._get_item_ypos(task_no)
        width = self._get_bar_width(start_time, end_time)
        height = self._get_bar_height()

        self.canvas.add_sel_bar(x, y, width, height, event)

    def draw_mini_bar_at_time(self, start_time, end_time, task_no, cpu_no, job_no=None, selected=False):
        """Draws a mini bar (e.g. for preempted jobs) above the task's bar area."""
        if start_time > end_time:
            raise ValueError("Litmus is not a time machine")

        x = self._get_time_xpos(start_time)
        y = self._get_item_ypos(task_no) - self._get_mini_bar_ofs()
        width = self._get_bar_width(start_time, end_time)
        height = self._get_mini_bar_height()

        self.canvas.draw_mini_bar(x, y, width, height, cpu_no, selected)

        if job_no is not None:
            x += GraphFormat.MINI_BAR_LABEL_OFS
            y += self.attrs.y_item_size * GraphFormat.MINI_BAR_SIZE_FACTOR / 2.0
            self.canvas.draw_label_with_sscripts('T', str(task_no), str(job_no), x, y,
                                                 GraphFormat.DEF_FOPTS_MINI_BAR,
                                                 GraphFormat.DEF_FOPTS_MINI_BAR_SSCRIPT,
                                                 AlignMode.LEFT, AlignMode.CENTER)

    def add_sel_mini_bar_at_time(self, start_time, end_time, task_no, cpu_no, event):
        """Selectable-region counterpart of draw_mini_bar_at_time."""
        # CONSISTENCY FIX: validate the interval like every other bar method.
        if start_time > end_time:
            raise ValueError("Litmus is not a time machine")

        x = self._get_time_xpos(start_time)
        y = self._get_item_ypos(task_no) - self._get_mini_bar_ofs()
        width = self._get_bar_width(start_time, end_time)
        height = self._get_mini_bar_height()

        self.canvas.add_sel_mini_bar(x, y, width, height, event)
1064
class CpuGraph(Graph):
    """Graph whose y-axis items are CPUs; the task is conveyed by bar color."""

    def render_surface(self, sched):
        """Redraw the currently visible window of schedule ``sched`` from scratch."""
        self.canvas.whiteout()
        self.canvas.clear_selectable_regions()

        start_time, end_time, start_item, end_item = self.get_offset_params()

        self.draw_skeleton(start_time, end_time, start_item, end_item)

        # Releases/deadlines are drawn on the x-axis, not per CPU, so exclude
        # them from the per-item pass and render them separately below.
        event_list = dict(schedule.EVENT_LIST)

        del event_list[schedule.ReleaseEvent]
        del event_list[schedule.DeadlineEvent]

        for layer in Canvas.LAYERS:
            prev_events = {}
            # BUG FIX: previously passed schedule.EVENT_LIST here, which
            # ignored the filtering above and rendered releases/deadlines
            # in this pass as well.
            for event in sched.get_time_slot_array().iter_over_period(
                    start_time, end_time, start_item, end_item,
                    schedule.TimeSlotArray.CPU_LIST, event_list):
                event.render(self, layer, prev_events)

        if end_item >= len(self.y_item_list):
            # we are far down enough that we should render the releases and deadlines
            for layer in Canvas.LAYERS:
                prev_events = {}
                for event in sched.get_time_slot_array().iter_over_period(
                        start_time, end_time, start_item, end_item,
                        schedule.TimeSlotArray.CPU_LIST,
                        (schedule.ReleaseEvent, schedule.DeadlineEvent)):
                    event.render(self, layer, prev_events)

    def render(self, schedule, start_time=None, end_time=None):
        """Render events over [start_time, end_time], defaulting to the whole range.

        NOTE(review): this references self.start, self.end, self.time_slots and
        self.get_time_slot, none of which CpuGraph or Graph defines -- it looks
        copied from the schedule-side container; confirm before relying on it.
        """
        # BUG FIX: apply the None defaults *before* comparing; the old order
        # compared None against a number.
        if start_time is None:
            start_time = self.start
        if end_time is None:
            end_time = self.end
        if end_time < start_time:
            raise ValueError('start must be less than end')

        start_slot = self.get_time_slot(start_time)
        end_slot = min(len(self.time_slots), self.get_time_slot(end_time) + 1)

        for layer in Canvas.LAYERS:
            prev_events = {}
            for i in range(start_slot, end_slot):
                for event in self.time_slots[i]:
                    # BUG FIX: was event.render(graph, ...); ``graph`` is undefined.
                    event.render(self, layer, prev_events)

    def draw_suspend_triangle_at_time(self, time, task_no, cpu_no, selected=False):
        """Draws a suspension symbol on a CPU's row at an instant in time."""
        height = self._get_bar_height() * GraphFormat.BLOCK_TRIANGLE_FACTOR
        x = self._get_time_xpos(time)
        # Center the triangle vertically within the CPU's bar area.
        y = self._get_item_ypos(cpu_no) + self._get_bar_height() / 2.0 - height / 2.0
        self.canvas.draw_suspend_triangle(x, y, height, selected)

    def add_sel_suspend_triangle_at_time(self, time, task_no, cpu_no, event):
        """Selectable-region counterpart of draw_suspend_triangle_at_time."""
        height = self._get_bar_height() * GraphFormat.BLOCK_TRIANGLE_FACTOR
        x = self._get_time_xpos(time)
        y = self._get_item_ypos(cpu_no) + self._get_bar_height() / 2.0 - height / 2.0

        self.canvas.add_sel_suspend_triangle(x, y, height, event)

    def draw_resume_triangle_at_time(self, time, task_no, cpu_no, selected=False):
        """Draws a resumption symbol on a CPU's row at an instant in time."""
        height = self._get_bar_height() * GraphFormat.BLOCK_TRIANGLE_FACTOR
        x = self._get_time_xpos(time)
        y = self._get_item_ypos(cpu_no) + self._get_bar_height() / 2.0 - height / 2.0

        self.canvas.draw_resume_triangle(x, y, height, selected)

    def add_sel_resume_triangle_at_time(self, time, task_no, cpu_no, event):
        """Selectable-region counterpart of draw_resume_triangle_at_time."""
        height = self._get_bar_height() * GraphFormat.BLOCK_TRIANGLE_FACTOR
        x = self._get_time_xpos(time)
        y = self._get_item_ypos(cpu_no) + self._get_bar_height() / 2.0 - height / 2.0

        # BUG FIX: copy-paste error called add_sel_suspend_triangle here,
        # so resume symbols never got their own selectable region.
        self.canvas.add_sel_resume_triangle(x, y, height, event)

    def draw_completion_marker_at_time(self, time, task_no, cpu_no, selected=False):
        """Draws a completion marker on a CPU's row at an instant in time."""
        height = self._get_bar_height() * GraphFormat.COMPLETION_MARKER_FACTOR
        x = self._get_time_xpos(time)
        y = self._get_item_ypos(cpu_no) + self._get_bar_height() - height

        self.canvas.draw_completion_marker(x, y, height, selected)

    def add_sel_completion_marker_at_time(self, time, task_no, cpu_no, event):
        """Selectable-region counterpart of draw_completion_marker_at_time."""
        # BUG FIX: used BLOCK_TRIANGLE_FACTOR and the suspend-triangle y
        # position, so the selectable region did not match the drawn marker;
        # mirror draw_completion_marker_at_time instead.
        height = self._get_bar_height() * GraphFormat.COMPLETION_MARKER_FACTOR
        x = self._get_time_xpos(time)
        y = self._get_item_ypos(cpu_no) + self._get_bar_height() - height

        self.canvas.add_sel_completion_marker(x, y, height, event)

    def draw_release_arrow_at_time(self, time, task_no, job_no=None, selected=False):
        """Draws a (small) release arrow on the x-axis, optionally labelled T_task^job."""
        if job_no is None and task_no is not None:
            raise ValueError("Must specify a job number along with the task number")

        height = self._get_bar_height() * GraphFormat.SMALL_ARROW_FACTOR

        x = self._get_time_xpos(time)
        y = self.origin[1] - height

        self.canvas.draw_release_arrow_small(x, y, height, selected)

        if task_no is not None:
            y -= GraphFormat.ARROW_LABEL_OFS
            self.canvas.draw_label_with_sscripts('T', str(task_no), str(job_no), x, y,
                                                 GraphFormat.DEF_FOPTS_ARROW,
                                                 GraphFormat.DEF_FOPTS_ARROW_SSCRIPT,
                                                 AlignMode.CENTER, AlignMode.BOTTOM)

    def add_sel_release_arrow_at_time(self, time, task_no, event):
        """Selectable-region counterpart of draw_release_arrow_at_time."""
        # CONSISTENCY FIX: was BIG_ARROW_FACTOR, but the drawn arrow (and the
        # canvas call below) use the small variant, so the hit region was too tall.
        height = self._get_bar_height() * GraphFormat.SMALL_ARROW_FACTOR

        x = self._get_time_xpos(time)
        y = self.origin[1] - height

        self.canvas.add_sel_release_arrow_small(x, y, height, event)

    def draw_deadline_arrow_at_time(self, time, task_no, job_no=None, selected=False):
        """Draws a (small) deadline arrow on the x-axis, optionally labelled T_task^job."""
        if job_no is None and task_no is not None:
            raise ValueError("Must specify a job number along with the task number")

        height = self._get_bar_height() * GraphFormat.SMALL_ARROW_FACTOR

        x = self._get_time_xpos(time)
        y = self.origin[1] - height

        self.canvas.draw_deadline_arrow_small(x, y, height, selected)

        if task_no is not None:
            y -= GraphFormat.ARROW_LABEL_OFS
            self.canvas.draw_label_with_sscripts('T', str(task_no), str(job_no), x, y,
                                                 GraphFormat.DEF_FOPTS_ARROW,
                                                 GraphFormat.DEF_FOPTS_ARROW_SSCRIPT,
                                                 AlignMode.CENTER, AlignMode.BOTTOM)

    def add_sel_deadline_arrow_at_time(self, time, task_no, event):
        """Selectable-region counterpart of draw_deadline_arrow_at_time."""
        # CONSISTENCY FIX: small variant, matching the drawn arrow (see above).
        height = self._get_bar_height() * GraphFormat.SMALL_ARROW_FACTOR

        x = self._get_time_xpos(time)
        y = self.origin[1] - height

        self.canvas.add_sel_deadline_arrow_small(x, y, height, event)

    def draw_bar_at_time(self, start_time, end_time, task_no, cpu_no, job_no=None, selected=False):
        """Draws a bar over [start_time, end_time] on a CPU's row, colored by task,
        optionally labelling it with T_task^job."""
        if start_time > end_time:
            raise ValueError("Litmus is not a time machine")

        x = self._get_time_xpos(start_time)
        y = self._get_item_ypos(cpu_no)
        width = self._get_bar_width(start_time, end_time)
        height = self._get_bar_height()

        self.canvas.draw_bar(x, y, width, height, task_no, selected)

        # if a job number is specified, we want to draw a superscript and subscript
        # for the task and job number, respectively
        if job_no is not None:
            x += GraphFormat.BAR_LABEL_OFS
            y += self.attrs.y_item_size * GraphFormat.BAR_SIZE_FACTOR / 2.0
            self.canvas.draw_label_with_sscripts('T', str(task_no), str(job_no), x, y,
                                                 GraphFormat.DEF_FOPTS_BAR, GraphFormat.DEF_FOPTS_BAR_SSCRIPT,
                                                 AlignMode.LEFT, AlignMode.CENTER)

    def add_sel_bar_at_time(self, start_time, end_time, task_no, cpu_no, event):
        """Selectable-region counterpart of draw_bar_at_time."""
        # CONSISTENCY FIX: validate the interval and use the canvas's
        # add_sel_bar helper, as TaskGraph does, instead of constructing a
        # SelectableRegion by hand.
        if start_time > end_time:
            raise ValueError("Litmus is not a time machine")

        x = self._get_time_xpos(start_time)
        y = self._get_item_ypos(cpu_no)
        width = self._get_bar_width(start_time, end_time)
        height = self._get_bar_height()

        self.canvas.add_sel_bar(x, y, width, height, event)

    def draw_mini_bar_at_time(self, start_time, end_time, task_no, cpu_no, job_no=None, selected=False):
        """Draws a mini bar above a CPU's bar area, colored by task."""
        if start_time > end_time:
            raise ValueError("Litmus is not a time machine")

        x = self._get_time_xpos(start_time)
        y = self._get_item_ypos(cpu_no) - self._get_mini_bar_ofs()
        width = self._get_bar_width(start_time, end_time)
        height = self._get_mini_bar_height()

        # BUG FIX: was colored by cpu_no; on a CPU graph the row already
        # identifies the CPU, so bars are colored by task (cf. draw_bar_at_time).
        self.canvas.draw_mini_bar(x, y, width, height, task_no, selected)

        if job_no is not None:
            x += GraphFormat.MINI_BAR_LABEL_OFS
            y += self.attrs.y_item_size * GraphFormat.MINI_BAR_SIZE_FACTOR / 2.0
            self.canvas.draw_label_with_sscripts('T', str(task_no), str(job_no), x, y,
                                                 GraphFormat.DEF_FOPTS_MINI_BAR,
                                                 GraphFormat.DEF_FOPTS_MINI_BAR_SSCRIPT,
                                                 AlignMode.LEFT, AlignMode.CENTER)

    def add_sel_mini_bar_at_time(self, start_time, end_time, task_no, cpu_no, event):
        """Selectable-region counterpart of draw_mini_bar_at_time."""
        if start_time > end_time:
            raise ValueError("Litmus is not a time machine")

        x = self._get_time_xpos(start_time)
        y = self._get_item_ypos(cpu_no) - self._get_mini_bar_ofs()
        width = self._get_bar_width(start_time, end_time)
        height = self._get_mini_bar_height()

        # BUG FIX: was add_sel_mini_bar(x, y, width, height, cpu_no, selected);
        # ``selected`` is undefined here (NameError) and the canvas expects the
        # event, matching TaskGraph.add_sel_mini_bar_at_time.
        self.canvas.add_sel_mini_bar(x, y, width, height, event)
diff --git a/viz/format.py b/viz/format.py
new file mode 100644
index 0000000..fed39f0
--- /dev/null
+++ b/viz/format.py
@@ -0,0 +1,92 @@
1"""Various formatting parameters intended to be accessible by the client."""
2
class FontOptions(object):
    """Bundle of simple font attributes: family name, point size, RGB color."""

    def __init__(self, name, size, color):
        # Stored verbatim; consumers read these attributes directly.
        self.name, self.size, self.color = name, size, color
9
class AlignMode(object):
    """Enumeration of alignment choices for drawn elements (usually text).

    LEFT/CENTER/RIGHT select horizontal placement; BOTTOM/TOP select
    vertical placement (CENTER serves both axes)."""

    # Horizontal alignment.
    LEFT = 0
    CENTER = 1
    RIGHT = 2

    # Vertical alignment.
    BOTTOM = 3
    TOP = 4
19
class GraphFormat(object):
    """Container class for a bunch of optional and non-optional attributes to configure the appearance of the graph
    (because it would be annoying to just have these all as raw arguments to the Graph constructor, and many people
    probably don't care about most of them anyway)."""

    # Colors as (r, g, b) tuples, each channel in [0.0, 1.0].
    GRID_COLOR = (0.7, 0.7, 0.7)
    HIGHLIGHT_COLOR = (0.8, 0.0, 0.0)
    BORDER_COLOR = (0.0, 0.0, 0.0)
    LITE_BORDER_COLOR = (0.4, 0.4, 0.4)

    # Line thicknesses (presumably pixels -- confirm against the canvas).
    BORDER_THICKNESS = 1
    GRID_THICKNESS = 1
    AXIS_THICKNESS = 1

    # Axis layout: offset before the first measure, label gaps, tick lengths.
    X_AXIS_MEASURE_OFS = 30
    X_AXIS_LABEL_GAP = 10
    Y_AXIS_ITEM_GAP = 10
    MAJ_TICK_SIZE = 20
    MIN_TICK_SIZE = 12

    # Arrowhead / tee sizes as fractions of the symbol's height.
    BIG_ARROWHEAD_FACTOR = 0.2
    SMALL_ARROWHEAD_FACTOR = 0.3
    TEE_FACTOR = 0.3

    # Default fonts for the various text elements.  NOTE: these FontOptions
    # instances are shared class-level singletons used as argument defaults
    # below -- callers should not mutate them.
    DEF_FOPTS_LABEL = FontOptions("Times", 16, (0.0, 0.0, 0.0))
    DEF_FOPTS_LABEL_SSCRIPT = FontOptions("Times", 8, (0.0, 0.0, 0.0))
    DEF_FOPTS_MAJ = FontOptions("Times", 14, (0.1, 0.1, 0.1))
    DEF_FOPTS_MIN = FontOptions("Times", 9, (0.1, 0.1, 0.1))
    DEF_FOPTS_ITEM = FontOptions("Times", 20, (0.0, 0.5, 0.1))
    DEF_FOPTS_BAR = FontOptions("Times", 14, (0.0, 0.0, 0.0))
    DEF_FOPTS_BAR_SSCRIPT = FontOptions("Times", 7, (0.0, 0.0, 0.0))
    DEF_FOPTS_MINI_BAR = FontOptions("Times", 11, (0.0, 0.0, 0.0))
    DEF_FOPTS_MINI_BAR_SSCRIPT = FontOptions("Times", 7, (0.0, 0.0, 0.0))
    DEF_FOPTS_ARROW = FontOptions("Times", 12, (0.0, 0.0, 0.0))
    DEF_FOPTS_ARROW_SSCRIPT = FontOptions("Times", 7, (0.0, 0.0, 0.0))

    # Padding around the plot area.
    LEFT_SIDE_PAD = 30
    WIDTH_PAD = 50
    HEIGHT_PAD = 150
    Y_ITEM_PAD_FACTOR = 0.5

    # Defaults for the instance attributes set in __init__.
    DEF_TIME_PER_MAJ = 10
    DEF_MAJ_SEP = 200
    DEF_MIN_PER_MAJ = 5
    DEF_Y_ITEM_SIZE = 50

    # Sizes of bars/symbols, mostly as fractions of the per-item row height.
    AXIS_LABEL_VERT_OFS = 30
    BAR_SIZE_FACTOR = 0.4
    MINI_BAR_SIZE_FACTOR = 0.2
    BAR_MINI_BAR_GAP_FACTOR = 0.1

    # Offsets of the T_task^job labels from their bar/arrow anchors.
    BAR_LABEL_OFS = 2
    MINI_BAR_LABEL_OFS = 1
    ARROW_LABEL_OFS = 2

    BLOCK_TRIANGLE_FACTOR = 0.7
    BIG_ARROW_FACTOR = 1.6
    SMALL_ARROW_FACTOR = 0.6
    COMPLETION_MARKER_FACTOR = 1.6

    def __init__(self, time_per_maj=DEF_TIME_PER_MAJ, maj_sep=DEF_MAJ_SEP, \
            min_per_maj=DEF_MIN_PER_MAJ, y_item_size=DEF_Y_ITEM_SIZE, bar_fopts=DEF_FOPTS_BAR, \
            item_fopts=DEF_FOPTS_ITEM, show_min=False, majfopts=DEF_FOPTS_MAJ, \
            minfopts=DEF_FOPTS_MIN):
        """Store the per-graph knobs: schedule time per major tick
        (time_per_maj), pixel separation of major ticks (maj_sep), minor
        ticks per major tick (min_per_maj), per-item row height
        (y_item_size), fonts for bars / item labels / tick labels, and
        whether minor tick labels are shown (show_min)."""
        self.time_per_maj = time_per_maj
        self.maj_sep = maj_sep
        self.min_per_maj = min_per_maj
        self.y_item_size = y_item_size
        self.item_fopts = item_fopts
        self.bar_fopts = bar_fopts
        self.show_min = show_min
        self.majfopts = majfopts
        self.minfopts = minfopts
diff --git a/viz/renderer.py b/viz/renderer.py
new file mode 100644
index 0000000..d94129c
--- /dev/null
+++ b/viz/renderer.py
@@ -0,0 +1,40 @@
1#!/usr/bin/python
2from schedule import *
3from draw import *
4
5"""The renderer, a glue object which converts a schedule to its representation
6on a graph."""
7
class Renderer(object):
    """Glue object which converts a schedule to its representation on a graph."""

    def __init__(self, schedule):
        # The schedule (see schedule.py) to be rendered.
        self.schedule = schedule

    def prepare_task_graph(self, SurfaceType=ImageSurface, attrs=GraphFormat()):
        """Set up (but do not yet draw) a task graph (y-axis = tasks) on a
        fresh surface of the given type.

        NOTE(review): the ``attrs`` default is evaluated once and shared
        across calls -- safe as long as callers never mutate it."""
        item_list = self.get_task_item_list()
        start, end = self.schedule.get_time_bounds()
        self.graph = TaskGraph(CairoCanvas, SurfaceType(), start, end, item_list, attrs)

    def prepare_cpu_graph(self, SurfaceType=ImageSurface, attrs=GraphFormat()):
        """Set up (but do not yet draw) a CPU graph (y-axis = CPUs) on a
        fresh surface of the given type."""
        item_list = ['CPU %d' % i for i in range(0, self.schedule.get_num_cpus())]
        start, end = self.schedule.get_time_bounds()
        self.graph = CpuGraph(CairoCanvas, SurfaceType(), start, end, item_list, attrs)

    def render_graph_full(self):
        """Does the heavy lifting for rendering a task or CPU graph, by scanning
        the schedule and drawing it piece by piece."""
        self.schedule.render(self.graph)

    def write_out(self, fname):
        """Write the rendered graph out to the file ``fname``.

        BUG FIX: Graph keeps its surface inside its canvas and exposes it
        via get_surface(); there is no ``graph.surface`` attribute, so the
        old ``self.graph.surface.write_out(fname)`` raised AttributeError."""
        self.graph.get_surface().write_out(fname)

    def get_graph(self):
        """The graph built by one of the prepare_* methods."""
        return self.graph

    def get_schedule(self):
        """The schedule this renderer was constructed with."""
        return self.schedule

    def get_task_item_list(self):
        """Y-axis labels for the task graph: one name per task in the schedule."""
        return [task.get_name() for task in self.schedule.get_task_list()]
40
diff --git a/viz/schedule.py b/viz/schedule.py
new file mode 100644
index 0000000..f842c8d
--- /dev/null
+++ b/viz/schedule.py
@@ -0,0 +1,571 @@
1#!/usr/bin/env python
2
3"""The data structures to store a schedule (task system), along with all
4the job releases and other events that have occurred for each task. This gives
5a high-level representation of a schedule that can be converted to, say, a
6graphic."""
7
8from draw import *
9import util
10
class TimeSlotArray(object):
    """Alternative indexing of the events: each event is filed under the
    (approximate) time at which it occurs.  Events landing in the same
    ``slot'' are further grouped by task number and by CPU."""

    TASK_LIST = 0
    CPU_LIST = 1

    def __init__(self, start, end, time_per_maj, num_tasks, num_cpus):
        self.start = start
        self.end = end
        self.time_per_maj = time_per_maj
        self.list_sizes = {TimeSlotArray.TASK_LIST: num_tasks,
                           TimeSlotArray.CPU_LIST: num_cpus}
        self.array = []
        for _ in range(0, (end - start) // self.time_per_maj + 1):
            slot = {TimeSlotArray.TASK_LIST: [{} for _ in range(0, num_tasks)],
                    TimeSlotArray.CPU_LIST: [{} for _ in range(0, num_cpus)]}
            self.array.append(slot)

    def get_time_slot(self, time):
        """Map an absolute time to its slot index."""
        return int((time - self.start) // self.time_per_maj)

    def add_event_to_time_slot(self, event):
        """File ``event`` in its slot, under both its task and its CPU.
        Span events (switch-away, inversion-end) additionally seed dummy
        placeholders into every intermediate slot they cover."""
        task_no = event.get_job().get_task().get_task_no()
        cpu = event.get_cpu()
        slot_no = self.get_time_slot(event.get_time())

        self.array[slot_no][TimeSlotArray.TASK_LIST][task_no][event.__class__] = event
        self.array[slot_no][TimeSlotArray.CPU_LIST][cpu][event.__class__] = event

        span_events = {SwitchAwayEvent: IsRunningDummy,
                       InversionEndEvent: InversionDummy}

        for end_class in span_events:
            if not isinstance(event, end_class) or event.is_erroneous():
                continue
            first = self.get_time_slot(event.corresp_start_event.get_time())
            last = self.get_time_slot(event.get_time())
            for mid_slot in range(first + 1, last):
                dummy = span_events[end_class](task_no, cpu)
                dummy.corresp_start_event = event.corresp_start_event
                self.array[mid_slot][TimeSlotArray.TASK_LIST][task_no][dummy.__class__] = dummy
                self.array[mid_slot][TimeSlotArray.CPU_LIST][cpu][dummy.__class__] = dummy

    def iter_over_period(self, start, end, start_no, end_no, list_type, event_types):
        """Yield every filed event of one of ``event_types`` between times
        ``start``/``end`` and list indices ``start_no``/``end_no``."""
        if start > end:
            raise ValueError('Litmus is not a time machine')
        if start_no > end_no:
            raise ValueError('start no should be less than end no')

        first_slot = max(0, self.get_time_slot(start))
        last_slot = min(len(self.array), self.get_time_slot(end) + 2)
        lo = max(0, start_no)
        hi = min(self.list_sizes[list_type] - 1, end_no)

        for slot in range(first_slot, last_slot):
            for no in range(lo, hi + 1):
                filed_here = self.array[slot][list_type][no]
                for etype in event_types:
                    if etype in filed_here:
                        yield filed_here[etype]
69
class Schedule(object):
    """The total schedule (task system), consisting of a certain number of
    tasks, plus the derived time bounds and time-slot index."""

    def __init__(self, name, num_cpus, task_list=None):
        """name -- schedule name; num_cpus -- number of CPUs traced;
        task_list -- optional iterable of Task objects to add up front.
        (Fixed: the old ``task_list=[]`` was a mutable default argument.)"""
        self.name = name
        self.tasks = {}
        self.task_list = []
        self.time_slot_array = None
        self.cur_task_no = 0
        self.num_cpus = num_cpus
        if task_list is not None:
            for task in task_list:
                self.add_task(task)

    def set_time_params(self, time_per_maj=None):
        """Compute self.start / self.end from the earliest and latest event
        times in the schedule, and (re)build the time-slot index when
        ``time_per_maj`` is given."""
        # NOTE(review): get_task_list() returns a list and is never None,
        # so this guard is effectively dead; kept for safety.
        if self.get_task_list() is None:
            return (0, 0)

        def find_extreme_time_sched(sched, cmp):
            # Most extreme (per cmp) event time over the whole schedule.
            def find_extreme_time_task(task, cmp):
                def find_extreme_time_job(job, cmp):
                    extreme_time = None
                    for time in job.get_events():
                        if extreme_time is None or cmp(time, extreme_time) < 0:
                            extreme_time = time
                    return extreme_time

                extreme_time = None
                for job_no in task.get_jobs():
                    time = find_extreme_time_job(task.get_jobs()[job_no], cmp)
                    if time is not None and (extreme_time is None or cmp(time, extreme_time) < 0):
                        extreme_time = time
                return extreme_time

            extreme_time = None
            for task in sched.get_task_list():
                time = find_extreme_time_task(task, cmp)
                if time is not None and (extreme_time is None or cmp(time, extreme_time) < 0):
                    extreme_time = time

            return extreme_time

        # cmp-style comparators: negative when x is "more extreme" than y.
        def earliest_cmp(x, y):
            diff = x - y
            if diff > 0.0:
                return 1
            elif diff == 0.0:
                return 0
            elif diff < 0.0:
                return -1

        def latest_cmp(x, y):
            diff = x - y
            if diff < 0.0:
                return 1
            elif diff == 0.0:
                return 0
            elif diff > 0.0:
                return -1

        self.start = find_extreme_time_sched(self, earliest_cmp)
        self.end = find_extreme_time_sched(self, latest_cmp)
        self.time_per_maj = time_per_maj
        self.time_slot_array = None
        if self.time_per_maj is not None:
            self.time_slot_array = TimeSlotArray(self.start, self.end, time_per_maj, \
                                                len(self.task_list), self.num_cpus)

    def get_time_slot_array(self):
        return self.time_slot_array

    def get_time_bounds(self):
        """(start, end) as computed by set_time_params()."""
        return (self.start, self.end)

    def scan(self, time_per_maj):
        """Walk every event in time order (task by task, job by job) so
        events can resolve cross-references (e.g. a SwitchAwayEvent finding
        its SwitchToEvent) and file themselves in the time-slot array.
        (Fixed: removed a leftover debug print that wrote one line to
        stdout for every single event scanned.)"""
        self.set_time_params(time_per_maj)

        # we scan the graph task by task, and job by job
        switches = {}
        for event in EVENT_LIST:
            switches[event] = None
        for task_no, task in enumerate(self.get_task_list()):
            cur_cpu = [Event.NO_CPU]
            for job_no in sorted(task.get_jobs().keys()):
                job = task.get_jobs()[job_no]
                for event_time in sorted(job.get_events().keys()):
                    # could have multiple events at the same time (unlikely but possible)
                    for event in job.get_events()[event_time]:
                        event.scan(cur_cpu, switches)

    def add_task(self, task):
        """Register a task, assigning it the next task number.
        Raises ValueError on a duplicate task name."""
        if task.name in self.tasks:
            raise ValueError("task already in list!")
        self.tasks[task.name] = task
        self.task_list.append(task)
        task.schedule = self
        task.task_no = self.cur_task_no
        self.cur_task_no += 1

    def get_tasks(self):
        return self.tasks

    def get_task_list(self):
        return self.task_list

    def get_name(self):
        return self.name

    def get_num_cpus(self):
        return self.num_cpus
181
class Task(object):
    """Represents a task, including the set of jobs that were run under
    this task."""

    def __init__(self, name, job_list=None):
        """name -- task name; job_list -- optional iterable of Jobs to add.
        (Fixed: the old ``job_list=[]`` was a mutable default argument.)"""
        self.name = name
        self.jobs = {}
        self.task_no = None
        self.schedule = None
        if job_list is not None:
            for job in job_list:
                self.add_job(job)

    def add_job(self, job):
        """Register a job under its job number.

        Raises ValueError on a duplicate job number.  (Fixed: the original
        raised ScheduleError, a name defined nowhere in this module, which
        would have surfaced as a NameError; ValueError matches
        Schedule.add_task.  The old message also wrongly claimed the key
        was a release time when it is the job number.)"""
        if job.job_no in self.jobs:
            raise ValueError("a job with this job number already exists for this task")
        self.jobs[job.job_no] = job
        job.task = self

    def get_schedule(self):
        return self.schedule

    def get_jobs(self):
        return self.jobs

    def get_task_no(self):
        return self.task_no

    def get_name(self):
        return self.name
211
class Job(object):
    """Represents a job, including everything that happens related to the job."""

    def __init__(self, job_no, event_list=None):
        """job_no -- the job number; event_list -- optional iterable of
        events to add.  (Fixed: the old ``event_list=[]`` was a mutable
        default argument.)"""
        self.job_no = job_no
        self.events = {}
        self.task = None
        if event_list is not None:
            for event in event_list:
                self.add_event(event)

    def add_event(self, event):
        """File an event under its time; multiple events may share a time."""
        if event.time not in self.events:
            self.events[event.time] = []
        self.events[event.time].append(event)
        event.job = self

    def get_events(self):
        return self.events

    def get_task(self):
        return self.task

    def get_job_no(self):
        return self.job_no
235
class DummyEvent(object):
    """Represents some event that occurs, but might not actually be
    a full-fledged ``event'' in the schedule.  It might instead be a dummy
    event added by the application to speed things up or keep track of
    something.  Such an event won't be added to the schedule tree, but
    might appear in the time slot array."""

    def __init__(self, time, cpu):
        self.time = time
        self.cpu = cpu
        self.job = None
        self.layer = None

    def __str__(self):
        return '[Dummy Event]'

    def get_time(self):
        return self.time

    def get_cpu(self):
        return self.cpu

    def get_job(self):
        return self.job

    def get_layer(self):
        return self.layer

    def render(self, graph, layer, prev_events):
        """Method that the visualizer calls to tell the event to render itself.
        Obviously only implemented by subclasses (actual event types).

        (Fixed: the original raised ``NotImplementdError`` -- a typo that
        produced a NameError instead of NotImplementedError.)"""
        raise NotImplementedError
268
class Event(DummyEvent):
    """An event recorded against a real job while it runs (getting
    scheduled on a CPU, blocking, ...), as opposed to a synthetic
    DummyEvent placeholder."""
    NO_CPU = -1
    NUM_DEC_PLACES = 2

    def __init__(self, time, cpu):
        super(Event, self).__init__(time, cpu)
        self.erroneous = False
        self.selected = False

    def __str__(self):
        return '[Event]'

    def _common_str(self):
        """Suffix shared by the subclasses' __str__ output."""
        job = self.get_job()
        task = job.get_task()
        ids = str((task.get_task_no(), job.get_job_no()))
        return ' for task ' + str(task.get_name()) + ': (TASK, JOB)=' + ids \
            + ', CPU=' + str(self.get_cpu())

    def is_erroneous(self):
        """True when something about this event is inconsistent with its
        neighbors -- wrong enough that we lack the logical information
        needed to render it properly."""
        return self.erroneous

    def is_selected(self):
        """True when the user has selected this event (needed for rendering)."""
        return self.selected

    def set_selected(self, sel):
        """Set the user-selection flag."""
        self.selected = sel

    def scan(self, cur_cpu, switches):
        """Hook invoked by Schedule.scan() in time order, used to fill in
        parameters that are unknown up front (e.g. a SwitchAwayEvent
        locating its SwitchToEvent).  ``cur_cpu`` is a one-element list
        carrying the CPU the task currently occupies; ``switches`` maps
        switch classes to the last such event seen.  The base class files
        the event in the schedule's time-slot array."""
        sched = self.get_job().get_task().get_schedule()
        sched.get_time_slot_array().add_event_to_time_slot(self)
313
class ErrorEvent(Event):
    # Marker base class for events that represent scheduling errors
    # (the priority-inversion events below); carries no behavior itself.
    pass
316
317class SuspendEvent(Event):
318 def __init__(self, time, cpu):
319 super(SuspendEvent, self).__init__(time, cpu)
320 self.layer = Canvas.MIDDLE_LAYER
321
322 def __str__(self):
323 return 'Suspend' + self._common_str() + ', TIME=' + util.format_float(self.get_time(), Event.NUM_DEC_PLACES)
324
325 def scan(self, cur_cpu, switches):
326 if self.get_cpu() != cur_cpu[0]:
327 self.erroneous = True
328 print "suspending on a CPU different from the CPU we are on!"
329 super(SuspendEvent, self).scan(cur_cpu, switches)
330
331 def render(self, graph, layer, prev_events):
332 if layer == self.layer:
333 prev_events[self] = None
334 graph.draw_suspend_triangle_at_time(self.get_time(), self.get_job().get_task().get_task_no(),
335 self.get_cpu(), self.is_selected())
336 graph.add_sel_suspend_triangle_at_time(self.get_time(), self.get_job().get_task().get_task_no(),
337 self.get_cpu(), self)
338
339class ResumeEvent(Event):
340 def __init__(self, time, cpu):
341 super(ResumeEvent, self).__init__(time, cpu)
342 self.layer = Canvas.MIDDLE_LAYER
343
344 def __str__(self):
345 return 'Resume' + self._common_str() + ', TIME=' + util.format_float(self.get_time(), Event.NUM_DEC_PLACES)
346
347 def scan(self, cur_cpu, switches):
348 if cur_cpu[0] != Event.NO_CPU and cur_cpu[0] != self.get_cpu():
349 self.erroneous = True
350 print "Resuming when currently scheduled on a CPU, but on a different CPU from the current CPU!"
351 super(ResumeEvent, self).scan(cur_cpu, switches)
352
353 def render(self, graph, layer, prev_events):
354 if layer == self.layer:
355 prev_events[self] = None
356 graph.draw_resume_triangle_at_time(self.get_time(), self.get_job().get_task().get_task_no(),
357 self.get_cpu(), self.is_selected())
358 graph.add_sel_resume_triangle_at_time(self.get_time(), self.get_job().get_task().get_task_no(),
359 self.get_cpu(), self)
360
class CompleteEvent(Event):
    """A job completing its execution; drawn as a marker on the top layer."""

    def __init__(self, time, cpu):
        super(CompleteEvent, self).__init__(time, cpu)
        self.layer = Canvas.TOP_LAYER

    def __str__(self):
        time_str = util.format_float(self.get_time(), Event.NUM_DEC_PLACES)
        return 'Complete' + self._common_str() + ', TIME=' + time_str

    def scan(self, cur_cpu, switches):
        super(CompleteEvent, self).scan(cur_cpu, switches)

    def render(self, graph, layer, prev_events):
        if layer != Canvas.TOP_LAYER:
            return
        prev_events[self] = None
        task_no = self.get_job().get_task().get_task_no()
        graph.draw_completion_marker_at_time(self.get_time(), task_no,
                                             self.get_cpu(), self.is_selected())
        graph.add_sel_completion_marker_at_time(self.get_time(), task_no,
                                                self.get_cpu(), self)
379
380
381class SwitchAwayEvent(Event):
382 def __init__(self, time, cpu):
383 super(SwitchAwayEvent, self).__init__(time, cpu)
384 self.layer = Canvas.BOTTOM_LAYER
385
386 def __str__(self):
387 if self.corresp_start_event is None:
388 return 'Switch Away (w/o Switch To)' + self._common_str() + 'TIME=' \
389 + self.get_time()
390 return str(self.corresp_start_event)
391
392 def scan(self, cur_cpu, switches):
393 old_cur_cpu = cur_cpu[0]
394
395 self.corresp_start_event = switches[SwitchToEvent]
396
397 cur_cpu[0] = Event.NO_CPU
398 switches[SwitchToEvent] = None
399
400 if self.corresp_start_event is not None:
401 self.corresp_start_event.corresp_end_event = self
402
403 if self.get_cpu() != old_cur_cpu:
404 self.erroneous = True
405 print "switching away from a CPU different from the CPU we are currently on"
406 if self.corresp_start_event is None:
407 self.erroneous = True
408 print "switch away was not matched by a corresponding switch to"
409 elif self.get_time() < self.corresp_start_event.get_time():
410 self.erroneous = True
411 print "switching away from a processor before we switched to it?!"
412
413 super(SwitchAwayEvent, self).scan(cur_cpu, switches)
414
415 def render(self, graph, layer, prev_events):
416 if self.corresp_start_event is None or self.corresp_start_event in prev_events:
417 return # erroneous switch away or already rendered
418 self.corresp_start_event.render(graph, layer, prev_events)
419
420class SwitchToEvent(Event):
421 def __init__(self, time, cpu):
422 super(SwitchToEvent, self).__init__(time, cpu)
423 self.layer = Canvas.BOTTOM_LAYER
424
425 def __str__(self):
426 if self.corresp_end_event is None:
427 return 'Switch To (w/o Switch Away)' + self._common_str() + ', TIME=' \
428 + self.get_time()
429 return 'Scheduled' + self._common_str() + ', START=' \
430 + util.format_float(self.get_time(), Event.NUM_DEC_PLACES) \
431 + ', END=' + util.format_float(self.corresp_end_event.get_time(), Event.NUM_DEC_PLACES)
432
433 def scan(self, cur_cpu, switches):
434 old_cur_cpu = cur_cpu[0]
435 cur_cpu[0] = self.get_cpu()
436 switches[SwitchToEvent] = self
437 self.corresp_end_event = None
438
439 if old_cur_cpu != Event.NO_CPU:
440 self.erroneous = True
441 print "currently scheduled somewhere, can't switch to a CPU"
442
443 super(SwitchToEvent, self).scan(cur_cpu, switches)
444
445 def render(self, graph, layer, prev_events):
446 if self.is_erroneous():
447 return # erroneous switch to
448 if layer == Canvas.BOTTOM_LAYER:
449 prev_events[self] = None
450 cpu = self.get_cpu()
451 task_no = self.get_job().get_task().get_task_no()
452 graph.draw_bar_at_time(self.get_time(), self.corresp_end_event.get_time(),
453 task_no, cpu, self.get_job().get_job_no(), self.is_selected())
454 graph.add_sel_bar_at_time(self.get_time(), self.corresp_end_event.get_time(),
455 task_no, cpu, self)
456
class ReleaseEvent(Event):
    """A job release (arrival); drawn as an up-arrow on the top layer."""

    def __init__(self, time, cpu):
        super(ReleaseEvent, self).__init__(time, cpu)
        self.layer = Canvas.TOP_LAYER

    def __str__(self):
        time_str = util.format_float(self.get_time(), Event.NUM_DEC_PLACES)
        return 'Release' + self._common_str() + ', TIME=' + time_str

    def scan(self, cur_cpu, switches):
        super(ReleaseEvent, self).scan(cur_cpu, switches)

    def render(self, graph, layer, prev_events):
        # Marked as rendered on every layer pass (matches the original);
        # the arrow itself is drawn only on the top layer.
        prev_events[self] = None
        if layer == Canvas.TOP_LAYER:
            task_no = self.get_job().get_task().get_task_no()
            job_no = self.get_job().get_job_no()
            graph.draw_release_arrow_at_time(self.get_time(), task_no, job_no,
                                             self.is_selected())
            graph.add_sel_release_arrow_at_time(self.get_time(), task_no, self)
475
class DeadlineEvent(Event):
    """A job's deadline; drawn as a down-arrow on the top layer."""

    def __init__(self, time, cpu):
        super(DeadlineEvent, self).__init__(time, cpu)
        self.layer = Canvas.TOP_LAYER

    def __str__(self):
        time_str = util.format_float(self.get_time(), Event.NUM_DEC_PLACES)
        return 'Deadline' + self._common_str() + ', TIME=' + time_str

    def scan(self, cur_cpu, switches):
        super(DeadlineEvent, self).scan(cur_cpu, switches)

    def render(self, graph, layer, prev_events):
        # Marked as rendered on every layer pass (matches the original);
        # the arrow itself is drawn only on the top layer.
        prev_events[self] = None
        if layer == Canvas.TOP_LAYER:
            task_no = self.get_job().get_task().get_task_no()
            job_no = self.get_job().get_job_no()
            graph.draw_deadline_arrow_at_time(self.get_time(), task_no, job_no,
                                              self.is_selected())
            graph.add_sel_deadline_arrow_at_time(self.get_time(), task_no, self)
494
class InversionStartEvent(ErrorEvent):
    """Start of a priority inversion; not bound to any CPU.  Pairs with
    the following InversionEndEvent during scan()."""

    def __init__(self, time):
        super(InversionStartEvent, self).__init__(time, Event.NO_CPU)
        self.layer = Canvas.BOTTOM_LAYER

    def __str__(self):
        if self.corresp_end_event is None:
            # Fixed: the original ``print``-ed this message and then fell
            # through to dereference corresp_end_event (an AttributeError
            # on None); it clearly meant to return the string.
            return 'Inversion Start (w/o Inversion End)' + self._common_str() \
                + ', TIME=' + util.format_float(self.get_time(), Event.NUM_DEC_PLACES)
        return 'Priority Inversion' + self._common_str() + ', START=' \
            + util.format_float(self.get_time(), Event.NUM_DEC_PLACES) \
            + ', END=' + util.format_float(self.corresp_end_event.get_time(), Event.NUM_DEC_PLACES)

    def scan(self, cur_cpu, switches):
        """Record this as the open inversion; the matching
        InversionEndEvent fills in corresp_end_event later."""
        switches[InversionStartEvent] = self
        self.corresp_end_event = None

        # the corresp_end_event should already be set
        super(InversionStartEvent, self).scan(cur_cpu, switches)

    def render(self, graph, layer, prev_events):
        """Draw the mini-bar spanning from inversion start to its end."""
        if layer == Canvas.BOTTOM_LAYER:
            prev_events[self] = None
            cpu = self.get_cpu()
            task_no = self.get_job().get_task().get_task_no()
            graph.draw_mini_bar_at_time(self.get_time(), self.corresp_end_event.get_time(),
                    task_no, cpu, self.get_job().get_job_no(), self.is_selected())
            graph.add_sel_mini_bar_at_time(self.get_time(), self.corresp_end_event.get_time(),
                    task_no, cpu, self)
524
525class InversionEndEvent(ErrorEvent):
526 def __init__(self, time):
527 super(InversionEndEvent, self).__init__(time, Event.NO_CPU)
528 self.layer = Canvas.BOTTOM_LAYER
529
530 def __str__(self):
531 if self.corresp_start_event is None:
532 print 'Inversion End (w/o Inversion Start)' + self._common_str() \
533 + ', TIME=' + util.format_float(self.get_time(), Event.NUM_DEC_PLACES)
534
535 return str(self.corresp_start_event)
536
537 def scan(self, cur_cpu, switches):
538 self.corresp_start_event = switches[InversionStartEvent]
539
540 cur_cpu[0] = Event.NO_CPU
541 switches[InversionStartEvent] = None
542
543 if self.corresp_start_event is not None:
544 self.corresp_start_event.corresp_end_event = self
545
546 if self.corresp_start_event is None:
547 self.erroneous = True
548 print "inversion end was not matched by a corresponding inversion start"
549
550 super(InversionEndEvent, self).scan(cur_cpu, switches)
551
552 def render(self, graph, layer, prev_events):
553 if self.corresp_start_event is None or self.corresp_start_event in prev_events:
554 return # erroneous inversion end or already rendered
555 self.corresp_start_event.render(graph, layer, prev_events)
556
class InversionDummy(DummyEvent):
    """Placeholder filed in the intermediate time slots spanned by a
    priority inversion; delegates rendering to the real start event."""
    def render(self, graph, layer, prev_events):
        start = self.corresp_start_event
        if start not in prev_events:
            start.render(graph, layer, prev_events)
562
class IsRunningDummy(DummyEvent):
    """Placeholder filed in the intermediate time slots spanned by a
    scheduled (running) interval; delegates rendering to the real
    SwitchToEvent."""
    def render(self, graph, layer, prev_events):
        start = self.corresp_start_event
        if start not in prev_events:
            start.render(graph, layer, prev_events)
568
# Used as a set of all concrete event classes (the None values are never
# read); Schedule.scan() seeds its ``switches'' map from these keys.
EVENT_LIST = {SuspendEvent : None, ResumeEvent : None, CompleteEvent : None, SwitchAwayEvent : None,
        SwitchToEvent : None, ReleaseEvent : None, DeadlineEvent : None, IsRunningDummy : None,
        InversionStartEvent : None, InversionEndEvent : None, InversionDummy : None}
diff --git a/viz/util.py b/viz/util.py
new file mode 100644
index 0000000..3111f39
--- /dev/null
+++ b/viz/util.py
@@ -0,0 +1,9 @@
1#!/usr/bin/python
2
3"""Miscellanious utility functions that don't fit anywhere."""
4
def format_float(num, numplaces):
    """Format ``num`` with ``numplaces`` decimal places, dropping the
    fractional part entirely when it rounds away (e.g. 3.0 -> '3',
    3.14159 -> '3.14').

    num -- the number to format (anything float() accepts).
    numplaces -- int number of decimal places to keep.
    """
    if abs(round(num, numplaces) - round(num, 0)) == 0.0:
        return '%.0f' % float(num)
    else:
        # Fixed: the original built the format string via
        # '%.' + numplaces + 'f', concatenating an int onto a str, which
        # raised TypeError; '%.*f' takes the precision as an argument.
        return '%.*f' % (numplaces, round(float(num), numplaces))
diff --git a/viz/viewer.py b/viz/viewer.py
new file mode 100644
index 0000000..a695473
--- /dev/null
+++ b/viz/viewer.py
@@ -0,0 +1,193 @@
1#!/usr/bin/python
2
3"""GUI stuff."""
4
5from schedule import *
6from renderer import *
7
8import pygtk
9import gtk
10import gobject
11
class GraphArea(gtk.DrawingArea):
    """Scrollable drawing area that displays the rendered graph and
    forwards hover / click selection to the underlying event objects."""
    DAREA_WIDTH_REQ = 500
    DAREA_HEIGHT_REQ = 300
    HORIZ_PAGE_SCROLL_FACTOR = 4.8
    HORIZ_STEP_SCROLL_FACTOR = 0.8
    VERT_PAGE_SCROLL_FACTOR = 3.0
    VERT_STEP_SCROLL_FACTOR = 0.5

    def __init__(self, renderer):
        """renderer -- the Renderer whose graph this widget displays."""
        super(GraphArea, self).__init__()

        self.renderer = renderer

        # Top-left corner of the visible region, in graph coordinates.
        self.cur_x = 0
        self.cur_y = 0
        # Allocated widget size; filled in by size_allocate().
        self.width = 0
        self.height = 0

        # Fixed: these were never initialized, so config_scrollbars()
        # raised AttributeError whenever size-allocate fired before
        # set-scroll-adjustments.
        self.horizontal = None
        self.vertical = None

        self.now_selected = []

        self.set_set_scroll_adjustments_signal('set-scroll-adjustments')

        self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK)

        self.connect('expose-event', self.expose)
        self.connect('size-allocate', self.size_allocate)
        self.connect('set-scroll-adjustments', self.set_scroll_adjustments)
        self.connect('button-press-event', self.button_press)
        self.connect('motion-notify-event', self.motion_notify)

        self.set_size_request(GraphArea.DAREA_WIDTH_REQ, GraphArea.DAREA_HEIGHT_REQ)

    def _invalidate(self):
        # Queue a redraw of the entire visible area.
        rect = gtk.gdk.Rectangle(0, 0, self.width, self.height)
        self.window.invalidate_rect(rect, True)

    def expose(self, widget, event, data=None):
        """Redraw handler: re-render the currently visible window."""
        ctx = widget.window.cairo_create()
        graph = self.renderer.get_graph()
        graph.update_view(self.cur_x, self.cur_y, self.width, self.height, ctx)
        graph.render_surface(self.renderer.get_schedule())

    def set_scroll_adjustments(self, widget, horizontal, vertical, data=None):
        """Attach the scrollbars' adjustments and keep them in sync.
        (Removed the unused width/height locals the original computed.)"""
        self.horizontal = horizontal
        self.vertical = vertical
        self.config_scrollbars(self.cur_x, self.cur_y)

        self.horizontal.connect('value-changed', self.horizontal_value_changed)
        self.vertical.connect('value-changed', self.vertical_value_changed)

    def horizontal_value_changed(self, adjustment):
        """Clamp the new horizontal offset to [0, graph width] and redraw.

        Fixed: the original's second assignment re-read adjustment.value,
        silently discarding the min() clamp."""
        limit = self.renderer.get_graph().get_width()
        self.cur_x = max(0.0, min(adjustment.value, limit))
        self._invalidate()

    def vertical_value_changed(self, adjustment):
        """Clamp the new vertical offset to [0, graph height] and redraw.
        (Same clamp fix as horizontal_value_changed.)"""
        limit = self.renderer.get_graph().get_height()
        self.cur_y = max(0.0, min(adjustment.value, limit))
        self._invalidate()

    def size_allocate(self, widget, allocation):
        """Track the widget's allocated size and refresh the scrollbars."""
        self.width = allocation.width
        self.height = allocation.height
        self.config_scrollbars(self.cur_x, self.cur_y)

    def config_scrollbars(self, hvalue, vvalue):
        """Configure both adjustments for the graph's full extent."""
        graph = self.renderer.get_graph()
        width = graph.get_width()
        height = graph.get_height()

        if self.horizontal is not None:
            self.horizontal.set_all(hvalue, 0.0, width, graph.get_attrs().maj_sep * GraphArea.HORIZ_STEP_SCROLL_FACTOR,
                graph.get_attrs().maj_sep * GraphArea.HORIZ_PAGE_SCROLL_FACTOR, self.width)
        if self.vertical is not None:
            self.vertical.set_all(vvalue, 0.0, height, graph.get_attrs().y_item_size * GraphArea.VERT_STEP_SCROLL_FACTOR,
                graph.get_attrs().y_item_size * GraphArea.VERT_PAGE_SCROLL_FACTOR, self.height)

    def _find_max_layer(self, regions):
        """Return the highest drawing layer among the given events."""
        max_layer = Canvas.BOTTOM_LAYER
        for event in regions:
            if event.get_layer() > max_layer:
                max_layer = event.get_layer()
        return max_layer

    def motion_notify(self, widget, motion_event, data=None):
        """Hover handler: describe the top-most event under the pointer."""
        graph = self.renderer.get_graph()
        just_selected = graph.get_selected_regions(motion_event.x, motion_event.y)
        if not just_selected:
            msg = ''
            the_event = None
        else:
            max_layer = self._find_max_layer(just_selected)

            for event in just_selected:
                if event.get_layer() == max_layer:
                    the_event = event
                    break

            msg = str(the_event)

        # NOTE(review): assumes the 'update-event-description' signal is
        # registered on this class via gobject elsewhere -- confirm.
        self.emit('update-event-description', the_event, msg)

    def button_press(self, widget, button_event, data=None):
        """Left-click handler: toggle selection of the top-most event hit."""
        graph = self.renderer.get_graph()

        if button_event.button == 1:
            just_selected = graph.get_selected_regions(button_event.x, button_event.y)

            max_layer = self._find_max_layer(just_selected)

            # only select those events which were in the top layer (it's
            # not intuitive to click something and then have something
            # below it get selected). Also, clicking something that
            # is selected deselects it
            new_now_selected = {}
            for event in just_selected:
                if event.get_layer() == max_layer:
                    if not event.is_selected():
                        new_now_selected[event] = None
                    event.set_selected(not event.is_selected())
                    break

            for event in self.now_selected:
                if event not in new_now_selected:
                    event.set_selected(False)

            self.now_selected = new_now_selected

            self._invalidate()
148
class GraphWindow(gtk.ScrolledWindow):
    """Scrolled container wrapping a single GraphArea."""

    def __init__(self, renderer):
        super(GraphWindow, self).__init__(None, None)

        area = GraphArea(renderer)
        self.garea = area
        self.add(area)
        area.show()

    def get_graph_area(self):
        """The GraphArea widget embedded in this window."""
        return self.garea
159
class MainWindow(object):
    """Top-level application window: a scrollable graph view stacked above
    a label describing the event currently under the pointer."""

    def __init__(self, renderer):
        # Top-level GTK window.
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)

        self.window.connect('delete_event', self.delete_event)
        self.window.connect('destroy', self.destroy)

        # Vertical layout: graph on top, description label underneath.
        self.vbox = gtk.VBox(False, 0)

        self.gwindow = GraphWindow(renderer)
        self.gwindow.get_graph_area().connect('update-event-description',
            self.update_event_description)
        self.gwindow.show()

        self.desc_label = gtk.Label('')
        self.desc_label.set_justify(gtk.JUSTIFY_LEFT)
        self.desc_label.show()

        # The graph expands with the window; the label keeps natural size.
        self.vbox.pack_start(self.gwindow, True, True, 0)
        self.vbox.pack_start(self.desc_label, False, False, 0)
        self.vbox.show()

        self.window.add(self.vbox)
        self.window.show()

    def update_event_description(self, widget, event, msg):
        # Show the hovered event's description beneath the graph.
        self.desc_label.set_text(msg)

    def delete_event(self, widget, event, data=None):
        # Returning False lets the default handler destroy the window.
        return False

    def destroy(self, widget, data=None):
        # Quit the GTK main loop when the window is destroyed.
        gtk.main_quit()
193