Diffstat (limited to 'unit_trace')
-rw-r--r--   unit_trace/latest.py         |  4
-rw-r--r--   unit_trace/progress.py       |  6
-rw-r--r--   unit_trace/stdout_printer.py | 50
-rw-r--r--   unit_trace/trace_reader.py   | 53
-rw-r--r--   unit_trace/viz/convert.py    | 52
-rw-r--r--   unit_trace/viz/schedule.py   |  4
-rwxr-xr-x   unit_trace/viz/visualizer.py |  1
7 files changed, 85 insertions, 85 deletions
diff --git a/unit_trace/latest.py b/unit_trace/latest.py
index 38676a0..4effced 100644
--- a/unit_trace/latest.py
+++ b/unit_trace/latest.py
@@ -10,8 +10,8 @@

 def latest(stream, latest):
     for record in stream:
-        if record.record_type=="event":
-            if record.id > latest:
+        if record['record_type']=="event":
+            if record['id'] > latest:
                 break
             else:
                 yield record
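The pattern repeated throughout this commit is already visible in this first hunk: trace records used to be ad-hoc objects with fields attached as attributes, and they are now plain dictionaries, so every `record.field` access becomes `record['field']`. A minimal before/after sketch (illustration only, not part of the patch; the field names mirror trace_reader.py below and the trace file name is made up):

# Old shape: an empty class used as an attribute bag, built field by field.
class Obj: pass

old_record = Obj()
old_record.record_type = "meta"
old_record.type_name = "trace_files"
old_record.files = ["st-example.bin"]        # hypothetical trace file name
print old_record.type_name                   # attribute access, as in the old code

# New shape: a plain dict built in one call, read with key access.
new_record = dict(record_type="meta", type_name="trace_files",
                  files=["st-example.bin"])  # hypothetical trace file name
print new_record['type_name']                # key access, as in the new code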
diff --git a/unit_trace/progress.py b/unit_trace/progress.py
index d0f0482..90f2c2c 100644
--- a/unit_trace/progress.py
+++ b/unit_trace/progress.py
@@ -27,13 +27,13 @@ def progress(stream):
     count = 0

     for record in stream:
-        if record.record_type=="event":
+        if record['record_type']=="event":
             count += 1
             if (count % 1000) == 0 and count > 0:
                 sys.stderr.write(("Parsed %d event records\n") % (count))
-        if record.record_type=="meta" and record.type_name=="trace_files":
+        if record['record_type']=="meta" and record['type_name']=="trace_files":
             bytes = 0
-            for file in record.files:
+            for file in record['files']:
                 bytes += int(os.path.getsize(file))
             sys.stderr.write(("Total bytes : %d\n") % (bytes))
             # 192 bits per event record, 8 bits per byte
diff --git a/unit_trace/stdout_printer.py b/unit_trace/stdout_printer.py
index b70b31a..004d9cf 100644
--- a/unit_trace/stdout_printer.py
+++ b/unit_trace/stdout_printer.py
@@ -10,13 +10,14 @@

 def stdout_printer(stream):
     for record in stream:
-        if record.record_type == "event":
+        if record['record_type'] == "event":
             _print_event(record)
-        elif record.record_type == "meta" and record.type_name == "stats":
+        elif record['record_type'] == "meta" and record['type_name'] == "stats":
             _print_stats(record)
-        elif record.record_type == "error" and record.type_name == 'inversion_start':
+        elif record['record_type'] == "error" and \
+                record['type_name'] == "inversion_start":
             _print_inversion_start(record)
-        elif record.record_type == "error" and record.type_name == 'inversion_end':
+        elif record['record_type'] == "error" and record['type_name'] == "inversion_end":
             _print_inversion_end(record)
         else:
             continue
@@ -27,46 +28,45 @@ def stdout_printer(stream):
 ###############################################################################

 def _print_event(record):
-    print "Event ID: %d" % (record.id)
-    print "Job: %d.%d" % (record.pid,record.job)
-    print "Type: %s" % (record.type_name)
-    print "Time: %d" % (record.when)
+    print "Event ID: %d" % (record['id'])
+    print "Job: %d.%d" % (record['pid'],record['job'])
+    print "Type: %s" % (record['type_name'])
+    print "Time: %d" % (record['when'])

 def _print_inversion_start(record):
     print "Type: %s" % ("Inversion start")
-    print "Inversion Record IDs: (%d, U)" % (record.id)
-    print "Triggering Event IDs: (%d, U)" % (record.triggering_event_id)
-    print "Time: %d" % (record.job.inversion_start)
-    print "Job: %d.%d" % (record.job.pid,record.job.job)
-    print "Deadline: %d" % (record.job.deadline)
+    print "Inversion Record IDs: (%d, U)" % (record['id'])
+    print "Triggering Event IDs: (%d, U)" % (record['triggering_event_id'])
+    print "Time: %d" % (record['job']['inversion_start'])
+    print "Job: %d.%d" % (record['job']['pid'],record['job']['job'])
+    print "Deadline: %d" % (record['job']['deadline'])
     print "Off CPU: ",
-    for job in record.off_cpu:
+    for job in record['off_cpu']:
         print str(job) + " ",

     print "On CPU: ",
-    for job in record.on_cpu:
+    for job in record['on_cpu']:
         print str(job) + " ",
     print #newline

 def _print_inversion_end(record):
     print "Type: %s" % ("Inversion end")
-    print "Inversion record IDs: (%d, %d)" % (record.inversion_start_id,
-                                              record.id)
+    print "Inversion record IDs: (%d, %d)" % (record['inversion_start'],
+                                              record['id'])
     print("Triggering Event IDs: (%d, %d)" %
-          (record.inversion_start_triggering_event_id,
-           record.triggering_event_id))
-    print "Time: %d" % (record.job.inversion_end)
+          (record['inversion_start'], record['triggering_event_id']))
+    print "Time: %d" % (record['job']['inversion_end'])
     # NOTE: Here, we assume nanoseconds as the time unit.
     # May have to be changed in the future.
     print "Duration: %f ms" % (
-        float(record.job.inversion_end - record.job.inversion_start)/1000000)
-    print "Job: %d.%d" % (record.job.pid,record.job.job)
-    print "Deadline: %d" % (record.job.deadline)
+        float(record['job']['inversion_end'] - record['job']['inversion_start'])/1000000)
+    print "Job: %d.%d" % (record['job']['pid'],record['job']['job'])
+    print "Deadline: %d" % (record['job']['deadline'])
     print "Off CPU: ",
-    for job in record.off_cpu:
+    for job in record['off_cpu']:
         print str(job) + " ",

     print "On CPU: ",
-    for job in record.on_cpu:
+    for job in record['on_cpu']:
         print str(job) + " ",
     print #newline
diff --git a/unit_trace/trace_reader.py b/unit_trace/trace_reader.py
index a4c3c05..7ab6083 100644
--- a/unit_trace/trace_reader.py
+++ b/unit_trace/trace_reader.py
@@ -27,6 +27,7 @@

 import struct
 import sys
+import mmap


 ###############################################################################
@@ -38,18 +39,11 @@ def trace_reader(files):

     # Yield a record containing the input files
     # This is used by progress.py to calculate progress
-    class Obj: pass
-    record = Obj()
-    record.record_type = "meta"
-    record.type_name = "trace_files"
-    record.files = files
+    record = dict(record_type='meta', type_name='trace_files', files=files)
     yield record

     # Yield a record indicating the number of CPUs, used by the G-EDF test
-    record = Obj()
-    record.record_type = "meta"
-    record.type_name = "num_cpus"
-    record.num_cpus = len(files)
+    record = dict(record_type='meta', type_name='num_cpus', num_cpus=len(files))
     yield record

     # Create iterators for each file and a buffer to store records in
@@ -80,8 +74,8 @@ def trace_reader(files):
                 file_iter_buff[x].append(file_iters[x].next())
             except StopIteration:
                 pass
-    for x in range(0,len(file_iter_buff)):
-        file_iter_buff[x] = sorted(file_iter_buff[x],key=lambda rec: rec.when)
+    # for x in range(0,len(file_iter_buff)):
+    #     file_iter_buff[x] = sorted(file_iter_buff[x],key=lambda rec: rec['when'])

     # Remember the time of the last record. This way, we can make sure records
     # truly are produced in monotonically increasing order by time and terminate
@@ -98,7 +92,7 @@
         earliest = -1
         buff_to_refill = -1
         for x in range(0,len(file_iter_buff)):
-            if earliest==-1 or file_iter_buff[x][0].when < earliest.when:
+            if earliest==-1 or file_iter_buff[x][0]['when'] < earliest['when']:
                 earliest = file_iter_buff[x][0]
                 buff_to_refill = x

@@ -109,8 +103,8 @@
         # then keep the buffer sorted
         try:
             file_iter_buff[buff_to_refill].append(file_iters[buff_to_refill].next())
-            file_iter_buff[buff_to_refill] = sorted(file_iter_buff[buff_to_refill],
-                key=lambda rec: rec.when)
+            # file_iter_buff[buff_to_refill] = sorted(file_iter_buff[buff_to_refill],
+            #     key=lambda rec: rec['when'])

         # If there aren't any more records, fine. Unless the buffer is also empty.
         # If that is the case, delete the buffer.
@@ -120,14 +114,14 @@
             del file_iters[buff_to_refill]

         # Check for monotonically increasing time
-        if last_time is not None and earliest.when < last_time:
-            exit("FATAL ERROR: trace_reader.py: out-of-order record produced")
-        else:
-            last_time = earliest.when
+        # if last_time is not None and earliest['when'] < last_time:
+        #     exit("FATAL ERROR: trace_reader.py: out-of-order record produced")
+        # else:
+        last_time = earliest['when']

         # Give the record an id number
         id += 1
-        earliest.id = id
+        earliest['id'] = id

         # Yield the record
         yield earliest
@@ -135,10 +129,12 @@
 ###############################################################################
 # Private functions
 ###############################################################################
-
+import traceback
 # Returns an iterator to pull records from a file
 def _get_file_iter(file):
-    f = open(file,'rb')
+    f = open(file,'r+b')
+    f = mmap.mmap(f.fileno(), 0)
+
     while True:
         data = f.read(RECORD_HEAD_SIZE)
         try:
@@ -154,26 +150,23 @@
         try:
             values = struct.unpack_from(StHeader.format +
                                         type.format,data)
-            record_dict = dict(zip(type.keys,values))
-        except struct.error:
+            record = dict(zip(type.keys,values))
+        except:
             f.close()
             sys.stderr.write("Skipping record that does not match proper" +
                              " struct formatting\n")
             continue

-        # Convert the record_dict into an object
-        record = _dict2obj(record_dict)
-
         # Give it a type name (easier to work with than type number)
-        record.type_name = _get_type_name(type_num)
+        record['type_name'] = _get_type_name(type_num)

         # All records should have a 'record type' field.
         # e.g. these are 'event's as opposed to 'error's
-        record.record_type = "event"
+        record['record_type'] = "event"

         # If there is no timestamp, set the time to 0
-        if 'when' not in record.__dict__.keys():
-            record.when = 0
+        if 'when' not in record.keys():
+            record['when'] = 0

         yield record

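The mmap change above has to reopen the trace files with 'r+b' because mmap.mmap() defaults to a read/write mapping. If the reader only ever reads, an equivalent read-only mapping keeps the files opened 'rb'. A sketch of that variant (not part of the patch; the 24-byte record size is taken from the progress.py comment and is only a default here):

import mmap

def iter_raw_records(path, record_size=24):    # 192 bits per record, per progress.py
    f = open(path, 'rb')                       # read-only is enough with ACCESS_READ
    buf = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
    try:
        while True:
            data = buf.read(record_size)       # mmap objects support file-style read()
            if len(data) < record_size:
                break
            yield data                         # one packed record, still undecoded
    finally:
        buf.close()
        f.close()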
diff --git a/unit_trace/viz/convert.py b/unit_trace/viz/convert.py
index d19bc73..f9f6793 100644
--- a/unit_trace/viz/convert.py
+++ b/unit_trace/viz/convert.py
@@ -16,8 +16,8 @@ def get_type_num(type):
     return nums[type]

 def _get_job_from_record(sched, record):
-    tname = _pid_to_task_name(record.pid)
-    job_no = record.job
+    tname = _pid_to_task_name(record['pid'])
+    job_no = record['job']
     if tname not in sched.get_tasks():
         sched.add_task(Task(tname, []))
     if job_no not in sched.get_tasks()[tname].get_jobs():
@@ -31,53 +31,57 @@ def convert_trace_to_schedule(stream):
     def noop():
         pass

+    print "Converting"
+
     num_cpus, stream = _find_num_cpus(stream)
     sched = Schedule('sched', num_cpus)
     for record in stream:
-        #if record.record_type == 'meta':
-        #    if record.type_name == 'num_cpus':
+        #if record['record_type'] == 'meta':
+        #    if record['type_name'] == 'num_cpus':
         #        sched = Schedule('sched', record.num_cpus)
         #        continue
-        if record.record_type == 'event':
+        if record['record_type'] == 'event':
             job = _get_job_from_record(sched, record)
-            cpu = record.cpu
+            cpu = record['cpu']

-            if not hasattr(record, 'deadline'):
-                record.deadline = None
+            # if not hasattr(record, 'deadline'):
+            #     record['deadline'] = None

             actions = {
                 'name' : (noop),
                 'params' : (noop),
                 'release' : (lambda :
-                    (job.add_event(ReleaseEvent(record.when, cpu)),
-                     job.add_event(DeadlineEvent(record.deadline, cpu)))),
+                    (job.add_event(ReleaseEvent(record['when'], cpu)),
+                     job.add_event(DeadlineEvent(record['deadline'], cpu)))),
                 'switch_to' : (lambda :
-                    job.add_event(SwitchToEvent(record.when, cpu))),
+                    job.add_event(SwitchToEvent(record['when'], cpu))),
                 'switch_away' : (lambda :
-                    job.add_event(SwitchAwayEvent(record.when, cpu))),
+                    job.add_event(SwitchAwayEvent(record['when'], cpu))),
                 'assign' : (noop),
                 'completion' : (lambda :
-                    job.add_event(CompleteEvent(record.when, cpu))),
+                    job.add_event(CompleteEvent(record['when'], cpu))),
                 'block' : (lambda :
-                    job.add_event(SuspendEvent(record.when, cpu))),
+                    job.add_event(SuspendEvent(record['when'], cpu))),
                 'resume' : (lambda :
-                    job.add_event(ResumeEvent(record.when, cpu))),
+                    job.add_event(ResumeEvent(record['when'], cpu))),
                 'sys_release' : (noop)
             }

-            actions[record.type_name]()
+            actions[record['type_name']]()

-        elif record.record_type == 'error':
-            job = _get_job_from_record(sched, record.job)
+        elif record['record_type'] == 'error':
+            job = _get_job_from_record(sched, record['job'])

             actions = {
                 'inversion_start' : (lambda :
-                    job.add_event(InversionStartEvent(record.job.inversion_start))),
+                    job.add_event(InversionStartEvent(record['job'].inversion_start))),
                 'inversion_end' : (lambda :
-                    job.add_event(InversionEndEvent(record.job.inversion_end)))
+                    job.add_event(InversionEndEvent(record['job'].inversion_end)))
             }

-            actions[record.type_name]()
+            actions[record['type_name']]()
+
+    print "Done converting"

     return sched

@@ -91,9 +95,9 @@ def _find_num_cpus(stream):
     stream_list = []
     for record in stream:
         stream_list.append(record)
-        if record.record_type == 'event':
-            if record.cpu > max:
-                max = record.cpu
+        if record['record_type'] == 'event':
+            if record['cpu'] > max:
+                max = record['cpu']

     def recycle(l):
         for record in l:
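convert.py dispatches on record['type_name'] through a dict of lambdas and indexes it directly, so an event type without an entry raises KeyError. A tolerant variant of the same pattern (a sketch, not in the patch, with a hypothetical record and stand-in handlers) falls back to noop via dict.get():

def noop():
    pass

def handle(kind):          # stand-in for the job.add_event(...) calls
    print "handled", kind

record = {'record_type': 'event', 'type_name': 'np_enter', 'when': 0}  # hypothetical record

actions = {
    'release'    : (lambda : handle('release')),
    'completion' : (lambda : handle('completion')),
}

actions.get(record['type_name'], noop)()   # unknown 'np_enter' falls back to noop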
diff --git a/unit_trace/viz/schedule.py b/unit_trace/viz/schedule.py
index 542524b..a009733 100644
--- a/unit_trace/viz/schedule.py
+++ b/unit_trace/viz/schedule.py
@@ -6,6 +6,7 @@ a high-level representation of a schedule that can be converted to, say, a
 graphic."""

 from graph import *
+from collections import *
 import util

 import copy
@@ -58,7 +59,6 @@ class TimeSlotArray(object):
         if slot not in self.array[list_type][no][klass]:
             self.array[list_type][no][klass][slot] = []
         self.array[list_type][no][klass][slot].append(event)
-
         if self.min_slot is None or slot < self.min_slot:
             self.min_slot = slot
         if self.max_slot is None or slot > self.max_slot:
@@ -223,6 +223,7 @@ class Schedule(object):
         return (self.start, self.end)

     def scan(self, time_per_maj):
+        print "Scanning"
         self.start = None
         self.end = None

@@ -254,6 +255,7 @@ class Schedule(object):

         # add events that correspond to the tasks and CPUS, at the very beginning
         self.time_slot_array.add_item_dummies(self)
+        print "Done scanning"

     def add_task(self, task):
         if task.name in self.tasks:
diff --git a/unit_trace/viz/visualizer.py b/unit_trace/viz/visualizer.py
index e80b764..ac56cf7 100755
--- a/unit_trace/viz/visualizer.py
+++ b/unit_trace/viz/visualizer.py
@@ -10,6 +10,7 @@ import gtk

 def visualizer(stream, time_per_maj):
     sched = convert.convert_trace_to_schedule(stream)
+    print "No for real"
     sched.scan(time_per_maj)

     task_renderer = renderer.Renderer(sched)