Diffstat (limited to 'convert.py')
-rw-r--r--  convert.py | 102
1 files changed, 102 insertions, 0 deletions
diff --git a/convert.py b/convert.py
new file mode 100644
index 0000000..1db4ad0
--- /dev/null
+++ b/convert.py
@@ -0,0 +1,102 @@
#!/usr/bin/env python
"""Interprets the raw trace data, outputting it to a Python
Schedule object.

Doesn't do any checking on the logic of the schedule (except to
check for corrupted data)."""

from viz.schedule import *
from reader.trace_reader import *

def get_type(type_num):
    """Return the binary data type, given the type_num."""
    return Trace.DATA_TYPES[type_num]

def get_type_num(type_name):
    """Return the type_num for a binary data type (inverse of get_type)."""
    nums = dict(zip(Trace.DATA_TYPES, range(len(Trace.DATA_TYPES))))
    return nums[type_name]

def _get_job_from_record(sched, record):
    """Return the Job object a record refers to, creating the Task and
    Job lazily if they haven't been seen yet."""
    tname = _pid_to_task_name(record.pid)
    job_no = record.job
    if tname not in sched.get_tasks():
        sched.add_task(Task(tname, []))
    if job_no not in sched.get_tasks()[tname].get_jobs():
        sched.get_tasks()[tname].add_job(Job(job_no, []))
    job = sched.get_tasks()[tname].get_jobs()[job_no]
    return job

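# The helper above builds the Schedule -> Task -> Job containment on demand.
# For illustration (a sketch; '1234' stands in for whatever PID appears in
# the trace):
#
#     sched.get_tasks()['1234']                # the Task named after the PID
#     sched.get_tasks()['1234'].get_jobs()[7]  # Job 7 of that task
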
def convert_trace_to_schedule(stream):
    """The main function of interest in this module. Converts a stream of
    records to a Schedule object."""
    def noop():
        pass

    num_cpus, stream = _find_num_cpus(stream)
    sched = Schedule('sched', num_cpus)
    for record in stream:
        #if record.record_type == 'meta':
        #    if record.type_name == 'num_cpus':
        #        sched = Schedule('sched', record.num_cpus)
        #        continue
        if record.record_type == 'event':
            job = _get_job_from_record(sched, record)
            cpu = record.cpu

            if not hasattr(record, 'deadline'):
                record.deadline = None

            # Dispatch table: map each record type to the event(s) it adds
            # to the job; types with no corresponding event are no-ops.
            actions = {
                'name' : noop,
                'params' : noop,
                'release' : (lambda :
                    (job.add_event(ReleaseEvent(record.when, cpu)),
                    job.add_event(DeadlineEvent(record.deadline, cpu)))),
                'switch_to' : (lambda :
                    job.add_event(SwitchToEvent(record.when, cpu))),
                'switch_away' : (lambda :
                    job.add_event(SwitchAwayEvent(record.when, cpu))),
                'assign' : noop,
                'completion' : (lambda :
                    job.add_event(CompleteEvent(record.when, cpu))),
                'block' : (lambda :
                    job.add_event(SuspendEvent(record.when, cpu))),
                'resume' : (lambda :
                    job.add_event(ResumeEvent(record.when, cpu))),
                'sys_release' : noop
            }

            actions[record.type_name]()

        elif record.record_type == 'error':
            # Error records carry a nested job record; look up the Job it names.
            job = _get_job_from_record(sched, record.job)

            actions = {
                'inversion_start' : (lambda :
                    job.add_event(InversionStartEvent(record.job.inversion_start))),
                'inversion_end' : (lambda :
                    job.add_event(InversionEndEvent(record.job.inversion_end)))
            }

            actions[record.type_name]()

    return sched

def _pid_to_task_name(pid):
    """Converts a PID to an appropriate name for a task."""
    return str(pid)

def _find_num_cpus(stream):
    """Determine the number of CPUs used by scanning the event records.

    The incoming stream is a generator and gets consumed by the scan, so the
    records are buffered and handed back as a fresh generator with the count."""
    max_cpu = 0
    stream_list = []
    for record in stream:
        stream_list.append(record)
        if record.record_type == 'event':
            if record.cpu > max_cpu:
                max_cpu = record.cpu

    def recycle(l):
        for record in l:
            yield record
    return (max_cpu + 1, recycle(stream_list))
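
# Example usage (a minimal sketch, not part of the module's API; it assumes
# that reader.trace_reader yields some iterable of record objects -- the
# exact reader interface is not shown in this file):
#
#     records = ...                              # records obtained from the trace reader
#     sched = convert_trace_to_schedule(records)
#     # 'sched' is now a viz.schedule Schedule holding Tasks, Jobs and Events.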