author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-02-25 18:08:33 -0500
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2011-02-25 18:08:33 -0500
commit    7beec7bb786dbfef674ec7a3ef8836836bf3680b (patch)
tree      bc0eed51cc3674fa76bc51087d682e03516b01be
parent    736d4a8cc2c8b906ccaa9c39b145cd8cf20bdd88 (diff)
Add simple tool for transforming sched. results
-rwxr-xr-x  transform.py  165
1 file changed, 165 insertions(+), 0 deletions(-)
diff --git a/transform.py b/transform.py
new file mode 100755
index 0000000..65d34d4
--- /dev/null
+++ b/transform.py
@@ -0,0 +1,165 @@
#!/usr/bin/env python

import sys

from plot import decode
from os.path import splitext, basename

from glob import glob
from collections import defaultdict
import optparse

from util import load_csv_file, write_csv_file
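
# NOTE (assumption): plot.decode() and the util CSV helpers are project-local
# modules; decode() is taken to parse the key=value tokens embedded in a
# result file's name (see the example name below) into a dict.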

o = optparse.make_option

opts = [
#    o('-r', '--result-type', action='store', dest='',
#      type='choice', choices=['hard', 'soft', 'tardiness', 'rel-tardiness'],
#      help='what data should be emitted?'),
    ]

defaults = {
    }

options = None

# G-EDF/testpoint_ucap=13.75_wss=1792_dist=exp-10-10-100_deadlines=implicit_host=ludwig_scheduler=G-EDF.csv

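# Column indices into each per-test-point CSV row. The interpretation is an
# assumption inferred from the header labels used below: 'IDLE' columns hold
# results based on overheads measured on an otherwise idle system, 'LOAD'
# columns results for a system under background load.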
IDX_WSS = 4
IDX_UCAP = 5

IDX_HARD_IDLE = 6
IDX_HARD_LOAD = 7

IDX_SOFT_IDLE = 8
IDX_SOFT_LOAD = 17

IDX_TARD_MAX_IDLE = 9
IDX_TARD_AVG_IDLE = 10
IDX_TARD_STD_IDLE = 12

IDX_TARD_MAX_LOAD = 18
IDX_TARD_AVG_LOAD = 19
IDX_TARD_STD_LOAD = 21

IDX_RTARD_MAX_IDLE = 13
IDX_RTARD_AVG_IDLE = 14
IDX_RTARD_STD_IDLE = 16

IDX_RTARD_MAX_LOAD = 22
IDX_RTARD_AVG_LOAD = 23
IDX_RTARD_STD_LOAD = 25

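# Group result files by everything except the scheduler: files sharing the
# same dist/deadlines/host parameters end up in one output table, with one
# column group per scheduler. Staggered-quanta configurations are
# distinguished by an 'S-' prefix.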
def key(fname):
    name, ext = splitext(basename(fname))
    conf = decode(name)

    sched = conf['scheduler']

    if 'quanta' in conf and conf['quanta'] == 'staggered':
        sched = 'S-' + sched

    d = 'dist=%s' % conf['dist']
    dl = 'deadlines=%s' % conf['deadlines']
    h = 'host=%s' % conf['host']

    return ('_'.join([d, dl, h]), sched, fname)

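# Combine the i-th row of every scheduler's data array (anything supporting
# 2-D indexing, e.g. a NumPy array) into one output row, after verifying
# that the key columns (WSS, ucap) agree across all arrays.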
def get_row(i, key_col, arrays, cols):
    row = []
    data = arrays[0]
    keys = [data[i,x] for x in key_col]
    for idx, data in enumerate(arrays):
        # make sure we are not combining apples and oranges...
        d_keys = [data[i,x] for x in key_col]
        if d_keys != keys:
            print 'Bad: missing data sched=%d row=%d %s!=%s' % (idx + 1, i + 1, d_keys, keys)
            assert False
        row += [data[i,x] for x in cols]
    return keys + row

def get_table(arrays, key_col, data_col):
    data = arrays[0]
    return [get_row(i, key_col, arrays, data_col) for i in xrange(len(data))]

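# Write one CSV per parameter combination with the hard (or soft)
# schedulability results of all schedulers side by side.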
def write_sched(name, components, hard=True):
    header = ['WSS', 'ucap']
    arrays = []
    for (sched, data) in components:
        header += ["%s (load)" % sched, "%s (idle)" % sched]
        arrays.append(data)
    if hard:
        data_col = [IDX_HARD_LOAD, IDX_HARD_IDLE]
    else:
        data_col = [IDX_SOFT_LOAD, IDX_SOFT_IDLE]
    result = get_table(arrays, [IDX_WSS, IDX_UCAP], data_col)
    # sort by WSS, then by ucap
    result.sort(key=lambda row: (row[0], row[1]))
    fname = '%s_%s.csv' % ('hard' if hard else 'soft', name)
    write_csv_file(fname, result, header=header, width=20)

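# Same table layout for tardiness results: max/avg/std under load, then
# max/avg/std on an idle system, per scheduler.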
def write_tardiness(name, components, relative=True):
    header = ['WSS', 'ucap']
    arrays = []
    for (sched, data) in components:
        header += ["%s (max, load)" % sched,
                   "%s (avg, load)" % sched,
                   "%s (std, load)" % sched,
                   "%s (max, idle)" % sched,
                   "%s (avg, idle)" % sched,
                   "%s (std, idle)" % sched]
        arrays.append(data)

    if relative:
        data_col = [IDX_RTARD_MAX_LOAD, IDX_RTARD_AVG_LOAD, IDX_RTARD_STD_LOAD,
                    IDX_RTARD_MAX_IDLE, IDX_RTARD_AVG_IDLE, IDX_RTARD_STD_IDLE]
    else:
        data_col = [IDX_TARD_MAX_LOAD, IDX_TARD_AVG_LOAD, IDX_TARD_STD_LOAD,
                    IDX_TARD_MAX_IDLE, IDX_TARD_AVG_IDLE, IDX_TARD_STD_IDLE]

    result = get_table(arrays, [IDX_WSS, IDX_UCAP], data_col)
    # sort by WSS, then by ucap
    result.sort(key=lambda row: (row[0], row[1]))
    fname = '%s_%s.csv' % ('rel-tard' if relative else 'abs-tard', name)
    write_csv_file(fname, result, header=header, width=35)

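# Group all result files in dir by parameter combination and emit the
# combined hard/soft schedulability and absolute/relative tardiness tables
# for each group.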
def assemble_results(dir):
    files = glob(dir + '/*.csv')

    parts = defaultdict(list)

    print 'Organizing %d files...' % len(files)
    for f in files:
        k, sched, fname = key(f)
        parts[k].append((sched, fname))

    for k in parts:
        # sort by scheduler name
        parts[k].sort()

    for i, k in enumerate(parts):
        print '[%d/%d] Processing %s' % (i + 1, len(parts), k)
        print 'Loading files.'
        components = [(sched, load_csv_file(fname)) for (sched, fname) in parts[k]]

        print 'Generating output.'
        write_sched(k, components, hard=True)
        write_sched(k, components, hard=False)
        write_tardiness(k, components, relative=True)
        write_tardiness(k, components, relative=False)

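# Usage sketch (assumed invocation; not spelled out in the original commit):
#     ./transform.py <results-dir> [<results-dir> ...]
# Output files (hard_*.csv, soft_*.csv, rel-tard_*.csv, abs-tard_*.csv) are
# written to the current working directory.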
if __name__ == '__main__':
    parser = optparse.OptionParser(option_list=opts)
    parser.set_defaults(**defaults)
    (options, dirs) = parser.parse_args()

    for d in dirs:
        assemble_results(d)