diff options
author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2011-02-25 18:08:33 -0500 |
---|---|---|
committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2011-02-25 18:08:33 -0500 |
commit | 7beec7bb786dbfef674ec7a3ef8836836bf3680b (patch) | |
tree | bc0eed51cc3674fa76bc51087d682e03516b01be /transform.py | |
parent | 736d4a8cc2c8b906ccaa9c39b145cd8cf20bdd88 (diff) |
Add simple tool for transforming sched. results
Diffstat (limited to 'transform.py')
-rwxr-xr-x | transform.py | 165 |
1 file changed, 165 insertions, 0 deletions
diff --git a/transform.py b/transform.py new file mode 100755 index 0000000..65d34d4 --- /dev/null +++ b/transform.py | |||
@@ -0,0 +1,165 @@ | |||
1 | #!/usr/bin/env python | ||
2 | |||
3 | import sys | ||
4 | |||
5 | from plot import decode | ||
6 | from os.path import splitext, basename | ||
7 | |||
8 | from glob import glob | ||
9 | from collections import defaultdict | ||
10 | import optparse | ||
11 | |||
12 | from util import load_csv_file, write_csv_file | ||
13 | |||
# Shorthand for building optparse option objects.
o = optparse.make_option

# Command-line options (currently none; the commented-out option is kept
# as a template for a future result-type selector).
opts = [
#	o('-r', '--result-type', action='store', dest='',
#	  type='choice', choices=['hard', 'soft', 'tardiness', 'rel-tardiness'],
#	  help='what data should be emitted?'),
	]

# Default values for the options above.
defaults = {
	}

# Parsed options; assigned in the __main__ block.
options = None

# Example of an input file name that key() decodes:
#G-EDF/testpoint_ucap=13.75_wss=1792_dist=exp-10-10-100_deadlines=implicit_host=ludwig_scheduler=G-EDF.csv

# Column indices into the per-scheduler result CSV files.
# NOTE(review): meanings inferred from how they are used below
# (key columns + hard/soft schedulability + tardiness stats) — the exact
# CSV layout comes from the result generator; verify against it.
IDX_WSS = 4               # working-set size (key column)
IDX_UCAP = 5              # utilization cap (key column)

IDX_HARD_IDLE = 6         # hard schedulability, idle cache
IDX_HARD_LOAD = 7         # hard schedulability, loaded cache

IDX_SOFT_IDLE = 8         # soft schedulability, idle cache
IDX_SOFT_LOAD = 17        # soft schedulability, loaded cache

# Absolute tardiness statistics (max / average / standard deviation).
IDX_TARD_MAX_IDLE = 9
IDX_TARD_AVG_IDLE = 10
IDX_TARD_STD_IDLE = 12

IDX_TARD_MAX_LOAD = 18
IDX_TARD_AVG_LOAD = 19
IDX_TARD_STD_LOAD = 21

# Relative tardiness statistics (max / average / standard deviation).
IDX_RTARD_MAX_IDLE = 13
IDX_RTARD_AVG_IDLE = 14
IDX_RTARD_STD_IDLE = 16

IDX_RTARD_MAX_LOAD = 22
IDX_RTARD_AVG_LOAD = 23
IDX_RTARD_STD_LOAD = 25
53 | |||
54 | |||
def key(fname):
    """Derive a grouping key from a result file name.

    Returns a tuple (group, scheduler, fname), where group encodes every
    configuration parameter except the scheduler, so that files differing
    only in scheduler fall into the same group.
    """
    stem = splitext(basename(fname))[0]
    conf = decode(stem)

    sched = conf['scheduler']
    # Staggered-quanta runs count as a separate scheduler variant.
    if conf.get('quanta') == 'staggered':
        sched = 'S-' + sched

    group = '_'.join(['dist=%s' % conf['dist'],
                      'deadlines=%s' % conf['deadlines'],
                      'host=%s' % conf['host']])
    return (group, sched, fname)
69 | |||
def get_row(i, key_col, arrays, cols):
    """Assemble output row i by merging the arrays' data columns.

    i       -- row index into every array
    key_col -- indices of the key columns (e.g. WSS and ucap); all arrays
               must agree on these values for row i
    arrays  -- list of 2-d data arrays, one per scheduler
    cols    -- indices of the data columns to extract from each array

    Returns the key values followed by the selected columns of each array,
    concatenated in input order.  Raises ValueError if the arrays disagree
    on the key columns (i.e., a data point is missing for some scheduler).
    """
    keys = [arrays[0][i, x] for x in key_col]
    row = []
    for idx, data in enumerate(arrays):
        # make sure we are not combining apples and oranges...
        d_keys = [data[i, x] for x in key_col]
        if d_keys != keys:
            # Raise instead of print+assert so the check is not silently
            # stripped under `python -O` and the caller gets a real error.
            raise ValueError('missing data sched=%d row=%d %s!=%s'
                             % (idx + 1, i + 1, d_keys, keys))
        row += [data[i, x] for x in cols]
    return keys + row
82 | |||
def get_table(arrays, key_col, data_col):
    """Build the merged table: one combined row per input row."""
    num_rows = len(arrays[0])
    return [get_row(r, key_col, arrays, data_col) for r in xrange(num_rows)]
86 | |||
def write_sched(name, components, hard=True):
    """Emit a schedulability comparison table as <hard|soft>_<name>.csv.

    components -- list of (scheduler name, data array) pairs, all covering
                  the same set of (WSS, ucap) design points
    hard       -- select hard (True) or soft (False) schedulability columns
    """
    if hard:
        data_col = [IDX_HARD_LOAD, IDX_HARD_IDLE]
        kind = 'hard'
    else:
        data_col = [IDX_SOFT_LOAD, IDX_SOFT_IDLE]
        kind = 'soft'

    header = ['WSS', 'ucap']
    arrays = []
    for sched, data in components:
        header.append("%s (load)" % sched)
        header.append("%s (idle)" % sched)
        arrays.append(data)

    result = get_table(arrays, [IDX_WSS, IDX_UCAP], data_col)
    # sort by WSS, then by ucap
    result.sort(key=lambda row: (row[0], row[1]))

    fname = '%s_%s.csv' % (kind, name)
    write_csv_file(fname, result, header=header, width=20)
104 | |||
def write_tardiness(name, components, relative=True):
    """Emit a tardiness comparison table as <rel-tard|abs-tard>_<name>.csv.

    For each scheduler the table carries max/avg/std tardiness under load
    and when idle.

    components -- list of (scheduler name, data array) pairs
    relative   -- select relative (True) or absolute (False) tardiness
    """
    header = ['WSS', 'ucap']
    arrays = []
    for (sched, data) in components:
        header += ["%s (max, load)" % sched,
                   "%s (avg, load)" % sched,
                   "%s (std, load)" % sched,
                   "%s (max, idle)" % sched,
                   "%s (avg, idle)" % sched,
                   "%s (std, idle)" % sched]
        arrays.append(data)

    # Column order must match the header above: max/avg/std (load), then
    # max/avg/std (idle).  Bug fix: the idle "avg" slot previously
    # repeated the MAX_IDLE index, so the avg-idle column silently
    # duplicated max-idle data.
    if relative:
        data_col = [IDX_RTARD_MAX_LOAD, IDX_RTARD_AVG_LOAD, IDX_RTARD_STD_LOAD,
                    IDX_RTARD_MAX_IDLE, IDX_RTARD_AVG_IDLE, IDX_RTARD_STD_IDLE]
    else:
        data_col = [IDX_TARD_MAX_LOAD, IDX_TARD_AVG_LOAD, IDX_TARD_STD_LOAD,
                    IDX_TARD_MAX_IDLE, IDX_TARD_AVG_IDLE, IDX_TARD_STD_IDLE]

    result = get_table(arrays, [IDX_WSS, IDX_UCAP], data_col)
    # sort by WSS, then by ucap
    result.sort(key=lambda row: (row[0], row[1]))
    fname = '%s_%s.csv' % ('rel-tard' if relative else 'abs-tard',
                           name)
    write_csv_file(fname, result, header=header, width=35)
130 | |||
131 | |||
132 | def assemble_results(dir): | ||
133 | files = glob(dir + '/*.csv') | ||
134 | |||
135 | parts = defaultdict(list) | ||
136 | |||
137 | print 'Organizing %d files...' % len(files) | ||
138 | for f in files: | ||
139 | k, sched, fname = key(f) | ||
140 | parts[k].append((sched, fname)) | ||
141 | |||
142 | for k in parts: | ||
143 | # sort by scheduler name | ||
144 | parts[k].sort() | ||
145 | |||
146 | for i, k in enumerate(parts): | ||
147 | comment = 1 | ||
148 | print '[%d/%d] Processing %s' % (i+ 1, len(parts), k) | ||
149 | print 'Loading files.' | ||
150 | components = [(sched, load_csv_file(fname)) for (sched, fname) in parts[k]] | ||
151 | |||
152 | print 'Generating output.' | ||
153 | write_sched(k, components, hard=True) | ||
154 | write_sched(k, components, hard=False) | ||
155 | write_tardiness(k, components, relative=True) | ||
156 | write_tardiness(k, components, relative=False) | ||
157 | |||
if __name__ == '__main__':
    # Every positional argument is a directory of result CSV files; each
    # one is merged into per-configuration comparison tables.
    parser = optparse.OptionParser(option_list=opts)
    parser.set_defaults(**defaults)
    options, dirs = parser.parse_args()

    for d in dirs:
        assemble_results(d)
165 | |||