Diffstat:
 -rw-r--r--  config/config.example.py            |  16
 -rw-r--r--  parse/ft.py                         |   1
 -rw-r--r--  parse/point.py                      |  11
 -rw-r--r--  parse/sched.py                      |  60
 -rw-r--r--  parse/tuple_table.py                |   5
 -rwxr-xr-x  plot_exps.py (mode was -rw-r--r--)  | 115
 6 files changed, 182 insertions, 26 deletions
diff --git a/config/config.example.py b/config/config.example.py
index 06f06b6..50d30ba 100644
--- a/config/config.example.py
+++ b/config/config.example.py
@@ -41,16 +41,22 @@ PARAMS = {'sched' : 'scheduler',
           'kernel' : 'uname'}
 
 SCHED_EVENTS = range(501, 513)
-BASE_EVENTS = ['SCHED', 'RELEASE', 'SCHED2', 'TICK', 'CXS']
+BASE_EVENTS = ['SCHED', 'RELEASE', 'SCHED2', 'TICK', 'CXS', 'SEND_RESCHED']
+BASE_EVENTS += ['CQ_ENQUEUE_READ', 'CQ_ENQUEUE_FLUSH', 'CQ_SUBMIT_WORK',
+                'CQ_LOOP_WORK_CHECK', 'CQ_LOOP_PEACE_OUT', 'CQ_LOOP_BRANCH',
+                'CQ_WORK_DO_WORK', 'CQ_WORK_NOTIFY', 'CQ_PHASE_WAIT']
 
 # Expand for mixed-crit
-# CRIT_EVENTS = ['LVL{}_SCHED', 'LVL{}_RELEASE']
-# CRIT_LEVELS = ['A', 'B', 'C']
-# BASE_EVENTS += [s.format(l) for (l,s) in
-#                 itertools.product(CRIT_LEVELS, CRIT_EVENTS)]
+# TODO don't use split
+CRIT_EVENTS = ['LVL{}_SCHED', 'LVL{}_RELEASE']
+CRIT_LEVELS = ['A', 'B', 'C']
+BASE_EVENTS += [s.format(l) for (l,s) in
+                itertools.product(CRIT_LEVELS, CRIT_EVENTS)]
 
 ALL_EVENTS = ["%s_%s" % (e, t) for (e,t) in
               itertools.product(BASE_EVENTS, ["START","END"])]
+ALL_EVENTS += ['RELEASE_LATENCY']
+BASE_EVENTS += ['RELEASE_LATENCY']
 
 valid = True
 for repo, loc in REPOS.items():
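
Note: with the mixed-crit block above now uncommented, BASE_EVENTS is
expanded into per-level names before the START/END product is taken. A
minimal standalone sketch of the expansion (lists inlined for
illustration):

    import itertools

    CRIT_EVENTS = ['LVL{}_SCHED', 'LVL{}_RELEASE']
    CRIT_LEVELS = ['A', 'B', 'C']
    # product() pairs each level with each template string
    names = [s.format(l) for (l, s) in
             itertools.product(CRIT_LEVELS, CRIT_EVENTS)]
    # names == ['LVLA_SCHED', 'LVLA_RELEASE', 'LVLB_SCHED', ...]
    events = ["%s_%s" % (e, t) for (e, t) in
              itertools.product(names, ["START", "END"])]
    # events == ['LVLA_SCHED_START', 'LVLA_SCHED_END', ...]
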
diff --git a/parse/ft.py b/parse/ft.py
index 2c2b597..feb338f 100644
--- a/parse/ft.py
+++ b/parse/ft.py
@@ -41,6 +41,7 @@ def get_ft_output(data_dir, out_dir, force=False):
     # Analyze will summarize those
     # todo pass in f
     cmd_arr = [conf.BINS['analyze']]
+    print("cmd arr: %s-%s" % (cmd_arr, bins))
     cmd_arr.extend(bins)
     with open(output_file, "w") as f:
         subprocess.call(cmd_arr, cwd=out_dir, stdout=f, stderr=err_file)
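
Note: get_ft_output builds the analyzer command as an argv list and
captures its stdout in the per-experiment output file. A minimal sketch
of the same subprocess pattern (binary and file names are placeholders,
not the configured conf.BINS values):

    import subprocess

    cmd_arr = ['ft_analyzer', 'event1.bin', 'event2.bin']
    with open('output.txt', 'w') as out, open('error.log', 'w') as err:
        # stdout goes to the summary file, stderr to a separate log
        subprocess.call(cmd_arr, cwd='/tmp', stdout=out, stderr=err)
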
diff --git a/parse/point.py b/parse/point.py
index 30fcd97..8fdd115 100644
--- a/parse/point.py
+++ b/parse/point.py
@@ -16,7 +16,14 @@ def make_typemap():
     return copy.deepcopy(default_typemap)
 
 def dict_str(adict, sep = "\n"):
-    return sep.join(["%s: %s" % (k, str(v)) for (k,v) in sorted(adict.iteritems())])
+    def num_str(v):
+        try:
+            float(v)
+            return "%6.3f" % v
+        except:
+            return v
+    size = 20 if sep == "\n" else 4
+    return sep.join([("%" + str(size) + "s: %9s") % (k, num_str(v)) for (k,v) in sorted(adict.iteritems())])
 
 class Measurement(object):
     def __init__(self, id = None, kv = {}):
@@ -52,7 +59,7 @@ class Measurement(object):
         self.stats[type] = value
 
     def __str__(self):
-        return "<Measurement-%s> %s" % (self.id, dict_str(self.stats, " "))
+        return "%s" % dict_str(self.stats, " ")
 
 
 class Summary(Measurement):
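
Note: dict_str now right-aligns keys and renders anything float()-able
with three decimal places; Measurement.__str__ uses the single-space
separator form. A small usage sketch (hypothetical stats dict):

    stats = {'avg': 1.5, 'max': 3.25, 'id': 'task-7'}
    print(dict_str(stats))       # one pair per line, keys padded to 20
    print(dict_str(stats, " "))  # compact one-line form, keys padded to 4
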
diff --git a/parse/sched.py b/parse/sched.py
index 7dd80e0..cbb051e 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -1,5 +1,6 @@
 """
 TODO: No longer very pythonic, lot of duplicate code
+print out task execution times
 """
 
 import config.config as conf
@@ -9,6 +10,7 @@ import numpy as np
 import subprocess
 
 from collections import namedtuple,defaultdict
+from operator import methodcaller
 from point import Measurement,Type
 
 PARAM_RECORD = r"(?P<RECORD>" +\
@@ -29,12 +31,14 @@ TARDY_RECORD = r"(?P<RECORD>" +\
                r"(?P<MISSES>[\d\.]+))"
 COMPLETION_RECORD = r"(?P<RECORD>" +\
                r"COMPLETION.*?(?P<PID>\d+)/.*?" +\
-               r"exec.*?(?P<EXEC>[\d\.]+)ms.*?" +\
-               r"flush.*?(?P<FLUSH>[\d\.]+)ms.*?" +\
-               r"load.*?(?P<LOAD>[\d\.]+)ms)"
+               r"exec:.*?(?P<EXEC>[\d\.]+)ms.*?" +\
+               r"flush:.*?(?P<FLUSH>[\d\.]+)ms.*?" +\
+               r"flush_work:.*?(?P<FLUSH_WORK>[\d]+).*?" +\
+               r"load:.*?(?P<LOAD>[\d\.]+)ms.*?" +\
+               r"load_work:.*?(?P<LOAD_WORK>[\d]+))"
 
 TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period','type','level'])
-Task = namedtuple('Task', ['pid', 'config'])
+Task = namedtuple('Task', ['pid', 'config', 'run'])
 
 class LeveledArray(object):
     """
@@ -86,7 +90,7 @@ def get_tasks(data):
                               float(match.group('WCET')),
                               float(match.group('PERIOD')),
                               match.group("CLASS"),
-                              match.group("LEVEL")))
+                              match.group("LEVEL")), [])
         if not (t.config.period and t.pid):
             raise Exception()
         ret += [t]
@@ -144,15 +148,16 @@ def extract_tardy_vals(task_dict, data, exp_point):
         max_tards.add(t, max_tard / t.config.period)
         ratios.add(t, misses / jobs)
 
-    ratios.write_measurements(exp_point)
-    avg_tards.write_measurements(exp_point)
-    max_tards.write_measurements(exp_point)
+    map(methodcaller('write_measurements', exp_point),
+        [ratios, avg_tards, max_tards])
 
 # TODO: rename
 def extract_variance(task_dict, data, exp_point):
     varz = LeveledArray("exec-variance")
     flushes = LeveledArray("cache-flush")
     loads = LeveledArray("cache-load")
+    fworks = LeveledArray("flush-work")
+    lworks = LeveledArray("load-work")
 
     completions = defaultdict(lambda: [])
     missed = defaultdict(lambda: int())
@@ -163,11 +168,17 @@ def extract_variance(task_dict, data, exp_point):
         duration = float(match.group("EXEC"))
         load = float(match.group("LOAD"))
         flush = float(match.group("FLUSH"))
+        lwork = int(match.group("LOAD_WORK"))
+        fwork = int(match.group("FLUSH_WORK"))
 
         if load:
             loads.add(task_dict[pid], load)
+            lworks.add(task_dict[pid], lwork)
+            if not lwork: raise Exception()
         if flush:
             flushes.add(task_dict[pid], flush)
+            fworks.add(task_dict[pid], fwork)
+            if not fwork: raise Exception()
 
         # Last (exit) record often has exec time of 0
         missed[pid] += not bool(duration)
@@ -181,6 +192,9 @@ def extract_variance(task_dict, data, exp_point):
             completions[pid] += [duration]
 
     for pid, durations in completions.iteritems():
+        # TODO: not this, please
+        task_dict[pid].run.append(Measurement(pid).from_array(durations))
+
         job_times = np.array(durations)
         mean = job_times.mean()
 
@@ -194,14 +208,15 @@ def extract_variance(task_dict, data, exp_point):
 
         varz.add(task_dict[pid], corrected)
 
-    varz.write_measurements(exp_point)
-    flushes.write_measurements(exp_point)
-    loads.write_measurements(exp_point)
+    if exp_point:
+        map(methodcaller('write_measurements', exp_point),
+            [varz, flushes, loads, fworks, lworks])
 
 def config_exit_stats(task_dict, data):
-    # Dictionary of task exit measurements by pid
-    exits = get_task_exits(data)
-    exit_dict = dict((e.id, e) for e in exits)
+    # # Dictionary of task exit measurements by pid
+    # exits = get_task_exits(data)
+    # exit_dict = dict((e.id, e) for e in exits)
+    extract_variance(task_dict, data, None)
 
     # Dictionary where keys are configurations, values are lists
     # of tasks with those configurations
@@ -212,11 +227,12 @@ def config_exit_stats(task_dict, data):
     for config in config_dict:
         task_list = sorted(config_dict[config])
 
-        # Replace tasks with corresponding exit stats
-        if not t.pid in exit_dict:
-            raise Exception("Missing exit record for task '%s' in '%s'" %
-                            (t, file.name))
-        exit_list = [exit_dict[t.pid] for t in task_list]
+        # # Replace tasks with corresponding exit stats
+        # if not t.pid in exit_dict:
+        #     raise Exception("Missing exit record for task '%s' in '%s'" %
+        #                     (t, file.name))
+        # exit_list = [exit_dict[t.pid] for t in task_list]
+        exit_list = [t.run[0] for t in task_list]
         config_dict[config] = exit_list
 
     return config_dict
@@ -228,6 +244,7 @@ def get_base_stats(base_file):
     with open(base_file, 'r') as f:
         data = f.read()
     task_dict = get_task_dict(data)
+
     result = config_exit_stats(task_dict, data)
     saved_stats[base_file] = result
     return result
@@ -248,16 +265,21 @@ def extract_scaling_data(task_dict, data, result, base_file):
             # Quit, we are missing a record and can't guarantee
             # a task-to-task comparison
             continue
+
         for data_stat, base_stat in zip(data_stats[config],base_stats[config]):
             if not base_stat[Type.Avg] or not base_stat[Type.Max] or \
                not data_stat[Type.Avg] or not data_stat[Type.Max]:
+                print("missing a thing: {},{}".format(base_stat, data_stat))
                 continue
             # How much larger is their exec stat than ours?
+            print("%s vs %s" % (base_stat, data_stat))
             avg_scale = float(base_stat[Type.Avg]) / float(data_stat[Type.Avg])
             max_scale = float(base_stat[Type.Max]) / float(data_stat[Type.Max])
 
             task = task_dict[data_stat.id]
 
+            print("scaling for %s" % data_stat.id)
+
             avg_scales.add(task, avg_scale)
             max_scales.add(task, max_scale)
 
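
Note: the extended COMPLETION_RECORD keys on the "exec:", "flush:",
"flush_work:", "load:" and "load_work:" labels. A sketch of pulling one
record apart (the sample line is made up, not from a real trace):

    import re

    line = ("COMPLETION 1234/1 exec: 2.5ms flush: 0.3ms flush_work: 12 "
            "load: 0.4ms load_work: 9")
    m = re.search(COMPLETION_RECORD, line)
    duration = float(m.group("EXEC"))      # 2.5
    fwork    = int(m.group("FLUSH_WORK"))  # 12
    lwork    = int(m.group("LOAD_WORK"))   # 9
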
diff --git a/parse/tuple_table.py b/parse/tuple_table.py
index cb5a72a..434eb22 100644
--- a/parse/tuple_table.py
+++ b/parse/tuple_table.py
@@ -48,6 +48,7 @@ class TupleTable(object):
         self.table = defaultdict(lambda: [])
         self.reduced = False
 
+    # TODO: rename, make exp agnostic, extend for exps
     def add_exp(self, kv, point):
         key = self.col_map.get_key(kv)
         self.table[key] += [point]
@@ -56,6 +57,10 @@ class TupleTable(object):
         key = self.col_map.get_key(kv)
         return self.table[key]
 
+    def __contains__(self, kv):
+        key = self.col_map.get_key(kv)
+        return key in self.table
+
     def reduce(self):
         if self.reduced:
             raise Exception("cannot reduce twice!")
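
Note: __contains__ lets callers membership-test with the same kv dicts
passed to add_exp. A sketch (col_map and point stand in for real ColMap
and Measurement objects):

    table = TupleTable(col_map)
    table.add_exp({'scheduler': 'MC'}, point)
    assert {'scheduler': 'MC'} in table   # calls TupleTable.__contains__
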
diff --git a/plot_exps.py b/plot_exps.py
old mode 100644
new mode 100755
index 06f43b0..46784bc
--- a/plot_exps.py
+++ b/plot_exps.py
@@ -1,7 +1,78 @@
 #!/usr/bin/env python
 from __future__ import print_function
 
+import os
+import re
+import plot
+import shutil as sh
+
+from collections import defaultdict
 from optparse import OptionParser
+from gnuplot import Plot, curve
+from random import randrange
+from parse.tuple_table import ColMap, TupleTable
+
+class StyleMaker(object):
+    LINE_WIDTH = 1.5
+    POINT_SIZE = 0.6
+    BEST_COLORS = [
+        '#ff0000', # red
+        '#000001', # black
+        '#0000ff', # blue
+        '#be00c4', # purple
+        '#ffd700', # yellow
+    ]
+
+    def __init__(self, csvs):
+        self.main_key, self.col_map = self.__find_columns(csvs)
+        self.cur_style = 1
+
+        # Use this for the least-common varying attribute
+        self.main_map = {}
+        # Everything else is a color
+        self.color_map = TupleTable(self.col_map)
+
+    def __find_columns(self, csvs):
+        vals = defaultdict(set)
+
+        for csv in csvs:
+            to_decode = os.path.splitext(csv)[0]
+            params = plot.decode(to_decode)
+            for k,v in params.iteritems():
+                vals[k].add(v)
+
+        try:
+            main_key = min([(k,v) for (k,v) in vals.iteritems() if len(v) > 1],
+                           key=lambda (k,v): len(v))[0]
+        except ValueError:
+            main_key = None
+
+        col_map = ColMap()
+        for k,v in vals.iteritems():
+            if k == main_key: continue
+            for i in v:
+                col_map.try_add(k, i)
+        return (main_key, col_map)
+
+    def __rand_color(self):
+        return "#%s" % "".join(["%02x" % randrange(256) for i in range(3)])
+
+    def get_style(self, csv):
+        to_decode = os.path.splitext(csv)[0]
+        params = plot.decode(to_decode)
+
+        if params not in self.color_map:
+            color = self.BEST_COLORS.pop(0) if self.BEST_COLORS else self.__rand_color()
+            self.color_map.add_exp(params, color)
+
+        if self.main_key in params:
+            val = params[self.main_key]
+            if val not in self.main_map:
+                self.main_map[val] = self.cur_style
+                self.cur_style += 1
+            style = self.main_map[val]
+        else:
+            style = 1
 
 def parse_args():
     parser = OptionParser("usage: %prog [options] [csv_dir]...")
@@ -13,9 +84,54 @@ def parse_args():
 
     return parser.parse_args()
 
+def get_label(kv):
+    label = []
+    for key, value in kv.iteritems():
+        label += ["%s=%s" % (key.capitalize(), value)]
+    return ", ".join(label)
+
+def add_line(p, csv_file):
+    to_decode = os.path.splitext(csv_file)[0]
+    params = plot.decode(to_decode)
+
+def get_stat(path, name):
+    full = os.path.abspath(path)
+    rstr = r"(?P<STAT>[^/]+)/((max|min|var|avg)/)*(%s/?)?$" % name
+    regex = re.compile(rstr, re.I | re.M)
+    match = regex.search(full)
+    return match.group("STAT")
+
+def plot_exp(name, data_dir, out_dir):
+    p = Plot()
+    p.format = 'pdf'
+    p.output = "%s/%s.pdf" % (out_dir, name)
+    p.xlabel = name.replace("vary-", "")
+    p.ylabel = get_stat(data_dir, name)
+    p.font = 'Helvetica'
+    p.dashed_lines = True
+    p.enhanced_text = True
+    p.size = ('5.0cm', '5.0cm')
+    p.font_size = '6pt'
+    p.key = 'on bmargin center horizontal'
+
+    csvs = [f for f in os.listdir(data_dir) if re.match(r".*\.csv$", f)]
+    col_map = get_col_map(csvs)
+
+
 def main():
     opts, args = parse_args()
     args = args or [os.getcwd()]
 
+    # if opts.force and os.path.exists(opts.out_dir):
+    #     sh.rmtree(opts.out_dir)
+    # if not os.path.exists(opts.out_dir):
+    #     os.mkdir(opts.out_dir)
+
+    for exp in args:
+        name = os.path.split(exp)[1]
+        out_dir = "%s/%s" % (opts.out_dir, exp)
+
+        plot_exp(name, exp, out_dir)
+
 if __name__ == '__main__':
     main()
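
Note: get_stat recovers the plotted statistic from a result path by
matching the directory component that precedes the max/min/var/avg
levels. A sketch with a made-up directory layout:

    # ".../miss-ratio/avg/vary-tasks" -> "miss-ratio"
    stat = get_stat("results/miss-ratio/avg/vary-tasks", "vary-tasks")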