author    Jonathan Herman <hermanjl@cs.unc.edu>    2013-04-12 15:12:22 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>    2013-04-12 15:12:22 -0400
commit    7eb34b5312974f601d1117eeaf6393b9648be31c (patch)
tree      838df63d06886bd3bbec560add8a1ac4ef4dd069
parent    09bc409657606a37346d82ab1e4c44a165bd3541 (diff)
Improved error handling in parse_ and plot_exps.py.
-rw-r--r--  parse/dir_map.py      5
-rw-r--r--  parse/ft.py           1
-rw-r--r--  parse/point.py        4
-rw-r--r--  parse/sched.py        6
-rw-r--r--  parse/tuple_table.py  5
-rwxr-xr-x  parse_exps.py         8
-rw-r--r--  plot/style.py        20
-rwxr-xr-x  plot_exps.py         11
8 files changed, 41 insertions, 19 deletions
diff --git a/parse/dir_map.py b/parse/dir_map.py
index a8d2a83..231d21a 100644
--- a/parse/dir_map.py
+++ b/parse/dir_map.py
@@ -96,7 +96,10 @@ class DirMap(object):
                 return
 
             with open(path, 'rb') as f:
-                data = np.loadtxt(f, delimiter=",")
+                try:
+                    data = np.loadtxt(f, delimiter=",")
+                except Exception as e:
+                    raise IOError("Cannot load '%s': %s" % (path, e.message))
 
             # Convert to tuples of ints if possible, else floats
             values = [map(lambda a:a if a%1 else int(a), t) for t in data]
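Note: the hunk above turns an unannotated numpy failure into an IOError that names the offending CSV. A minimal standalone sketch of that pattern, assuming only numpy (load_csv is an illustrative helper, not code from parse/dir_map.py, and it formats str(e) rather than the Python-2-only e.message):

    import numpy as np

    def load_csv(path):
        # Load a CSV of numbers; on failure, report which file was bad
        # instead of letting a bare numpy traceback escape.
        try:
            return np.loadtxt(path, delimiter=",")
        except Exception as e:
            raise IOError("Cannot load '%s': %s" % (path, e))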
diff --git a/parse/ft.py b/parse/ft.py
index 98405f4..1f05323 100644
--- a/parse/ft.py
+++ b/parse/ft.py
@@ -71,7 +71,6 @@ def extract_ft_data(result, data_dir, work_dir, cycles):
 
     bin_file = "{}/{}".format(data_dir, bins[0])
     if not os.path.getsize(bin_file):
-        sys.stderr.write("Empty feather trace file %s!" % bin_file)
         return False
 
     with open("%s/%s" % (work_dir, FT_ERR_NAME), 'w') as err_file:
diff --git a/parse/point.py b/parse/point.py
index f2b266a..ac47c70 100644
--- a/parse/point.py
+++ b/parse/point.py
@@ -8,8 +8,8 @@ from enum import Enum
 from collections import defaultdict
 
 Type = Enum(['Min','Max','Avg','Var'])
-default_typemap = {Type.Max : {Type.Max : 1, Type.Min : 1, Type.Avg : 1, Type.Var : 1},
-                   Type.Min : {Type.Max : 1, Type.Min : 1, Type.Avg : 1, Type.Var : 1},
+default_typemap = {Type.Max : {Type.Max : 1, Type.Min : 0, Type.Avg : 0, Type.Var : 0},
+                   Type.Min : {Type.Max : 0, Type.Min : 1, Type.Avg : 0, Type.Var : 0},
                    Type.Avg : {Type.Max : 1, Type.Min : 1, Type.Avg : 1, Type.Var : 1}}
 
 def make_typemap():
diff --git a/parse/sched.py b/parse/sched.py
index 147a2e5..1213f0d 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -68,7 +68,7 @@ def make_iterator(fname):
     '''Iterate over (parsed record, processing method) in a
     sched-trace file.'''
     if not os.path.getsize(fname):
-        sys.stderr.write("Empty sched_trace file %s!" % fname)
+        # Likely a release master CPU
         return
 
     f = open(fname, 'rb')
@@ -176,6 +176,10 @@ def extract_sched_data(result, data_dir, work_dir):
 
     # Group per-task values
     for tdata in task_dict.itervalues():
+        if not tdata.params:
+            # Currently unknown where these invalid tasks come from...
+            continue
+
         miss_ratio = float(tdata.misses.num) / tdata.jobs
         # Scale average down to account for jobs with 0 tardiness
         avg_tard = tdata.misses.avg * miss_ratio
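Note: the second hunk guards the per-task statistics, skipping records that never received a parameter block before any ratios are computed from them. A short sketch of the same guard under an illustrative name (miss_ratios is not a function from parse/sched.py; Python 2, matching the rest of the codebase):

    def miss_ratios(task_dict):
        # Map task id -> deadline-miss ratio, ignoring incomplete records.
        ratios = {}
        for pid, tdata in task_dict.iteritems():
            if not tdata.params:
                # Half-initialized entry of unclear origin; skip it rather
                # than compute statistics from it.
                continue
            ratios[pid] = float(tdata.misses.num) / tdata.jobs
        return ratios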
diff --git a/parse/tuple_table.py b/parse/tuple_table.py
index 491ea7b..47fb6b6 100644
--- a/parse/tuple_table.py
+++ b/parse/tuple_table.py
@@ -78,7 +78,7 @@ class ReducedTupleTable(TupleTable):
             val = kv[col]
 
             try:
-                float(val)
+                float(str(val))
             except:
                 # Only vary numbers. Otherwise, just have seperate files
                 continue
@@ -93,9 +93,6 @@ class ReducedTupleTable(TupleTable):
         Leaf = namedtuple('Leaf', ['stat', 'variable', 'base',
                                    'summary', 'config', 'values'])
 
-        def next_type(path):
-            return path.pop() if path[-1] in Type else Type.Avg
-
         def leafs():
             for path, node in dir_map.leafs():
                 # The path will be of at least size 1: the filename
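Note: float(str(val)) makes the "is this column numeric?" probe stricter, so anything whose string form does not parse as a number is treated as a category. For example (plain Python behavior, not repository code):

    float(True)        # 1.0 -- a bare float() call happily accepts a bool
    float(str(True))   # ValueError -- so the column is skipped as non-numeric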
diff --git a/parse_exps.py b/parse_exps.py
index 8aa9b43..c254536 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -32,6 +32,9 @@ def parse_args():
     parser.add_option('-m', '--write-map', action='store_true', default=False,
                       dest='write_map',
                       help='Output map of values instead of csv tree')
+    parser.add_option('-p', '--processors', default=max(cpu_count() - 1, 1),
+                      type='int', dest='processors',
+                      help='number of threads for processing')
 
     return parser.parse_args()
 
@@ -134,7 +137,7 @@ def main():
 
     sys.stderr.write("Parsing data...\n")
 
-    procs = min(len(exps), max(cpu_count()/2, 1))
+    procs = min(len(exps), opts.processors)
     pool = Pool(processes=procs)
     pool_args = zip(exps, [opts.force]*len(exps))
     enum = pool.imap_unordered(parse_exp, pool_args, 1)
@@ -161,8 +164,8 @@ def main():
 
     reduced_table = result_table.reduce()
 
-    sys.stderr.write("Writing result...\n")
     if opts.write_map:
+        sys.stderr.write("Writing python map into %s...\n" % opts.out)
         # Write summarized results into map
         reduced_table.write_map(opts.out)
     else:
@@ -177,6 +180,7 @@ def main():
             for e in exp:
                 print(e)
     else:
+        sys.stderr.write("Writing csvs into %s...\n" % opts.out)
         dir_map.write(opts.out)
 
 if __name__ == '__main__':
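Note: parse_exps.py now exposes its worker count as -p/--processors (defaulting to one less than the machine's CPU count) instead of the hard-coded cpu_count()/2, and the pool is still capped by the number of experiments. A rough sketch of that sizing logic under illustrative names (run_parallel and process_one are not functions from parse_exps.py):

    from multiprocessing import Pool, cpu_count

    def run_parallel(work_items, process_one, max_procs=max(cpu_count() - 1, 1)):
        # Run process_one over work_items with at most max_procs workers,
        # never starting more workers than there is work for.
        if not work_items:
            return []
        pool = Pool(processes=min(len(work_items), max_procs))
        try:
            # Results arrive as workers finish; chunk size 1 keeps them responsive.
            return list(pool.imap_unordered(process_one, work_items, 1))
        finally:
            pool.close()
            pool.join()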
diff --git a/plot/style.py b/plot/style.py
index 21c4e7e..4e2057f 100644
--- a/plot/style.py
+++ b/plot/style.py
@@ -7,7 +7,8 @@ class Style(namedtuple('SS', ['marker', 'line', 'color'])):
 
 class StyleMap(object):
     '''Maps configs (dicts) to specific line styles.'''
-    DEFAULT = Style('', '-', 'k')
+    DEFAULT = Style(marker='', line= '-', color='k')
+    ORDER = [ str, bool, float, int ]
 
     def __init__(self, col_list, col_values):
         '''Assign (some) columns in @col_list to fields in @Style to vary, and
@@ -15,6 +16,17 @@ class StyleMap(object):
         self.value_map = {}
         self.field_map = {}
 
+        # Prioritize non-numbers
+        def type_priority(column):
+            value = col_values[column].pop()
+            col_values[column].add(value)
+            try:
+                t = float if float(value) % 1.0 else int
+            except:
+                t = bool if value in ['True','False'] else str
+            return StyleMap.ORDER.index(t)
+        col_list = sorted(col_list, key=type_priority)
+
         # TODO: undo this, switch to popping mechanism
         for field, values in reversed([x for x in self.__get_all()._asdict().iteritems()]):
             if not col_list:
@@ -31,9 +43,9 @@ class StyleMap(object):
 
     def __get_all(self):
         '''A Style holding all possible values for each property.'''
-        return Style(list('.,ov^<>1234sp*hH+xDd|_'), # markers
-                     ['-', ':', '--'],               # lines
-                     list('bgrcmyk'))                # colors
+        return Style(marker=list('.,ov^<>1234sp*hH+xDd|_'),
+                     line=['-', ':', '--'],
+                     color=list('bgrcmyk'))
 
     def get_style(self, kv):
         '''Translate column values to unique line style.'''
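Note: type_priority above peeks at one sample value per column (pop, then add it back so the set is left unchanged) and sorts columns so non-numeric ones come first when style fields are handed out. A standalone sketch of that classification under assumed names (classify and order_columns are illustrative, not part of plot/style.py):

    ORDER = [str, bool, float, int]

    def classify(value):
        # Values are usually strings: "1.5" counts as float, "4" as int,
        # "True"/"False" as bool, anything unparseable as str.
        try:
            return float if float(value) % 1.0 else int
        except ValueError:
            return bool if value in ['True', 'False'] else str

    def order_columns(col_list, col_values):
        def type_priority(column):
            sample = next(iter(col_values[column]))  # peek without mutating the set
            return ORDER.index(classify(sample))
        return sorted(col_list, key=type_priority)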
diff --git a/plot_exps.py b/plot_exps.py
index 3b5636b..76e7396 100755
--- a/plot_exps.py
+++ b/plot_exps.py
@@ -7,11 +7,11 @@ import shutil as sh
 import sys
 import traceback
 from collections import namedtuple
+from multiprocessing import Pool, cpu_count
 from optparse import OptionParser
 from parse.col_map import ColMap,ColMapBuilder
 from parse.dir_map import DirMap
 from plot.style import StyleMap
-from multiprocessing import Pool, cpu_count
 
 def parse_args():
     parser = OptionParser("usage: %prog [options] [csv_dir]...")
@@ -20,6 +20,9 @@ def parse_args():
                       help='directory for plot output', default='plot-data')
     parser.add_option('-f', '--force', action='store_true', default=False,
                       dest='force', help='overwrite existing data')
+    parser.add_option('-p', '--processors', default=max(cpu_count() - 1, 1),
+                      type='int', dest='processors',
+                      help='number of threads for processing')
 
     return parser.parse_args()
 
@@ -97,7 +100,7 @@ def plot_wrapper(details):
     except:
         traceback.print_exc()
 
-def plot_dir(data_dir, out_dir, force):
+def plot_dir(data_dir, out_dir, max_procs, force):
     sys.stderr.write("Reading data...\n")
     dir_map = DirMap.read(data_dir)
 
@@ -119,7 +122,7 @@ def plot_dir(data_dir, out_dir, force):
     if not plot_details:
         return
 
-    procs = min(len(plot_details), max(cpu_count()/2, 1))
+    procs = min(len(plot_details), max_procs)
     pool = Pool(processes=procs)
     enum = pool.imap_unordered(plot_wrapper, plot_details)
 
@@ -150,7 +153,7 @@ def main():
             out_dir = "%s/%s" % (opts.out_dir, os.path.split(dir)[1])
         else:
             out_dir = opts.out_dir
-        plot_dir(dir, out_dir, opts.force)
+        plot_dir(dir, out_dir, opts.processors, opts.force)
 
 if __name__ == '__main__':
     main()