author    Jonathan Herman <hermanjl@cs.unc.edu>  2012-10-07 23:40:12 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2012-10-07 23:40:12 -0400
commit    5d97a6baf6166b74355c6e744e010949a46fd625 (patch)
tree      9a0ac19bd5cb0b5a366fc2e7a9c814a7ab520a18
parent    c8cb14963511d5d1a3eb46624bcc0d2bcdf3b9bc (diff)
Split scheduling data by task criticality.
-rw-r--r--   experiment/experiment.py   32
-rw-r--r--   parse/ft.py                 2
-rw-r--r--   parse/sched.py            125
-rw-r--r--   parse/tuple_table.py        8
-rwxr-xr-x   parse_exps.py              20
-rwxr-xr-x   run_exps.py                22

6 files changed, 121 insertions(+), 88 deletions(-)
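The heart of the change is the new LeveledArray helper in parse/sched.py below: rather than pooling all samples of a statistic into one flat list, samples are bucketed by the criticality level of the task they came from, and each bucket is written out under a level-prefixed name. A minimal, self-contained sketch of the idea (Measurement is replaced by a plain average here, and the tasks and levels are made up for illustration):

    from collections import defaultdict, namedtuple

    TaskConfig = namedtuple('TaskConfig', ['cpu', 'wcet', 'period', 'type', 'level'])
    Task = namedtuple('Task', ['pid', 'config'])

    class LeveledArray(object):
        """Group statistic samples by the criticality level of their task."""
        def __init__(self, name):
            self.name = name
            self.vals = defaultdict(list)

        def add(self, task, value):
            self.vals[task.config.level].append(value)

        def write_measurements(self, result):
            for level, arr in sorted(self.vals.items()):
                # A task with no level keeps the bare statistic name
                # ("miss-ratio"); leveled tasks get a prefix ("A-miss-ratio").
                name = "%s%s" % ("%s-" % level if level else "", self.name)
                result[name] = sum(arr) / float(len(arr))  # stands in for Measurement

    ratios = LeveledArray("miss-ratio")
    ratios.add(Task(101, TaskConfig(0, 10, 100, 'hard', 'A')), 0.0)
    ratios.add(Task(102, TaskConfig(1, 20, 100, 'soft', 'B')), 0.25)

    out = {}
    ratios.write_measurements(out)
    print(sorted(out.items()))  # [('A-miss-ratio', 0.0), ('B-miss-ratio', 0.25)]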
diff --git a/experiment/experiment.py b/experiment/experiment.py
index a44f798..e6dc92d 100644
--- a/experiment/experiment.py
+++ b/experiment/experiment.py
@@ -40,6 +40,8 @@ class Experiment(object):
         self.finished_dir = finished_dir
         self.proc_entries = proc_entries
         self.executables = executables
+        self.exec_out = None
+        self.exec_err = None
 
         self.__make_dirs()
         self.__assign_executable_cwds()
@@ -151,23 +153,26 @@ class Experiment(object):
         print "[Exp %s]: %s" % (self.name, msg)
 
     def run_exp(self):
-        self.setup()
-
         succ = False
-
         try:
-            self.__run_tasks()
-            self.log("Saving results in %s" % self.finished_dir)
-            succ = True
+            self.setup()
+
+            try:
+                self.__run_tasks()
+                self.log("Saving results in %s" % self.finished_dir)
+                succ = True
+            finally:
+                self.teardown()
         finally:
-            self.teardown()
+            self.log("Switching to Linux scheduler")
+            litmus_util.switch_scheduler("Linux")
 
         if succ:
             self.__save_results()
             self.log("Experiment done!")
 
 
     def setup(self):
         self.log("Writing %d proc entries" % len(self.proc_entries))
         map(methodcaller('write_proc'), self.proc_entries)
 
@@ -185,13 +190,13 @@ class Experiment(object):
             executable.stdout_file = self.exec_out
             executable.stderr_file = self.exec_err
         map(set_out, self.executables)
 
         time.sleep(4)
 
     def teardown(self):
-        self.exec_out.close()
-        self.exec_err.close()
+        self.exec_out and self.exec_out.close()
+        self.exec_err and self.exec_err.close()
 
         sleep_time = 5
         self.log("Sleeping %d seconds to allow buffer flushing" % sleep_time)
         time.sleep(sleep_time)
@@ -199,6 +204,3 @@ class Experiment(object):
         self.log("Stopping tracers")
         map(methodcaller('stop_tracing'), self.tracers)
 
-        self.log("Switching to Linux scheduler")
-        litmus_util.switch_scheduler("Linux")
-
diff --git a/parse/ft.py b/parse/ft.py
index 20a430e..127e49f 100644
--- a/parse/ft.py
+++ b/parse/ft.py
@@ -12,7 +12,7 @@ def get_ft_output(data_dir, out_dir):
 
     FT_DATA_NAME = "scheduler=x-ft"
     output_file = "{}/out-ft".format(out_dir)
-    
+
     if os.path.isfile(output_file):
         print("ft-output already exists for %s" % data_dir)
         return output_file
diff --git a/parse/sched.py b/parse/sched.py
index b84e16e..300c569 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -34,7 +34,26 @@ COMPLETION_RECORD = r"(?P<RECORD>" +\
 TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period','type','level'])
 Task = namedtuple('Task', ['pid', 'config'])
 
+class LeveledArray(object):
+    """
+    Groups statistics by the level of the task to which they apply
+    """
+    def __init__(self, name):
+        self.name = name
+        self.vals = defaultdict(lambda:[])
+
+    def add(self, task, value):
+        self.vals[task.config.level] += [value]
+
+    def write_measurements(self, result):
+        for level, arr in self.vals.iteritems():
+            name = "%s%s" % ("%s-" % level if level else "", self.name)
+            result[name] = Measurement(name).from_array(arr)
+
 def get_st_output(data_dir, out_dir):
+    """
+    Create and return files containing unpacked sched data
+    """
     bin_files = conf.FILES['sched_data'].format(".*")
     bins = [f for f in os.listdir(data_dir) if re.match(bin_files, f)]
 
@@ -70,7 +89,7 @@ def get_tasks(data):
                             (e, match.groupdict(), match.group('RECORD')))
     return ret
 
-def get_tasks_dict(data):
+def get_task_dict(data):
     tasks_list = get_tasks(data)
     tasks_dict = {}
     for t in tasks_list:
@@ -89,17 +108,15 @@ def get_task_exits(data):
         except:
             raise Exception("Invalid exit record, parsed:\n\t%s\n\t%s" %
                             (match.groupdict(), m.group('RECORD')))
 
         ret += [m]
     return ret
-
 
-def extract_tardy_vals(data, exp_point):
-    ratios = []
-    avg_tards = []
-    max_tards = []
 
-    tasks = get_tasks_dict(data)
+def extract_tardy_vals(task_dict, data, exp_point):
+    ratios = LeveledArray("miss-ratio")
+    avg_tards = LeveledArray("avg-rel-tardiness")
+    max_tards = LeveledArray("max-rel-tardiness")
 
     for match in re.finditer(TARDY_RECORD, data):
         try:
@@ -114,35 +131,40 @@
             raise Exception("Invalid tardy record:\n\t%s\n\t%s" %
                             (match.groupdict(), match.group("RECORD")))
 
-        if pid not in tasks:
+        if pid not in task_dict:
             raise Exception("Invalid pid '%d' in tardy record:\n\t%s" %
                             match.group("RECORD"))
 
-        t = tasks[pid]
-        avg_tards += [ total_tard / (jobs * t.config.period) ]
-        max_tards += [ max_tard / t.config.period ]
-        ratios += [ misses / jobs ]
+        t = task_dict[pid]
+        avg_tards.add(t, total_tard / (jobs * t.config.period))
+        max_tards.add(t, max_tard / t.config.period)
+        ratios.add(t, misses / jobs)
 
-    exp_point["avg-rel-tard"] = Measurement().from_array(avg_tards)
-    exp_point["max-rel-tard"] = Measurement().from_array(max_tards)
-    exp_point["miss-ratio"] = Measurement().from_array(ratios)
+    ratios.write_measurements(exp_point)
+    avg_tards.write_measurements(exp_point)
+    max_tards.write_measurements(exp_point)
 
-def extract_variance(data, exp_point):
-    varz = []
+def extract_variance(task_dict, data, exp_point):
+    varz = LeveledArray("exec-variance")
     completions = defaultdict(lambda: [])
+    missed = defaultdict(lambda: int())
 
     for match in re.finditer(COMPLETION_RECORD, data):
         try:
             pid = int(match.group("PID"))
             duration = float(match.group("EXEC"))
 
-            if not (duration and pid): raise Exception()
+            # Last (exit) record often has exec time of 0
+            missed[pid] += not bool(duration)
+
+            if missed[pid] > 1 or not pid: raise Exception()
         except:
-            raise Exception("Invalid completion record:\n\t%s\n\t%s" %
-                            (match.groupdict(), match.group("RECORD")))
+            raise Exception("Invalid completion record, missed - %d:"
+                            "\n\t%s\n\t%s" % (missed[pid], match.groupdict(),
+                                              match.group("RECORD")))
         completions[pid] += [duration]
 
-    for (pid, durations) in completions:
+    for pid, durations in completions.iteritems():
         job_times = np.array(durations)
 
         # Coefficient of variation
@@ -150,32 +172,22 @@
         # Correction, assuming normal distributions
         corrected = (1 + 1/(4 * len(job_times))) * cv
 
-        varz.append(corrected)
-
-    exp_point['exec-var'] = Measurement().from_array(varz)
-
-def extract_sched_data(data_file, result):
-    with open(data_file, 'r') as f:
-        data = f.read()
+        varz.add(task_dict[pid], corrected)
 
-    extract_tardy_vals(data, result)
-    extract_variance(data, result)
+    varz.write_measurements(exp_point)
 
-def config_exit_stats(file):
+def config_exit_stats(task_dict, file):
     with open(file, 'r') as f:
         data = f.read()
-
-    tasks = get_tasks(data)
 
     # Dictionary of task exit measurements by pid
     exits = get_task_exits(data)
-
     exit_dict = dict((e.id, e) for e in exits)
 
     # Dictionary where keys are configurations, values are list
     # of tasks with those configuratino
     config_dict = defaultdict(lambda: [])
-    for t in tasks:
+    for t in task_dict.itervalues():
         config_dict[t.config] += [t]
 
     for config in config_dict:
@@ -185,7 +197,6 @@
             if not t.pid in exit_dict:
                 raise Exception("Missing exit record for task '%s' in '%s'" %
                                 (t, file))
-
         exit_list = [exit_dict[t.pid] for t in task_list]
         config_dict[config] = exit_list
 
@@ -195,20 +206,22 @@ saved_stats = {}
 def get_base_stats(base_file):
     if base_file in saved_stats:
         return saved_stats[base_file]
-    result = config_exit_stats(base_file)
+    with open(base_file, 'r') as f:
+        data = f.read()
+    result = config_exit_stats(data)
     saved_stats[base_file] = result
     return result
 
-def extract_scaling_data(data_file, base_file, result):
+def extract_scaling_data(task_dict, data, result, base_file):
     # Generate trees of tasks with matching configurations
-    data_stats = config_exit_stats(data_file)
+    data_stats = config_exit_stats(data)
     base_stats = get_base_stats(base_file)
 
     # Scaling factors are calculated by matching groups of tasks with the same
     # config, then comparing task-to-task exec times in order of PID within
     # each group
-    max_scales = []
-    avg_scales = []
+    max_scales = LeveledArray("max-scaling")
+    avg_scales = LeveledArray("avg-scaling")
 
     for config in data_stats:
         if len(data_stats[config]) != len(base_stats[config]):
@@ -220,8 +233,22 @@ def extract_scaling_data(data_file, base_file, result):
             avg_scale = float(base_stat[Type.Avg]) / float(base_stat[Type.Avg])
             max_scale = float(base_stat[Type.Max]) / float(base_stat[Type.Max])
 
-            avg_scales += [avg_scale]
-            max_scales += [max_scale]
+            task = task_dict[data_stat.id]
+
+            avg_scales.add(task, avg_scale)
+            max_scales.add(task, max_scale)
+
+    avg_scales.write_measurements(result)
+    max_scales.write_measurements(result)
+
+def extract_sched_data(data_file, result, base_file):
+    with open(data_file, 'r') as f:
+        data = f.read()
+
+    task_dict = get_task_dict(data)
+
+    extract_tardy_vals(task_dict, data, result)
+    extract_variance(task_dict, data, result)
 
-    result['max-scale'] = Measurement().from_array(max_scales)
-    result['avg-scale'] = Measurement().from_array(avg_scales)
+    if (base_file):
+        extract_scaling_data(task_dict, data, result, base_file)
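For reference, the exec-variance statistic that extract_variance() now feeds into a LeveledArray is the coefficient of variation of each task's job execution times, with a small-sample correction. A hedged sketch with made-up job times (note the float literal in the correction term: under Python 2, 1/(4 * n) with an integer n truncates to zero):

    import numpy as np

    job_times = np.array([10.2, 9.8, 10.5, 10.1])

    # Coefficient of variation: relative spread of execution times
    cv = job_times.std() / job_times.mean()
    # Correction, assuming normally distributed samples
    corrected = (1 + 1/(4.0 * len(job_times))) * cv
    print(corrected)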
diff --git a/parse/tuple_table.py b/parse/tuple_table.py
index 6363b80..e6f0cc5 100644
--- a/parse/tuple_table.py
+++ b/parse/tuple_table.py
@@ -13,7 +13,7 @@ class ColMap(object):
     def get_key(self, kv):
         key = ()
         added = 0
-        
+
         for col in self.col_list:
             if col not in kv:
                 key += (None,)
@@ -24,7 +24,7 @@ class ColMap(object):
         if added < len(kv):
             raise Exception("column map '%s' missed field in map '%s'" %
                             (self.col_list, kv))
-        
+
         return key
 
     def __contains__(self, col):
@@ -43,7 +43,7 @@ class ColMap(object):
 
     def __str__(self):
         return "<ColMap>%s" % (self.rev_map)
-    
+
 class TupleTable(object):
     def __init__(self, col_map):
         self.col_map = col_map
@@ -63,7 +63,7 @@ class TupleTable(object):
             raise Exception("cannot reduce twice!")
         self.reduced = True
         for key, values in self.table.iteritems():
-            self.table[key] = SummaryPoint(key, values)
+            self.table[key] = SummaryPoint(str(key), values)
 
     def write_result(self, out_dir):
         dir_map = DirMap(out_dir)
diff --git a/parse_exps.py b/parse_exps.py
index c91a654..8f98309 100755
--- a/parse_exps.py
+++ b/parse_exps.py
@@ -42,18 +42,18 @@ def get_exp_params(data_dir, col_map):
     # Track all changed params
     for key in params.keys():
         col_map.try_add(key)
-    
+
     return params
 
 
 def gen_exp_data(exp_dirs, base_conf, col_map):
     plain_exps = []
     scaling_bases = []
 
     for data_dir in exp_dirs:
         if not os.path.isdir(data_dir):
             raise IOError("Invalid experiment '%s'" % os.path.abspath(data_dir))
-        
+
         tmp_dir = data_dir + "/tmp"
         if not os.path.exists(tmp_dir):
             os.mkdir(tmp_dir)
@@ -85,7 +85,7 @@ def main():
     # Configuration key for task systems used to calculate task
     # execution scaling factors
     base_conf = dict(re.findall("(.*)=(.*)", opts.scale_against))
-    
+
     col_map = ColMap()
 
     (plain_exps, scaling_bases) = gen_exp_data(args, base_conf, col_map)
@@ -103,7 +103,7 @@ def main():
 
     for exp in plain_exps:
         result = ExpPoint(exp.name)
-        
+
         if exp.data_files.ft:
             # Write overheads into result
             ft.extract_ft_data(exp.data_files.ft, result, conf.BASE_EVENTS)
@@ -115,13 +115,9 @@ def main():
             base_params = copy.deepcopy(exp.params)
             base_params.pop(base_conf.keys()[0])
             base = base_table.get_exps(base_params)[0]
-            if base:
-                # Write scaling factor (vs base) into result
-                st.extract_scaling_data(exp.data_files.st,
-                                        base.data_files.st,
-                                        result)
         # Write deadline misses / tardiness into result
-        st.extract_sched_data(exp.data_files.st, result)
+        st.extract_sched_data(exp.data_files.st, result,
+                              base.data_files.st if base else None)
 
         result_table.add_exp(exp.params, result)
 
@@ -129,6 +125,6 @@ def main():
 
 
     result_table.write_result(opts.out_dir)
-    
+
 if __name__ == '__main__':
     main()
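Incidentally, the base_conf dictionary consumed above is built with the dict(re.findall("(.*)=(.*)", ...)) idiom, which turns each key=value pair of --scale-against into a dictionary entry. A quick illustration with a hypothetical option string:

    import re

    base_conf = dict(re.findall("(.*)=(.*)", "scheduler=C-EDF"))
    print(base_conf)  # {'scheduler': 'C-EDF'}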
diff --git a/run_exps.py b/run_exps.py
index 825ad5b..8f72adb 100755
--- a/run_exps.py
+++ b/run_exps.py
@@ -1,5 +1,9 @@
1#!/usr/bin/env python 1#!/usr/bin/env python
2from __future__ import print_function 2from __future__ import print_function
3"""
4TODO: no -f flag, instead allow individual schedules to be passed in.
5 -f flag now forced, which removes old data directories
6"""
3 7
4import config.config as conf 8import config.config as conf
5import experiment.litmus_util as lu 9import experiment.litmus_util as lu
@@ -39,11 +43,11 @@ def convert_data(data):
     """Convert a non-python schedule file into the python format"""
     regex = re.compile(
         r"(?P<PROC>^"
-        r"(?P<HEADER>/proc/\w+?/)?"
-        r"(?P<ENTRY>[\w\/]+)"
+        r"(?P<HEADER>/proc/[\w\-]+?/)?"
+        r"(?P<ENTRY>[\w\-\/]+)"
         r"\s*{\s*(?P<CONTENT>.*?)\s*?}$)|"
         r"(?P<SPIN>^"
-        r"(?P<TYPE>\w+?spin)?\s+"
+        r"(?P<TYPE>\w+?spin)?\s*"
         r"(?P<ARGS>[\w\-_\d\. ]+)\s*$)",
         re.S|re.I|re.M)
 
@@ -70,6 +74,7 @@ def fix_paths(schedule, exp_dir):
             abspath = "%s/%s" % (exp_dir, arg)
             if os.path.exists(abspath):
                 args = args.replace(arg, abspath)
+                break
 
         schedule['spin'][idx] = (spin, args)
 
@@ -96,7 +101,7 @@ def load_experiment(sched_file, scheduler, duration, param_file, out_base):
 
     params = {}
     kernel = ""
-    
+
     param_file = param_file or \
                  "%s/%s" % (dirname, conf.DEFAULTS['params_file'])
 
@@ -181,7 +186,6 @@ def run_exp(name, schedule, scheduler, kernel, duration, work_dir, out_dir):
                          proc_entries, executables)
 
     exp.run_exp()
-
 
 def main():
     opts, args = parse_args()
@@ -193,14 +197,16 @@ def main():
 
     args = args or [opts.sched_file]
 
+    created = False
     if not os.path.exists(out_base):
+        created = True
         os.mkdir(out_base)
 
     done = 0
     succ = 0
     failed = 0
     invalid = 0
 
     for exp in args:
         path = "%s/%s" % (os.getcwd(), exp)
 
@@ -223,7 +229,9 @@ def main():
             traceback.print_exc()
             failed += 1
 
-
+    if not os.listdir(out_base) and created and not succ:
+        os.rmdir(out_base)
+
     print("Experiments run:\t%d" % len(args))
     print(" Successful:\t\t%d" % succ)
     print(" Failed:\t\t%d" % failed)
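The loosened convert_data() regex above now accepts hyphens in /proc paths and entry names (e.g. C-EDF plugin entries) and makes the whitespace between a spin type and its arguments optional. A standalone check of the new pattern against a made-up schedule snippet:

    import re

    # Same pattern as the new convert_data(); the sample input is hypothetical.
    regex = re.compile(
        r"(?P<PROC>^"
        r"(?P<HEADER>/proc/[\w\-]+?/)?"
        r"(?P<ENTRY>[\w\-\/]+)"
        r"\s*{\s*(?P<CONTENT>.*?)\s*?}$)|"
        r"(?P<SPIN>^"
        r"(?P<TYPE>\w+?spin)?\s*"
        r"(?P<ARGS>[\w\-_\d\. ]+)\s*$)",
        re.S|re.I|re.M)

    sample = "/proc/litmus/C-EDF/some_entry {1}\nrtspin 10 100 5"
    for m in regex.finditer(sample):
        if m.group("PROC"):
            print("proc: %s -> %s" % (m.group("ENTRY"), m.group("CONTENT")))
        else:
            print("spin: %s %s" % (m.group("TYPE"), m.group("ARGS")))
    # proc: C-EDF/some_entry -> 1
    # spin: rtspin 10 100 5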