| field | value | date |
|---|---|---|
| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-04-25 16:43:49 -0400 |
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-04-25 16:43:49 -0400 |
| commit | 15f231a79320cbc97cd88d8a4751515a47ce223e | |
| tree | b86b202cadc816a5da7b96372b9de4362da88116 | |
| parent | 2ceaa6c607ef85bde4f14017634d9d1621efca29 | |
Bug fixes from testing.
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | common.py | 9 |
| -rw-r--r-- | config/config.py | 13 |
| -rw-r--r-- | gen/color.py | 10 |
| -rw-r--r-- | gen/mc_generators.py | 28 |
| -rwxr-xr-x | gen_exps.py | 2 |
| -rw-r--r-- | parse/sched.py | 15 |
| -rw-r--r-- | run/experiment.py | 82 |
| -rw-r--r-- | run/proc_entry.py | 11 |
| -rwxr-xr-x | run_exps.py | 2 |

9 files changed, 142 insertions(+), 30 deletions(-)
```diff
diff --git a/common.py b/common.py
--- a/common.py
+++ b/common.py
@@ -200,13 +200,16 @@ def set_logged_list(logged):
     global __logged
     __logged = logged
 
-def log_once(id, msg = None, indent = True):
+def log_once(id, msg = None):
     global __logged
 
-    msg = msg if msg else id
+    # Indent if a multithreaded list was specified
+    indent = type(__logged) != type([])
+
+    msg = msg.strip('\n') if msg else id
 
     if id not in __logged:
        __logged += [id]
        if indent:
            msg = ' ' + msg.strip('\t').replace('\n', '\n\t')
-       sys.stderr.write('\n' + msg.strip('\n') + '\n')
+       sys.stderr.write('\n' + msg + '\n')
```
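The new `indent = type(__logged) != type([])` check infers the "multithreaded" case from the type of the shared list itself: a plain list means a single-process run, while anything else (presumably a `multiprocessing` manager proxy shared with worker processes) triggers indentation. A minimal sketch of that check, not taken from the repo:

```python
# Hypothetical illustration of the type check in log_once(): a plain list
# means a single-process run, a manager proxy means worker processes are
# logging and their messages should be indented.
import multiprocessing

if __name__ == '__main__':
    plain = []                                 # single-process run
    shared = multiprocessing.Manager().list()  # presumed multi-process run

    print(type(plain) != type([]))    # False -> no indentation
    print(type(shared) != type([]))   # True  -> child output gets indented
```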
```diff
diff --git a/config/config.py b/config/config.py
index 5e6f9e3..28e78c9 100644
--- a/config/config.py
+++ b/config/config.py
@@ -50,7 +50,15 @@ DEFAULTS = {'params_file' : 'params.py',
 SCHED_EVENTS = range(501, 513)
 
 '''Overhead events.'''
-OVH_BASE_EVENTS = ['SCHED', 'RELEASE', 'SCHED2', 'TICK', 'CXS', 'LOCK', 'UNLOCK']
+OVH_BASE_EVENTS = ['SCHED', 'RELEASE', 'SCHED2', 'TICK', 'CXS',
+                   'SEND_RESCHED', 'LOCK', 'UNLOCK']
+
+'''Mixed-criticality overheads.'''
+MC_EVENTS = ['LVL{}_SCHED', 'LVL{}_RELEASE']
+MC_LEVELS = ['A', 'B', 'C']
+OVH_BASE_EVENTS += [s.format(l) for (l,s) in
+                    itertools.product(MC_LEVELS, MC_EVENTS)]
+
 OVH_ALL_EVENTS = ["%s_%s" % (e, t) for (e,t) in
                   itertools.product(OVH_BASE_EVENTS, ["START","END"])]
 OVH_ALL_EVENTS += ['RELEASE_LATENCY']
@@ -60,3 +68,6 @@ OVH_BASE_EVENTS += ['RELEASE_LATENCY']
 # If a task is missing more than this many records, its measurements
 # are not included in sched_trace summaries
 MAX_RECORD_LOSS = .2
+
+# Number of pages needed for each color before experiments are run
+PAGES_PER_COLOR = 1024
```
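For reference, a standalone sketch (using the same expressions as `config.py`) of what the new mixed-criticality overhead names expand to:

```python
# Expand the new MC event names the same way config.py does.
import itertools

MC_EVENTS = ['LVL{}_SCHED', 'LVL{}_RELEASE']
MC_LEVELS = ['A', 'B', 'C']

mc_names = [s.format(l) for (l, s) in itertools.product(MC_LEVELS, MC_EVENTS)]
print(mc_names)
# ['LVLA_SCHED', 'LVLA_RELEASE', 'LVLB_SCHED', 'LVLB_RELEASE',
#  'LVLC_SCHED', 'LVLC_RELEASE']

# Each base event is then paired with START/END suffixes:
print(["%s_%s" % (e, t) for (e, t) in itertools.product(mc_names[:1], ["START", "END"])])
# ['LVLA_SCHED_START', 'LVLA_SCHED_END']
```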
```diff
diff --git a/gen/color.py b/gen/color.py
index 8184b8b..46ec8dc 100644
--- a/gen/color.py
+++ b/gen/color.py
@@ -29,19 +29,22 @@ class BlockColorScheme(ColorScheme):
         if self.way_first:
             # Way first means maximize ways
             pages_per_color = min(self.ways, pages_needed)
-            colors_per_task = int(ceil(pages_needed/pages_per_color))
+            colors_per_task = int(ceil(float(pages_needed)/pages_per_color))
         else:
             # Color first means maximize colors
             colors_per_task = min(self.colors, pages_needed)
-            pages_per_color = int(ceil(pages_needed/colors_per_task))
+            pages_per_color = int(ceil(float(pages_needed)/colors_per_task))
 
         curr_color = 0
         for cpu, tasks in cpus.iteritems():
             # All tasks on a CPU have the same coloring scheme
             cpu_colors = defaultdict(int)
             for _ in xrange(colors_per_task):
-                curr_color = (curr_color + 1) % self.colors
                 cpu_colors[curr_color] = pages_per_color
+                curr_color = (curr_color + 1) % self.colors
+
+            if sum(cpu_colors.values()) < pages_needed:
+                raise Exception("Failed to block color cpu, %s" % cpu_colors)
 
             for t in tasks:
                 t.colors = cpu_colors
@@ -80,7 +83,6 @@ class EvilColorScheme(ColorScheme):
         for t in tasks:
             t.colors = colors
 
-
 INFO_FIELDS = ['cache', 'line', 'page', 'ways', 'sets', 'colors']
 INFO_PROC = '/proc/sys/litmus/color/cache_info'
 
```
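The added `float()` casts matter because these scripts run under Python 2 (note the `iteritems`/`xrange` calls), where `/` on two ints truncates before `ceil()` ever sees the value. A quick illustration with made-up numbers:

```python
# Why ceil(pages_needed/pages_per_color) under-counted on Python 2.
from math import ceil

pages_needed, pages_per_color = 5, 3

print(int(ceil(pages_needed / pages_per_color)))
# Python 2: 1 (one color short); Python 3 would give 2, since '/' is true division

print(int(ceil(float(pages_needed) / pages_per_color)))
# 2 on either version, as intended
```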
```diff
diff --git a/gen/mc_generators.py b/gen/mc_generators.py
index 8f5bd84..d8c172d 100644
--- a/gen/mc_generators.py
+++ b/gen/mc_generators.py
@@ -243,8 +243,9 @@ TP_TYPE = """#if $type != 'unmanaged'
 /proc/sys/litmus/color/preempt_cache{0}
 #end if"""
 
-# Always add some pages
-TP_ADD = """/proc/sys/litmus/color/add_pages{1}"""
+# Now done by experiment.py
+# # Always add some pages
+# TP_ADD = """/proc/sys/litmus/color/add_pages{1}"""
 
 # Use special spin for color tasks
 TP_COLOR_BASE = """colorspin -y $t.id -x $t.colorcsv -q $t.wss -l $t.loops """
@@ -253,8 +254,8 @@ TP_COLOR_B = TP_BASE.format("b", TP_COLOR_BASE + "-p $t.cpu ")
 TP_COLOR_C = TP_BASE.format("c", TP_COLOR_BASE)
 
 # Not even sure job splitting is still possible
-TP_CHUNK = """#if $chunk_size > 0
-/proc/sys/litmus/color/chunk_size{$chunk_size}
+TP_CHUNK = """#if $chunk_size_ns > 0
+/proc/sys/litmus/color/chunk_size{$chunk_size_ns}
 #end if"""
 
 COLOR_TYPES = ['scheduling', 'locking', 'unmanaged']
@@ -264,7 +265,7 @@ class ColorMcGenerator(McGenerator):
 
     def __init__(self, params = {}):
         super(ColorMcGenerator, self).__init__("MC",
-                templates=[TP_ADD, TP_TYPE, TP_CHUNK, TP_COLOR_B, TP_COLOR_C],
+                templates=[TP_TYPE, TP_CHUNK, TP_COLOR_B, TP_COLOR_C],
                 options=self.__make_options(),
                 params=self.__extend_params(params))
 
@@ -336,7 +337,7 @@ class ColorMcGenerator(McGenerator):
                       'System colors (cache size / ways).'),
             GenOption('page_size', int, self.cache.page,
                       'System page size.'),
-            GenOption('wss', [float, int], .5,
+            GenOption('wss', [float, int], .25,
                       'Task working set sizes. Can be expressed as a fraction ' +
                       'of the cache.')]
 
@@ -359,7 +360,8 @@ class ColorMcGenerator(McGenerator):
         if pages > cache_pages:
             raise Exception('WSS (%d) larger than the cache!' % (wss))
 
-        return pages
+        # Divide in half for HRT, SRT divide
+        return pages / 2
 
 
     def __make_csv(self, task):
@@ -410,11 +412,23 @@ class ColorMcGenerator(McGenerator):
             hrt_colorer = EvilColorScheme(c, w)
             srt_colorer = hrt_colorer
         else:
+            # Divide cache between hrt and srt
+            c /= 2
             srt_colorer = RandomColorScheme(c, w)
             hrt_colorer = BlockColorScheme(c, w, way_first=True)
 
         hrt_colorer.color(task_system['lvlb'], pages_needed)
         srt_colorer.color(task_system['lvlc'], pages_needed)
 
+        # This check has saved me a lot of trouble already, leave it in
+        for t in task_system['lvlb'] + task_system['lvlc']:
+            if sum(t.colors.values()) * params['page_size'] < real_wss:
+                raise Exception("Didn't color enough pages for %s" % params)
+
+        if params['type'] != 'unmanaged':
+            # Bump srt into the second half of the cache
+            for t in task_system['lvlc']:
+                t.colors = {colors+c:w for colors, w in t.colors.iteritems()}
+
         for t in all_tasks:
             self.__make_csv(t)
```
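A toy illustration (numbers invented) of the level-C color bump: after `c /= 2`, HRT tasks use colors `[0, c)` and each SRT color `i` is shifted to `i + c`, i.e. into the second half of the cache:

```python
# Sketch of the color bump applied to each level-C task's color -> pages map.
c = 8                          # colors per half after "c /= 2"
srt_colors = {0: 4, 3: 4}      # invented assignment from RandomColorScheme
bumped = {color + c: pages for color, pages in srt_colors.items()}
print(bumped)                  # {8: 4, 11: 4}
```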
```diff
diff --git a/gen_exps.py b/gen_exps.py
index b847661..65f50d8 100755
--- a/gen_exps.py
+++ b/gen_exps.py
@@ -59,7 +59,7 @@ def main():
     if opts.list_gens or opts.described:
         return 0
 
-    params = filter(lambda x : re.match("\w+=\w+", x), args)
+    params = filter(lambda x : re.match("\w+=[\.\w]+", x), args)
 
     # Ensure some generator is loaded
     args = list(set(args) - set(params))
```
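The widened pattern is what lets floating-point command-line parameters, such as the new `wss=.25` default, survive the filter; a quick check:

```python
import re

print(bool(re.match(r"\w+=\w+", "wss=.25")))             # False -- '.' is not a word character
print(bool(re.match(r"\w+=[\.\w]+", "wss=.25")))         # True
print(bool(re.match(r"\w+=[\.\w]+", "page_size=4096")))  # plain values still match
```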
```diff
diff --git a/parse/sched.py b/parse/sched.py
index 1033989..5a36da9 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -26,9 +26,11 @@ ScaleData = namedtuple('ScaleData', ['reg_tasks', 'base_tasks'])
 
 class TimeTracker:
     '''Store stats for durations of time demarcated by sched_trace records.'''
-    def __init__(self):
+    def __init__(self, join_job = False):
         self.begin = self.avg = self.max = self.num = self.next_job = 0
 
+        self.join_job = join_job
+
         # Count of times the job in start_time matched that in store_time
         self.matches = 0
         # And the times it didn't
@@ -39,9 +41,12 @@ class TimeTracker:
         # any task is always skipped
         self.last_record = None
 
+        self.stored_dur = 0
+
     def store_time(self, next_record):
         '''End duration of time.'''
         dur = (self.last_record.when - self.begin) if self.last_record else -1
+        dur += self.stored_dur
 
         if self.next_job == next_record.job:
             self.last_record = next_record
@@ -49,13 +54,16 @@ class TimeTracker:
             if self.last_record:
                 self.matches += 1
 
-            if dur > 0:
+            if self.join_job and self.next_job == self.last_record.job:
+                self.stored_dur += dur
+            elif dur > 0:
                 self.max = max(self.max, dur)
                 self.avg *= float(self.num / (self.num + 1))
                 self.num += 1
                 self.avg += dur / float(self.num)
 
             self.begin = 0
+            self.stored_dur = 0
             self.next_job = 0
         else:
             self.disjoints += 1
@@ -70,7 +78,6 @@ class TimeTracker:
 
         self.next_job = record.job
 
-
 class LeveledArray(object):
     """Groups statistics by the level of the task to which they apply"""
     def __init__(self):
```
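A self-contained mimic (not the real `TimeTracker`) of what `join_job` changes: consecutive spans that belong to the same job are merged into one duration before the max/average are updated, instead of each span counting as its own sample:

```python
from collections import namedtuple

Span = namedtuple('Span', ['job', 'dur'])

def summarize(spans, join_job=False):
    """Merge same-job spans when join_job is set, then report (max, avg)."""
    merged, stored, last_job = [], 0, None
    for s in spans:
        if join_job and s.job == last_job:
            stored += s.dur          # accumulate, like stored_dur
        else:
            if last_job is not None:
                merged.append(stored)
            stored, last_job = s.dur, s.job
    merged.append(stored)
    return max(merged), sum(merged) / float(len(merged))

spans = [Span(1, 3), Span(1, 2), Span(2, 4)]
print(summarize(spans, join_job=False))  # (4, 3.0) -- three separate samples
print(summarize(spans, join_job=True))   # (5, 4.5) -- job 1's spans merged into 5
```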
```diff
diff --git a/run/experiment.py b/run/experiment.py
index b0e46b6..4667cb1 100644
--- a/run/experiment.py
+++ b/run/experiment.py
@@ -1,8 +1,13 @@
+import config.config as conf
 import os
-import time
+import re
 import run.litmus_util as lu
 import shutil as sh
+import sys
+import time
+
 from operator import methodcaller
+from run.proc_entry import ProcEntry
 
 class ExperimentException(Exception):
     '''Used to indicate when there are problems with an experiment.'''
@@ -17,6 +22,10 @@ class ExperimentDone(ExperimentException):
 class SystemCorrupted(Exception):
     pass
 
+PROC_ADD_PAGES = '/proc/sys/litmus/color/add_pages'
+PROC_NR_PAGES = '/proc/sys/litmus/color/nr_pages'
+REG_NR_PAGES = re.compile(r'\s*\d+\s*:\s*(\d+)', re.M)
+
 class Experiment(object):
     '''Execute one task-set and save the results. Experiments have unique IDs.'''
     INTERRUPTED_DIR = ".interrupted"
@@ -100,7 +109,7 @@ class Experiment(object):
         for e in self.executables:
             status = e.poll()
             if status != None and status:
-                err_msg = "Task %s failed with status: %s" % (e.wait(), status)
+                err_msg = "Task %s failed with status: %s" % (e, status)
                 msgs += [err_msg]
 
         if msgs:
@@ -108,7 +117,7 @@ class Experiment(object):
             # up the terminal
             if len(msgs) > 3:
                 num_errs = len(msgs) - 3
-                msgs = msgs[0:4] + ["...%d more task errors..." % num_errs]
+                msgs = msgs[0:3] + ["...%d more task errors..." % num_errs]
 
             out_name = self.__strip_path(self.exec_out.name)
             err_name = self.__strip_path(self.exec_err.name)
@@ -138,7 +147,7 @@ class Experiment(object):
             now_ready = lu.waiting_tasks()
             if now_ready != num_ready:
                 wait_start = time.time()
-                num_ready = lu.now_ready
+                num_ready = now_ready
 
     def __run_tasks(self):
         self.log("Starting %d tasks" % len(self.executables))
@@ -197,11 +206,67 @@ class Experiment(object):
         if msgs:
             raise SystemCorrupted("\n".join(msgs))
 
+    def __get_nr_pages(self):
+        with open(PROC_NR_PAGES, 'r') as f:
+            data = f.read()
+
+        pages = map(int, REG_NR_PAGES.findall(data))
+        return pages
+
+    def __create_colored_pages(self):
+        if self.scheduler != 'COLOR' and self.scheduler != 'MC':
+            return
+
+        self.log("Creating colored pages...")
+
+        # On system startup, it takes some time for these entries to appear
+        start = time.time()
+        while not os.path.exists(PROC_ADD_PAGES) or\
+              not os.path.exists(PROC_NR_PAGES):
+
+            if time.time() - start > 30.0:
+                raise Exception("Cannot find %s or %s!" %
+                                (PROC_ADD_PAGES, PROC_NR_PAGES))
+            time.sleep(1)
+
+        start_pages = self.__get_nr_pages()
+        num_started = sum(start_pages)
+        num_created = 0
+        num_needed = len(start_pages) * conf.PAGES_PER_COLOR
+
+        ProcEntry(PROC_ADD_PAGES, 1).write_proc()
+
+        # Spin until pages are done adding
+        start = time.time()
+        while True:
+            if time.time() - start > 30.0:
+                raise Exception("Too much time spent creating pages!")
+
+            pages = sum(self.__get_nr_pages())
+
+            if pages == num_needed:
+                break
+            else:
+                if pages > num_created:
+                    num_created = pages
+                    start = time.time()
+                sys.stderr.write('\rPages needed: {0: 4}'.format(num_needed - pages))
+
+                # Unknown why this has to be done again....
+                ProcEntry(PROC_ADD_PAGES, 1).write_proc()
+                time.sleep(1)
+
+        if num_created:
+            sys.stderr.write('\n')
+        self.log("Created %d colored pages." % (num_needed - num_started))
+
     def __setup(self):
         self.__make_dirs()
         self.__assign_executable_cwds()
         self.__setup_tracers()
 
+        self.__create_colored_pages()
+
         self.log("Writing %d proc entries" % len(self.proc_entries))
         map(methodcaller('write_proc'), self.proc_entries)
 
@@ -229,6 +294,8 @@ class Experiment(object):
         self.log("Stopping regular tracers")
         map(methodcaller('stop_tracing'), self.regular_tracers)
 
+        os.system('sync')
+
     def log(self, msg):
         print("[Exp %s]: %s" % (self.name, msg))
 
@@ -253,8 +320,11 @@ class Experiment(object):
             self.__teardown()
         finally:
             self.log("Switching back to Linux scheduler")
-            self.__to_linux()
-
+            try:
+                self.__to_linux()
+            except Exception as e:
+                print(e)
+
         if succ:
             self.__save_results()
             self.log("Experiment done!")
```
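A sketch of how `REG_NR_PAGES` is used to total up the per-color page counts; the sample `/proc` contents below are invented, only the `color: count` shape is assumed from the regex itself:

```python
import re

REG_NR_PAGES = re.compile(r'\s*\d+\s*:\s*(\d+)', re.M)

sample = " 0: 1024\n 1:  512\n 2:    0\n"   # hypothetical nr_pages dump

pages = [int(x) for x in REG_NR_PAGES.findall(sample)]
print(pages)       # [1024, 512, 0]
print(sum(pages))  # 1536 -- compared against len(pages) * PAGES_PER_COLOR
```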
```diff
diff --git a/run/proc_entry.py b/run/proc_entry.py
index 56f7c24..e222c3d 100644
--- a/run/proc_entry.py
+++ b/run/proc_entry.py
@@ -1,9 +1,10 @@
 import os
+import traceback
 
 class ProcEntry(object):
     def __init__(self, proc, data):
         self.proc = proc
-        self.data = data
+        self.data = str(data)
 
         if not os.path.exists(self.proc):
             raise ValueError("Invalid proc entry %s" % self.proc)
@@ -13,4 +14,10 @@ class ProcEntry(object):
             with open(self.proc, 'w') as entry:
                 entry.write(self.data)
         except:
-            print("Failed to write into %s value:\n%s" % (self.proc, self.data))
+            traceback.print_exc()
+
+            val = str(self.data)
+            val = val if '\n' not in val else '\n'+val
+
+            raise IOError("Failed to write into %s value: %s" %
+                          (self.proc, val))
```
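The `str(data)` coercion matters because `file.write()` only accepts strings, so integer proc settings such as `1` would raise inside `write_proc` and only ever produce the old warning print. A minimal demonstration against a scratch file (not a real proc entry):

```python
# file.write() rejects non-string values on both Python 2 and 3.
with open('/tmp/proc_entry_demo', 'w') as f:
    try:
        f.write(1)        # TypeError
    except TypeError as e:
        print(e)
    f.write(str(1))       # what ProcEntry(..., 1) now stores and writes
```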
```diff
diff --git a/run_exps.py b/run_exps.py
index a15018d..afabca8 100755
--- a/run_exps.py
+++ b/run_exps.py
@@ -356,10 +356,8 @@ def make_paths(exp, out_base_dir, opts):
 
     return sched_file, out_dir
 
-
 def main():
     opts, args = parse_args()
-
     exps = get_exps(opts, args)
 
     jabber = setup_jabber(opts.jabber) if opts.jabber else None
```
