author | Glenn Elliott <gelliott@cs.unc.edu> | 2014-01-08 10:55:21 -0500
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2014-01-08 10:55:21 -0500
commit | 49463510def7ba6e951dcce97db7a47e79b8d497 (patch)
tree | bd4d615041e4725e130af14b1e42ea7d0f9b3f41 /ecrts14/ecrts14.py
parent | 4b78a67b36049f5ff6b3b6695c79f5a35da1eb60 (diff)
Store sched results to SQL db instead of csv files
Diffstat (limited to 'ecrts14/ecrts14.py')
-rwxr-xr-x | ecrts14/ecrts14.py | 473 |
1 files changed, 212 insertions, 261 deletions
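
Note: this commit replaces the per-run CSV writers with a SQLite database accessed through a new `database` helper module (imported in the diff below as `db`). That module is not part of this diff, so the following is only a minimal sketch of the storage pattern it implies, assuming a flat results table, a dict-like design point, and hypothetical column names.

```python
# Minimal sketch of the storage pattern this commit adopts. The real helpers
# live in the new `database` module (not shown in this diff); the table name,
# columns, and dict-style design point below are assumptions for illustration.
import sqlite3

def store_sched_results(db_name, dp, results):
    con = sqlite3.connect(db_name)
    with con:  # commits on success, rolls back on exception
        con.execute("""CREATE TABLE IF NOT EXISTS sched_results (
                           nr_clusters INTEGER,
                           sys_util    REAL,
                           method      INTEGER,
                           sched_ratio REAL)""")
        for method, ratio in results.items():
            con.execute("INSERT INTO sched_results VALUES (?, ?, ?, ?)",
                        (dp['nr_clusters'], dp['sys_util'], method, ratio))
    con.close()
```

One row per (design point, test method) pair is a plausible reading of why the reworked TESTS list below now carries a numeric method id as its first element; the actual schema is defined in database.py.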
diff --git a/ecrts14/ecrts14.py b/ecrts14/ecrts14.py
index 2a81399..85c2ff6 100755
--- a/ecrts14/ecrts14.py
+++ b/ecrts14/ecrts14.py
@@ -9,8 +9,10 @@ import os
9 | import math | 9 | import math |
10 | import time | 10 | import time |
11 | 11 | ||
12 | #import logging, multiprocessing | 12 | import sqlite3 as lite |
13 | import json | ||
13 | 14 | ||
15 | import copy | ||
14 | from collections import defaultdict | 16 | from collections import defaultdict |
15 | from csv import DictWriter | 17 | from csv import DictWriter |
16 | from itertools import product | 18 | from itertools import product |
@@ -18,7 +20,6 @@ from math import ceil
18 | from multiprocessing import Pool, cpu_count | 20 | from multiprocessing import Pool, cpu_count |
19 | from numpy import arange | 21 | from numpy import arange |
20 | from pprint import pprint | 22 | from pprint import pprint |
21 | from copy import deepcopy | ||
22 | import traceback | 23 | import traceback |
23 | 24 | ||
24 | from schedcat.model.tasks import SporadicTask, TaskSystem | 25 | from schedcat.model.tasks import SporadicTask, TaskSystem |
@@ -40,8 +41,11 @@ import tests
40 | import topology | 41 | import topology |
41 | from machines import machines | 42 | from machines import machines |
42 | 43 | ||
43 | import gc | 44 | #import gc |
44 | import resource | 45 | #import resource |
46 | import traceback | ||
47 | |||
48 | import database as db | ||
45 | 49 | ||
46 | NAMED_PERIODS_US = { | 50 | NAMED_PERIODS_US = { |
47 | # Named period distributions used in several UNC papers, in microseconds | 51 | # Named period distributions used in several UNC papers, in microseconds |
@@ -50,8 +54,66 @@ NAMED_PERIODS_US = {
50 | 'uni-long' : tasks.uniform_int(50*1000, 250*1000), | 54 | 'uni-long' : tasks.uniform_int(50*1000, 250*1000), |
51 | } | 55 | } |
52 | 56 | ||
53 | MIN_SAMPLES = 500 | 57 | #based off of a 24-core system |
54 | MAX_SAMPLES = 10000 | 58 | # fewer graphs = harder partitioning |
59 | NAMED_NUM_GRAPHS = { | ||
60 | 'uni-many' : graph.uniform(24, 24*4), | ||
61 | 'uni-medium' : graph.uniform(12, 48), | ||
62 | 'uni-few' : graph.uniform(1,12), | ||
63 | |||
64 | 'bimo-many' : graph.binomial(24, 24*4), | ||
65 | 'bimo-medium' : graph.binomial(12, 48), | ||
66 | 'bimo-few' : graph.binomial(1,12), | ||
67 | } | ||
68 | |||
69 | NAMED_SHAPES = { | ||
70 | 'uniform' : graph.uniform(), | ||
71 | 'binomial' : graph.binomial(), | ||
72 | # 'geometric': graph.geometric(), | ||
73 | } | ||
74 | |||
75 | NAMED_HEIGHT_FACTORS = { | ||
76 | 'uni-short' : [1.0/3.0, 1.0/2.0], | ||
77 | 'uni-medium' : [1.0/2.0, 3.0/4.0], | ||
78 | 'uni-tall' : [3.0/4.0, 1.0], | ||
79 | } | ||
80 | |||
81 | NAMED_FAN = { | ||
82 | 'none' : graph.uniform(1,1), | ||
83 | 'uniform_3' : graph.uniform(1,3), | ||
84 | 'geometric_3' : graph.geometric(1,3), | ||
85 | } | ||
86 | |||
87 | NAMED_EDGE_HOP = { | ||
88 | 'none' : graph.uniform(1,1), | ||
89 | 'uniform_3' : graph.uniform(1,3), | ||
90 | 'uniform_deep' : graph.uniform(1,100), | ||
91 | 'geometric_3': graph.geometric(1,3), | ||
92 | } | ||
93 | |||
94 | NAMED_EDGE_WSS = { | ||
95 | 'uni-light' : tasks.uniform(1, 64), | ||
96 | 'uni-medium' : tasks.uniform(256, 1024), | ||
97 | 'uni-heavy' : tasks.uniform(2*1024, 8*1024), | ||
98 | |||
99 | 'bimo-light' : tasks.multimodal([(tasks.uniform_int(64,256), 8), (tasks.uniform_int(2*1024, 8*1024), 1)]), | ||
100 | 'bimo-medium' : tasks.multimodal([(tasks.uniform_int(64,256), 6), (tasks.uniform_int(2*1024, 8*1024), 3)]), | ||
101 | 'bimo-heavy' : tasks.multimodal([(tasks.uniform_int(64,256), 4), (tasks.uniform_int(2*1024, 8*1024), 5)]), | ||
102 | } | ||
103 | |||
104 | |||
105 | TESTS = [ | ||
106 | (0, "CacheAgnostic", tests.test_partition_no_cache), | ||
107 | (1, "MaximizeParallelism", tests.test_partition_parallel), | ||
108 | (2, "CacheAware", tests.test_partition_cache_aware), | ||
109 | (3, "CacheAwareEdges", tests.test_partition_cache_aware_edges), | ||
110 | # ("MaximizeParallelismCacheAware", tests.test_partition_parallel2) | ||
111 | (4, "CacheAwareBFSEdges", tests.test_partition_cache_aware_bfs), | ||
112 | (5, "CacheAwareDFSEdges", tests.test_partition_cache_aware_dfs) | ||
113 | ] | ||
114 | |||
115 | MIN_SAMPLES = 200 | ||
116 | MAX_SAMPLES = 500 | ||
55 | #MIN_SAMPLES = 1000 | 117 | #MIN_SAMPLES = 1000 |
56 | #MAX_SAMPLES = 10000 | 118 | #MAX_SAMPLES = 10000 |
57 | MAX_CI = 0.05 | 119 | MAX_CI = 0.05 |
@@ -159,8 +221,8 @@ def complete(results, n):
159 | elif n > MAX_SAMPLES: | 221 | elif n > MAX_SAMPLES: |
160 | return True | 222 | return True |
161 | else: | 223 | else: |
162 | for name, _ in TESTS: | 224 | for method, _, _ in TESTS: |
163 | if proportion_ci(results[name], n, CONFIDENCE) > MAX_CI: | 225 | if proportion_ci(results[method], n, CONFIDENCE) > MAX_CI: |
164 | return False | 226 | return False |
165 | return True | 227 | return True |
166 | 228 | ||
@@ -186,30 +248,43 @@ def get_overheads(dp, system = None):
186 | cluster_size = dp.processors/dp.nr_clusters | 248 | cluster_size = dp.processors/dp.nr_clusters |
187 | max_dist = dp.system.distance(0, cluster_size-1) | 249 | max_dist = dp.system.distance(0, cluster_size-1) |
188 | lvl = dp.system.levels[max_dist] | 250 | lvl = dp.system.levels[max_dist] |
251 | max_wss = dp.system.max_wss() | ||
189 | ovh_file = 'overheads/ovh_host=%s_sched=%s_lvl=%s_type=%s.csv' % (dp.host, dp.sched, lvl, dp.ovh_type) | 252 | ovh_file = 'overheads/ovh_host=%s_sched=%s_lvl=%s_type=%s.csv' % (dp.host, dp.sched, lvl, dp.ovh_type) |
190 | ovh = Overheads.from_file(ovh_file) | 253 | ovh = Overheads.from_file(ovh_file) |
191 | ovh.shared_cache = dp.system.schedcat_distance(0, max_dist) | 254 | ovh.shared_cache = dp.system.schedcat_distance(0, max_dist) |
192 | ovh.cache_affinity_loss = get_cpmds(dp) | 255 | ovh.cache_affinity_loss = get_cpmds(dp) |
256 | ovh.cache_affinity_loss.set_max_wss(max_wss) | ||
193 | ovh.consumer = get_consumer_overheads(dp, system) | 257 | ovh.consumer = get_consumer_overheads(dp, system) |
194 | ovh.producer = get_producer_overheads(dp) | 258 | ovh.producer = get_producer_overheads(dp) |
195 | return ovh | 259 | return ovh |
196 | 260 | ||
197 | def process_dp(dp): | 261 | def process_dp(_dp): |
198 | 262 | ||
263 | dp = copy.deepcopy(_dp) | ||
264 | |||
199 | # kludge in parameters that pickle doesn't like... | 265 | # kludge in parameters that pickle doesn't like... |
200 | dp.system = topology.Topology(machines[dp.host]) | 266 | dp.system = topology.Topology(machines[dp.host]) |
201 | 267 | ||
202 | dp.num_graphs = graph.binomial(1, 24) | 268 | # convert names to distributions |
203 | dp.depth_factor = [1.0/3.0, 2.0/3.0] | 269 | dp.num_graphs = NAMED_NUM_GRAPHS[dp.num_graphs] |
204 | dp.node_placement = graph.binomial() | 270 | dp.depth_factor = NAMED_HEIGHT_FACTORS[dp.depth_factor] |
205 | dp.fan_out = graph.geometric(1, 3) | 271 | dp.node_placement = NAMED_SHAPES[dp.node_placement] |
206 | dp.fan_in_cap = 3 | 272 | dp.fan_out = NAMED_FAN[dp.fan_out] |
207 | dp.edge_distance = graph.geometric(1, 3) | 273 | dp.edge_distance = NAMED_EDGE_HOP[dp.edge_distance] |
274 | dp.wss = NAMED_EDGE_WSS[dp.wss] | ||
275 | |||
276 | # slam in unchanging values | ||
208 | dp.nr_source = graph.uniform(1,1) | 277 | dp.nr_source = graph.uniform(1,1) |
209 | dp.nr_sink = graph.uniform(1,1) | 278 | dp.nr_sink = graph.uniform(1,1) |
210 | dp.wss = tasks.multimodal([(tasks.uniform_int(128,1024), 6), (tasks.uniform_int(2048, 8*1024), 3)]) | 279 | dp.uncached = False |
280 | dp.huge_pages = False | ||
281 | dp.sched = 'edf' | ||
282 | dp.walk = 'seq' | ||
211 | 283 | ||
212 | results = defaultdict(float) | 284 | results = defaultdict(float) |
285 | |||
286 | # avg_nr_graphs = defaultdict(float) | ||
287 | # avg_graph_k = defaultdict(float) | ||
213 | avg_latencies = defaultdict(float) | 288 | avg_latencies = defaultdict(float) |
214 | avg_ideal_ratios = defaultdict(float) | 289 | avg_ideal_ratios = defaultdict(float) |
215 | avg_hrt_ratios = defaultdict(float) | 290 | avg_hrt_ratios = defaultdict(float) |
@@ -219,276 +294,152 @@ def process_dp(dp):
219 | 294 | ||
220 | overheads = get_overheads(dp, dp.system) | 295 | overheads = get_overheads(dp, dp.system) |
221 | 296 | ||
222 | # 512MB | ||
223 | upper_memlimit_bytes = 1024*1024*512 | ||
224 | # ru_maxrss does not go back down, so we can only play this trick once... | ||
225 | if gc.isenabled() and resource.getrusage(resource.RUSAGE_SELF).ru_maxrss < upper_memlimit_bytes: | ||
226 | gc.disable() | ||
227 | |||
228 | while not complete(results, n): | 297 | while not complete(results, n): |
229 | if (not gc.isenabled()) and (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss > upper_memlimit_bytes): | ||
230 | gc.enable() | ||
231 | |||
232 | ts, graphs, subts = create_pgm_task_set(dp) | 298 | ts, graphs, subts = create_pgm_task_set(dp) |
233 | 299 | ||
234 | hrt_ideal_response_times = map(graph.compute_hrt_ideal_response_time, graphs) | 300 | # hrt_ideal_response_times = map(graph.compute_hrt_ideal_response_time, graphs) |
235 | num_graphs = len(graphs) | 301 | # num_graphs = len(graphs) |
302 | |||
236 | if dp.nr_clusters != 1: | 303 | if dp.nr_clusters != 1: |
237 | for name, test in TESTS: | 304 | for method, _, test in TESTS: |
238 | result, processed_ts = test(ts, graphs, subts, dp, overheads) | 305 | result, processed_ts = test(ts, graphs, subts, dp, overheads) |
239 | if result: | 306 | if result: |
240 | ideal_response_times = map(graph.compute_ideal_response_time, graphs) | 307 | # ideal_response_times = map(graph.compute_ideal_response_time, graphs) |
241 | srt_response_times = map(graph.bound_graph_response_time, graphs) | 308 | # srt_response_times = map(graph.bound_graph_response_time, graphs) |
242 | ideal_ratio = 0.0 | 309 | # ideal_ratio = 0.0 |
243 | hrt_ratio = 0.0 | 310 | # hrt_ratio = 0.0 |
244 | for i, h, s in zip(ideal_response_times, hrt_ideal_response_times, srt_response_times): | 311 | # for i, h, s in zip(ideal_response_times, hrt_ideal_response_times, srt_response_times): |
245 | ideal_ratio += s/i | 312 | # ideal_ratio += s/i |
246 | hrt_ratio += s/h | 313 | # hrt_ratio += s/h |
247 | 314 | # | |
248 | ideal_ratio /= num_graphs | 315 | # ideal_ratio /= num_graphs |
249 | hrt_ratio /= num_graphs | 316 | # hrt_ratio /= num_graphs |
250 | avg_latency = sum(srt_response_times)/num_graphs | 317 | # avg_latency = sum(srt_response_times)/num_graphs |
251 | avg_latencies[name] = update_mean(avg_latencies[name], nsched[name], avg_latency) | 318 | # avg_latencies[method] = update_mean(avg_latencies[method], nsched[method], avg_latency) |
252 | avg_ideal_ratios[name] = update_mean(avg_ideal_ratios[name], nsched[name], ideal_ratio) | 319 | # avg_ideal_ratios[method] = update_mean(avg_ideal_ratios[method], nsched[method], ideal_ratio) |
253 | avg_hrt_ratios[name] = update_mean(avg_hrt_ratios[name], nsched[name], hrt_ratio) | 320 | # avg_hrt_ratios[method] = update_mean(avg_hrt_ratios[method], nsched[method], hrt_ratio) |
254 | nsched[name] += 1 | 321 | nsched[method] += 1 |
255 | results[name] = update_mean(results[name], n, result) | 322 | results[method] = update_mean(results[method], n, result) |
323 | |||
256 | # if there is no partitioning, then same results hold for all tests | 324 | # if there is no partitioning, then same results hold for all tests |
257 | else: | 325 | else: |
258 | result, processed_ts = TESTS[0][1](ts, graphs, subts, dp, overheads) | 326 | result, processed_ts = TESTS[0][2](ts, graphs, subts, dp, overheads) |
259 | if result: | 327 | if result: |
260 | ideal_response_times = map(graph.compute_ideal_response_time, graphs) | 328 | for method, name, test in TESTS: |
261 | srt_response_times = map(graph.bound_graph_response_time, graphs) | 329 | nsched[method] += 1 |
262 | ideal_ratio = 0.0 | 330 | for method, name, test in TESTS: |
263 | hrt_ratio = 0.0 | 331 | results[method] = update_mean(results[method], n, result) |
264 | for i, h, s in zip(ideal_response_times, hrt_ideal_response_times, srt_response_times): | ||
265 | ideal_ratio += s/i | ||
266 | hrt_ratio += s/h | ||
267 | |||
268 | ideal_ratio /= num_graphs | ||
269 | hrt_ratio /= num_graphs | ||
270 | avg_latency = sum(srt_response_times)/num_graphs | ||
271 | for name, test in TESTS: | ||
272 | avg_latencies[name] = update_mean(avg_latencies[name], nsched[name], avg_latency) | ||
273 | avg_ideal_ratios[name] = update_mean(avg_ideal_ratios[name], nsched[name], ideal_ratio) | ||
274 | avg_hrt_ratios[name] = update_mean(avg_hrt_ratios[name], nsched[name], hrt_ratio) | ||
275 | nsched[name] += 1 | ||
276 | |||
277 | for name, test in TESTS: | ||
278 | results[name] = update_mean(results[name], n, result) | ||
279 | 332 | ||
280 | n += 1 | 333 | n += 1 |
281 | # global TOTAL_TESTED | 334 | |
282 | # TOTAL_TESTED += 1 | 335 | # for method, _, _ in TESTS: |
283 | # print TOTAL_TESTED | 336 | # if nsched[method] == 0: |
284 | 337 | # avg_latencies[method] = -1.0 | |
285 | for name, test in TESTS: | 338 | # avg_ideal_ratios[method] = -1.0 |
286 | if nsched[name] == 0: | 339 | # avg_hrt_ratios[method] = -1.0 |
287 | avg_latencies[name] = -1.0 | 340 | |
288 | avg_ideal_ratios[name] = -1.0 | 341 | return results, avg_latencies, avg_ideal_ratios, avg_hrt_ratios |
289 | avg_hrt_ratios[name] = -1.0 | 342 | |
290 | 343 | def process_design_points(args): | |
291 | del dp.system | 344 | try: |
292 | del dp.num_graphs | 345 | (worker_id, db_name) = args |
293 | del dp.depth_factor | 346 | nr_processed = 0 |
294 | del dp.node_placement | 347 | while True: |
295 | del dp.fan_out | 348 | dp = db.get_design_point(db_name) |
296 | del dp.fan_in_cap | 349 | if dp and not db.already_processed(dp, db_name = db_name): |
297 | del dp.edge_distance | 350 | (sched, avg_lat, avg_idl_rat, avg_hrt_rat) = process_dp(dp) |
298 | del dp.nr_source | 351 | db.store_sched_results(db_name, dp, sched) |
299 | del dp.nr_sink | 352 | nr_processed += 1 |
300 | del dp.wss | 353 | else: |
301 | 354 | break | |
302 | # return dict(dp.items() + results.items()) | 355 | except lite.OperationalError: |
303 | # return dict(dp.items() + results.items()), dict(dp.items() + avg_latencies.items()), dict(dp.items() + avg_ideal_ratios.items()), dict(dp.items() + avg_hrt_ratios.items()) | 356 | print "FUCK!" |
304 | return dp, results, avg_latencies, avg_ideal_ratios, avg_hrt_ratios | 357 | print traceback.format_exc() |
358 | return nr_processed | ||
305 | 359 | ||
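
The new process_design_points() above drains pending design points from the shared SQLite file until none remain, re-checking db.already_processed() before storing results and returning early if sqlite3 raises OperationalError (e.g. a locked database). The fetch helper it relies on lives in the new database module and is not shown here; a hedged sketch, assuming a hypothetical design_points table with a processed flag and JSON-encoded parameters (the new json import at the top of the file suggests something along these lines):

```python
# Hedged sketch of the fetch side assumed by process_design_points(). The real
# query lives in the new `database` module; the design_points table, its
# processed flag, and the JSON-encoded parameter blob are all assumptions.
import json
import sqlite3

def get_design_point(db_name):
    """Return one not-yet-processed design point, or None when the queue is empty."""
    con = sqlite3.connect(db_name, timeout=30)
    try:
        row = con.execute("SELECT params FROM design_points "
                          "WHERE processed = 0 LIMIT 1").fetchone()
        return json.loads(row[0]) if row else None
    finally:
        con.close()
```

Because several workers can fetch the same row, the loop above guards each store with already_processed() and simply lets a worker stop on an OperationalError rather than retrying.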
306 | def valid(dp): | 360 | def valid(dp): |
307 | return True | 361 | return True |
308 | 362 | ||
309 | TESTS = [ | ||
310 | ("CacheAgnostic", tests.test_partition_no_cache), | ||
311 | ("MaximizeParallelism", tests.test_partition_parallel), | ||
312 | ("CacheAware", tests.test_partition_cache_aware), | ||
313 | ("CacheAwareEdges", tests.test_partition_cache_aware_edges), | ||
314 | # ("MaximizeParallelismCacheAware", tests.test_partition_parallel2) | ||
315 | ("CacheAwareBFSEdges", tests.test_partition_cache_aware_bfs), | ||
316 | ("CacheAwareDFSEdges", tests.test_partition_cache_aware_dfs) | ||
317 | ] | ||
318 | |||
319 | def myrange(start, end, inc): | ||
320 | return arange(start, end+inc, inc) | ||
321 | |||
322 | |||
323 | def main(): | 363 | def main(): |
324 | random.seed(12345) | 364 | random.seed(12345) |
325 | 365 | ||
326 | parser = argparse.ArgumentParser() | 366 | parser = argparse.ArgumentParser() |
327 | parser.add_argument('-o', "--outfile", type = str, | ||
328 | default = "", | ||
329 | help = "store results to <filename>.csv") | ||
330 | parser.add_argument('-p', "--pretend", action='store_true', | 367 | parser.add_argument('-p', "--pretend", action='store_true', |
331 | help = "Only print design point, do not execute") | 368 | help = "Only print design point, do not execute") |
332 | parser.add_argument('-m', "--processors", default=1, type = int, | 369 | parser.add_argument('-m', "--processors", default=1, type = int, |
333 | help="Number of processors to execute on") | 370 | help="Number of processors to execute on") |
334 | # parser.add_argument('-s', "--model", type = str, | 371 | parser.add_argument('-d', "--database", type = str, |
335 | # default = "", | 372 | default = "", |
336 | # help = "Overhead model of the system") | 373 | help = "Database for holding experiment data") |
374 | parser.add_argument('--initonly', action='store_true', | ||
375 | help = "Only store design points to database") | ||
376 | parser.add_argument('--worker', action='store_true', | ||
377 | help = "Only process design points from database") | ||
378 | parser.add_argument('--resume', action='store_true', | ||
379 | help = "Preserve existing database entries") | ||
337 | args = parser.parse_args() | 380 | args = parser.parse_args() |
338 | 381 | ||
339 | exp = storage() | 382 | if args.database == "": |
340 | exp.host = ['ludwig'] | 383 | print "Database name required." |
341 | 384 | exit(-1) | |
342 | cpus = 24.0 | 385 | |
343 | exp.processors = [cpus] | 386 | if not args.worker: |
344 | # exp.nr_clusters = [24] | 387 | cpus = 24.0 |
345 | exp.nr_clusters = [1] | 388 | exp = storage() |
346 | exp.sched = ['edf'] | 389 | |
347 | # exp.nr_clusters = [1, 4, 12, 24] | 390 | # system parameters |
348 | exp.task_util = ['uni-medium'] | 391 | exp.processors = [int(cpus)] |
349 | exp.period = ['uni-long'] | 392 | # exp.nr_clusters = [1, 4, 12, 24] |
350 | exp.sys_util = myrange(1, cpus, 0.1) | 393 | exp.nr_clusters = [1, 4, 12] |
351 | # | 394 | exp.host = ['ludwig'] |
352 | # exp.num_graphs = [graph.binomial(1, 24)] | 395 | exp.polluters = [False] |
353 | # exp.depth_factor = [[1.0/3.0, 2.0/3.0]] | 396 | exp.ovh_type = ['max'] |
354 | # exp.node_placement = [graph.binomial()] | 397 | |
355 | # exp.fan_out = [graph.geometric(1, 3)] | 398 | # task parameters |
356 | # exp.fan_in_cap = [3] | 399 | step_size = 0.2 |
357 | # exp.edge_distance = [graph.geometric(1, 3)] | 400 | exp.sys_util = [float(v) for v in arange(6.0, cpus+step_size, step_size)] |
358 | # exp.nr_source = [graph.uniform(1,1)] | 401 | exp.task_util = ['uni-medium'] |
359 | # exp.nr_sink = [graph.uniform(1,1)] | 402 | exp.period = ['uni-long'] |
360 | # | 403 | exp.wcycle = [0] |
361 | # exp.wss = [ tasks.multimodal([(tasks.uniform_int(128,1024), 6), (tasks.uniform_int(2048, 8*1024), 3)]) ] | 404 | |
362 | exp.wcycle = [ 0 ] | 405 | # graph parameters |
363 | exp.walk = ['seq'] | 406 | exp.num_graphs = ['bimo-few', 'bimo-medium'] |
364 | exp.huge_pages = [False] | 407 | # exp.depth_factor = ['uni-medium'] |
365 | exp.uncached = [False] | 408 | exp.depth_factor = ['uni-short', 'uni-tall'] |
366 | exp.polluters = [False] | 409 | exp.node_placement = ['binomial'] |
367 | exp.ovh_type = ['max'] | 410 | exp.fan_out = ['geometric_3'] |
368 | # exp.ovh_type = ['max', 'median', 'mean'] | 411 | exp.edge_distance = ['geometric_3'] |
369 | 412 | exp.wss = ['bimo-medium'] | |
370 | design_points = [dp for dp in DesignPointGenerator(exp, is_valid = valid)] | 413 | exp.fan_in_cap = [3] |
371 | design_points.reverse() | 414 | |
372 | 415 | design_points = [dp for dp in DesignPointGenerator(exp, is_valid = valid)] | |
373 | # hopefully this makes the % done more indicative of progress. | 416 | |
374 | # random.shuffle(design_points) | 417 | nr_dp = len(design_points) |
375 | 418 | if not args.pretend: | |
376 | print "Total design points: ", len(design_points) | 419 | db.create_tables(args.database, dummy_dp = design_points[0], clean = not args.resume) |
377 | 420 | num_stored = db.store_design_points(args.database, design_points, clean = not args.resume) | |
378 | if not args.pretend: | 421 | print "Loaded %d of %d design points. (%d already completed)" % (num_stored, nr_dp, nr_dp - num_stored) |
379 | # if args.outfile == "": | 422 | |
380 | # sched_out = DictWriter(sys.stdout, exp.keys()+[t[0] for t in TESTS]) | 423 | if args.pretend or args.initonly: |
381 | # lat_out = DictWriter(open(os.devnull, 'w'), exp.keys()+[t[0] for t in TESTS]) | 424 | exit(0) |
382 | # ir_out = DictWriter(open(os.devnull, 'w'), exp.keys()+[t[0] for t in TESTS]) | 425 | |
383 | # hr_out = DictWriter(open(os.devnull, 'w'), exp.keys()+[t[0] for t in TESTS]) | 426 | total_nr_processed = 0 |
384 | # else: | 427 | if args.processors > 1: |
385 | # sched_out = DictWriter(open(args.outfile+'_sched.csv', 'w'), exp.keys()+[t[0] for t in TESTS]) | 428 | pool = Pool(processes = args.processors) |
386 | # lat_out = DictWriter(open(args.outfile+'_latency.csv', 'w'), exp.keys()+[t[0] for t in TESTS]) | 429 | args = zip(range(args.processors), [args.database]*args.processors) |
387 | # ir_out = DictWriter(open(args.outfile+'_idealratio.csv', 'w'), exp.keys()+[t[0] for t in TESTS]) | 430 | try: |
388 | # hr_out = DictWriter(open(args.outfile+'_hrtratio.csv', 'w'), exp.keys()+[t[0] for t in TESTS]) | 431 | for i,nr_processed in enumerate(pool.map(process_design_points, args)): |
389 | # | 432 | print 'worker %d: processed %d design points.' % (i,nr_processed) |
390 | # sched_out.writeheader() | 433 | total_nr_processed += nr_processed |
391 | # lat_out.writeheader() | 434 | pool.close() |
392 | # ir_out.writeheader() | 435 | except Exception as e: |
393 | # hr_out.writeheader() | 436 | pool.terminate() |
394 | 437 | print e | |
395 | if args.outfile == "": | 438 | raise |
396 | sched_out = sys.stdout | 439 | else: |
397 | lat_out = open(os.devnull, 'w') | 440 | total_nr_processed = process_design_points((0, args.database)) |
398 | ir_out = open(os.devnull, 'w') | ||
399 | hr_out = open(os.devnull, 'w') | ||
400 | else: | ||
401 | sched_out = open(args.outfile+'_sched.csv', 'w') | ||
402 | lat_out = open(args.outfile+'_latency.csv', 'w') | ||
403 | ir_out = open(args.outfile+'_idealratio.csv', 'w') | ||
404 | hr_out = open(args.outfile+'_hrtratio.csv', 'w') | ||
405 | |||
406 | hdr = 'processors,nr_clusters,task_util,period,wcycle,polluters,ovh_type,sys_util' | ||
407 | for t in TESTS: | ||
408 | hdr += ','+t[0] | ||
409 | hdr += '\n' | ||
410 | |||
411 | sched_out.write(hdr) | ||
412 | lat_out.write(hdr) | ||
413 | ir_out.write(hdr) | ||
414 | hr_out.write(hdr) | ||
415 | |||
416 | if args.processors > 1: | ||
417 | pool = Pool(processes = args.processors) | ||
418 | # logger = multiprocessing.log_to_stderr() | ||
419 | # logger.setLevel(multiprocessing.SUBDEBUG) | ||
420 | try: | ||
421 | for i, row in enumerate(pool.imap_unordered(process_dp, design_points)): | ||
422 | if not row: | ||
423 | continue | ||
424 | if sched_out != sys.stdout: | ||
425 | sys.stderr.write('\rdone {0:%}'.format(i/len(design_points))) | ||
426 | |||
427 | dp, sched, latency, iratio, hrtratio = row | ||
428 | |||
429 | keys = '%d,%d,%s,%s,%d,%d,%s,%f' % (dp.processors, dp.nr_clusters, dp.task_util, dp.period, dp.wcycle, dp.polluters, dp.ovh_type, dp.sys_util) | ||
430 | |||
431 | values = '' | ||
432 | for t in TESTS: | ||
433 | values += ',%f' % sched[t[0]] | ||
434 | sched_out.write('%s%s\n' % (keys, values)) | ||
435 | |||
436 | values = '' | ||
437 | for t in TESTS: | ||
438 | values += ',%f' % latency[t[0]] | ||
439 | lat_out.write('%s%s\n' % (keys, values)) | ||
440 | |||
441 | values = '' | ||
442 | for t in TESTS: | ||
443 | values += ',%f' % iratio[t[0]] | ||
444 | ir_out.write('%s%s\n' % (keys, values)) | ||
445 | |||
446 | values = '' | ||
447 | for t in TESTS: | ||
448 | values += ',%f' % hrtratio[t[0]] | ||
449 | hr_out.write('%s%s\n' % (keys, values)) | ||
450 | |||
451 | pool.close() | ||
452 | |||
453 | except Exception as e: | ||
454 | pool.terminate() | ||
455 | print e | ||
456 | raise | ||
457 | else: | ||
458 | |||
459 | for i, row in enumerate(map(process_dp, design_points)): | ||
460 | if not row: | ||
461 | continue | ||
462 | if sched_out != sys.stdout: | ||
463 | sys.stderr.write('\rdone {0:%}'.format(i/len(design_points))) | ||
464 | |||
465 | dp, sched, latency, iratio, hrtratio = row | ||
466 | |||
467 | entry_stem = '%d,%d,%s,%s,%d,%d,%s,%f' % (dp.processors, dp.nr_clusters, dp.task_util, dp.period, dp.wcycle, dp.polluters, dp.ovh_type, dp.sys_util) | ||
468 | |||
469 | sched_out.write(entry_stem) | ||
470 | for t in TESTS: | ||
471 | sched_out.write(',%f' % sched[t[0]]) | ||
472 | sched_out.write('\n') | ||
473 | |||
474 | lat_out.write(entry_stem) | ||
475 | for t in TESTS: | ||
476 | lat_out.write(',%f' % latency[t[0]]) | ||
477 | lat_out.write('\n') | ||
478 | |||
479 | ir_out.write(entry_stem) | ||
480 | for t in TESTS: | ||
481 | ir_out.write(',%f' % iratio[t[0]]) | ||
482 | ir_out.write('\n') | ||
483 | |||
484 | hr_out.write(entry_stem) | ||
485 | for t in TESTS: | ||
486 | hr_out.write(',%f' % hrtratio[t[0]]) | ||
487 | hr_out.write('\n') | ||
488 | |||
489 | # global TOTAL_TESTED | ||
490 | # print 'total tasksets:', TOTAL_TESTED | ||
491 | 441 | ||
442 | print 'Processed %d design points!' % total_nr_processed | ||
492 | 443 | ||
493 | if __name__ == '__main__': | 444 | if __name__ == '__main__': |
494 | main() | 445 | main() |
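
Usage with the new flags (file names here are illustrative): an init pass enumerates design points into the database, e.g. ./ecrts14.py -d results.db --initonly, after which one or more worker passes drain it, e.g. ./ecrts14.py -d results.db --worker -m 24. Passing --resume preserves rows already in the database so an interrupted run can be continued, and -p/--pretend generates the design points but exits before anything is written. Running without --initonly or --worker performs both phases in a single invocation.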