path: root/parse/ft.py
author	Jonathan Herman <hermanjl@cs.unc.edu>	2012-11-20 16:02:40 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2012-11-20 16:02:40 -0500
commit	41c867480f1e20bd3b168258ed71450499ea6ccf (patch)
tree	b47963b417ba9bdd53f03d5c621b72bcca297ef6	/parse/ft.py
parent	1abea5f67c2c70053af0a59db715a210df2e0bef (diff)
Removed 2-step parse for overheads.
Diffstat (limited to 'parse/ft.py')
-rw-r--r--	parse/ft.py	129
1 file changed, 39 insertions(+), 90 deletions(-)
diff --git a/parse/ft.py b/parse/ft.py
index cbf75f2..c5f1522 100644
--- a/parse/ft.py
+++ b/parse/ft.py
@@ -7,94 +7,68 @@ import subprocess
 
 from point import Measurement,Type
 
-SPLIT_DATA_NAME = "overhead={}.bin"
-FT_DATA_NAME = "sorted-ft.bin"
-FIELDS = ["Overhead", "samples", "max", "avg", "min", "med", "std", "var"]
+FT_SPLIT_NAME = "overhead={}.bin"
+FT_SORTED_NAME = "sorted-ft.bin"
+FT_ERR_NAME = "err-ft"
 
-def get_ft_output(data_dir, cycles, out_dir, force=False):
-    """
-    Create and return file containing analyzed overhead data
-    """
+def extract_ft_data(result, data_dir, cycles, tmp_dir):
     freg = conf.FILES['ft_data'] + "$"
     bins = [f for f in os.listdir(data_dir) if re.match(freg, f)]
 
-    output_file = "{}/out-ft".format(out_dir)
+    if not len(bins):
+        return False
 
-    if os.path.isfile(output_file):
-        if force:
-            os.remove(output_file)
-        else:
-            return output_file
+    bin_file = "{}/{}".format(data_dir, bins[0])
 
-    if len(bins) != 0:
-        bin_file = "{}/{}".format(data_dir, bins[0])
-        err_file = open("%s/err-ft" % out_dir, 'w')
+    with open("%s/%s" % (tmp_dir, FT_ERR_NAME), 'w') as err_file:
+        sorted_bin = sort_ft(bin_file, err_file, tmp_dir)
 
-        sorted_bin = sort_ft(bin_file, err_file, out_dir)
-        make_data_file(sorted_bin, cycles, output_file, err_file, out_dir)
+        for event in conf.BASE_EVENTS:
+            parse_overhead(result, sorted_bin, event, cycles,
+                           tmp_dir, err_file)
 
         os.remove(sorted_bin)
 
-        return output_file
-    else:
-        return None
-    return output_file
+    return True
 
-def fmt_cell(x):
-    if type(x) == str:
-        return "%15s" % x
-    if type(x) == int:
-        return "%15d" % x
-    else:
-        return "%15.3f" % x
+def parse_overhead(result, overhead_bin, overhead, cycles, out_dir, err_file):
+    ovh_fname = "{}/{}".format(out_dir, FT_SPLIT_NAME).format(overhead)
 
-def make_data_file(sorted_bin, cycles, out_fname, err_file, out_dir):
-    """
-    Create file containing all overhead information.
-    """
-    base_name = "{}/{}".format(out_dir, SPLIT_DATA_NAME)
+    if os.path.exists(ovh_fname):
+        os.remove(ovh_fname)
+    ovh_file = open(ovh_fname, 'w')
 
-    with open(out_fname, "w") as f:
-        f.write("#%s" % ", ".join(fmt_cell(x) for x in FIELDS))
-        f.write("\n")
+    # Extract matching overhead events into a seperate file
+    cmd = [conf.BINS["split"], "-r", "-b", overhead, overhead_bin]
+    ret = subprocess.call(cmd, cwd=out_dir, stderr=err_file, stdout=ovh_file)
+    size = os.stat(ovh_fname).st_size
 
-        for event in conf.BASE_EVENTS:
-            ovh_fname = base_name.format(event.replace("_", "-"))
-
-            if os.path.exists(ovh_fname):
-                os.remove(ovh_fname)
-            ovh_file = open(ovh_fname, 'w')
-
-            # Extract matching overhead events into a seperate file
-            cmd = [conf.BINS["split"], "-r", "-b", event, sorted_bin]
-            ret = subprocess.call(cmd, cwd=out_dir,
-                                  stderr=err_file, stdout=ovh_file)
-            size = os.stat(ovh_fname).st_size
+    if ret:
+        raise Exception("Failed (%d) with command: %s" % (ret, " ".join(cmd)))
+    if not size:
+        os.remove(ovh_fname)
 
-            if ret:
-                err_file.write("Failed with command: %s" % " ".join(cmd))
-            if not size:
-                os.remove(ovh_fname)
-            if not size or ret:
-                continue
+    if size and not ret:
+        # Map and sort file for stats
+        data = np.memmap(ovh_fname, dtype="float32", mode='c')
+        data /= float(cycles) # Scale for processor speed
+        data.sort()
 
-            # Map and sort file for stats
-            data = np.memmap(ovh_fname, dtype="float32", mode='c')
-            data /= float(cycles) # Scale for processor speed
-            data.sort()
+        m = Measurement("%s-%s" % (overhead_bin, overhead))
+        m[Type.Max] = data[-1]
+        m[Type.Avg] = np.mean(data)
+        m[Type.Min] = data[0]
+        m[Type.Var] = np.var(data)
 
-            stats = [event, len(data), data[-1], np.mean(data), data[0],
-                     np.median(data), np.std(data, ddof=1), np.var(data)]
-            f.write(", ".join([fmt_cell(x) for x in stats]))
-            f.write("\n")
+        result[overhead] = m
 
-            os.remove(ovh_fname)
+        os.remove(ovh_fname)
 
 def sort_ft(ft_file, err_file, out_dir):
     """
     Create and return file with sorted overheads from @ft_file.
     """
-    out_fname = "{}/{}".format(out_dir, FT_DATA_NAME)
+    out_fname = "{}/{}".format(out_dir, FT_SORTED_NAME)
 
     # Sort happens in-place
     sh.copyfile(ft_file, out_fname)
@@ -105,28 +79,3 @@ def sort_ft(ft_file, err_file, out_dir):
         raise Exception("Sort failed with command: %s" % " ".join(cmd))
 
     return out_fname
-
-def extract_ft_data(data_file, result, overheads):
-    """
-    Return exp point with overhead measurements from data_file
-    """
-    with open(data_file) as f:
-        data = f.read()
-
-    for ovh in overheads:
-        regex = r"({}[^\n]*)".format(ovh)
-        line = re.search(regex, data)
-
-        if not line:
-            continue
-
-        vals = re.split(r"[,\s]+", line.groups(1)[0])
-
-        measure = Measurement("%s-%s" % (data_file, ovh))
-        measure[Type.Max] = float(vals[FIELDS.index("max")])
-        measure[Type.Avg] = float(vals[FIELDS.index("avg")])
-        measure[Type.Var] = float(vals[FIELDS.index("var")])
-
-        result[ovh] = measure
-
-    return result
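
Note: this commit folds the old two-step flow (get_ft_output()/make_data_file() writing an out-ft text table, then extract_ft_data() re-parsing it) into a single extract_ft_data() that fills the caller-supplied result mapping directly. The sketch below is not part of the commit; it only illustrates how the new entry point might be driven. The directory names and cycle count are placeholders, a plain dict stands in for whatever measurement container the real caller passes as result, and it assumes the parse package of these scripts is importable.

# Hypothetical driver for the new one-step overhead parse (illustration only).
from parse import ft

result = {}                    # parse_overhead() stores Measurement objects keyed by event name
ok = ft.extract_ft_data(result,
                        data_dir="run-data",  # placeholder: directory holding the raw ft_data binary
                        cycles=2128.0,        # placeholder: clock value used to scale raw cycle counts
                        tmp_dir="tmp")        # placeholder: existing scratch dir for sorted/split files

if ok:
    for overhead, measurement in result.items():
        print("%s: %s" % (overhead, measurement))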