author    leochanj105 <leochanj@live.unc.edu>  2020-10-23 02:12:49 -0400
committer leochanj105 <leochanj@live.unc.edu>  2020-10-23 02:12:49 -0400
commit    e23e931be4776b89149fdb2596f47096e6cdb78c (patch)
tree      faf3963cc501f103cd2554553ec74ce21157b42e /smt_analysis
parent    e2d933df44b7b387b41c8c7805393ad3857c4448 (diff)
parent    e0217a963c6c0e0667d41d075038685956bcfacf (diff)
Merge branch 'sd-vbs' of ssh://rtsrv.cs.unc.edu/public/mc2-scripts-and-benchmarks into sd-vbs
Diffstat (limited to 'smt_analysis')
-rwxr-xr-x  smt_analysis/computeLCslowdown.py   73
-rwxr-xr-x  smt_analysis/computeSMTslowdown.py 155
-rwxr-xr-x  smt_analysis/libSMT.py             151
m---------  smt_analysis/plotille                0
4 files changed, 379 insertions, 0 deletions
diff --git a/smt_analysis/computeLCslowdown.py b/smt_analysis/computeLCslowdown.py
new file mode 100755
index 0000000..bcd22da
--- /dev/null
+++ b/smt_analysis/computeLCslowdown.py
@@ -0,0 +1,73 @@
#!/usr/bin/python3
import numpy as np
import sys
import plotille.plotille as plt
from libSMT import *
TIMING_ERROR = 1000 #ns
ASYNC_FORMAT = False

def print_usage():
	print("This program takes in the all-pairs and baseline SMT data and computes the worst-case slowdown against any other task when SMT is enabled.", file=sys.stderr)
	print("Level-A/B usage: {} <file -A> <file -B> <baseline file>".format(sys.argv[0]), file=sys.stderr)
	print("Level-C usage: {} <continuous pairs> <baseline file>".format(sys.argv[0]), file=sys.stderr)

# Check that we got the right number of parameters
if len(sys.argv) < 3:
	print_usage()
	exit()

if len(sys.argv) > 3:
	print("Reading file using synchronous pair format...")
	print("Are you sure you want to do this? For the RTAS'21 paper, L-A/-B pairs should use the other script.")
	input("Press enter to continue, Ctrl+C to exit...")
else:
	print("Reading file using asynchronous pair format...")
	ASYNC_FORMAT = True

assert_valid_input_files(sys.argv[1:-1], print_usage)

# Pull in the data
if not ASYNC_FORMAT:
	baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[3])
	paired_times, paired_offsets, name_to_idx, idx_to_name = load_paired(sys.argv[1], sys.argv[2], len(list(baseline_times.keys())))
	for key in baseline_times:
		print(key, max(baseline_times[key]))
else:
	baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[2])
	paired_times, name_to_idx, idx_to_name = load_fake_paired(sys.argv[1])

# We work iff the baseline was run for the same set of benchmarks as the pairs were
assert_base_and_pair_keys_match(baseline_times, name_to_idx)

# Only consider benchmarks that are at least an order of magnitude longer than the timing error
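# (With TIMING_ERROR = 1000 ns, this keeps only benchmarks whose fastest baseline sample exceeds 10,000 ns.)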
reliableNames = []
for i in range(0, len(name_to_idx)):
	benchmark = idx_to_name[i]
	if min(baseline_times[benchmark]) > TIMING_ERROR * 10:
		reliableNames.append(benchmark)

# Compute worst-case SMT slowdown for each benchmark
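# For each benchmark i, M_i = max over co-runners j of sample_f(C_{i:j}) / sample_f(C_i).
# Worked example with hypothetical numbers: a baseline mean of 1.2 ms and a worst paired
# mean of 1.5 ms give M_i = 1.5 / 1.2 = 1.25.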
50print("Bench Mi")
51# Print rows
52sample_f = np.mean # Change this to np.mean to use mean values in Mi generation
M_vals = []
for b1 in reliableNames:
	print("{:<14.14}:".format(b1), end=" ")
	max_mi = 0
	# Scan through everyone we ran against and find our maximum slowdown
	for b2 in reliableNames:
		time_with_smt = sample_f(paired_times[name_to_idx[b1]][name_to_idx[b2]])
		time_wout_smt = sample_f(baseline_times[b1])
		M = time_with_smt / time_wout_smt
		max_mi = max(max_mi, M)
	print("{:>10.3}".format(max_mi), end=" ")
	M_vals.append(max_mi)
	print("")
# Print some statistics about the distribution
print("Average: {:>5.3} with standard deviation {:>5.3} using `{}`".format(np.mean(M_vals), np.std(M_vals), sample_f.__name__))
Ms = np.asarray(M_vals, dtype=np.float32)
print(np.sum(Ms <= 1), "of", len(M_vals), "M_i values are at most one -", 100*np.sum(Ms <= 1)/len(M_vals), "percent")
print(np.sum(Ms > 2), "of", len(M_vals), "M_i values are greater than two -", 100*np.sum(Ms > 2)/len(M_vals), "percent")
M_vals_to_plot = Ms

print(plt.hist(M_vals_to_plot, bins=10))
diff --git a/smt_analysis/computeSMTslowdown.py b/smt_analysis/computeSMTslowdown.py
new file mode 100755
index 0000000..805def1
--- /dev/null
+++ b/smt_analysis/computeSMTslowdown.py
@@ -0,0 +1,155 @@
#!/usr/bin/python3
from typing import List, Any
import numpy as np
from scipy import stats
import sys
import plotille.plotille as plt
from libSMT import *
TIMING_ERROR = 1000 #ns
LEVEL_C_ANALYSIS = False

def print_usage():
	print("This program takes in the all-pairs and baseline SMT data and computes how much each program is slowed when SMT is enabled.", file=sys.stderr)
	print("Level-A/B usage: {} <file -A> <file -B> <baseline file> --cij".format(sys.argv[0]), file=sys.stderr)
	print("Level-C usage: {} <continuous pairs> <baseline file>".format(sys.argv[0]), file=sys.stderr)

# Check that we got the right number of parameters
if len(sys.argv) < 3:
	print_usage()
	exit()

if len(sys.argv) > 3:
	print("Analyzing results using Level-A/B methodology...")
else:
	print("Analyzing results using Level-C methodology...")
	LEVEL_C_ANALYSIS = True

assert_valid_input_files(sys.argv[1:-1], print_usage)

# Print Cij values rather than Mij
TIMES_ONLY = len(sys.argv) > 4 and "--cij" in sys.argv[4]
OK_PAIRS_ONLY = len(sys.argv) > 4 and "--cij-ok" in sys.argv[4]
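# Note that "--cij" is a substring of "--cij-ok", so passing --cij-ok enables both flags above.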

# Pull in the data
if not LEVEL_C_ANALYSIS:
	baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[3])
	paired_times, paired_offsets, name_to_idx, idx_to_name = load_paired(sys.argv[1], sys.argv[2], len(list(baseline_times.keys())))
	for key in baseline_times:
		print(key, max(baseline_times[key]))
else:
	# Paired times use an abuse of the baseline file format
	baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[2])
	paired_times, name_to_idx, idx_to_name = load_fake_paired(sys.argv[1])

# We work iff the baseline was run for the same set of benchmarks as the pairs were
assert_base_and_pair_keys_match(baseline_times, name_to_idx)

# Only consider benchmarks that are at least an order of magnitude longer than the timing error
reliableNames = []
for i in range(0, len(name_to_idx)):
	benchmark = idx_to_name[i]
	if min(baseline_times[benchmark]) > TIMING_ERROR * 10:
		reliableNames.append(benchmark)

# Compute SMT slowdown for each benchmark
# Output format: table, each row is one benchmark and each column is one benchmark
# each cell is base1 + base2*m = pair solved for m, aka (pair - base1) / base2
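# Worked example with hypothetical numbers: if the longer baseline Ci = 1000, the shorter
# baseline Cj = 800, and the paired time Cij = 1400, then M = (1400 - 1000) / 800 = 0.5;
# M < 1 means the pair finished sooner than running the two tasks back-to-back.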
# Print table header
print("Bench ", end=" ")
for name in reliableNames:
	if not TIMES_ONLY: print("{:<10.10}".format(name), end=" ")
	if TIMES_ONLY: print("{:<12.12}".format(name), end=" ")
print()
# Print rows
sample_f = max # Change this to np.mean to use mean values in Mij generation
M_vals = []
for b1 in reliableNames:
	print("{:<14.14}:".format(b1), end=" ") # Row label is identical in both output modes
	for b2 in reliableNames:
		if not LEVEL_C_ANALYSIS:
			Ci = max(sample_f(baseline_times[b1]), sample_f(baseline_times[b2]))
			Cj = min(sample_f(baseline_times[b1]), sample_f(baseline_times[b2]))
			Cij = sample_f(paired_times[name_to_idx[b1]][name_to_idx[b2]])
			if False: # Debug toggle: flip to True to report the coefficient of variation instead
				M = np.std(paired_times[name_to_idx[b1]][name_to_idx[b2]]) / np.mean(paired_times[name_to_idx[b1]][name_to_idx[b2]])
			else:
				M = (Cij - Ci) / Cj
			if Cij and Cj * 10 > Ci: # We don't pair tasks with more than a 10x difference in length
				M_vals.append(M)
				if not TIMES_ONLY: print("{:>10.3}".format(M), end=" ")
			else:
				if not TIMES_ONLY: print("{:>10}".format("N/A"), end=" ")

			if TIMES_ONLY and (not OK_PAIRS_ONLY or Cj * 10 > Ci):
				print("{:>12}".format(Cij), end=" ")
			elif OK_PAIRS_ONLY and Cj * 10 <= Ci:
				print("{:>12}".format("0"), end=" ")

		else:
			time_with_smt = sample_f(paired_times[name_to_idx[b1]][name_to_idx[b2]])
			time_wout_smt = sample_f(baseline_times[b1])
			M = time_with_smt / time_wout_smt
			M_vals.append(M)
			print("{:>10.3}".format(M), end=" ")
	print("")
# Print some statistics about the distribution
print("Average: {:>5.3} with standard deviation {:>5.3} using `{}`".format(np.mean(M_vals), np.std(M_vals), sample_f.__name__))
Ms = np.asarray(M_vals, dtype=np.float32)
if not LEVEL_C_ANALYSIS:
	print(np.sum(Ms <= 0), "of", len(M_vals), "M_i:j values are at most zero -", 100*np.sum(Ms <= 0)/len(M_vals), "percent")
	print(np.sum(Ms > 1), "of", len(M_vals), "M_i:j values are greater than one -", 100*np.sum(Ms > 1)/len(M_vals), "percent")
	M_vals_to_plot = Ms[np.logical_and(Ms > 0, Ms <= 1)]
else:
	print(np.sum(Ms <= 1), "of", len(M_vals), "M_i:j values are at most one -", 100*np.sum(Ms <= 1)/len(M_vals), "percent")
	print(np.sum(Ms > 2), "of", len(M_vals), "M_i:j values are greater than two -", 100*np.sum(Ms > 2)/len(M_vals), "percent")
	M_vals_to_plot = Ms

print("Using Sim's analysis, average: {:>5.3} with standard deviation {:>5.3} using `{}`".format(np.mean(list(M_vals_to_plot)), np.std(list(M_vals_to_plot)), sample_f.__name__))
print(plt.hist(M_vals_to_plot, bins=10))

##### BELOW TEXT IS OLD OFFSET CODE (patched) #####
## This still works, but is hacky and deprecated ##
## PearsonR doesn't work though ##
if not LEVEL_C_ANALYSIS and False:
	benchmarkNames = idx_to_name
	benchmarkCount = len(benchmarkNames)
	numJobs = len(paired_times[0][0])

	reliableNames=["ndes", "cjpeg_wrbmp", "adpcm_enc", "cjpeg_transupp", "epic", "gsm_dec", "h264_dec", "huff_enc", "rijndael_enc", "rijndael_dec", "gsm_enc", "ammunition", "mpeg2"]

	#stats.pearsonr(time[b1][b2], oList),

	with open("weakRelPairs_offset.csv", mode="w+") as f3:
		print("Benchmark1", "Benchmark2", "minOffset", "maxOffset", "meanOffset", "medOffset", "stdOffset", "minLength", "maxLength", sep=",", file=f3)
		for b1 in range(0, benchmarkCount):
			for b2 in range(0, benchmarkCount):
				if benchmarkNames[b1] in reliableNames and benchmarkNames[b2] in reliableNames:
					# Exclude the last job due to inaccurate timing
					oList = paired_offsets[b1][b2][:numJobs-1]
					jList = paired_times[b1][b2][:numJobs-1]
#					plt.scatter(oList, jList)
#					plt.title(benchmarkNames[b1] + ", " + benchmarkNames[b2])
#					plt.show()
#					print(benchmarkNames[b1], benchmarkNames[b2], min(oList), max(oList), np.mean(oList), np.median(oList), np.std(oList), stats.pearsonr(jList, oList), stats.spearmanr(jList, oList), sep=",", file=f3)
					print(benchmarkNames[b1], benchmarkNames[b2], min(oList), max(oList), np.mean(oList), np.median(oList), np.std(oList), min(jList), max(jList), sep=",", file=f3)
"""
#with open("reliableGraphs.csv", mode="x") as f3:
	for b1 in range(0, benchmarkCount):
		for b2 in range(0, benchmarkCount):
			if benchmarkNames[b1] in reliableNames and benchmarkNames[b2] in reliableNames:
				oList = offset[b1][b2][:numJobs - 1]
				jList = time[b1][b2][:numJobs-1]
				# offset, time scatterplot
				plt.scatter(oList, jList)
				plt.title(benchmarkNames[b1] + " " + benchmarkNames[b2] + " Offsets v. Time")
				plt.show()
				# time histogram
				#plt.hist(jList, bins=10)
				#plt.title(benchmarkNames[b1] + benchmarkNames[b2] + "Completion Times")
				#plt.show()
				# offset histogram
				#plt.hist(oList, bins=10)
				#plt.title(benchmarkNames[b1] + benchmarkNames[b2] + "Offsets")
				#plt.show()
"""
diff --git a/smt_analysis/libSMT.py b/smt_analysis/libSMT.py
new file mode 100755
index 0000000..cca2fce
--- /dev/null
+++ b/smt_analysis/libSMT.py
@@ -0,0 +1,151 @@
import numpy as np
import sys
import os

def assert_valid_input_files(names, on_fail):
	# Check that all input files are valid
	for f in names:
		if not os.path.exists(f) or os.path.getsize(f) == 0:
			print("ERROR: File '{}' does not exist or is empty".format(f), file=sys.stderr)
			on_fail()
			exit()

# This parses the result data from unthreaded timing experiments
# @param f File name to load
# @returns res Map of benchmark name to list of execution time samples
# @returns samples Map of benchmark name to sample count
# @returns max_res Map of benchmark name to maximum execution time among all samples for that benchmark
def load_baseline(f):
	# constants for columns of baseline data files
	TOTAL_NS = 5
	BENCH_NAME = 0
	SAMPLES = 4

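	# Assumed input shape (whitespace-delimited, one job per line; only columns 0, 4,
	# and 5 are read here, the rest are ignored): <bench_name> ... <sample_count> <total_ns>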
	# Load baseline data. This logic is based off the summarize programs
	res = {} # Map of benchmark to list of all execution time samples
	samples = {} # Map of benchmark name to sample count
	max_res = {} # Map of benchmark name to maximum execution time

	with open(f) as fp:
		for line in fp:
			s = line.split()
			if s[BENCH_NAME] not in res:
				res[s[BENCH_NAME]] = [int(s[TOTAL_NS])]
				samples[s[BENCH_NAME]] = int(s[SAMPLES])
				max_res[s[BENCH_NAME]] = int(s[TOTAL_NS])
			else:
				res[s[BENCH_NAME]].append(int(s[TOTAL_NS]))
				max_res[s[BENCH_NAME]] = max(int(s[TOTAL_NS]), max_res[s[BENCH_NAME]])
	return res, samples, max_res

# This parses the result data from paired, threaded timing experiments
# @param file1 The -A file name
# @param file2 The -B file name
# @returns time 2D array of benchmark IDs to list of total container execution times
# @returns offset 2D array of benchmark IDs to list of differences between the start
#          of the first and the start of the second benchmark
# @returns name_to_idx Map of benchmark names to benchmark IDs
# @returns idx_to_name List which when indexed with benchmark ID will yield the benchmark name
def load_paired(file1, file2, benchmarkCount):
	# constants for columns of paired data files
	FIRST_PROG = 0
	SECOND_PROG = 1
	FIRST_CORE = 2
	SECOND_CORE = 3
	TRIALS = 4
	START_S = 5 # Start seconds
	START_N = 6 # Start nanoseconds
	END_S = 7 # End seconds
	END_N = 8 # End nanoseconds
	RUN_ID = 9
	JOB_NUM = 10
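	# Per the column indices above, each line describes one job of a pair:
	# <prog1> <prog2> <core1> <core2> <trials> <start_s> <start_ns> <end_s> <end_ns> <run_id> <job_num>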

	with open(file1) as f1:
		numJobs = int(f1.readline().split()[TRIALS])
	assert numJobs > 0
	assert benchmarkCount > 0

	# Total times of each container
	time = [[[0 for x in range(numJobs)] for y in range(benchmarkCount)] for z in range(benchmarkCount)]
	# Difference in time between when the first and the second task start in the container
	offset = [[[0 for x in range(numJobs)] for y in range(benchmarkCount)] for z in range(benchmarkCount)]

	# Some aggregate counters that we update as we go along
	avg_off = 0
	avg_off_samp = 0

	# Load paired data
	bench1 = 0 # Index to what's the current first benchmark being examined
	bench2 = 0 # Index to what's the current second benchmark being examined

	name_to_idx = {}
	idx_to_name = [0 for x in range(benchmarkCount)]

	job_idx = 0
	with open(file1) as f1, open(file2) as f2:
		for line1, line2 in zip(f1, f2):
			lineArr1 = line1.split()
			lineArr2 = line2.split()
			start1 = int(lineArr1[START_S]) * 10**9 + int(lineArr1[START_N])
			start2 = int(lineArr2[START_S]) * 10**9 + int(lineArr2[START_N])
			minStart = min(start1, start2)
			end1 = int(lineArr1[END_S]) * 10**9 + int(lineArr1[END_N])
			end2 = int(lineArr2[END_S]) * 10**9 + int(lineArr2[END_N])
			maxEnd = max(end1, end2)
			# Time actually co-scheduled is minEnd - maxStart, but Sims uses a different model
#			time[bench1][bench2][int(lineArr1[JOB_NUM])] = maxEnd - minStart
			time[bench1][bench2][job_idx] = maxEnd - minStart
			if lineArr1[SECOND_PROG] == "h264_dec" and int(lineArr2[JOB_NUM]) == 0: # Debug print; JOB_NUM must be parsed as an int for this to ever match
				print(maxEnd - minStart)
			# Compute offset: if first job starts at t=0, when does second start?
#			offset[bench1][bench2][int(lineArr1[JOB_NUM])] = abs(start2-start1)
			offset[bench1][bench2][job_idx] = abs(start2-start1)
			# Compute some running statistics
			avg_off += abs(start2-start1)
			avg_off_samp += 1
			# Increment to the next benchmark, this is weird because of the zip()
			# This is doubly weird because our results are an upper triangular matrix
			if job_idx == numJobs - 1: #int(lineArr1[JOB_NUM]) == numJobs - 1:
				if bench2 < benchmarkCount-1:
					bench2 = bench2 + 1
					job_idx = 0
				else:
					name_to_idx[lineArr1[FIRST_PROG]] = bench1
					idx_to_name[bench1] = lineArr1[FIRST_PROG]
					bench1 = bench1 + 1
					bench2 = bench1 # bench1 will never again appear as bench2
					job_idx = 0
			else:
				job_idx += 1
	print("Average offset is: " + str(avg_off/avg_off_samp) + " ns")
	return time, offset, name_to_idx, idx_to_name

# Paired times use an abuse of the baseline file format
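# For example (benchmark names assumed for illustration), a baseline-format line whose
# name field reads "epic+gsm_dec" holds the co-run samples for epic paired with gsm_dec.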
def load_fake_paired(fake_paired_filename):
	paired_times_raw, _, _ = load_baseline(fake_paired_filename)
	benchmarkCount = int(np.sqrt(len(list(paired_times_raw.keys()))))
	numJobs = len(next(iter(paired_times_raw.values())))
	paired_times = [[[0 for x in range(numJobs)] for y in range(benchmarkCount)] for z in range(benchmarkCount)]
	idx_to_name = []
	name_to_idx = {}
	bench1 = -1
	# Generate the indexing approach
	for pair in sorted(paired_times_raw.keys()):
		[bench1name, bench2name] = pair.split('+') # Benchmark name is the pair concatenated together with a '+' delimiter
		if bench1 == -1 or bench1name != idx_to_name[-1]:
			idx_to_name.append(bench1name)
			name_to_idx[bench1name] = len(idx_to_name) - 1
			bench1 += 1
	# Populate the array
	for bench1 in range(len(idx_to_name)):
		for bench2 in range(len(idx_to_name)):
			paired_times[bench1][bench2] = paired_times_raw[idx_to_name[bench1]+"+"+idx_to_name[bench2]]
	return paired_times, name_to_idx, idx_to_name

def assert_base_and_pair_keys_match(baseline_times, name_to_idx):
	if sorted(baseline_times.keys()) != sorted(name_to_idx.keys()):
		print("ERROR: The baseline and paired experiments were over a different set of benchmarks!", file=sys.stderr)
		print("Baseline keys:", baseline_times.keys(), file=sys.stderr)
		print("Paired keys:", name_to_idx.keys(), file=sys.stderr)
		exit()

diff --git a/smt_analysis/plotille b/smt_analysis/plotille
new file mode 160000
Subproject 41f50df2f5b499425465f506a9aae5acf1a39c0