summaryrefslogtreecommitdiffstats
path: root/smt_analysis/computeLCslowdown.py
diff options
context:
space:
mode:
authorleochanj <jbakita@cs.unc.edu>2020-10-23 00:13:06 -0400
committerleochanj <jbakita@cs.unc.edu>2020-10-23 00:13:06 -0400
commitd427b910baffcc330b0b24d87c9b3216f306d0fb (patch)
treeef312bc5757860a03673316be421c1624a5bb6b7 /smt_analysis/computeLCslowdown.py
parentb839934c04b214c9bdab399628ee2b94a65bcd10 (diff)
parenta7c3210215bd1181ae93b23c313941dfb44519fb (diff)
merged
Diffstat (limited to 'smt_analysis/computeLCslowdown.py')
-rwxr-xr-xsmt_analysis/computeLCslowdown.py73
1 files changed, 73 insertions, 0 deletions
diff --git a/smt_analysis/computeLCslowdown.py b/smt_analysis/computeLCslowdown.py
new file mode 100755
index 0000000..bcd22da
--- /dev/null
+++ b/smt_analysis/computeLCslowdown.py
@@ -0,0 +1,73 @@
#!/usr/bin/python3
# Computes the worst-case SMT slowdown (M_i) for each benchmark against any
# co-runner, from all-pairs SMT timing data and solo baseline timing data.
import numpy as np
import sys
import plotille.plotille as plt  # terminal histogram plotting (plt.hist at end of script)
from libSMT import *  # provides load_baseline, load_paired, load_fake_paired, assert_* helpers used below
TIMING_ERROR = 1000  # ns; benchmarks whose minimum baseline time is not >10x this are dropped as unreliable
ASYNC_FORMAT = False  # set True below when input uses the asynchronous (continuous-pairs, Level-C) format
8
def print_usage():
    """Write a usage summary for both invocation modes to stderr."""
    prog = sys.argv[0]
    usage_lines = (
        "This program takes in the all-pairs and baseline SMT data and computes the worst-case slowdown against any other task when SMT is enabled.",
        "Level-A/B usage: {} <file -A> <file -B> <baseline file>".format(prog),
        "Level-C usage: {} <continuous pairs> <baseline file>".format(prog),
    )
    for usage_line in usage_lines:
        print(usage_line, file=sys.stderr)
13
# Check that we got the right number of parameters; both modes need at least
# two input files (pairs data + baseline), i.e. argc >= 3.
if len(sys.argv) < 3:
    print_usage()
    exit()

# 4+ arguments means the synchronous (Level-A/B, two-file) pair format;
# exactly 3 means the asynchronous (Level-C, continuous-pairs) format.
if len(sys.argv) > 3:
    print("Reading file using synchronous pair format...")
    print("Are you sure you want to do this? For the RTAS'21 paper, L-A/-B pairs should use the other script.")
    # Interactive guard: this script is intended for Level-C data, so make the
    # user explicitly confirm before proceeding with synchronous input.
    input("Press enter to continue, Ctrl+C to exit...")
else:
    print("Reading file using asynchronous pair format...")
    ASYNC_FORMAT = True
26
# Validate the pair-data input files before loading; on failure the helper is
# given print_usage as the error callback.
# NOTE(review): sys.argv[1:-1] excludes the final argument, so the baseline
# file path itself is not validated here -- confirm this is intentional.
assert_valid_input_files(sys.argv[1:-1], print_usage)

# Pull in the data
if not ASYNC_FORMAT:
    # Synchronous format: argv[1]/argv[2] are the -A/-B pair files, argv[3] is the baseline.
    baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[3])
    paired_times, paired_offsets, name_to_idx, idx_to_name = load_paired(sys.argv[1], sys.argv[2], len(list(baseline_times.keys())))
    # Debug dump: each benchmark's worst observed baseline time.
    for key in baseline_times:
        print(key,max(baseline_times[key]))
else:
    # Asynchronous format: argv[1] is the continuous-pairs file, argv[2] is the baseline.
    baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[2])
    paired_times, name_to_idx, idx_to_name = load_fake_paired(sys.argv[1])

# We work iff the baseline was run for the same set of benchmarks as the pairs were
assert_base_and_pair_keys_match(baseline_times, name_to_idx)
41
# Keep only benchmarks whose shortest baseline run is at least an order of
# magnitude above the timing error; anything shorter is too noisy to trust.
reliableNames = [
    idx_to_name[idx]
    for idx in range(len(name_to_idx))
    if min(baseline_times[idx_to_name[idx]]) > 10 * TIMING_ERROR
]
48
# Compute worst-case SMT slowdown (M_i) for each benchmark: the largest ratio
# of its paired (SMT) time to its solo baseline time over all co-runners.
print("Bench Mi")
# Print rows
sample_f = np.mean  # Reducer collapsing each sample list to one value (swap for e.g. np.max to use worst-case samples)
M_vals = []
for b1 in reliableNames:
    print("{:<14.14}:".format(b1), end=" ")
    # The baseline time does not depend on the co-runner, so compute it once
    # per benchmark instead of inside the inner loop (identical results).
    time_wout_smt = sample_f(baseline_times[b1])
    max_mi = 0
    # Scan through everyone we ran against and find our maximum slowdown
    for b2 in reliableNames:
        time_with_smt = sample_f(paired_times[name_to_idx[b1]][name_to_idx[b2]])
        M = time_with_smt / time_wout_smt
        max_mi = max(max_mi, M)
    print("{:>10.3}".format(max_mi), end=" ")
    M_vals.append(max_mi)
    print("")
# Print some statistics about the distribution
print("Average: {:>5.3} with standard deviation {:>5.3} using `{}`".format(np.mean(M_vals), np.std(M_vals), sample_f.__name__))
Ms = np.asarray(M_vals, dtype=np.float32)
print(np.sum(Ms <= 1), "of", len(M_vals), "M_i values are at most one -", 100*np.sum(Ms <= 1)/len(M_vals), "percent")
print(np.sum(Ms > 2), "of", len(M_vals), "M_i values are greater than two -", 100*np.sum(Ms > 2)/len(M_vals), "percent")
M_vals_to_plot = Ms
# Terminal histogram of the M_i distribution.
print(plt.hist(M_vals_to_plot, bins=10))