-rwxr-xr-x  smt_analysis/computeLCslowdown.py   |  73
-rwxr-xr-x  smt_analysis/computeSMTslowdown.py  | 154
-rwxr-xr-x  smt_analysis/libSMT.py              | 151
3 files changed, 233 insertions, 145 deletions
diff --git a/smt_analysis/computeLCslowdown.py b/smt_analysis/computeLCslowdown.py
new file mode 100755
index 0000000..bcd22da
--- /dev/null
+++ b/smt_analysis/computeLCslowdown.py
@@ -0,0 +1,73 @@
+#!/usr/bin/python3
+import numpy as np
+import sys
+import plotille.plotille as plt
+from libSMT import *
+TIMING_ERROR = 1000 #ns
+ASYNC_FORMAT = False
+
+def print_usage():
+    print("This program takes in the all-pairs and baseline SMT data and computes the worst-case slowdown against any other task when SMT is enabled.", file=sys.stderr)
+    print("Level-A/B usage: {} <file -A> <file -B> <baseline file>".format(sys.argv[0]), file=sys.stderr)
+    print("Level-C usage: {} <continuous pairs> <baseline file>".format(sys.argv[0]), file=sys.stderr)
+
+# Check that we got the right number of parameters
+if len(sys.argv) < 3:
+    print_usage()
+    exit()
+
+if len(sys.argv) > 3:
+    print("Reading file using synchronous pair format...")
+    print("Are you sure you want to do this? For the RTAS'21 paper, L-A/-B pairs should use the other script.")
+    input("Press enter to continue, Ctrl+C to exit...")
+else:
+    print("Reading file using asynchronous pair format...")
+    ASYNC_FORMAT = True
+
+assert_valid_input_files(sys.argv[1:-1], print_usage)
+
+# Pull in the data
+if not ASYNC_FORMAT:
+    baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[3])
+    paired_times, paired_offsets, name_to_idx, idx_to_name = load_paired(sys.argv[1], sys.argv[2], len(list(baseline_times.keys())))
+    for key in baseline_times:
+        print(key, max(baseline_times[key]))
+else:
+    baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[2])
+    paired_times, name_to_idx, idx_to_name = load_fake_paired(sys.argv[1])
+
+# We work iff the baseline was run for the same set of benchmarks as the pairs were
+assert_base_and_pair_keys_match(baseline_times, name_to_idx)
+
+# Only consider benchmarks that are at least an order of magnitude longer than the timing error
+reliableNames = []
+for i in range(0, len(name_to_idx)):
+    benchmark = idx_to_name[i]
+    if min(baseline_times[benchmark]) > TIMING_ERROR * 10:
+        reliableNames.append(benchmark)
+
+# Compute worst-case SMT slowdown for each benchmark
+print("Bench Mi")
+# Print rows
+sample_f = np.mean # Statistic applied to each sample list; swap in np.max to build Mi from maxima instead
+M_vals = []
+for b1 in reliableNames:
+    print("{:<14.14}:".format(b1), end=" ")
+    max_mi = 0
+    # Scan through everyone we ran against and find our maximum slowdown
+    for b2 in reliableNames:
+        time_with_smt = sample_f(paired_times[name_to_idx[b1]][name_to_idx[b2]])
+        time_wout_smt = sample_f(baseline_times[b1])
+        M = time_with_smt / time_wout_smt
+        max_mi = max(max_mi, M)
+    print("{:>10.3}".format(max_mi), end=" ")
+    M_vals.append(max_mi)
+    print("")
+# Print some statistics about the distribution
+print("Average: {:>5.3} with standard deviation {:>5.3} using `{}`".format(np.mean(M_vals), np.std(M_vals), sample_f.__name__))
+Ms = np.asarray(M_vals, dtype=np.float32)
+print(np.sum(Ms <= 1), "of", len(M_vals), "M_i values are at most one -", 100*np.sum(Ms <= 1)/len(M_vals), "percent")
+print(np.sum(Ms > 2), "of", len(M_vals), "M_i values are greater than two -", 100*np.sum(Ms > 2)/len(M_vals), "percent")
+M_vals_to_plot = Ms
+
+print(plt.hist(M_vals_to_plot, bins=10))
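
For intuition, here is a minimal sketch of the M_i computation the new script performs. The benchmark names and times are made-up placeholders, not measurements from the experiments:

import numpy as np

# Hypothetical data: solo baseline samples and SMT-paired samples, in ns.
baseline = {"bench_a": [10000, 11000, 10500]}
paired = {("bench_a", "bench_b"): [14000, 15000, 14500],
          ("bench_a", "bench_c"): [12000, 12500, 12250]}

# M_i is the worst-case ratio of mean paired time to mean baseline time,
# taken over every co-runner of bench_a.
m_i = max(np.mean(times) / np.mean(baseline["bench_a"])
          for (b1, _), times in paired.items() if b1 == "bench_a")
print(round(m_i, 3))  # 1.381: bench_a is at worst ~38% slower with SMT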
diff --git a/smt_analysis/computeSMTslowdown.py b/smt_analysis/computeSMTslowdown.py
index aa7aa21..805def1 100755
--- a/smt_analysis/computeSMTslowdown.py
+++ b/smt_analysis/computeSMTslowdown.py
@@ -3,19 +3,19 @@ from typing import List, Any
 import numpy as np
 from scipy import stats
 import sys
-import os
 import plotille.plotille as plt
 TIMING_ERROR = 1000 #ns
 LEVEL_C_ANALYSIS = False
+from libSMT import *
 
-def print_usage(argv):
+def print_usage():
 	print("This program takes in the all-pairs and baseline SMT data and computes how much each program is slowed when SMT is enabled.", file=sys.stderr)
-	print("Level-A/B usage: {} <file -A> <file -B> <baseline file> --cij".format(argv[0]), file=sys.stderr)
-	print("Level-C usage: {} <continuous pairs> <baseline file>".format(argv[0]), file=sys.stderr)
+	print("Level-A/B usage: {} <file -A> <file -B> <baseline file> --cij".format(sys.argv[0]), file=sys.stderr)
+	print("Level-C usage: {} <continuous pairs> <baseline file>".format(sys.argv[0]), file=sys.stderr)
 
 # Check that we got the right number of parameters
 if len(sys.argv) < 3:
-	print_usage(sys.argv)
+	print_usage()
 	exit()
 
 if len(sys.argv) > 3:
@@ -24,127 +24,12 @@ else:
| 24 | print("Analyzing results using Level-C methodology...") | 24 | print("Analyzing results using Level-C methodology...") |
| 25 | LEVEL_C_ANALYSIS = True | 25 | LEVEL_C_ANALYSIS = True |
| 26 | 26 | ||
| 27 | # Check that all input files are valid | 27 | assert_valid_input_files(sys.argv[1:-1], print_usage); |
| 28 | for f in sys.argv[1:-1]: | ||
| 29 | if not os.path.exists(f) or os.path.getsize(f) == 0: | ||
| 30 | print("ERROR: File '{}' does not exist or is empty".format(f), file=sys.stderr); | ||
| 31 | print_usage(sys.argv) | ||
| 32 | exit() | ||
| 33 | 28 | ||
| 34 | # Print Cij values rather than Mij | 29 | # Print Cij values rather than Mij |
| 35 | TIMES_ONLY = len(sys.argv) > 4 and "--cij" in sys.argv[4] | 30 | TIMES_ONLY = len(sys.argv) > 4 and "--cij" in sys.argv[4] |
| 36 | OK_PAIRS_ONLY = len(sys.argv) > 4 and "--cij-ok" in sys.argv[4] | 31 | OK_PAIRS_ONLY = len(sys.argv) > 4 and "--cij-ok" in sys.argv[4] |
| 37 | 32 | ||
-# This parses the result data from unthreaded timing experiments
-# @param f File name to load
-# @returns res Map of benchmark name to sample count
-# @returns samples Map of benchmark name to list of execution time samples
-# @returns max_res May of benchmark to maximum execution time among all samples for that benchmark
-def load_baseline(f):
-	# constants for columns of baseline data files
-	TOTAL_NS = 5
-	BENCH_NAME = 0
-	SAMPLES = 4
-
-	# Load baseline data. This logic is based off the summarize programs
-	res = {} # Map of benchmark to list of all execution time samples
-	samples = {} # Map of benchmark name to sample count
-	max_res = {} # Map of benchmark name to maximum execution time
-
-	with open(f) as fp:
-		for line in fp:
-			s = line.split()
-			if s[BENCH_NAME] not in res:
-				res[s[BENCH_NAME]] = list([int(s[TOTAL_NS])])
-				samples[s[BENCH_NAME]] = int(s[SAMPLES])
-				max_res[s[BENCH_NAME]] = int(s[TOTAL_NS])
-			else:
-				res[s[BENCH_NAME]].append(int(s[TOTAL_NS]))
-				max_res[s[BENCH_NAME]] = max(int(s[TOTAL_NS]), max_res[s[BENCH_NAME]])
-	return res, samples, max_res
-
-# This parses the result data from paired, threaded timing experiements
-# @param file1 The -A file name
-# @param file2 The -B file name
-# @returns time 2D array of benchmark IDs to list of total container execution times
-# @returns offset 2D array of benchmark IDs to list of differences between the start
-#          of the first and the start of the second benchmark
-# @returns name_to_idx Map of benchmark names to benchmark IDs
-# @returns idx_to_name List which when indexed with benchmark ID will yield the benchmark name
-def load_paired(file1, file2, benchmarkCount):
-	# constants for columns of paired data files
-	FIRST_PROG = 0
-	SECOND_PROG = 1
-	FIRST_CORE = 2
-	SECOND_CORE = 3
-	TRIALS = 4
-	START_S = 5 # Start seconds
-	START_N = 6 # Start nanoseconds
-	END_S = 7 # End seconds
-	END_N = 8 # End nanoseconds
-	RUN_ID = 9
-	JOB_NUM = 10
-
-	with open(file1) as f1:
-		numJobs = int(f1.readline().split()[TRIALS])
-	assert numJobs > 0
-	assert benchmarkCount > 0
-
-	# Total times of each container
-	time=[[[0 for x in range(numJobs)]for y in range(benchmarkCount)]for z in range(benchmarkCount)]
-	# Difference in time between when the first and the second task start in the container
-	offset=[[[0 for x in range(numJobs)]for y in range(benchmarkCount)]for z in range(benchmarkCount)]
-
-	# Some aggregate counters that we update as we go along
-	avg_off = 0
-	avg_off_samp = 0
-
-	# Load paired data
-	bench1 = 0 # Index to what's the current first benchmark being examined
-	bench2 = 0 # Index to what's the current second benchmark being examined
-
-	name_to_idx = {}
-	idx_to_name = [0 for x in range(benchmarkCount)]
-
-	job_idx = 0
-	with open(file1) as f1, open(file2) as f2:
-		for line1, line2 in zip(f1, f2):
-			lineArr1 = line1.split()
-			lineArr2 = line2.split()
-			start1 = int(lineArr1[START_S]) * 10**9 + int(lineArr1[START_N])
-			start2 = int(lineArr2[START_S]) * 10**9 + int(lineArr2[START_N])
-			minStart = min(start1, start2)
-			end1 = int(lineArr1[END_S]) * 10**9 + int(lineArr1[END_N])
-			end2 = int(lineArr2[END_S]) * 10**9 + int(lineArr2[END_N])
-			maxEnd = max(end1, end2)
-			# Time actually co-scheduled is minEnd - maxStart, but Sims uses a different model
-			# time[bench1][bench2][int(lineArr1[JOB_NUM])] = maxEnd - minStart
-			time[bench1][bench2][job_idx] = maxEnd - minStart
-			if lineArr1[SECOND_PROG] == "h264_dec" and lineArr2[JOB_NUM] == 0:
-				print(maxEnd - minStart)
-			# Compute offset: if first job starts at t=0, when does second start?
-			# offset[bench1][bench2][int(lineArr1[JOB_NUM])] = abs(start2-start1)
-			offset[bench1][bench2][job_idx] = abs(start2-start1)
-			# Compute some running statistics
-			avg_off += abs(start2-start1)
-			avg_off_samp += 1
-			# Increment to the next benchmark, this is weird because of the zip()
-			# This is doubly weird because our results are an upper trianguler matrix
-			if job_idx == numJobs - 1: #int(lineArr1[JOB_NUM]) == numJobs - 1:
-				if bench2 < benchmarkCount-1:
-					bench2 = bench2 + 1
-					job_idx = 0
-				else:
-					name_to_idx[lineArr1[FIRST_PROG]] = bench1
-					idx_to_name[bench1] = lineArr1[FIRST_PROG]
-					bench1 = bench1 + 1
-					bench2 = bench1 # bench1 will never again appear as bench2
-					job_idx = 0
-			else:
-				job_idx += 1
-	print("Average offset is: " + str(avg_off/avg_off_samp) + "ns")
-	return time, offset, name_to_idx, idx_to_name
-
 # Pull in the data
 if not LEVEL_C_ANALYSIS:
 	baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[3])
@@ -152,33 +37,12 @@ if not LEVEL_C_ANALYSIS:
 	for key in baseline_times:
 		print(key,max(baseline_times[key]))
 else:
-	baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[2])
 	# Paired times use an abuse of the baseline file format
-	paired_times_raw, _, _ = load_baseline(sys.argv[1])
-	benchmarkCount = int(np.sqrt(len(list(paired_times_raw.keys()))))
-	numJobs = len(next(iter(paired_times_raw.values())))
-	paired_times=[[[0 for x in range(numJobs)]for y in range(benchmarkCount)]for z in range(benchmarkCount)]
-	idx_to_name=[]
-	name_to_idx={}
-	bench1 = -1
-	#Generate the indexing approach
-	for pair in sorted(paired_times_raw.keys()):
-		[bench1name, bench2name] = pair.split('+') # Benchmark name is pair concatenated together with a '+' delimiter
-		if bench1 == -1 or bench1name != idx_to_name[-1]:
-			idx_to_name.append(bench1name)
-			name_to_idx[bench1name] = len(idx_to_name) - 1
-			bench1 += 1
-	# Populate the array
-	for bench1 in range(len(idx_to_name)):
-		for bench2 in range(len(idx_to_name)):
-			paired_times[bench1][bench2] = paired_times_raw[idx_to_name[bench1]+"+"+idx_to_name[bench2]]
+	baseline_times, baseline_sample_cnt, baseline_max_times = load_baseline(sys.argv[2])
+	paired_times, name_to_idx, idx_to_name = load_fake_paired(sys.argv[1])
 
 # We work iff the baseline was run for the same set of benchmarks as the pairs were
-if sorted(baseline_times.keys()) != sorted(name_to_idx.keys()):
-	print("ERROR: The baseline and paired experiments were over a different set of benchmarks!", file=sys.stderr)
-	print("Baseline keys:", baseline_times.keys(), file=sys.stderr)
-	print("Paired keys:", name_to_idx.keys(), file=sys.stderr)
-	exit();
+assert_base_and_pair_keys_match(baseline_times, name_to_idx)
 
 # Only consider benchmarks that are at least an order of magnitude longer than the timing error
 reliableNames = []
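
Since the Level-C reshaping that used to live here is now hidden behind load_fake_paired(), a small sketch of what that helper does may be useful. The data below is hypothetical; each "benchmark name" in the raw results is really a '+'-joined pair, as the loader's comment notes:

# Hypothetical parsed Level-C results: each key is "first+second",
# each value is that pair's list of execution time samples.
paired_times_raw = {"a+a": [100, 110], "a+b": [120, 130],
                    "b+a": [125, 128], "b+b": [140, 150]}
idx_to_name = sorted({k.split('+')[0] for k in paired_times_raw})
name_to_idx = {n: i for i, n in enumerate(idx_to_name)}
# Reshape the flat dict into a benchmark x benchmark matrix of sample lists.
paired_times = [[paired_times_raw[r + "+" + c] for c in idx_to_name]
                for r in idx_to_name]
print(paired_times[name_to_idx["a"]][name_to_idx["b"]])  # [120, 130]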
diff --git a/smt_analysis/libSMT.py b/smt_analysis/libSMT.py
new file mode 100755
index 0000000..cca2fce
--- /dev/null
+++ b/smt_analysis/libSMT.py
@@ -0,0 +1,151 @@
+import numpy as np
+import sys
+import os
+
+def assert_valid_input_files(names, on_fail):
+    # Check that all input files are valid
+    for f in names:
+        if not os.path.exists(f) or os.path.getsize(f) == 0:
+            print("ERROR: File '{}' does not exist or is empty".format(f), file=sys.stderr)
+            on_fail()
+            exit()
+
+# This parses the result data from unthreaded timing experiments
+# @param f File name to load
+# @returns res Map of benchmark name to list of execution time samples
+# @returns samples Map of benchmark name to sample count
+# @returns max_res Map of benchmark name to maximum execution time among all samples for that benchmark
+def load_baseline(f):
+    # constants for columns of baseline data files
+    TOTAL_NS = 5
+    BENCH_NAME = 0
+    SAMPLES = 4
+
+    # Load baseline data. This logic is based off the summarize programs
+    res = {} # Map of benchmark to list of all execution time samples
+    samples = {} # Map of benchmark name to sample count
+    max_res = {} # Map of benchmark name to maximum execution time
+
+    with open(f) as fp:
+        for line in fp:
+            s = line.split()
+            if s[BENCH_NAME] not in res:
+                res[s[BENCH_NAME]] = list([int(s[TOTAL_NS])])
+                samples[s[BENCH_NAME]] = int(s[SAMPLES])
+                max_res[s[BENCH_NAME]] = int(s[TOTAL_NS])
+            else:
+                res[s[BENCH_NAME]].append(int(s[TOTAL_NS]))
+                max_res[s[BENCH_NAME]] = max(int(s[TOTAL_NS]), max_res[s[BENCH_NAME]])
+    return res, samples, max_res
+
+# This parses the result data from paired, threaded timing experiments
+# @param file1 The -A file name
+# @param file2 The -B file name
+# @returns time 2D array of benchmark IDs to list of total container execution times
+# @returns offset 2D array of benchmark IDs to list of differences between the start
+#          of the first and the start of the second benchmark
+# @returns name_to_idx Map of benchmark names to benchmark IDs
+# @returns idx_to_name List which when indexed with benchmark ID will yield the benchmark name
+def load_paired(file1, file2, benchmarkCount):
+    # constants for columns of paired data files
+    FIRST_PROG = 0
+    SECOND_PROG = 1
+    FIRST_CORE = 2
+    SECOND_CORE = 3
+    TRIALS = 4
+    START_S = 5 # Start seconds
+    START_N = 6 # Start nanoseconds
+    END_S = 7 # End seconds
+    END_N = 8 # End nanoseconds
+    RUN_ID = 9
+    JOB_NUM = 10
+
+    with open(file1) as f1:
+        numJobs = int(f1.readline().split()[TRIALS])
+    assert numJobs > 0
+    assert benchmarkCount > 0
+
+    # Total times of each container
+    time=[[[0 for x in range(numJobs)]for y in range(benchmarkCount)]for z in range(benchmarkCount)]
+    # Difference in time between when the first and the second task start in the container
+    offset=[[[0 for x in range(numJobs)]for y in range(benchmarkCount)]for z in range(benchmarkCount)]
+
+    # Some aggregate counters that we update as we go along
+    avg_off = 0
+    avg_off_samp = 0
+
+    # Load paired data
+    bench1 = 0 # Index to what's the current first benchmark being examined
+    bench2 = 0 # Index to what's the current second benchmark being examined
+
+    name_to_idx = {}
+    idx_to_name = [0 for x in range(benchmarkCount)]
+
+    job_idx = 0
+    with open(file1) as f1, open(file2) as f2:
+        for line1, line2 in zip(f1, f2):
+            lineArr1 = line1.split()
+            lineArr2 = line2.split()
+            start1 = int(lineArr1[START_S]) * 10**9 + int(lineArr1[START_N])
+            start2 = int(lineArr2[START_S]) * 10**9 + int(lineArr2[START_N])
+            minStart = min(start1, start2)
+            end1 = int(lineArr1[END_S]) * 10**9 + int(lineArr1[END_N])
+            end2 = int(lineArr2[END_S]) * 10**9 + int(lineArr2[END_N])
+            maxEnd = max(end1, end2)
+            # Time actually co-scheduled is minEnd - maxStart, but Sims uses a different model
+            # time[bench1][bench2][int(lineArr1[JOB_NUM])] = maxEnd - minStart
+            time[bench1][bench2][job_idx] = maxEnd - minStart
+            if lineArr1[SECOND_PROG] == "h264_dec" and int(lineArr2[JOB_NUM]) == 0:
+                print(maxEnd - minStart)
+            # Compute offset: if first job starts at t=0, when does second start?
+            # offset[bench1][bench2][int(lineArr1[JOB_NUM])] = abs(start2-start1)
+            offset[bench1][bench2][job_idx] = abs(start2-start1)
+            # Compute some running statistics
+            avg_off += abs(start2-start1)
+            avg_off_samp += 1
+            # Increment to the next benchmark; this is weird because of the zip()
+            # This is doubly weird because our results are an upper triangular matrix
+            if job_idx == numJobs - 1: #int(lineArr1[JOB_NUM]) == numJobs - 1:
+                if bench2 < benchmarkCount-1:
+                    bench2 = bench2 + 1
+                    job_idx = 0
+                else:
+                    name_to_idx[lineArr1[FIRST_PROG]] = bench1
+                    idx_to_name[bench1] = lineArr1[FIRST_PROG]
+                    bench1 = bench1 + 1
+                    bench2 = bench1 # bench1 will never again appear as bench2
+                    job_idx = 0
+            else:
+                job_idx += 1
+    print("Average offset is: " + str(avg_off/avg_off_samp) + "ns")
+    return time, offset, name_to_idx, idx_to_name
+
+# Paired times use an abuse of the baseline file format
+def load_fake_paired(fake_paired_filename):
+    paired_times_raw, _, _ = load_baseline(fake_paired_filename)
+    benchmarkCount = int(np.sqrt(len(list(paired_times_raw.keys()))))
+    numJobs = len(next(iter(paired_times_raw.values())))
+    paired_times=[[[0 for x in range(numJobs)]for y in range(benchmarkCount)]for z in range(benchmarkCount)]
+    idx_to_name=[]
+    name_to_idx={}
+    bench1 = -1
+    # Generate the indexing approach
+    for pair in sorted(paired_times_raw.keys()):
+        [bench1name, bench2name] = pair.split('+') # Benchmark name is pair concatenated together with a '+' delimiter
+        if bench1 == -1 or bench1name != idx_to_name[-1]:
+            idx_to_name.append(bench1name)
+            name_to_idx[bench1name] = len(idx_to_name) - 1
+            bench1 += 1
+    # Populate the array
+    for bench1 in range(len(idx_to_name)):
+        for bench2 in range(len(idx_to_name)):
+            paired_times[bench1][bench2] = paired_times_raw[idx_to_name[bench1]+"+"+idx_to_name[bench2]]
+    return paired_times, name_to_idx, idx_to_name
+
+def assert_base_and_pair_keys_match(baseline_times, name_to_idx):
+    if sorted(baseline_times.keys()) != sorted(name_to_idx.keys()):
+        print("ERROR: The baseline and paired experiments were over a different set of benchmarks!", file=sys.stderr)
+        print("Baseline keys:", baseline_times.keys(), file=sys.stderr)
+        print("Paired keys:", name_to_idx.keys(), file=sys.stderr)
+        exit()
+
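
The flat format load_baseline() expects can be exercised with a toy file. Only columns 0 (benchmark name), 4 (sample count), and 5 (total ns) are read; the middle columns below are arbitrary placeholders for fields the loader ignores:

from libSMT import load_baseline

# Two hypothetical rows for one benchmark; columns 1-3 are skipped by the parser.
with open("demo_baseline.txt", "w") as f:
    f.write("bench_a c0 r0 x 2 10000\n")
    f.write("bench_a c0 r1 x 2 12000\n")

res, samples, max_res = load_baseline("demo_baseline.txt")
print(res["bench_a"])      # [10000, 12000] -- every sample, in file order
print(samples["bench_a"])  # 2 -- sample count taken from column 4
print(max_res["bench_a"])  # 12000 -- worst-case sample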
