path: root/parse/sched.py
Diffstat (limited to 'parse/sched.py')
-rw-r--r--  parse/sched.py  123
1 file changed, 100 insertions(+), 23 deletions(-)
diff --git a/parse/sched.py b/parse/sched.py
index a84aece..94ab000 100644
--- a/parse/sched.py
+++ b/parse/sched.py
@@ -1,5 +1,6 @@
1""" 1"""
2TODO: make regexes indexable by name 2TODO: make regexes indexable by name
3
3""" 4"""
4 5
5import config.config as conf 6import config.config as conf
@@ -8,10 +9,11 @@ import re
 import numpy as np
 import subprocess
 
-from collections import namedtuple
-from point import Measurement
+from collections import namedtuple,defaultdict
+from point import Measurement,Type
 
-Task = namedtuple('Task', ['pid', 'period'])
+TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period'])
+Task = namedtuple('Task', ['pid', 'config'])
 
 def get_st_output(data_dir, out_dir):
     bin_files = conf.FILES['sched_data'].format(".*")
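Note for readers of this patch: the new nested tuple shape can be sketched as below (values are hypothetical). Every field holds a string, because the values come straight from regex capture groups and are only converted with float() at the point of use.

    from collections import namedtuple

    TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period'])
    Task = namedtuple('Task', ['pid', 'config'])

    # e.g. pid 1423 with a 10ms cost, 100ms period, assigned to CPU 2
    t = Task('1423', TaskConfig(cpu='2', wcet='10.00', period='100.00'))
    print(t.config.period)   # '100.00'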
@@ -32,32 +34,54 @@ def get_st_output(data_dir, out_dir):
     return output_file
 
 def get_tasks(data):
-    reg = r"PARAM.*?(\d+).*?cost:\s+[\d\.]+ms.*?period.*?([\d.]+)"
-    return [Task(x[0], x[1]) for x in re.findall(reg, data)]
+    reg = r"PARAM *?(\d+)\/.*?cost:\s+([\d\.]+)ms.*?period.*?([\d.]+)ms.*?part.*?(\d+)"
+    ret = []
+    for match in re.findall(reg, data):
+        t = Task(match[0], TaskConfig(match[3],match[1],match[2]))
+        ret += [t]
+    return ret
+
+def get_task_exits(data):
+    reg = r"TASK_EXIT *?(\d+)/.*?Avg.*?(\d+).*?Max.*?(\d+)"
+    ret = []
+    for match in re.findall(reg, data):
+        m = Measurement(match[0], {Type.Max : match[2], Type.Avg : match[1]})
+        ret += [m]
+    return ret
+
 
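To make the capture groups concrete, here is what get_tasks' regex extracts from one record, using a made-up line in the shape the pattern expects (real st_show output may differ in spacing and surrounding fields):

    import re

    reg = r"PARAM *?(\d+)\/.*?cost:\s+([\d\.]+)ms.*?period.*?([\d.]+)ms.*?part.*?(\d+)"
    line = "PARAM 1423/1423: cost: 10.00ms period 100.00ms part 2"
    print(re.findall(reg, line))   # [('1423', '10.00', '100.00', '2')]

The groups map to (pid, cost, period, partition); the constructor then reorders them as TaskConfig(match[3], match[1], match[2]).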
 def extract_tardy_vals(data, exp_point):
     ratios = []
-    tards = []
+    avg_tards = []
+    max_tards = []
 
     for t in get_tasks(data):
-        reg = r"TARDY.*?" + t.pid + "/(\d+).*?Tot.*?([\d.]+).*?ms.*([\d.]+).*?ms.*?([\d.]+)"
+        reg = r"TARDY.*?" + t.pid + "/(\d+).*?Tot.*?([\d\.]+).*?ms.*([\d\.]+).*?ms.*?([\d\.]+)"
         matches = re.findall(reg, data)
         if len(matches) != 0:
             jobs = float(matches[0][0])
+
             total_tard = float(matches[0][1])
-            # max_tard = float(matches[0][2])
+            print("total tard: %s" % total_tard)
+            avg_tard = (total_tard / jobs) / float(t.config.period)
+            max_tard = float(matches[0][2]) / float(t.config.period)
+
+            print("avg tard: %s" % avg_tard)
+
             misses = float(matches[0][3])
-            rel_tard = (total_tard / jobs) / float(t.period)
             if misses != 0:
                 miss_ratio = (misses / jobs)
+                print("misses is %d, jobs is %d" % (misses, jobs))
             else:
                 miss_ratio = 0
 
-            ratios.append(miss_ratio)
-            tards.append(rel_tard)
+            ratios += [miss_ratio]
+            avg_tards += [avg_tard]
+            max_tards += [max_tard]
 
-    for (array, name) in ((tards, "rel-tard"), (ratios, "miss-ratio")):
-        exp_point[name] = Measurement().from_array(array)
+    exp_point["avg-rel-tard"] = Measurement().from_array(avg_tards)
+    exp_point["max-rel-tard"] = Measurement().from_array(max_tards)
+    exp_point["miss-ratio"] = Measurement().from_array(ratios)
 
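The normalization above expresses tardiness in units of the task's period rather than in milliseconds, so tasks with different periods can be aggregated into one Measurement. With made-up numbers:

    jobs, total_tard, max_tard_ms, period = 50.0, 12.5, 3.0, 10.0
    avg_rel_tard = (total_tard / jobs) / period   # 0.025 periods late per job, on average
    max_rel_tard = max_tard_ms / period           # 0.3 periods late in the worst case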
 def extract_variance(data, exp_point):
     varz = []
@@ -77,17 +101,70 @@ def extract_variance(data, exp_point):
 
         varz.append(corrected)
 
-    exp_point['var'] = Measurement().from_array(varz)
+    exp_point['exec-var'] = Measurement().from_array(varz)
 
-def get_sched_data(data_file, result):
+def extract_sched_data(data_file, result):
     with open(data_file, 'r') as f:
         data = f.read()
 
-    # if conf != BASE:
-    #     (our_values, their_values) = extract_exec_vals(our_data, their_data)
-    #     conf_result = get_stats(our_values, their_values)
-    #     for key in conf_result.keys():
-    #         result[key][conf] = conf_result[key]
+    extract_tardy_vals(data, result)
+    extract_variance(data, result)
 
-    extract_tardy_vals(data, result)
-    extract_variance(data, result)
+def config_exit_stats(file):
+    with open(file, 'r') as f:
+        data = f.read()
+
+    tasks = get_tasks(data)
+
+    # Dictionary of task exit measurements by pid
+    exits = get_task_exits(data)
+    exit_dict = dict((e.id, e) for e in exits)
+
+    # Dictionary where keys are configurations, values are lists
+    # of tasks with those configurations
+    config_dict = defaultdict(lambda: [])
+    for t in tasks:
+        config_dict[t.config] += [t]
+
+    for config in config_dict:
+        task_list = sorted(config_dict[config])
+
+        # Replace tasks with corresponding exit stats
+        exit_list = [exit_dict[t.pid] for t in task_list]
+        config_dict[config] = exit_list
+
+    return config_dict
+
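The grouping step relies on namedtuples being hashable and comparable: equal configs collide into one bucket, and tasks sort lexicographically, pid first. A minimal sketch with hypothetical values:

    from collections import defaultdict, namedtuple

    TaskConfig = namedtuple('TaskConfig', ['cpu','wcet','period'])
    Task = namedtuple('Task', ['pid', 'config'])

    cfg = TaskConfig('2', '10.00', '100.00')
    config_dict = defaultdict(lambda: [])
    for t in [Task('1424', cfg), Task('1423', cfg)]:
        config_dict[t.config] += [t]
    print(sorted(config_dict[cfg])[0].pid)   # '1423': tuples sort by pid first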
+saved_stats = {}
+def get_base_stats(base_file):
+    if base_file in saved_stats:
+        return saved_stats[base_file]
+    result = config_exit_stats(base_file)
+    saved_stats[base_file] = result
+    return result
+
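Since many experiments are compared against the same base file, get_base_stats memoizes the parse; repeated calls with the same (hypothetical) path return the cached dictionary:

    stats_a = get_base_stats("base.log")   # parses the file
    stats_b = get_base_stats("base.log")   # served from saved_stats
    assert stats_a is stats_b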
+def extract_scaling_data(data_file, base_file, result):
+    # Generate trees of tasks with matching configurations
+    data_stats = config_exit_stats(data_file)
+    base_stats = get_base_stats(base_file)
+
+    # Scaling factors are calculated by matching groups of tasks with the same
+    # config, then comparing task-to-task exec times in order of PID within
+    # each group
+    max_scales = []
+    avg_scales = []
+    for config in data_stats:
+        if len(data_stats[config]) != len(base_stats[config]):
+            # Quit, we are missing a record and can't guarantee
+            # a task-to-task comparison
+            continue
+        for data_stat, base_stat in zip(data_stats[config],base_stats[config]):
+            # How much larger is their exec stat than ours?
+            avg_scale = float(base_stat[Type.Avg]) / float(data_stat[Type.Avg])
+            max_scale = float(base_stat[Type.Max]) / float(data_stat[Type.Max])
+
+            avg_scales += [avg_scale]
+            max_scales += [max_scale]
+
+    result['max-scale'] = Measurement().from_array(max_scales)
+    result['avg-scale'] = Measurement().from_array(avg_scales)
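Each scaling factor reads as "how much longer did this task run under the base scheduler than here". With hypothetical Avg execution stats of 12ms (base) and 10ms (data):

    base_avg, data_avg = 12.0, 10.0
    avg_scale = base_avg / data_avg   # 1.2: base execution was 20% longer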