author    Jeremy Erickson <jerickso@cs.unc.edu>  2012-09-20 20:41:44 -0400
committer Jeremy Erickson <jerickso@cs.unc.edu>  2012-09-25 12:39:56 -0400
commit    37dbd04e4f9d8956cf4be1c196e282760aa37011 (patch)
tree      d2d2994c5dd697ec493555b22bbce999e1bfe4ba /native
parent    8ba27be920e6bd47442e33946783a38c0fa37356 (diff)
Initial GEL Piecewise Linear implementation
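The new GELPl class computes per-task bounds for G-EDF and G-FL by locating the zero of a piecewise-linear function: rounds == 0 requests the exact solver, rounds > 0 a binary search with that many iterations. A minimal usage sketch, not part of this change (it assumes a TaskSet `ts` already populated via tasks.h and a processor count `m`):

    GELPl gel(GELPl::GFL, m, ts, 0);              // 0 rounds => exact piecewise-linear solve
    for (int i = 0; i < ts.get_task_count(); i++) {
        unsigned long bound = gel.get_bound(i);   // per-task bound
        double G = gel.get_Gi(i);                 // corresponding G_i value
    }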
Diffstat (limited to 'native')
-rw-r--r--  native/Makefile                2
-rw-r--r--  native/include/edf/gel_pl.h   80
-rw-r--r--  native/interface/sched.i       2
-rw-r--r--  native/src/edf/gel_pl.cpp    206
4 files changed, 289 insertions(+), 1 deletion(-)
diff --git a/native/Makefile b/native/Makefile
index d9c5cbd..f9fbb75 100644
--- a/native/Makefile
+++ b/native/Makefile
@@ -51,7 +51,7 @@ vpath %.cpp src src/edf src/blocking
 
 # #### Common C++ source files ####
 
-EDF_OBJ = baker.o baruah.o gfb.o bcl.o bcl_iterative.o rta.o ffdbf.o gedf.o load.o cpu_time.o
+EDF_OBJ = baker.o baruah.o gfb.o bcl.o bcl_iterative.o rta.o ffdbf.o gedf.o gel_pl.o load.o cpu_time.o
 SCHED_OBJ = sim.o schedule_sim.o
 CORE_OBJ = tasks.o
 SYNC_OBJ = sharedres.o dpcp.o mpcp.o
diff --git a/native/include/edf/gel_pl.h b/native/include/edf/gel_pl.h
new file mode 100644
index 0000000..4093d95
--- /dev/null
+++ b/native/include/edf/gel_pl.h
@@ -0,0 +1,80 @@
+#ifndef GEL_PL_H
+#define GEL_PL_H
+
+#ifndef SWIG
+#include <vector>
+#endif
+
+class TaskSet;  // defined in tasks.h
+
+class GELPl
+{
+
+  private:
+    std::vector<unsigned long> bounds;
+    int no_cpus;
+    const TaskSet& tasks;
+    int rounds;
+    std::vector<double> S_i;
+    std::vector<double> G_i;
+
+    // For faster lookups, to avoid too many conversions.
+    std::vector<double> utilizations;
+
+    double compute_exact_s(double S, const std::vector<double>& Y_ints);
+    double compute_binsearch_s(double S, const std::vector<double>& Y_ints);
+
+    inline double compute_M(double s, double S,
+                            const std::vector<double>& Y_ints);
+
+    // These are basically just structs that override operator< to allow
+    // sort algorithms to work.
+    class ReplacementType {
+      public:
+        unsigned int old_task;
+        unsigned int new_task;
+        double location;
+        double old_task_utilization;
+
+        bool operator<(const ReplacementType& other) const {
+            return (location < other.location)
+                   || ((location == other.location)
+                       && (old_task_utilization < other.old_task_utilization));
+        }
+    };
+
+    class TaggedValue {
+      public:
+        unsigned int task;
+        double value;
+
+        // Order is reversed - we are going to want the largest, rather than
+        // the smallest, values.
+        bool operator<(const TaggedValue& other) const {
+            return other.value < value;
+        }
+    };
+
+  public:
+    enum Scheduler {
+        GEDF,
+        GFL
+    };
+
+    GELPl(Scheduler sched,
+          unsigned int num_processors,
+          const TaskSet& tasks,
+          unsigned int rounds);
+
+    unsigned long get_bound(unsigned int index) {
+        return bounds[index];
+    }
+
+    double get_Si(unsigned int index) {
+        return S_i[index];
+    }
+
+    double get_Gi(unsigned int index) {
+        return G_i[index];
+    }
+};
+
+#endif
diff --git a/native/interface/sched.i b/native/interface/sched.i
index 255531a..d0809b6 100644
--- a/native/interface/sched.i
+++ b/native/interface/sched.i
@@ -12,6 +12,7 @@
 #include "edf/ffdbf.h"
 #include "edf/load.h"
 #include "edf/gedf.h"
+#include "edf/gel_pl.h"
 #include "sharedres.h"
 %}
 
@@ -39,3 +40,4 @@
 #include "edf/ffdbf.h"
 #include "edf/load.h"
 #include "edf/gedf.h"
+#include "edf/gel_pl.h"
diff --git a/native/src/edf/gel_pl.cpp b/native/src/edf/gel_pl.cpp
new file mode 100644
index 0000000..53ae575
--- /dev/null
+++ b/native/src/edf/gel_pl.cpp
@@ -0,0 +1,206 @@
+#include "tasks.h"
+
+#include "edf/gel_pl.h"
+
+#include <vector>
+#include <limits>
+#include <algorithm>
+#include <cmath>
+
+static bool reversed_order(double first, double second) {
+    return second < first;
+}
+
+GELPl::GELPl(Scheduler sched, unsigned int num_processors, const TaskSet& ts,
+             unsigned int num_rounds)
+:no_cpus(num_processors), tasks(ts), rounds(num_rounds)
+{
+    std::vector<unsigned long> pps;
+    double S;
+    std::vector<double> Y_ints;
+
+    int task_count = tasks.get_task_count();
+    // Reserve capacity in all vectors to minimize allocation costs.
+    pps.reserve(task_count);
+    Y_ints.reserve(task_count);
+    S_i.reserve(task_count);
+    G_i.reserve(task_count);
+
+    // For faster lookups
+    utilizations.reserve(task_count);
+    for (int i = 0; i < task_count; i++) {
+        utilizations.push_back(double(tasks[i].get_wcet())
+                               / double(tasks[i].get_period()));
+    }
+
+    unsigned long min_pp = std::numeric_limits<unsigned long>::max();
+
+    // Compute initial priority points, including minimum.
+    for (int i = 0; i < task_count; i++) {
+        const Task& task = tasks[i];
+        unsigned long new_pp = task.get_deadline();
+        if (sched == GFL) {
+            new_pp -= ((num_processors - 1) * task.get_wcet()) / num_processors;
+        }
+        pps.push_back(new_pp);
+        if (new_pp < min_pp) {
+            min_pp = new_pp;
+        }
+    }
+
+    // Shift each priority point down by the minimum. Also compute the Y
+    // intercepts, the S_i values, and S.
+    S = 0.0;
+    for (int i = 0; i < task_count; i++) {
+        pps[i] -= min_pp;
+        const Task& task = tasks[i];
+        double wcet = double(task.get_wcet());
+        double period = double(task.get_period());
+        // S_i was only reserved above, so append rather than index into it.
+        S_i.push_back(std::max(0.0, wcet * (1.0 - double(pps[i]) / period)));
+        S += S_i[i];
+        Y_ints.push_back((0.0 - wcet / no_cpus) * (wcet / period)
+                         + wcet - S_i[i]);
+    }
+
+    double s;
+    if (rounds == 0) {
+        s = compute_exact_s(S, Y_ints);
+    }
+    else {
+        s = compute_binsearch_s(S, Y_ints);
+    }
+
+    for (int i = 0; i < task_count; i++) {
+        bounds.push_back(pps[i]
+                         + tasks[i].get_wcet()
+                         + (unsigned long)std::ceil(
+                             s - double(tasks[i].get_wcet()) / double(no_cpus)));
+        G_i.push_back(Y_ints[i] + s * utilizations[i]);
+    }
+}
+
+double GELPl::compute_exact_s(double S, const std::vector<double>& Y_ints) {
+    int task_count = tasks.get_task_count();
+
+    std::vector<ReplacementType> replacements;
+    for (int i = 0; i < task_count; i++) {
+        for (int j = i + 1; j < task_count; j++) {
+            // We can ignore parallel and identical lines - they either never
+            // intersect, or we don't care which one is picked.
+            if (utilizations[i] != utilizations[j]) {
+                double intersect = (Y_ints[j] - Y_ints[i])
+                                   / (utilizations[i] - utilizations[j]);
+                ReplacementType replacement;
+                replacement.location = intersect;
+                if (intersect >= 0.0) {
+                    if (utilizations[i] < utilizations[j]) {
+                        replacement.old_task = i;
+                        replacement.old_task_utilization = utilizations[i];
+                        replacement.new_task = j;
+                    }
+                    else {
+                        replacement.old_task = j;
+                        replacement.old_task_utilization = utilizations[j];
+                        replacement.new_task = i;
+                    }
+                    replacements.push_back(replacement);
+                }
+            }
+        }
+    }
+    std::sort(replacements.begin(), replacements.end());
+
+    std::vector<bool> task_pres;
+    task_pres.assign(task_count, false);
+
+    double current_value = S;
+    double current_slope = -1 * no_cpus;
+
+    std::vector<TaggedValue> init_pairs;
+    init_pairs.reserve(task_count);
+    for (int i = 0; i < task_count; i++) {
+        TaggedValue new_pair;
+        new_pair.task = i;
+        new_pair.value = Y_ints[i];
+        init_pairs.push_back(new_pair);
+    }
+
+    // Allows us to efficiently compute the sum of the top m-1 elements. They
+    // may not end up in sorted order, but they are the correct choices.
+    // (With only one CPU there is nothing to select.)
+    if (no_cpus >= 2) {
+        std::nth_element(init_pairs.begin(), init_pairs.begin() + no_cpus - 2,
+                         init_pairs.end());
+    }
+
+    for (int i = 0; i < no_cpus - 1; i++) {
+        unsigned int task_index = init_pairs[i].task;
+        task_pres[task_index] = true;
+        current_value += init_pairs[i].value;
+        current_slope += utilizations[task_index];
+    }
+
+    unsigned int rindex = 0;
+    double next_s = 0.0;
+    double zero = std::numeric_limits<double>::infinity();
+    while (zero > next_s) {
+        double current_s = next_s;
+        zero = current_s - current_value / current_slope;
+        if (rindex < replacements.size()) {
+            ReplacementType replacement = replacements[rindex];
+            next_s = replacement.location;
+            current_value += (next_s - current_s) * current_slope;
+            // Apply replacement, if appropriate
+            if (task_pres[replacement.old_task]
+                && !task_pres[replacement.new_task]) {
+                task_pres[replacement.old_task] = false;
+                current_slope -= utilizations[replacement.old_task];
+                task_pres[replacement.new_task] = true;
+                current_slope += utilizations[replacement.new_task];
+            }
+            rindex++;
+        }
+        else {
+            next_s = std::numeric_limits<double>::infinity();
+        }
+    }
+    return zero;
+}
+
+double GELPl::compute_binsearch_s(double S, const std::vector<double>& Y_ints) {
+    double min_s = 0.0;
+    double max_s = 1.0;
+    while (compute_M(max_s, S, Y_ints) > 0) {
+        min_s = max_s;
+        max_s *= 2.0;
+    }
+
+    for (int i = 0; i < rounds; i++) {
+        double middle = (min_s + max_s) / 2.0;
+        if (compute_M(middle, S, Y_ints) < 0) {
+            max_s = middle;
+        }
+        else {
+            min_s = middle;
+        }
+    }
+
+    // max_s is guaranteed to be a legal bound.
+    return max_s;
+}
+
+double GELPl::compute_M(double s, double S, const std::vector<double>& Y_ints) {
+    std::vector<double> Gvals;
+    int task_count = tasks.get_task_count();
+    for (int i = 0; i < task_count; i++) {
+        Gvals.push_back(Y_ints[i] + utilizations[i] * s);
+    }
+
+    // Again, more efficient computation by not totally sorting. (Skip the
+    // selection entirely if there is only one CPU.)
+    if (no_cpus >= 2) {
+        std::nth_element(Gvals.begin(), Gvals.begin() + no_cpus - 2, Gvals.end(),
+                         reversed_order);
+    }
+
+    double to_return = S - no_cpus * s;
+
+    for (int i = 0; i < no_cpus - 1; i++) {
+        to_return += Gvals[i];
+    }
+
+    return to_return;
+}
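Both solvers locate the zero crossing of the same piecewise-linear function. In the notation of the code above, with m = no_cpus, u_i = utilizations[i], and G_i(s) = Y_ints[i] + u_i*s, compute_M evaluates

    M(s) = S - m*s + (sum of the m-1 largest G_i(s) values)

Assuming each task's utilization is at most 1, the sum of any m-1 utilizations is less than m, so M is strictly decreasing in s. compute_exact_s therefore follows M breakpoint by breakpoint (the sorted `replacements` mark where the set of m-1 largest lines changes) until it crosses zero, while compute_binsearch_s first doubles max_s until M(max_s) <= 0 and then bisects [min_s, max_s] for `rounds` iterations, returning max_s as a safe over-approximation.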