diff options
| author | Bjoern Brandenburg <bbb@mpi-sws.org> | 2012-10-02 06:12:43 -0400 |
|---|---|---|
| committer | Bjoern Brandenburg <bbb@mpi-sws.org> | 2013-02-12 06:55:16 -0500 |
| commit | adfef1cb9a8960690fb85cc0b99e8fe266e173eb (patch) | |
| tree | 71bcc895f00a20c9c631963b223cf4c297bdce6f | |
| parent | 5b01c84940965f053f5eacd401a132a2008dac71 (diff) | |
Add LP-based blocking analysis for MPCP
| -rw-r--r-- | native/Makefile | 4 | ||||
| -rw-r--r-- | native/include/lp_analysis.h | 2 | ||||
| -rw-r--r-- | native/include/lp_common.h | 22 | ||||
| -rw-r--r-- | native/src/blocking/linprog/lp_common.cpp | 108 | ||||
| -rw-r--r-- | native/src/blocking/linprog/lp_mpcp.cpp | 589 | ||||
| -rw-r--r-- | schedcat/locking/bounds.py | 11 |
6 files changed, 734 insertions, 2 deletions
diff --git a/native/Makefile b/native/Makefile index d722fcd..2df9893 100644 --- a/native/Makefile +++ b/native/Makefile | |||
| @@ -118,7 +118,7 @@ ALL = testmain _sched.so _locking.so _sim.so | |||
| 118 | 118 | ||
| 119 | # Compile LP-based code only if we have a solver. | 119 | # Compile LP-based code only if we have a solver. |
| 120 | ifneq ($(CPLEX_PATH)$(GLPK_PATH),) | 120 | ifneq ($(CPLEX_PATH)$(GLPK_PATH),) |
| 121 | LP_OBJ = lp_common.o io.o lp_dflp.o lp_dpcp.o | 121 | LP_OBJ = lp_common.o io.o lp_dflp.o lp_dpcp.o lp_mpcp.o |
| 122 | 122 | ||
| 123 | ifneq ($(CPLEX_PATH),) | 123 | ifneq ($(CPLEX_PATH),) |
| 124 | LP_OBJ += cplex.o cpx.o | 124 | LP_OBJ += cplex.o cpx.o |
| @@ -159,5 +159,5 @@ _locking.so: ${SYNC_OBJ} interface/locking_wrap.o | |||
| 159 | _sim.so: ${CORE_OBJ} ${SCHED_OBJ} interface/sim_wrap.o | 159 | _sim.so: ${CORE_OBJ} ${SCHED_OBJ} interface/sim_wrap.o |
| 160 | $(CXX) $(SOFLAGS) $(PYTHON_LIB) -o $@ $+ $(LDFLAGS) | 160 | $(CXX) $(SOFLAGS) $(PYTHON_LIB) -o $@ $+ $(LDFLAGS) |
| 161 | 161 | ||
| 162 | _lp_analysis.so: ${LP_OBJ} sharedres.o cpu_time.o interface/lp_analysis_wrap.o | 162 | _lp_analysis.so: ${LP_OBJ} sharedres.o mpcp.o cpu_time.o interface/lp_analysis_wrap.o |
| 163 | $(CXX) $(SOFLAGS) $(PYTHON_LIB) -o $@ $+ $(LDFLAGS) | 163 | $(CXX) $(SOFLAGS) $(PYTHON_LIB) -o $@ $+ $(LDFLAGS) |
diff --git a/native/include/lp_analysis.h b/native/include/lp_analysis.h index c33be61..182eff1 100644 --- a/native/include/lp_analysis.h +++ b/native/include/lp_analysis.h | |||
| @@ -9,4 +9,6 @@ extern BlockingBounds* lp_dpcp_bounds(const ResourceSharingInfo& info, | |||
| 9 | extern BlockingBounds* lp_dflp_bounds(const ResourceSharingInfo& info, | 9 | extern BlockingBounds* lp_dflp_bounds(const ResourceSharingInfo& info, |
| 10 | const ResourceLocality& locality); | 10 | const ResourceLocality& locality); |
| 11 | 11 | ||
| 12 | extern BlockingBounds* lp_mpcp_bounds(const ResourceSharingInfo& info); | ||
| 13 | |||
| 12 | #endif /* LP_ANALYSYS_H_ */ | 14 | #endif /* LP_ANALYSYS_H_ */ |
diff --git a/native/include/lp_common.h b/native/include/lp_common.h index 06c5e78..b459664 100644 --- a/native/include/lp_common.h +++ b/native/include/lp_common.h | |||
| @@ -118,6 +118,18 @@ void add_topology_constraints_shm( | |||
| 118 | const TaskInfo& ti, | 118 | const TaskInfo& ti, |
| 119 | LinearProgram& lp); | 119 | LinearProgram& lp); |
| 120 | 120 | ||
| 121 | void add_local_lower_priority_constraints_shm( | ||
| 122 | VarMapper& vars, | ||
| 123 | const ResourceSharingInfo& info, | ||
| 124 | const TaskInfo& ti, | ||
| 125 | LinearProgram& lp); | ||
| 126 | |||
| 127 | void add_local_higher_priority_constraints_shm( | ||
| 128 | VarMapper& vars, | ||
| 129 | const ResourceSharingInfo& info, | ||
| 130 | const TaskInfo& ti, | ||
| 131 | LinearProgram& lp); | ||
| 132 | |||
| 121 | // A generic for loop that iterates 'request_index_variable' from 0 to the | 133 | // A generic for loop that iterates 'request_index_variable' from 0 to the |
| 122 | // maximum number of requests issued by task tx while ti is pending. 'tx_request' | 134 | // maximum number of requests issued by task tx while ti is pending. 'tx_request' |
| 123 | // should be of type RequestBound&. | 135 | // should be of type RequestBound&. |
| @@ -169,6 +181,16 @@ void add_topology_constraints_shm( | |||
| 169 | foreach(tasks, task_iter) \ | 181 | foreach(tasks, task_iter) \ |
| 170 | if (task_iter->get_cluster() != (local_task).get_cluster()) | 182 | if (task_iter->get_cluster() != (local_task).get_cluster()) |
| 171 | 183 | ||
| 184 | // iterate only over tasks with equal or lower priority, excluding local tasks | ||
| 185 | #define foreach_remote_lowereq_priority_task(tasks, reference_task, task_iter) \ | ||
| 186 | foreach_remote_task(tasks, reference_task, task_iter) \ | ||
| 187 | if (task_iter->get_priority() >= (reference_task).get_priority()) | ||
| 188 | |||
| 189 | // iterate only over tasks with higher priority, excluding local tasks | ||
| 190 | #define foreach_remote_higher_priority_task(tasks, reference_task, task_iter) \ | ||
| 191 | foreach_remote_task(tasks, reference_task, task_iter) \ | ||
| 192 | if (task_iter->get_priority() < (reference_task).get_priority()) | ||
| 193 | |||
| 172 | #define foreach_local_task(tasks, local_task, task_iter) \ | 194 | #define foreach_local_task(tasks, local_task, task_iter) \ |
| 173 | foreach(tasks, task_iter) \ | 195 | foreach(tasks, task_iter) \ |
| 174 | if (task_iter->get_cluster() == (local_task).get_cluster()) | 196 | if (task_iter->get_cluster() == (local_task).get_cluster()) |
diff --git a/native/src/blocking/linprog/lp_common.cpp b/native/src/blocking/linprog/lp_common.cpp index a7bb1b5..1715d3b 100644 --- a/native/src/blocking/linprog/lp_common.cpp +++ b/native/src/blocking/linprog/lp_common.cpp | |||
| @@ -234,6 +234,7 @@ void add_local_lower_priority_constraints( | |||
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | 236 | ||
| 237 | // Constraint 10 in [Brandenburg 2013] | ||
| 237 | // For shared-memory protocols. | 238 | // For shared-memory protocols. |
| 238 | // Remote tasks cannot preempt Ti since they are not scheduled | 239 | // Remote tasks cannot preempt Ti since they are not scheduled |
| 239 | // on Ti's assigned processor; therefore force BLOCKING_PREEMPT to zero. | 240 | // on Ti's assigned processor; therefore force BLOCKING_PREEMPT to zero. |
| @@ -261,3 +262,110 @@ void add_topology_constraints_shm( | |||
| 261 | } | 262 | } |
| 262 | lp.add_equality(exp, 0); | 263 | lp.add_equality(exp, 0); |
| 263 | } | 264 | } |
| 265 | |||
| 266 | // Constraint 9 in [Brandenburg 2013] | ||
| 267 | // local higher-priority tasks never cause blocking under SHM protocols | ||
| 268 | void add_local_higher_priority_constraints_shm( | ||
| 269 | VarMapper& vars, | ||
| 270 | const ResourceSharingInfo& info, | ||
| 271 | const TaskInfo& ti, | ||
| 272 | LinearProgram& lp) | ||
| 273 | { | ||
| 274 | LinearExpression *exp = new LinearExpression(); | ||
| 275 | |||
| 276 | foreach_local_task(info.get_tasks(), ti, tx) | ||
| 277 | { | ||
| 278 | if (tx->get_priority() < ti.get_priority()) | ||
| 279 | { | ||
| 280 | unsigned int t = tx->get_id(); | ||
| 281 | foreach(tx->get_requests(), request) | ||
| 282 | { | ||
| 283 | unsigned int q = request->get_resource_id(); | ||
| 284 | foreach_request_instance(*request, ti, v) | ||
| 285 | { | ||
| 286 | unsigned int var_id; | ||
| 287 | var_id = vars.lookup(t, q, v, | ||
| 288 | BLOCKING_PREEMPT); | ||
| 289 | exp->add_var(var_id); | ||
| 290 | |||
| 291 | var_id = vars.lookup(t, q, v, | ||
| 292 | BLOCKING_INDIRECT); | ||
| 293 | exp->add_var(var_id); | ||
| 294 | |||
| 295 | var_id = vars.lookup(t, q, v, | ||
| 296 | BLOCKING_DIRECT); | ||
| 297 | exp->add_var(var_id); | ||
| 298 | } | ||
| 299 | } | ||
| 300 | } | ||
| 301 | } | ||
| 302 | lp.add_equality(exp, 0); | ||
| 303 | } | ||
| 304 | |||
| 305 | static unsigned int max_num_arrivals_shm( | ||
| 306 | const ResourceSharingInfo& info, | ||
| 307 | const TaskInfo& ti) | ||
| 308 | { | ||
| 309 | hashmap<unsigned int, unsigned int> request_counts; | ||
| 310 | |||
| 311 | foreach(ti.get_requests(), req) | ||
| 312 | request_counts[req->get_resource_id()] = 0; | ||
| 313 | |||
| 314 | // count how often each resource is accessed on remote cores | ||
| 315 | foreach_remote_task(info.get_tasks(), ti, tx) | ||
| 316 | { | ||
| 317 | foreach(tx->get_requests(), req) | ||
| 318 | { | ||
| 319 | unsigned int q = req->get_resource_id(); | ||
| 320 | if (request_counts.find(q) != request_counts.end()) | ||
| 321 | request_counts[q] += req->get_max_num_requests(ti.get_response()); | ||
| 322 | } | ||
| 323 | } | ||
| 324 | |||
| 325 | // initialize to 1 to account for job release | ||
| 326 | unsigned int total = 1; | ||
| 327 | |||
| 328 | foreach(ti.get_requests(), req) | ||
| 329 | total += std::min(request_counts[req->get_resource_id()], | ||
| 330 | req->get_num_requests()); | ||
| 331 | |||
| 332 | return total; | ||
| 333 | } | ||
| 334 | |||
| 335 | // Constraint 11 in [Brandenburg 2013] | ||
| 336 | // Local lower-priority tasks block at most once each time | ||
| 337 | // that Ti suspends (and once after release). | ||
| 338 | void add_local_lower_priority_constraints_shm( | ||
| 339 | VarMapper& vars, | ||
| 340 | const ResourceSharingInfo& info, | ||
| 341 | const TaskInfo& ti, | ||
| 342 | LinearProgram& lp) | ||
| 343 | { | ||
| 344 | unsigned int num_arrivals = max_num_arrivals_shm(info, ti); | ||
| 345 | |||
| 346 | foreach_local_lowereq_priority_task_except(info.get_tasks(), ti, tx) | ||
| 347 | { | ||
| 348 | LinearExpression *exp = new LinearExpression(); | ||
| 349 | unsigned int t = tx->get_id(); | ||
| 350 | foreach(tx->get_requests(), request) | ||
| 351 | { | ||
| 352 | unsigned int q = request->get_resource_id(); | ||
| 353 | foreach_request_instance(*request, ti, v) | ||
| 354 | { | ||
| 355 | unsigned int var_id; | ||
| 356 | var_id = vars.lookup(t, q, v, | ||
| 357 | BLOCKING_PREEMPT); | ||
| 358 | exp->add_var(var_id); | ||
| 359 | |||
| 360 | var_id = vars.lookup(t, q, v, | ||
| 361 | BLOCKING_INDIRECT); | ||
| 362 | exp->add_var(var_id); | ||
| 363 | |||
| 364 | var_id = vars.lookup(t, q, v, | ||
| 365 | BLOCKING_DIRECT); | ||
| 366 | exp->add_var(var_id); | ||
| 367 | } | ||
| 368 | } | ||
| 369 | lp.add_equality(exp, num_arrivals); | ||
| 370 | } | ||
| 371 | } | ||
diff --git a/native/src/blocking/linprog/lp_mpcp.cpp b/native/src/blocking/linprog/lp_mpcp.cpp new file mode 100644 index 0000000..bccea5d --- /dev/null +++ b/native/src/blocking/linprog/lp_mpcp.cpp | |||
| @@ -0,0 +1,589 @@ | |||
| 1 | #include <iostream> | ||
| 2 | #include <set> | ||
| 3 | #include <algorithm> | ||
| 4 | |||
| 5 | #include "lp_common.h" | ||
| 6 | #include "blocking.h" | ||
| 7 | #include "stl-hashmap.h" | ||
| 8 | |||
| 9 | #include "cpu_time.h" | ||
| 10 | |||
| 11 | #include "mpcp.h" | ||
| 12 | |||
| 13 | #define NO_BOUND (-1) | ||
| 14 | |||
| 15 | typedef hashmap<unsigned int, unsigned int> PerResourceCounts; | ||
| 16 | typedef hashmap<unsigned int, hashmap<unsigned int, unsigned int> > PerTaskPerRequestDirectBlockingBound; | ||
| 17 | typedef hashmap<unsigned int, unsigned int> PerTaskIndirectBlockingBound; | ||
| 18 | |||
| 19 | class GcsResponseTimes | ||
| 20 | { | ||
| 21 | private: | ||
| 22 | // per each task, and per each global critical section (gcs), | ||
| 23 | // the maximum wait time bound | ||
| 24 | hashmap< unsigned long, hashmap< unsigned int, long> > remote_delay; | ||
| 25 | |||
| 26 | hashmap< unsigned long, hashmap< unsigned int, unsigned long> > gcs_response; | ||
| 27 | |||
| 28 | const ResourceSharingInfo &info; | ||
| 29 | const MPCPCeilings &prio_ceilings; | ||
| 30 | |||
| 31 | long bound_remote_delay(const TaskInfo &tsk, | ||
| 32 | unsigned int res_id); | ||
| 33 | |||
| 34 | void bound_gcs_response_times(void); | ||
| 35 | |||
| 36 | |||
| 37 | public: | ||
| 38 | GcsResponseTimes(const ResourceSharingInfo &i, | ||
| 39 | const MPCPCeilings &pc) | ||
| 40 | : info(i), prio_ceilings(pc) | ||
| 41 | { | ||
| 42 | bound_gcs_response_times(); | ||
| 43 | } | ||
| 44 | |||
| 45 | |||
| 46 | long get_max_remote_delay(const TaskInfo &ti, unsigned int res_id) | ||
| 47 | { | ||
| 48 | if (remote_delay.find(ti.get_id()) == remote_delay.end()) | ||
| 49 | remote_delay[ti.get_id()] = hashmap<unsigned int, long>(); | ||
| 50 | |||
| 51 | hashmap<unsigned int, long> &tmap = remote_delay[ti.get_id()]; | ||
| 52 | |||
| 53 | if (tmap.find(res_id) == tmap.end()) | ||
| 54 | tmap[res_id] = bound_remote_delay(ti, res_id); | ||
| 55 | |||
| 56 | return tmap[res_id]; | ||
| 57 | } | ||
| 58 | |||
| 59 | long get_gcs_response(const TaskInfo &ti, unsigned int res_id) | ||
| 60 | { | ||
| 61 | // look up the task | ||
| 62 | if (gcs_response.find(ti.get_id()) == gcs_response.end()) | ||
| 63 | return NO_BOUND; | ||
| 64 | |||
| 65 | hashmap<unsigned int, unsigned long> &tmap = gcs_response[ti.get_id()]; | ||
| 66 | |||
| 67 | // look up the resource | ||
| 68 | if (tmap.find(res_id) == tmap.end()) | ||
| 69 | return NO_BOUND; | ||
| 70 | |||
| 71 | return tmap[res_id]; | ||
| 72 | } | ||
| 73 | }; | ||
| 74 | |||
| 75 | |||
| 76 | void GcsResponseTimes::bound_gcs_response_times() | ||
| 77 | { | ||
| 78 | Clusters clusters; | ||
| 79 | split_by_cluster(info, clusters); | ||
| 80 | |||
| 81 | ClusterResponseTimes responses; | ||
| 82 | determine_gcs_response_times(clusters, prio_ceilings, responses); | ||
| 83 | |||
| 84 | for (unsigned int c = 0; c < clusters.size(); c++) | ||
| 85 | { | ||
| 86 | const Cluster &cluster = clusters[c]; | ||
| 87 | const TaskResponseTimes &tasks_response_times = responses[c]; | ||
| 88 | |||
| 89 | for (unsigned int i = 0; i < cluster.size(); i++) | ||
| 90 | { | ||
| 91 | const TaskInfo *ti = cluster[i]; | ||
| 92 | const ResponseTimes &ri = tasks_response_times[i]; | ||
| 93 | |||
| 94 | gcs_response[ti->get_id()] = hashmap<unsigned int, unsigned long>(); | ||
| 95 | hashmap<unsigned int, unsigned long> &response = gcs_response[ti->get_id()]; | ||
| 96 | |||
| 97 | for (unsigned int r = 0; r < ti->get_requests().size(); r++) | ||
| 98 | { | ||
| 99 | unsigned int q = ti->get_requests()[r].get_resource_id(); | ||
| 100 | response[q] = ri[r]; | ||
| 101 | } | ||
| 102 | } | ||
| 103 | } | ||
| 104 | } | ||
| 105 | |||
| 106 | // This function computes an upper bound on remote blocking using response-time analysis. | ||
| 107 | // This corresponds to Equation (3) in LNR:09. | ||
| 108 | long GcsResponseTimes::bound_remote_delay(const TaskInfo &ti, unsigned int res_id) | ||
| 109 | { | ||
| 110 | unsigned long delay_by_lower = 0, delay_by_equal = 0, delay_by_higher = 0; | ||
| 111 | |||
| 112 | // find maximum lower-priority request span that can block directly | ||
| 113 | foreach_lowereq_priority_task_except(info.get_tasks(), ti, tx) | ||
| 114 | { | ||
| 115 | foreach(tx->get_requests(), request) | ||
| 116 | { | ||
| 117 | unsigned int q = request->get_resource_id(); | ||
| 118 | if (q == res_id) | ||
| 119 | { | ||
| 120 | unsigned long resp = get_gcs_response(*tx, q); | ||
| 121 | |||
| 122 | if (tx->get_priority() > ti.get_priority()) | ||
| 123 | delay_by_lower = std::max(delay_by_lower, | ||
| 124 | resp); | ||
| 125 | else | ||
| 126 | delay_by_equal += resp; | ||
| 127 | } | ||
| 128 | } | ||
| 129 | } | ||
| 130 | |||
| 131 | // response-time analysis to find final maximum wait time | ||
| 132 | |||
| 133 | unsigned long next_estimate = delay_by_lower + delay_by_equal; | ||
| 134 | unsigned long estimate = 0; | ||
| 135 | |||
| 136 | while (next_estimate <= ti.get_response() && next_estimate != estimate) | ||
| 137 | { | ||
| 138 | delay_by_higher = 0; | ||
| 139 | estimate = next_estimate; | ||
| 140 | |||
| 141 | // accumulate direct higher-priority blocking | ||
| 142 | foreach_higher_priority_task(info.get_tasks(), ti, tx) | ||
| 143 | { | ||
| 144 | foreach(tx->get_requests(), request) | ||
| 145 | { | ||
| 146 | unsigned int q = request->get_resource_id(); | ||
| 147 | |||
| 148 | if (res_id == q) | ||
| 149 | { | ||
| 150 | unsigned int nreqs = request->get_max_num_requests(estimate); | ||
| 151 | delay_by_higher += nreqs * get_gcs_response(*tx, q); | ||
| 152 | } | ||
| 153 | } | ||
| 154 | } | ||
| 155 | |||
| 156 | next_estimate = delay_by_lower + delay_by_equal + delay_by_higher; | ||
| 157 | } | ||
| 158 | |||
| 159 | if (estimate <= ti.get_response()) | ||
| 160 | return estimate; | ||
| 161 | else | ||
| 162 | return NO_BOUND; | ||
| 163 | } | ||
| 164 | |||
| 165 | static unsigned int count_gcs_preemption_opportunities( | ||
| 166 | const ResourceSharingInfo& info, | ||
| 167 | const RequestBound &req, | ||
| 168 | PerTaskPerRequestDirectBlockingBound &db_bounds, | ||
| 169 | const MPCPCeilings &prio_ceilings, | ||
| 170 | const TaskInfo& ti) | ||
| 171 | { | ||
| 172 | unsigned int count = 0; | ||
| 173 | const TaskInfo &tr = *req.get_task(); | ||
| 174 | unsigned int req_prio = prio_ceilings[req.get_task()->get_cluster()][req.get_resource_id()]; | ||
| 175 | |||
| 176 | // Count everything that can be preempted by 'req' and that | ||
| 177 | // is also potentially blocking ti. | ||
| 178 | |||
| 179 | foreach_local_task(info.get_tasks(), tr, tx) | ||
| 180 | { | ||
| 181 | if (tx->get_id() != tr.get_id() && | ||
| 182 | tx->get_id() != ti.get_id()) | ||
| 183 | { | ||
| 184 | foreach(tx->get_requests(), request) | ||
| 185 | { | ||
| 186 | unsigned int q = request->get_resource_id(); | ||
| 187 | if (q != req.get_resource_id() && | ||
| 188 | db_bounds[tx->get_id()][q] > 0) | ||
| 189 | { | ||
| 190 | // request can directly block Ti | ||
| 191 | unsigned int xprio = prio_ceilings[tx->get_cluster()][q]; | ||
| 192 | |||
| 193 | if (xprio >= req_prio) | ||
| 194 | { | ||
| 195 | // indirect delay is possible | ||
| 196 | // each time that Ti is directly blocked by 'request'. | ||
| 197 | // This kind of blocking is possible | ||
| 198 | count += db_bounds[tx->get_id()][q]; | ||
| 199 | } | ||
| 200 | } | ||
| 201 | } | ||
| 202 | } | ||
| 203 | } | ||
| 204 | |||
| 205 | return count; | ||
| 206 | } | ||
| 207 | |||
| 208 | // Constraint 18 | ||
| 209 | static void add_per_request_indirect_constraints( | ||
| 210 | VarMapper& vars, | ||
| 211 | const ResourceSharingInfo& info, | ||
| 212 | PerTaskPerRequestDirectBlockingBound &db_bounds, | ||
| 213 | const MPCPCeilings &prio_ceilings, | ||
| 214 | const TaskInfo& ti, | ||
| 215 | LinearProgram& lp) | ||
| 216 | { | ||
| 217 | PerTaskIndirectBlockingBound bounds; | ||
| 218 | |||
| 219 | foreach_remote_task(info.get_tasks(), ti, tx) | ||
| 220 | { | ||
| 221 | unsigned int t = tx->get_id(); | ||
| 222 | foreach(tx->get_requests(), request) | ||
| 223 | { | ||
| 224 | unsigned int q = request->get_resource_id(); | ||
| 225 | LinearExpression *exp = new LinearExpression(); | ||
| 226 | foreach_request_instance(*request, ti, v) | ||
| 227 | { | ||
| 228 | unsigned int var_id; | ||
| 229 | var_id = vars.lookup(t, q, v, BLOCKING_INDIRECT); | ||
| 230 | exp->add_var(var_id); | ||
| 231 | } | ||
| 232 | unsigned int bound = count_gcs_preemption_opportunities( | ||
| 233 | info, *request, db_bounds, prio_ceilings, ti); | ||
| 234 | lp.add_inequality(exp, bound); | ||
| 235 | } | ||
| 236 | } | ||
| 237 | } | ||
| 238 | |||
| 239 | // Constraint 17 | ||
| 240 | static void add_per_task_indirect_constraints( | ||
| 241 | VarMapper& vars, | ||
| 242 | const ResourceSharingInfo& info, | ||
| 243 | PerTaskPerRequestDirectBlockingBound &db_bounds, | ||
| 244 | const MPCPCeilings &prio_ceilings, | ||
| 245 | const TaskInfo& ti, | ||
| 246 | LinearProgram& lp) | ||
| 247 | { | ||
| 248 | PerTaskIndirectBlockingBound bounds; | ||
| 249 | |||
| 250 | // initialize | ||
| 251 | foreach_remote_task(info.get_tasks(), ti, tx) | ||
| 252 | { | ||
| 253 | bounds[tx->get_id()] = 0; | ||
| 254 | } | ||
| 255 | |||
| 256 | foreach_remote_task(info.get_tasks(), ti, tx) | ||
| 257 | { | ||
| 258 | foreach(tx->get_requests(), request) | ||
| 259 | { | ||
| 260 | unsigned int q = request->get_resource_id(); | ||
| 261 | if (db_bounds[tx->get_id()][q] > 0) | ||
| 262 | { | ||
| 263 | // note for each local task an opportunity to cause indirect delay | ||
| 264 | |||
| 265 | unsigned int prio = prio_ceilings[tx->get_cluster()][request->get_resource_id()]; | ||
| 266 | foreach_local_task_except(info.get_tasks(), *tx, tl) | ||
| 267 | { | ||
| 268 | // see if it has anything to preempt us with | ||
| 269 | foreach(tl->get_requests(), lreq) | ||
| 270 | { | ||
| 271 | if (prio_ceilings[tl->get_cluster()][lreq->get_resource_id()] <= prio) | ||
| 272 | { | ||
| 273 | // yes, has one, count opportunity per direct blocking | ||
| 274 | bounds[tl->get_id()] += db_bounds[tx->get_id()][q]; | ||
| 275 | break; | ||
| 276 | } | ||
| 277 | } | ||
| 278 | } | ||
| 279 | } | ||
| 280 | } | ||
| 281 | } | ||
| 282 | |||
| 283 | foreach_remote_task(info.get_tasks(), ti, tx) | ||
| 284 | { | ||
| 285 | unsigned int t = tx->get_id(); | ||
| 286 | LinearExpression *exp = new LinearExpression(); | ||
| 287 | |||
| 288 | foreach(tx->get_requests(), request) | ||
| 289 | { | ||
| 290 | unsigned int q = request->get_resource_id(); | ||
| 291 | foreach_request_instance(*request, ti, v) | ||
| 292 | { | ||
| 293 | unsigned int var_id; | ||
| 294 | var_id = vars.lookup(t, q, v, BLOCKING_INDIRECT); | ||
| 295 | exp->add_var(var_id); | ||
| 296 | } | ||
| 297 | } | ||
| 298 | lp.add_inequality(exp, bounds[t]); | ||
| 299 | } | ||
| 300 | } | ||
| 301 | |||
| 302 | static PerResourceCounts get_per_resource_counts(const TaskInfo &ti) | ||
| 303 | { | ||
| 304 | // Count the number of times that each resource is accessed by ti. | ||
| 305 | PerResourceCounts per_resource_counts; | ||
| 306 | foreach(ti.get_requests(), req) | ||
| 307 | per_resource_counts[req->get_resource_id()] += req->get_num_requests(); | ||
| 308 | |||
| 309 | return per_resource_counts; | ||
| 310 | } | ||
| 311 | |||
| 312 | // Constraints 15, 16, and 19 | ||
| 313 | static void add_direct_blocking_constraints( | ||
| 314 | VarMapper& vars, | ||
| 315 | const ResourceSharingInfo& info, | ||
| 316 | GcsResponseTimes &rta, | ||
| 317 | const TaskInfo& ti, | ||
| 318 | PerResourceCounts &per_resource_counts, | ||
| 319 | LinearProgram& lp, | ||
| 320 | PerTaskPerRequestDirectBlockingBound &db_bounds) | ||
| 321 | { | ||
| 322 | // Each request can be directly delayed at most once by a lower-priority task | ||
| 323 | // accessing the same resource. | ||
| 324 | // Ti is never directly delayed due to resources it does not request, | ||
| 325 | // regardless of the priority of the lock holder. | ||
| 326 | |||
| 327 | // build one constraint for each resource. | ||
| 328 | hashmap<unsigned int, LinearExpression *> constraints; | ||
| 329 | |||
| 330 | foreach_task_except(info.get_tasks(), ti, tx) | ||
| 331 | { | ||
| 332 | unsigned int t = tx->get_id(); | ||
| 333 | // Is Tx a higher-priority task? | ||
| 334 | bool hiprio = tx->get_priority() < ti.get_priority(); | ||
| 335 | |||
| 336 | db_bounds[t] = hashmap<unsigned int, unsigned int>(); | ||
| 337 | |||
| 338 | foreach(tx->get_requests(), request) | ||
| 339 | { | ||
| 340 | unsigned int q = request->get_resource_id(); | ||
| 341 | bool accessed = per_resource_counts.find(q) != | ||
| 342 | per_resource_counts.end(); | ||
| 343 | |||
| 344 | db_bounds[t][q] = 0; | ||
| 345 | |||
| 346 | if (!hiprio || !accessed) | ||
| 347 | { | ||
| 348 | LinearExpression *exp; | ||
| 349 | |||
| 350 | if (constraints.find(q) == constraints.end()) | ||
| 351 | constraints[q] = new LinearExpression(); | ||
| 352 | |||
| 353 | exp = constraints[q]; | ||
| 354 | |||
| 355 | foreach_request_instance(*request, ti, v) | ||
| 356 | { | ||
| 357 | unsigned int var_id; | ||
| 358 | var_id = vars.lookup(t, q, v, BLOCKING_DIRECT); | ||
| 359 | exp->add_var(var_id); | ||
| 360 | } | ||
| 361 | |||
| 362 | if (accessed) | ||
| 363 | // loprio => direct blocking at most once per request | ||
| 364 | db_bounds[t][q] = std::min( | ||
| 365 | request->get_max_num_requests(ti.get_response()), | ||
| 366 | per_resource_counts[q]); | ||
| 367 | } | ||
| 368 | else | ||
| 369 | { | ||
| 370 | // higher-priority request, conflicting | ||
| 371 | // how many blocking instances per request? | ||
| 372 | long interval = rta.get_max_remote_delay(ti, q); | ||
| 373 | if (interval != NO_BOUND) | ||
| 374 | { | ||
| 375 | // can add constraints | ||
| 376 | unsigned int request_count; | ||
| 377 | // max per request? | ||
| 378 | request_count = request->get_max_num_requests(interval); | ||
| 379 | // how many requests? | ||
| 380 | request_count *= per_resource_counts[q]; | ||
| 381 | // add constraint for this task | ||
| 382 | |||
| 383 | db_bounds[t][q] = std::min( | ||
| 384 | request->get_max_num_requests(ti.get_response()), | ||
| 385 | request_count); | ||
| 386 | |||
| 387 | LinearExpression *exp = new LinearExpression(); | ||
| 388 | foreach_request_instance(*request, ti, v) | ||
| 389 | { | ||
| 390 | unsigned int var_id; | ||
| 391 | var_id = vars.lookup(t, q, v, BLOCKING_DIRECT); | ||
| 392 | exp->add_var(var_id); | ||
| 393 | } | ||
| 394 | lp.add_inequality(exp, request_count); | ||
| 395 | } | ||
| 396 | } | ||
| 397 | } | ||
| 398 | } | ||
| 399 | |||
| 400 | // add each per-resource constraint | ||
| 401 | foreach(constraints, it) | ||
| 402 | { | ||
| 403 | unsigned int bound = 0; | ||
| 404 | if (per_resource_counts.find(it->first) != | ||
| 405 | per_resource_counts.end()) | ||
| 406 | bound = per_resource_counts[it->first]; | ||
| 407 | lp.add_inequality(it->second, bound); | ||
| 408 | } | ||
| 409 | } | ||
| 410 | |||
| 411 | // Constraint 20 | ||
| 412 | static void add_remote_blocking_constraint( | ||
| 413 | VarMapper& vars, | ||
| 414 | const ResourceSharingInfo& info, | ||
| 415 | GcsResponseTimes &rta, | ||
| 416 | const TaskInfo& ti, | ||
| 417 | LinearProgram& lp) | ||
| 418 | { | ||
| 419 | unsigned long remote_blocking_bound = 0; | ||
| 420 | |||
| 421 | // sum up maximum remote blocking on a per-request basis | ||
| 422 | foreach(ti.get_requests(), req) | ||
| 423 | { | ||
| 424 | unsigned int num = req->get_num_requests(); | ||
| 425 | unsigned int q = req->get_resource_id(); | ||
| 426 | unsigned long rem_blocking = rta.get_max_remote_delay(ti, q); | ||
| 427 | remote_blocking_bound += rem_blocking * num; | ||
| 428 | } | ||
| 429 | |||
| 430 | LinearExpression *exp = new LinearExpression(); | ||
| 431 | |||
| 432 | foreach_remote_task(info.get_tasks(), ti, tx) | ||
| 433 | { | ||
| 434 | unsigned int t = tx->get_id(); | ||
| 435 | foreach(tx->get_requests(), request) | ||
| 436 | { | ||
| 437 | unsigned int q = request->get_resource_id(); | ||
| 438 | double length = request->get_request_length();; | ||
| 439 | |||
| 440 | foreach_request_instance(*request, ti, v) | ||
| 441 | { | ||
| 442 | unsigned int var_id; | ||
| 443 | var_id = vars.lookup(t, q, v, BLOCKING_DIRECT); | ||
| 444 | exp->add_term(length, var_id); | ||
| 445 | |||
| 446 | var_id = vars.lookup(t, q, v, BLOCKING_INDIRECT); | ||
| 447 | exp->add_term(length, var_id); | ||
| 448 | } | ||
| 449 | } | ||
| 450 | } | ||
| 451 | |||
| 452 | lp.add_inequality(exp, remote_blocking_bound); | ||
| 453 | } | ||
| 454 | |||
| 455 | static void add_mpcp_constraints( | ||
| 456 | VarMapper& vars, | ||
| 457 | const ResourceSharingInfo& info, | ||
| 458 | const TaskInfo& ti, | ||
| 459 | const MPCPCeilings& prio_ceilings, | ||
| 460 | GcsResponseTimes &gcs_response, | ||
| 461 | LinearProgram& lp) | ||
| 462 | { | ||
| 463 | PerResourceCounts counts = get_per_resource_counts(ti); | ||
| 464 | PerTaskPerRequestDirectBlockingBound db_bounds; | ||
| 465 | |||
| 466 | // Constraint 1 | ||
| 467 | add_mutex_constraints(vars, info, ti, lp); | ||
| 468 | // Constraint 9 | ||
| 469 | add_local_higher_priority_constraints_shm(vars, info, ti, lp); | ||
| 470 | // Constraint 10 | ||
| 471 | add_topology_constraints_shm(vars, info, ti, lp); | ||
| 472 | // Constraint 11 | ||
| 473 | add_local_lower_priority_constraints_shm(vars, info, ti, lp); | ||
| 474 | // Constraints 15, 16, and 19 | ||
| 475 | add_direct_blocking_constraints(vars, info, gcs_response, ti, counts, lp, db_bounds); | ||
| 476 | // Constraint 17 | ||
| 477 | add_per_task_indirect_constraints(vars, info, db_bounds, prio_ceilings, ti, lp); | ||
| 478 | // Constraint 18 | ||
| 479 | add_per_request_indirect_constraints(vars, info, db_bounds, prio_ceilings, ti, lp); | ||
| 480 | // Constraint 20 | ||
| 481 | add_remote_blocking_constraint(vars, info, gcs_response, ti, lp); | ||
| 482 | } | ||
| 483 | |||
| 484 | static void apply_mpcp_bounds_for_task( | ||
| 485 | unsigned int i, | ||
| 486 | BlockingBounds& bounds, | ||
| 487 | const ResourceSharingInfo& info, | ||
| 488 | const MPCPCeilings& prio_ceilings, | ||
| 489 | GcsResponseTimes &gcs_response) | ||
| 490 | { | ||
| 491 | LinearProgram lp; | ||
| 492 | VarMapper vars; | ||
| 493 | const TaskInfo& ti = info.get_tasks()[i]; | ||
| 494 | LinearExpression *local_obj = new LinearExpression(); | ||
| 495 | LinearExpression *remote_obj = new LinearExpression(); | ||
| 496 | |||
| 497 | #if DEBUG_LP_OVERHEADS >= 2 | ||
| 498 | static DEFINE_CPU_CLOCK(model_gen_cost); | ||
| 499 | static DEFINE_CPU_CLOCK(solver_cost); | ||
| 500 | static DEFINE_CPU_CLOCK(remote_cost); | ||
| 501 | |||
| 502 | std::cout << "---- " << __FUNCTION__ << " ----" << std::endl; | ||
| 503 | |||
| 504 | model_gen_cost.start(); | ||
| 505 | #endif | ||
| 506 | |||
| 507 | set_blocking_objective_part_shm(vars, info, ti, lp, local_obj, remote_obj); | ||
| 508 | vars.seal(); | ||
| 509 | |||
| 510 | add_mpcp_constraints(vars, info, ti, | ||
| 511 | prio_ceilings, | ||
| 512 | gcs_response, lp); | ||
| 513 | |||
| 514 | #if DEBUG_LP_OVERHEADS >= 2 | ||
| 515 | model_gen_cost.stop(); | ||
| 516 | std::cout << model_gen_cost << std::endl; | ||
| 517 | solver_cost.start(); | ||
| 518 | #endif | ||
| 519 | |||
| 520 | Solution *sol = linprog_solve(lp, vars.get_num_vars()); | ||
| 521 | |||
| 522 | #if DEBUG_LP_OVERHEADS >= 2 | ||
| 523 | solver_cost.stop(); | ||
| 524 | std::cout << solver_cost << std::endl; | ||
| 525 | #endif | ||
| 526 | assert(sol != NULL); | ||
| 527 | |||
| 528 | Interference total, remote, local; | ||
| 529 | |||
| 530 | total.total_length = sol->evaluate(*lp.get_objective()); | ||
| 531 | local.total_length = sol->evaluate(*local_obj); | ||
| 532 | |||
| 533 | bounds[i] = total; | ||
| 534 | bounds.set_local_blocking(i, local); | ||
| 535 | |||
| 536 | delete local_obj; | ||
| 537 | delete sol; | ||
| 538 | |||
| 539 | #if DEBUG_LP_OVERHEADS >= 2 | ||
| 540 | remote_cost.start(); | ||
| 541 | #endif | ||
| 542 | |||
| 543 | // compute remote blocking maximum | ||
| 544 | lp.set_objective(remote_obj); | ||
| 545 | sol = linprog_solve(lp, vars.get_num_vars()); | ||
| 546 | |||
| 547 | assert(sol != NULL); | ||
| 548 | |||
| 549 | remote.total_length = sol->evaluate(*lp.get_objective()); | ||
| 550 | bounds.set_remote_blocking(i, remote); | ||
| 551 | |||
| 552 | #if DEBUG_LP_OVERHEADS >= 2 | ||
| 553 | remote_cost.stop(); | ||
| 554 | std::cout << remote_cost << std::endl; | ||
| 555 | #endif | ||
| 556 | delete sol; | ||
| 557 | } | ||
| 558 | |||
| 559 | |||
| 560 | static BlockingBounds* _lp_mpcp_bounds(const ResourceSharingInfo& info) | ||
| 561 | { | ||
| 562 | BlockingBounds* results = new BlockingBounds(info); | ||
| 563 | |||
| 564 | MPCPCeilings prio_ceilings = get_mpcp_ceilings(info); | ||
| 565 | GcsResponseTimes gcs_response = GcsResponseTimes(info, prio_ceilings); | ||
| 566 | |||
| 567 | for (unsigned int i = 0; i < info.get_tasks().size(); i++) | ||
| 568 | apply_mpcp_bounds_for_task(i, *results, info, prio_ceilings, gcs_response); | ||
| 569 | |||
| 570 | return results; | ||
| 571 | } | ||
| 572 | |||
| 573 | BlockingBounds* lp_mpcp_bounds(const ResourceSharingInfo& info) | ||
| 574 | { | ||
| 575 | #if DEBUG_LP_OVERHEADS >= 1 | ||
| 576 | static DEFINE_CPU_CLOCK(cpu_costs); | ||
| 577 | |||
| 578 | cpu_costs.start(); | ||
| 579 | #endif | ||
| 580 | |||
| 581 | BlockingBounds *results = _lp_mpcp_bounds(info); | ||
| 582 | |||
| 583 | #if DEBUG_LP_OVERHEADS >= 1 | ||
| 584 | cpu_costs.stop(); | ||
| 585 | std::cout << cpu_costs << std::endl; | ||
| 586 | #endif | ||
| 587 | |||
| 588 | return results; | ||
| 589 | } | ||
diff --git a/schedcat/locking/bounds.py b/schedcat/locking/bounds.py index b2093a9..dc20241 100644 --- a/schedcat/locking/bounds.py +++ b/schedcat/locking/bounds.py | |||
| @@ -62,6 +62,7 @@ def apply_mpcp_bounds(all_tasks, use_virtual_spin=False): | |||
| 62 | t.suspended = res.get_remote_blocking(i) | 62 | t.suspended = res.get_remote_blocking(i) |
| 63 | # all blocking, including arrival blocking | 63 | # all blocking, including arrival blocking |
| 64 | t.blocked = res.get_blocking_term(i) | 64 | t.blocked = res.get_blocking_term(i) |
| 65 | t.locally_blocked = res.get_local_blocking(i) | ||
| 65 | 66 | ||
| 66 | def get_round_robin_resource_mapping(num_resources, num_cpus, | 67 | def get_round_robin_resource_mapping(num_resources, num_cpus, |
| 67 | dedicated_irq=cpp.NO_CPU): | 68 | dedicated_irq=cpp.NO_CPU): |
| @@ -223,3 +224,13 @@ def apply_lp_dpcp_bounds(all_tasks, resource_mapping, | |||
| 223 | t.suspended = res.get_remote_blocking(i) | 224 | t.suspended = res.get_remote_blocking(i) |
| 224 | t.blocked = res.get_blocking_term(i) | 225 | t.blocked = res.get_blocking_term(i) |
| 225 | 226 | ||
| 227 | def apply_lp_mpcp_bounds(all_tasks): | ||
| 228 | model = get_cpp_model(all_tasks) | ||
| 229 | res = lp_cpp.lp_mpcp_bounds(model) | ||
| 230 | |||
| 231 | for i,t in enumerate(all_tasks): | ||
| 232 | # remote blocking <=> self-suspension time | ||
| 233 | t.suspended = res.get_remote_blocking(i) | ||
| 234 | # all blocking, including local blocking | ||
| 235 | t.blocked = res.get_blocking_term(i) | ||
| 236 | t.locally_blocked = res.get_local_blocking(i) | ||
