aboutsummaryrefslogtreecommitdiffstats
path: root/litmus
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2013-04-08 17:21:34 -0400
committerGlenn Elliott <gelliott@cs.unc.edu>2013-04-08 17:21:34 -0400
commit6f3810f79aaf624b5c781dcf04a4203affcad903 (patch)
tree1a27dd3432be0c2aedba3fd709c6e2da3f852ea6 /litmus
parenta4d63ac7ff059f81a0488d1cda4de16142508189 (diff)
Fixed IKGLP request aborts.
Fixes a bug in which new requests were not moved into the FIFO queue after an aborted request in a FIFO made room.
Diffstat (limited to 'litmus')
-rw-r--r--litmus/budget.c12
-rw-r--r--litmus/ikglp_lock.c641
-rw-r--r--litmus/locking.c13
3 files changed, 394 insertions, 272 deletions
diff --git a/litmus/budget.c b/litmus/budget.c
index 4f692fd4a103..aa254349a85e 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -57,12 +57,12 @@ inline static void arm_enforcement_timer(struct task_struct* t, int force)
57 } 57 }
58 58
59 if (!force) { 59 if (!force) {
60 /* Calling this when there is no budget left for the task 60// /* Calling this when there is no budget left for the task
61 * makes no sense, unless the task is non-preemptive. */ 61// * makes no sense, unless the task is non-preemptive. */
62 if (budget_exhausted(t)) { 62// if (budget_exhausted(t)) {
63 TRACE_TASK(t, "can't arm timer because no budget remaining\n"); 63// TRACE_TASK(t, "can't arm timer because no budget remaining\n");
64 return; 64// return;
65 } 65// }
66 66
67 if ( (!budget_enforced(t) || 67 if ( (!budget_enforced(t) ||
68 (budget_enforced(t) && bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED))) 68 (budget_enforced(t) && bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)))
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index 6d7ea24ce79b..3058aa8fb2fe 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -141,6 +141,98 @@ static inline struct task_struct* ikglp_mth_highest(struct ikglp_semaphore *sem)
141} 141}
142 142
143 143
144static void __ikglp_dump_pq(struct binheap_node *n, int depth)
145{
146 ikglp_heap_node_t *request;
147 char padding[81] = " ";
148
149 if(n == NULL) {
150 TRACE("+-> %p\n", NULL);
151 return;
152 }
153
154 request = binheap_entry(n, ikglp_heap_node_t, node);
155
156 if(depth*2 <= 80)
157 padding[depth*2] = '\0';
158
159
160 TRACE("%s+-> %s/%d\n",
161 padding,
162 request->task->comm,
163 request->task->pid);
164
165 if(n->left) __ikglp_dump_pq(n->left, depth+1);
166 if(n->right) __ikglp_dump_pq(n->right, depth+1);
167}
168
169static void __ikglp_dump_donors(struct binheap_node *n, int depth)
170{
171 ikglp_wait_state_t *donor_node;
172 char padding[81] = " ";
173
174 if(n == NULL) {
175 TRACE("+-> %p\n", NULL);
176 return;
177 }
178
179 donor_node = binheap_entry(n, ikglp_wait_state_t, node);
180
181 if(depth*2 <= 80)
182 padding[depth*2] = '\0';
183
184
185 TRACE("%s+-> %s/%d (donee: %s/%d)\n",
186 padding,
187 donor_node->task->comm,
188 donor_node->task->pid,
189 donor_node->donee_info->task->comm,
190 donor_node->donee_info->task->pid);
191
192 if(n->left) __ikglp_dump_donors(n->left, depth+1);
193 if(n->right) __ikglp_dump_donors(n->right, depth+1);
194}
195
196static void __ikglp_dump_fifoq(int i, struct fifo_queue* fq)
197{
198 TRACE(" FIFO %d: Owner = %s/%d, HP Waiter = %s/%d, # Waiters = %u\n",
199 i,
200 (fq->owner) ? fq->owner->comm : "null",
201 (fq->owner) ? fq->owner->pid : 0,
202 (fq->hp_waiter) ? fq->hp_waiter->comm : "null",
203 (fq->hp_waiter) ? fq->hp_waiter->pid : 0,
204 fq->count);
205 if (waitqueue_active(&fq->wait)) {
206 struct list_head *pos;
207 list_for_each(pos, &fq->wait.task_list) {
208 wait_queue_t *q = list_entry(pos, wait_queue_t, task_list);
209 struct task_struct *t = (struct task_struct*) q->private;
210 TRACE(" %s/%d (effective priority: %s/%d)\n",
211 t->comm, t->pid,
212 (tsk_rt(t)->inh_task) ? tsk_rt(t)->inh_task->comm : "null",
213 (tsk_rt(t)->inh_task) ? tsk_rt(t)->inh_task->pid : 0);
214 }
215 }
216}
217
218static void __ikglp_dump_state(struct ikglp_semaphore *sem)
219{
220 int i;
221 TRACE("IKGLP Lock %d\n", sem->litmus_lock.ident);
222 TRACE("# Replicas: %u Max FIFO Len: %u Max in FIFOs: %u Cur # in FIFOs: %u\n",
223 sem->nr_replicas, sem->max_fifo_len, sem->max_in_fifos, sem->nr_in_fifos);
224 TRACE("# requests in top-m: %u\n", sem->top_m_size);
225
226 for (i = 0; i < sem->nr_replicas; ++i)
227 __ikglp_dump_fifoq(i, &sem->fifo_queues[i]);
228
229 TRACE(" PQ:\n");
230 __ikglp_dump_pq(sem->priority_queue.root, 1);
231
232 TRACE(" Donors:\n");
233 __ikglp_dump_donors(sem->donors.root, 1);
234}
235
144 236
145#if 0 237#if 0
146static void print_global_list(struct binheap_node* n, int depth) 238static void print_global_list(struct binheap_node* n, int depth)
@@ -1133,9 +1225,8 @@ static void __drop_from_fq(struct ikglp_semaphore *sem,
1133 --(fq->count); 1225 --(fq->count);
1134 1226
1135#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 1227#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1136 if(sem->aff_obs) { 1228 if(sem->aff_obs)
1137 sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq, t); 1229 sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq, t);
1138 }
1139#endif 1230#endif
1140 1231
1141 if(t == fq->hp_waiter) { 1232 if(t == fq->hp_waiter) {
@@ -1149,6 +1240,7 @@ static void __drop_from_fq(struct ikglp_semaphore *sem,
1149 // Update shortest. 1240 // Update shortest.
1150 if(fq->count < sem->shortest_fifo_queue->count) 1241 if(fq->count < sem->shortest_fifo_queue->count)
1151 sem->shortest_fifo_queue = fq; 1242 sem->shortest_fifo_queue = fq;
1243 --(sem->nr_in_fifos);
1152 1244
1153 wait->cur_q = IKGLP_INVL; 1245 wait->cur_q = IKGLP_INVL;
1154} 1246}
@@ -1201,68 +1293,125 @@ static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem,
1201 ikglp_donee_heap_node_t, node); // re-add 1293 ikglp_donee_heap_node_t, node); // re-add
1202} 1294}
1203 1295
1204int ikglp_unlock(struct litmus_lock* l)
1205{
1206 struct ikglp_semaphore *sem = ikglp_from_lock(l);
1207 struct task_struct *t = current;
1208 struct task_struct *donee = NULL;
1209 struct task_struct *next = NULL;
1210 struct task_struct *new_on_fq = NULL;
1211 struct fifo_queue *fq_of_new_on_fq = NULL;
1212 1296
1213 ikglp_wait_state_t *other_donor_info = NULL;
1214 struct fifo_queue *to_steal = NULL;
1215 int need_steal_prio_reeval = 0;
1216 struct fifo_queue *fq;
1217 1297
1218#ifdef CONFIG_LITMUS_DGL_SUPPORT 1298void ikglp_grant_replica_to_next(struct ikglp_semaphore *sem, struct fifo_queue *fq)
1219 raw_spinlock_t *dgl_lock; 1299{
1220#endif 1300 wait_queue_t *wait;
1301 ikglp_wait_state_t *fq_wait;
1302 struct task_struct *next;
1221 1303
1222 unsigned long flags = 0, more_flags; 1304 BUG_ON(!waitqueue_active(&fq->wait));
1223 1305
1224 int err = 0; 1306 wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list);
1307 fq_wait = container_of(wait, ikglp_wait_state_t, fq_node);
1308 next = (struct task_struct*) wait->private;
1225 1309
1226 fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner. 1310 __remove_wait_queue(&fq->wait, wait);
1227 1311
1228 if (!fq) { 1312 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
1229 err = -EINVAL; 1313 ikglp_get_idx(sem, fq),
1230 goto out; 1314 next->comm, next->pid);
1231 }
1232 1315
1233 BUG_ON(l != tsk_rt(t)->outermost_lock); 1316 // migrate wait-state to fifo-memory.
1317 ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait);
1234 1318
1235#ifdef CONFIG_LITMUS_DGL_SUPPORT 1319 /* next becomes the resouce holder */
1236 dgl_lock = litmus->get_dgl_spinlock(t); 1320 fq->owner = next;
1321 tsk_rt(next)->blocked_lock = NULL;
1322
1323#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1324 if(sem->aff_obs) {
1325 sem->aff_obs->ops->notify_acquired(sem->aff_obs, fq, next);
1326 }
1237#endif 1327#endif
1238 lock_global_irqsave(dgl_lock, flags);
1239 raw_spin_lock_irqsave(&sem->real_lock, more_flags);
1240 lock_fine_irqsave(&sem->lock, flags);
1241 1328
1242 TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq)); 1329 /* determine new hp_waiter if necessary */
1330 if (next == fq->hp_waiter) {
1331 TRACE_TASK(next, "was highest-prio waiter\n");
1332 /* next has the highest priority --- it doesn't need to
1333 * inherit. However, we need to make sure that the
1334 * next-highest priority in the queue is reflected in
1335 * hp_waiter. */
1336 fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL);
1337 TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n",
1338 ikglp_get_idx(sem, fq),
1339 (fq->hp_waiter) ? fq->hp_waiter->comm : "null",
1340 (fq->hp_waiter) ? fq->hp_waiter->pid : 0);
1243 1341
1244 // Remove 't' from the heaps, but data in nodes will still be good. 1342 fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ?
1245 ikglp_del_global_list(sem, t, &fq->global_heap_node); 1343 effective_priority(fq->hp_waiter) : NULL;
1246 binheap_delete(&fq->donee_heap_node.node, &sem->donees);
1247 1344
1248 fq->owner = NULL; // no longer owned!! 1345 if (fq->hp_waiter)
1249 --(fq->count); 1346 TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n");
1250 if(fq->count < sem->shortest_fifo_queue->count) { 1347 else
1251 sem->shortest_fifo_queue = fq; 1348 TRACE("no further waiters\n");
1349
1350 raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
1351 binheap_add(&fq->nest.hp_binheap_node,
1352 &tsk_rt(next)->hp_blocked_tasks,
1353 struct nested_info,
1354 hp_binheap_node);
1355 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
1252 } 1356 }
1253 --(sem->nr_in_fifos); 1357 else {
1358 /* Well, if 'next' is not the highest-priority waiter,
1359 * then it (probably) ought to inherit the highest-priority
1360 * waiter's priority. */
1361 TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n",
1362 ikglp_get_idx(sem, fq),
1363 (fq->hp_waiter) ? fq->hp_waiter->comm : "null",
1364 (fq->hp_waiter) ? fq->hp_waiter->pid : 0);
1254 1365
1255#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 1366 raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
1256 if(sem->aff_obs) { 1367
1257 sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq, t); 1368 binheap_add(&fq->nest.hp_binheap_node,
1258 sem->aff_obs->ops->notify_freed(sem->aff_obs, fq, t); 1369 &tsk_rt(next)->hp_blocked_tasks,
1370 struct nested_info,
1371 hp_binheap_node);
1372
1373 /* It is possible that 'next' *should* be the hp_waiter, but isn't
1374 * because that update hasn't yet executed (update operation is
1375 * probably blocked on mutex->lock). So only inherit if the top of
1376 * 'next's top heap node is indeed the effective prio. of hp_waiter.
1377 * (We use fq->hp_waiter_eff_prio instead of effective_priority(hp_waiter)
1378 * since the effective priority of hp_waiter can change (and the
1379 * update has not made it to this lock).)
1380 */
1381 if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) ==
1382 fq->nest.hp_waiter_eff_prio))
1383 {
1384 if(fq->nest.hp_waiter_eff_prio)
1385 litmus->increase_prio(next, fq->nest.hp_waiter_eff_prio);
1386 else
1387 WARN_ON(1);
1388 }
1389
1390 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
1259 } 1391 }
1260#endif
1261 1392
1262 // Move the next request into the FQ and update heaps as needed. 1393 // wake up the new resource holder!
1263 // We defer re-evaluation of priorities to later in the function. 1394 wake_up_for_lock(next);
1264 if(fq->donee_heap_node.donor_info) { // move my donor to FQ 1395}
1265 ikglp_wait_state_t *donor_info = fq->donee_heap_node.donor_info; 1396
1397
1398void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1399 struct fifo_queue *fq,
1400 struct task_struct *t,
1401 ikglp_donee_heap_node_t *donee_node,
1402 int allow_stealing)
1403{
1404 struct task_struct *donee = NULL;
1405 struct task_struct *new_on_fq = NULL;
1406 struct fifo_queue *fq_of_new_on_fq = NULL;
1407
1408 ikglp_wait_state_t *other_donor_info = NULL;
1409 struct fifo_queue *to_steal = NULL;
1410 int need_steal_prio_reeval = 0;
1411 unsigned long flags = 0;
1412
1413 if (donee_node->donor_info) {
1414 ikglp_wait_state_t *donor_info = donee_node->donor_info;
1266 1415
1267 new_on_fq = donor_info->task; 1416 new_on_fq = donor_info->task;
1268 1417
@@ -1288,15 +1437,14 @@ int ikglp_unlock(struct litmus_lock* l)
1288 ikglp_get_idx(sem, fq_of_new_on_fq), 1437 ikglp_get_idx(sem, fq_of_new_on_fq),
1289 ikglp_get_idx(sem, fq)); 1438 ikglp_get_idx(sem, fq));
1290 1439
1291
1292 ikglp_move_donor_to_fq(sem, fq_of_new_on_fq, donor_info); 1440 ikglp_move_donor_to_fq(sem, fq_of_new_on_fq, donor_info);
1293 } 1441 }
1294 else if(!binheap_empty(&sem->donors)) { // No donor, so move any donor to FQ 1442 else if(!binheap_empty(&sem->donors)) { // No donor, so move any donor to FQ
1295 // Select a donor 1443 // Select a donor
1296#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 1444#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1297 other_donor_info = (sem->aff_obs) ? 1445 other_donor_info = (sem->aff_obs) ?
1298 sem->aff_obs->ops->advise_donor_to_fq(sem->aff_obs, fq) : 1446 sem->aff_obs->ops->advise_donor_to_fq(sem->aff_obs, fq) :
1299 binheap_top_entry(&sem->donors, ikglp_wait_state_t, node); 1447 binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
1300#else 1448#else
1301 other_donor_info = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node); 1449 other_donor_info = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
1302#endif 1450#endif
@@ -1327,7 +1475,6 @@ int ikglp_unlock(struct litmus_lock* l)
1327 ikglp_get_idx(sem, fq_of_new_on_fq), 1475 ikglp_get_idx(sem, fq_of_new_on_fq),
1328 ikglp_get_idx(sem, fq)); 1476 ikglp_get_idx(sem, fq));
1329 1477
1330
1331 ikglp_move_donor_to_fq(sem, fq_of_new_on_fq, other_donor_info); 1478 ikglp_move_donor_to_fq(sem, fq_of_new_on_fq, other_donor_info);
1332 } 1479 }
1333 else if(!binheap_empty(&sem->priority_queue)) { // No donors, so move PQ 1480 else if(!binheap_empty(&sem->priority_queue)) { // No donors, so move PQ
@@ -1359,7 +1506,7 @@ int ikglp_unlock(struct litmus_lock* l)
1359 1506
1360 ikglp_move_pq_to_fq(sem, fq_of_new_on_fq, pq_wait); 1507 ikglp_move_pq_to_fq(sem, fq_of_new_on_fq, pq_wait);
1361 } 1508 }
1362 else if(fq->count == 0) { // No PQ and this queue is empty, so steal. 1509 else if(allow_stealing && fq->count == 0) { // No PQ and this queue is empty, so steal.
1363 ikglp_wait_state_t *fq_wait; 1510 ikglp_wait_state_t *fq_wait;
1364 1511
1365 TRACE_TASK(t, "Looking to steal a request for fq %d...\n", 1512 TRACE_TASK(t, "Looking to steal a request for fq %d...\n",
@@ -1367,8 +1514,8 @@ int ikglp_unlock(struct litmus_lock* l)
1367 1514
1368#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 1515#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1369 fq_wait = (sem->aff_obs) ? 1516 fq_wait = (sem->aff_obs) ?
1370 sem->aff_obs->ops->advise_steal(sem->aff_obs, fq) : 1517 sem->aff_obs->ops->advise_steal(sem->aff_obs, fq) :
1371 ikglp_find_hp_waiter_to_steal(sem); 1518 ikglp_find_hp_waiter_to_steal(sem);
1372#else 1519#else
1373 fq_wait = ikglp_find_hp_waiter_to_steal(sem); 1520 fq_wait = ikglp_find_hp_waiter_to_steal(sem);
1374#endif 1521#endif
@@ -1395,27 +1542,6 @@ int ikglp_unlock(struct litmus_lock* l)
1395 else { // move no one 1542 else { // move no one
1396 } 1543 }
1397 1544
1398 // 't' must drop all priority and clean up data structures before hand-off.
1399
1400 // DROP ALL INHERITANCE. IKGLP MUST BE OUTER-MOST
1401 // This kills any inheritance from a donor.
1402 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
1403 {
1404 int count = 0;
1405 TRACE_TASK(t, "discarding _all_ inheritance because IKGLP is outermost\n");
1406 while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) {
1407 binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks,
1408 struct nested_info, hp_binheap_node);
1409 ++count;
1410 }
1411 if (count) {
1412 litmus->decrease_prio(t, NULL, 0);
1413 }
1414 WARN_ON(count > 2); // should not be greater than 2. only local fq inh and donation can be possible.
1415 }
1416 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
1417
1418
1419 1545
1420 // Now patch up other priorities. 1546 // Now patch up other priorities.
1421 // 1547 //
@@ -1479,147 +1605,94 @@ int ikglp_unlock(struct litmus_lock* l)
1479 1605
1480 // check for new HP waiter. 1606 // check for new HP waiter.
1481 if(new_on_fq) { 1607 if(new_on_fq) {
1482 if(fq == fq_of_new_on_fq) { 1608 ikglp_refresh_owners_prio_increase(new_on_fq, fq_of_new_on_fq, sem, flags); // unlocks sem->lock. reacquire it.
1483 // fq->owner is null, so just update the hp_waiter without locking. 1609 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
1484 if(new_on_fq == fq->hp_waiter) { 1610 }
1485 TRACE_TASK(t, "new_on_fq is already hp_waiter.\n",
1486 fq->hp_waiter->comm, fq->hp_waiter->pid);
1487 fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); // set this just to be sure...
1488 }
1489 else if(litmus->compare(new_on_fq, fq->hp_waiter)) {
1490 if(fq->hp_waiter)
1491 TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n",
1492 fq->hp_waiter->comm, fq->hp_waiter->pid);
1493 else
1494 TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");
1495
1496 fq->hp_waiter = new_on_fq;
1497 fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter);
1498 1611
1499 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", 1612 /* we moved a request to an empty FQ. wake it up */
1500 ikglp_get_idx(sem, fq), 1613 if(unlikely(fq_of_new_on_fq &&
1501 (fq->hp_waiter) ? fq->hp_waiter->comm : "null", 1614 fq_of_new_on_fq != fq &&
1502 (fq->hp_waiter) ? fq->hp_waiter->pid : 0); 1615 fq_of_new_on_fq->count == 1)) {
1503 } 1616 ikglp_grant_replica_to_next(sem, fq_of_new_on_fq);
1504 }
1505 else {
1506 ikglp_refresh_owners_prio_increase(new_on_fq, fq_of_new_on_fq, sem, flags); // unlocks sem->lock. reacquire it.
1507 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
1508 }
1509 } 1617 }
1618}
1510 1619
1620int ikglp_unlock(struct litmus_lock* l)
1621{
1622 struct ikglp_semaphore *sem = ikglp_from_lock(l);
1623 struct task_struct *t = current;
1624 struct fifo_queue *fq;
1511 1625
1512wake_kludge: 1626#ifdef CONFIG_LITMUS_DGL_SUPPORT
1513 if(waitqueue_active(&fq->wait)) 1627 raw_spinlock_t *dgl_lock;
1514 { 1628#endif
1515 wait_queue_t *wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list);
1516 ikglp_wait_state_t *fq_wait = container_of(wait, ikglp_wait_state_t, fq_node);
1517 next = (struct task_struct*) wait->private;
1518 1629
1519 __remove_wait_queue(&fq->wait, wait); 1630 unsigned long flags = 0, more_flags;
1520 1631
1521 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", 1632 int err = 0;
1522 ikglp_get_idx(sem, fq),
1523 next->comm, next->pid);
1524 1633
1525 // migrate wait-state to fifo-memory. 1634 fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner.
1526 ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait);
1527 1635
1528 /* next becomes the resouce holder */ 1636 if (!fq) {
1529 fq->owner = next; 1637 err = -EINVAL;
1530 tsk_rt(next)->blocked_lock = NULL; 1638 goto out;
1639 }
1531 1640
1532#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 1641#ifdef CONFIG_LITMUS_DGL_SUPPORT
1533 if(sem->aff_obs) { 1642 dgl_lock = litmus->get_dgl_spinlock(t);
1534 sem->aff_obs->ops->notify_acquired(sem->aff_obs, fq, next);
1535 }
1536#endif 1643#endif
1644 lock_global_irqsave(dgl_lock, flags);
1645 raw_spin_lock_irqsave(&sem->real_lock, more_flags);
1646 lock_fine_irqsave(&sem->lock, flags);
1537 1647
1538 /* determine new hp_waiter if necessary */ 1648 TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq));
1539 if (next == fq->hp_waiter) {
1540 TRACE_TASK(next, "was highest-prio waiter\n");
1541 /* next has the highest priority --- it doesn't need to
1542 * inherit. However, we need to make sure that the
1543 * next-highest priority in the queue is reflected in
1544 * hp_waiter. */
1545 fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL);
1546 TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n",
1547 ikglp_get_idx(sem, fq),
1548 (fq->hp_waiter) ? fq->hp_waiter->comm : "null",
1549 (fq->hp_waiter) ? fq->hp_waiter->pid : 0);
1550
1551 fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ?
1552 effective_priority(fq->hp_waiter) : NULL;
1553
1554 if (fq->hp_waiter)
1555 TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n");
1556 else
1557 TRACE("no further waiters\n");
1558 1649
1559 raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); 1650 // Remove 't' from the heaps, but data in nodes will still be good.
1651 ikglp_del_global_list(sem, t, &fq->global_heap_node);
1652 binheap_delete(&fq->donee_heap_node.node, &sem->donees);
1560 1653
1561// TRACE_TASK(next, "Heap Before:\n"); 1654 fq->owner = NULL; // no longer owned!!
1562// print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); 1655 --(fq->count);
1656 if(fq->count < sem->shortest_fifo_queue->count) {
1657 sem->shortest_fifo_queue = fq;
1658 }
1659 --(sem->nr_in_fifos);
1563 1660
1564 binheap_add(&fq->nest.hp_binheap_node, 1661#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1565 &tsk_rt(next)->hp_blocked_tasks, 1662 if(sem->aff_obs) {
1566 struct nested_info, 1663 sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq, t);
1567 hp_binheap_node); 1664 sem->aff_obs->ops->notify_freed(sem->aff_obs, fq, t);
1665 }
1666#endif
1568 1667
1569// TRACE_TASK(next, "Heap After:\n"); 1668 // 't' must drop all priority and clean up data structures before hand-off.
1570// print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
1571 1669
1572 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); 1670 // DROP ALL INHERITANCE. IKGLP MUST BE OUTER-MOST
1573 } 1671 // This kills any inheritance from a donor.
1574 else { 1672 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
1575 /* Well, if 'next' is not the highest-priority waiter, 1673 {
1576 * then it (probably) ought to inherit the highest-priority 1674 int count = 0;
1577 * waiter's priority. */ 1675
1578 TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n", 1676 TRACE_TASK(t, "discarding _all_ inheritance because IKGLP is outermost\n");
1579 ikglp_get_idx(sem, fq),
1580 (fq->hp_waiter) ? fq->hp_waiter->comm : "null",
1581 (fq->hp_waiter) ? fq->hp_waiter->pid : 0);
1582
1583 raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
1584
1585 binheap_add(&fq->nest.hp_binheap_node,
1586 &tsk_rt(next)->hp_blocked_tasks,
1587 struct nested_info,
1588 hp_binheap_node);
1589
1590 /* It is possible that 'next' *should* be the hp_waiter, but isn't
1591 * because that update hasn't yet executed (update operation is
1592 * probably blocked on mutex->lock). So only inherit if the top of
1593 * 'next's top heap node is indeed the effective prio. of hp_waiter.
1594 * (We use fq->hp_waiter_eff_prio instead of effective_priority(hp_waiter)
1595 * since the effective priority of hp_waiter can change (and the
1596 * update has not made it to this lock).)
1597 */
1598 if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) ==
1599 fq->nest.hp_waiter_eff_prio))
1600 {
1601 if(fq->nest.hp_waiter_eff_prio)
1602 litmus->increase_prio(next, fq->nest.hp_waiter_eff_prio);
1603 else
1604 WARN_ON(1);
1605 }
1606 1677
1607 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); 1678 while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) {
1679 binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks,
1680 struct nested_info, hp_binheap_node);
1681 ++count;
1608 } 1682 }
1609 1683
1610 // wake up the new resource holder! 1684 if (count)
1611 wake_up_for_lock(next); 1685 litmus->decrease_prio(t, NULL, 0);
1686 WARN_ON(count > 2); // should not be greater than 2. only local fq inh and donation can be possible.
1612 } 1687 }
1613 if(fq_of_new_on_fq && fq_of_new_on_fq != fq && fq_of_new_on_fq->count == 1) { 1688 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
1614 // The guy we promoted when to an empty FQ. (Why didn't stealing pick this up?)
1615 // Wake up the new guy too.
1616 1689
1617 BUG_ON(fq_of_new_on_fq->owner != NULL); 1690 // Move the next request into the FQ and update heaps as needed.
1691 // We defer re-evaluation of priorities to later in the function.
1692 ikglp_move_next_to_fq(sem, fq, t, &fq->donee_heap_node, 1);
1618 1693
1619 fq = fq_of_new_on_fq; 1694 if (waitqueue_active(&fq->wait))
1620 fq_of_new_on_fq = NULL; 1695 ikglp_grant_replica_to_next(sem, fq);
1621 goto wake_kludge;
1622 }
1623 1696
1624 unlock_fine_irqrestore(&sem->lock, flags); 1697 unlock_fine_irqrestore(&sem->lock, flags);
1625 raw_spin_unlock_irqrestore(&sem->real_lock, more_flags); 1698 raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
@@ -1632,6 +1705,108 @@ out:
1632} 1705}
1633 1706
1634 1707
1708
1709void ikglp_abort_request(struct ikglp_semaphore *sem, struct task_struct *t, unsigned long flags)
1710{
1711 ikglp_wait_state_t *wait = (ikglp_wait_state_t*)tsk_rt(t)->blocked_lock_data;
1712 ikglp_donee_heap_node_t *donee_info;
1713 struct task_struct *donee;
1714 struct fifo_queue *donee_fq;
1715 struct fifo_queue *fq = wait->fq;
1716
1717 BUG_ON(!wait);
1718
1719 /* drop the request from the proper IKGLP data structure and re-eval
1720 * priority relations */
1721 switch(wait->cur_q)
1722 {
1723 case IKGLP_PQ:
1724 // No one inherits from waiters in PQ. Just drop the request.
1725 __drop_from_pq(sem, wait);
1726 break;
1727
1728
1729 case IKGLP_FQ:
1730 ikglp_del_global_list(sem, t, &wait->global_heap_node);
1731 binheap_delete(&wait->donee_heap_node.node, &sem->donees);
1732
1733 /* remove the task from the FQ */
1734#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1735 if(sem->aff_obs)
1736 sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq, t);
1737#endif
1738 __drop_from_fq(sem, wait);
1739
1740 // Drop any and all inheritance t receives.
1741 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
1742 {
1743 int count = 0;
1744 TRACE_TASK(t, "discarding _all_ inheritance because IKGLP is outermost\n");
1745 while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) {
1746 binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks,
1747 struct nested_info, hp_binheap_node);
1748 ++count;
1749 }
1750 if (count)
1751 litmus->decrease_prio(t, NULL, 0);
1752 WARN_ON(count > 2); // should not be greater than 2. only local fq inh and donation can be possible.
1753 }
1754 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
1755
1756 ikglp_refresh_owners_prio_decrease(wait->donee_heap_node.fq, sem, flags, 1); // unlocks sem->lock. reacquire it.
1757 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
1758 ikglp_move_next_to_fq(sem, fq, t, &wait->donee_heap_node, 1);
1759 break;
1760
1761
1762 case IKGLP_DONOR:
1763 ikglp_del_global_list(sem, t, &wait->global_heap_node);
1764 __drop_from_donor(sem, wait);
1765
1766 /* update donee */
1767 donee_info = wait->donee_info;
1768 donee_info->donor_info = NULL; // clear the cross-link
1769 binheap_decrease(&donee_info->node, &sem->donees);
1770
1771 donee = donee_info->task;
1772 donee_fq = donee_info->fq;
1773 if (donee == donee_fq->owner) {
1774 TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n",
1775 donee->comm, donee->pid,
1776 ikglp_get_idx(sem, donee_fq));
1777 ikglp_remove_donation_from_owner(&wait->prio_donation.hp_binheap_node, donee_fq, sem, flags); // unlocks sem->lock. reacquire it.
1778 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
1779 }
1780 else {
1781 TRACE_TASK(t, "Donee %s/%d is blocked in of fq %d.\n",
1782 donee->comm, donee->pid,
1783 ikglp_get_idx(sem, donee_fq));
1784
1785 ikglp_remove_donation_from_fq_waiter(donee, &wait->prio_donation.hp_binheap_node);
1786 if(donee == donee_fq->hp_waiter) {
1787 TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n",
1788 donee->comm, donee->pid,
1789 ikglp_get_idx(sem, donee_fq));
1790
1791 donee_fq->hp_waiter = ikglp_find_hp_waiter(donee_fq, NULL);
1792 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
1793 ikglp_get_idx(sem, donee_fq),
1794 (donee_fq->hp_waiter) ? donee_fq->hp_waiter->comm : "null",
1795 (donee_fq->hp_waiter) ? donee_fq->hp_waiter->pid : 0);
1796
1797 ikglp_refresh_owners_prio_decrease(donee_fq, sem, flags, 1); // unlocks sem->lock. reacquire it.
1798 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
1799 }
1800 }
1801
1802 break;
1803 default:
1804 BUG();
1805 }
1806
1807 BUG_ON(wait->cur_q != IKGLP_INVL); /* state should now be invalid */
1808}
1809
1635void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t) 1810void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
1636{ 1811{
1637 /* 1812 /*
@@ -1642,9 +1817,6 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
1642 * 1817 *
1643 * step 1: first check that we are actually blocked. 1818 * step 1: first check that we are actually blocked.
1644 * step 2: remove our request from ANY data structure: 1819 * step 2: remove our request from ANY data structure:
1645 * - donor heap
1646 * - pq
1647 * - fq
1648 * step 3: reissue the request 1820 * step 3: reissue the request
1649 */ 1821 */
1650 1822
@@ -1657,75 +1829,23 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
1657 1829
1658 blocked_lock = tsk_rt(t)->blocked_lock; 1830 blocked_lock = tsk_rt(t)->blocked_lock;
1659 if (blocked_lock == l) { 1831 if (blocked_lock == l) {
1660 ikglp_wait_state_t *wait = (ikglp_wait_state_t*)tsk_rt(t)->blocked_lock_data; 1832 ikglp_wait_state_t *wait;
1661 ikglp_donee_heap_node_t *donee_info;
1662 struct task_struct *donee;
1663 struct fifo_queue *donee_fq;
1664 BUG_ON(!wait);
1665 1833
1666 /* drop the request from the proper IKGLP data structure and re-eval 1834 TRACE_TASK(t, "IKGLP before reject:\n");
1667 * priority relations */ 1835 __ikglp_dump_state(sem);
1668 switch(wait->cur_q)
1669 {
1670 case IKGLP_PQ:
1671 // No one inherits from waiters in PQ. Just drop the request.
1672 __drop_from_pq(sem, wait);
1673 break;
1674 case IKGLP_FQ:
1675 __drop_from_fq(sem, wait);
1676 ikglp_refresh_owners_prio_decrease(wait->donee_heap_node.fq, sem, flags, 1); // unlocks sem->lock. reacquire it.
1677 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
1678 break;
1679 case IKGLP_DONOR:
1680 __drop_from_donor(sem, wait);
1681 /* update donee */
1682 donee_info = wait->donee_info;
1683 donee_info->donor_info = NULL; // clear the cross-link
1684 binheap_decrease(&donee_info->node, &sem->donees);
1685
1686 donee = donee_info->task;
1687 donee_fq = donee_info->fq;
1688 if (donee == donee_fq->owner) {
1689 TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n",
1690 donee->comm, donee->pid,
1691 ikglp_get_idx(sem, donee_fq));
1692 ikglp_remove_donation_from_owner(&wait->prio_donation.hp_binheap_node, donee_fq, sem, flags); // unlocks sem->lock. reacquire it.
1693 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
1694 }
1695 else {
1696 TRACE_TASK(t, "Donee %s/%d is blocked in of fq %d.\n",
1697 donee->comm, donee->pid,
1698 ikglp_get_idx(sem, donee_fq));
1699 1836
1700 ikglp_remove_donation_from_fq_waiter(donee, &wait->prio_donation.hp_binheap_node); 1837 ikglp_abort_request(sem, t, flags);
1701 if(donee == donee_fq->hp_waiter) {
1702 TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n",
1703 donee->comm, donee->pid,
1704 ikglp_get_idx(sem, donee_fq));
1705 1838
1706 donee_fq->hp_waiter = ikglp_find_hp_waiter(donee_fq, NULL); 1839 TRACE_TASK(t, "IKGLP after reject (before reissue):\n");
1707 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", 1840 __ikglp_dump_state(sem);
1708 ikglp_get_idx(sem, donee_fq),
1709 (donee_fq->hp_waiter) ? donee_fq->hp_waiter->comm : "null",
1710 (donee_fq->hp_waiter) ? donee_fq->hp_waiter->pid : 0);
1711
1712 ikglp_refresh_owners_prio_decrease(donee_fq, sem, flags, 1); // unlocks sem->lock. reacquire it.
1713 lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!!
1714 }
1715 }
1716
1717 break;
1718 default:
1719 BUG();
1720 }
1721
1722 BUG_ON(wait->cur_q != IKGLP_INVL); /* state should now be invalid */
1723 1841
1724 /* now re-issue the request */ 1842 /* now re-issue the request */
1725 1843
1726 TRACE_TASK(t, "Reissuing a request for replica from lock %d.\n", l->ident); 1844 TRACE_TASK(t, "Reissuing a request for replica from lock %d.\n", l->ident);
1727 1845
1846 wait = (ikglp_wait_state_t*)tsk_rt(t)->blocked_lock_data;
1728 if(sem->nr_in_fifos < sem->max_in_fifos) { 1847 if(sem->nr_in_fifos < sem->max_in_fifos) {
1848
1729 struct fifo_queue *fq; 1849 struct fifo_queue *fq;
1730 1850
1731 // enqueue somwhere 1851 // enqueue somwhere
@@ -1753,6 +1873,9 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
1753 ikglp_enqueue_on_donor(sem, wait, flags); // unlocks sem->lock 1873 ikglp_enqueue_on_donor(sem, wait, flags); // unlocks sem->lock
1754 } 1874 }
1755 1875
1876 TRACE_TASK(t, "IKGLP after reissue:\n");
1877 __ikglp_dump_state(sem);
1878
1756 raw_spin_unlock_irqrestore(&sem->real_lock, more_flags); 1879 raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
1757 } 1880 }
1758 else if (blocked_lock) { 1881 else if (blocked_lock) {
diff --git a/litmus/locking.c b/litmus/locking.c
index 88905d4cff26..43afa920d3f1 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -157,18 +157,17 @@ asmlinkage long sys_litmus_unlock(int lock_od)
157 entry = get_entry_for_od(lock_od); 157 entry = get_entry_for_od(lock_od);
158 if (entry && is_lock(entry)) { 158 if (entry && is_lock(entry)) {
159 l = get_lock(entry); 159 l = get_lock(entry);
160
161 if (l == tsk_rt(current)->outermost_lock) {
162 TRACE_CUR("Lock %d assumed to be outermost lock.\n", l->ident);
163 tsk_rt(current)->outermost_lock = NULL;
164 }
165
160 TRACE_CUR("Attempts to unlock %d\n", l->ident); 166 TRACE_CUR("Attempts to unlock %d\n", l->ident);
161 err = l->ops->unlock(l); 167 err = l->ops->unlock(l);
162 if (!err) { 168 if (!err) {
163 sched_trace_lock(current, l->ident, 0); 169 sched_trace_lock(current, l->ident, 0);
164
165 TRACE_CUR("Unlocked %d\n", l->ident); 170 TRACE_CUR("Unlocked %d\n", l->ident);
166
167 if (tsk_rt(current)->outermost_lock == l) {
168 TRACE_CUR("Lock %d assumed to be outermost lock.\n", l->ident);
169 tsk_rt(current)->outermost_lock = NULL;
170 WARN_ON(holds_locks(current));
171 }
172 } 171 }
173 } 172 }
174 173