author    Glenn Elliott <gelliott@cs.unc.edu>  2013-02-08 20:11:35 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2013-02-08 20:11:35 -0500
commit    71d7f3404ed6d11497ead7d6e41a49188e094f74 (patch)
tree      3c25b061ec0df7afa4c354caecddafe142de7a05
parent    e55f02611ab64ec1163f3035257f0e2120f0f3ed (diff)
Make PRIOQ & DGLs work. Hangs with IKGLP, though.
-rw-r--r--  include/litmus/fifo_lock.h   |   1
-rw-r--r--  include/litmus/locking.h     |   6
-rw-r--r--  include/litmus/prioq_lock.h  |   1
-rw-r--r--  litmus/fifo_lock.c           |  56
-rw-r--r--  litmus/locking.c             | 181
-rw-r--r--  litmus/prioq_lock.c          | 493
-rw-r--r--  litmus/sched_cedf.c          |  13
7 files changed, 569 insertions, 182 deletions
diff --git a/include/litmus/fifo_lock.h b/include/litmus/fifo_lock.h
index 7f13863f11e6..0b2337b42155 100644
--- a/include/litmus/fifo_lock.h
+++ b/include/litmus/fifo_lock.h
@@ -30,6 +30,7 @@ static inline struct fifo_mutex* fifo_mutex_from_lock(struct litmus_lock* lock)
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 int fifo_mutex_is_owner(struct litmus_lock *l, struct task_struct *t);
+struct task_struct* fifo_mutex_get_owner(struct litmus_lock *l);
 int fifo_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
 void fifo_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
 #endif
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 660bfc7f8174..cbc99ee54020 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -11,7 +11,7 @@ struct nested_info
 	struct litmus_lock *lock;
 	struct task_struct *hp_waiter_eff_prio;
 	struct task_struct **hp_waiter_ptr;
-	struct task_struct **owner_ptr;
+//	struct task_struct **owner_ptr;
 	struct binheap_node hp_binheap_node;
 };
 
@@ -71,8 +71,7 @@ void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/)
 
 void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait);
 int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key);
-void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task);
-
+struct task_struct* __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait);
 
 int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait);
 #endif
@@ -131,6 +130,7 @@ struct litmus_lock_ops {
 	raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l);
 	int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
 	int (*is_owner)(struct litmus_lock *l, struct task_struct *t);
+	struct task_struct* (*get_owner)(struct litmus_lock *l);
 	void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
 
 	int (*dgl_can_quick_lock)(struct litmus_lock *l, struct task_struct *t);
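
Note: with the new get_owner op, generic DGL code can ask not only "does t own l?"
but also "is l held at all, and by whom?". A minimal sketch of how the two ops
combine (the helper below is hypothetical and not part of this patch;
select_next_lock() in litmus/locking.c open-codes the same test):

    /* Hypothetical helper: nonzero iff l is currently held by some task
     * other than t. Mirrors the open-coded test in select_next_lock(). */
    static inline int lock_held_by_other(struct litmus_lock *l,
                                         struct task_struct *t)
    {
            return !l->ops->is_owner(l, t) && (l->ops->get_owner(l) != NULL);
    }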
diff --git a/include/litmus/prioq_lock.h b/include/litmus/prioq_lock.h
index f3c11d241991..5c135ef0bdc6 100644
--- a/include/litmus/prioq_lock.h
+++ b/include/litmus/prioq_lock.h
@@ -32,6 +32,7 @@ static inline struct prioq_mutex* prioq_mutex_from_lock(struct litmus_lock* lock
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 int prioq_mutex_is_owner(struct litmus_lock *l, struct task_struct *t);
+struct task_struct* prioq_mutex_get_owner(struct litmus_lock *l);
 int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
 void prioq_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
 void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_lock,
diff --git a/litmus/fifo_lock.c b/litmus/fifo_lock.c
index be49cd3a58d2..cc3e1ab5a965 100644
--- a/litmus/fifo_lock.c
+++ b/litmus/fifo_lock.c
@@ -7,8 +7,6 @@
 
 #include <litmus/litmus_proc.h>
 
-//#include <litmus/edf_common.h>
-
 #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
 #include <litmus/gpu_affinity.h>
 #endif
@@ -22,33 +20,16 @@ static struct task_struct* fifo_mutex_find_hp_waiter(struct fifo_mutex *mutex,
 	struct list_head *pos;
 	struct task_struct *queued = NULL, *found = NULL;
 
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
-	dgl_wait_state_t *dgl_wait = NULL;
-#endif
-
 	list_for_each(pos, &mutex->wait.task_list) {
 		q = list_entry(pos, wait_queue_t, task_list);
 
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
-		if(q->func == dgl_wake_up) {
-			dgl_wait = (dgl_wait_state_t*) q->private;
-			if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) {
-				queued = dgl_wait->task;
-			}
-			else {
-				queued = NULL; // skip it.
-			}
-		}
-		else {
-			queued = (struct task_struct*) q->private;
-		}
-#else
-		queued = (struct task_struct*) q->private;
-#endif
+		queued = get_queued_task(q);
 
 		/* Compare task prios, find high prio task. */
-		//if (queued && queued != skip && edf_higher_prio(queued, found)) {
-		if (queued && queued != skip && litmus->compare(queued, found)) {
+		if (queued &&
+			(queued != skip) &&
+			(tsk_rt(queued)->blocked_lock == &mutex->litmus_lock) &&
+			litmus->compare(queued, found)) {
 			found = queued;
 		}
 	}
@@ -64,6 +45,12 @@ int fifo_mutex_is_owner(struct litmus_lock *l, struct task_struct *t)
 	return(mutex->owner == t);
 }
 
+struct task_struct* fifo_mutex_get_owner(struct litmus_lock *l)
+{
+	struct fifo_mutex *mutex = fifo_mutex_from_lock(l);
+	return(mutex->owner);
+}
+
 // return 1 if resource was immediately acquired.
 // Assumes mutex->lock is held.
 // Must set task state to TASK_UNINTERRUPTIBLE if task blocks.
@@ -110,12 +97,16 @@ void fifo_mutex_enable_priority(struct litmus_lock *l,
 	struct task_struct *owner = mutex->owner;
 	unsigned long flags = 0; // these are unused under DGL coarse-grain locking
 
+	/**************************************
+	 * This code looks like it supports fine-grain locking, but it does not!
+	 * TODO: Guarantee that mutex->lock is held by the caller to support fine-grain locking.
+	 **************************************/
+
 	BUG_ON(owner == t);
 
 	tsk_rt(t)->blocked_lock = l;
 	mb();
 
-	//if (edf_higher_prio(t, mutex->hp_waiter)) {
 	if (litmus->compare(t, mutex->hp_waiter)) {
 
 		struct task_struct *old_max_eff_prio;
@@ -141,7 +132,6 @@ void fifo_mutex_enable_priority(struct litmus_lock *l,
 			TRACE_TASK(t, "is new hp_waiter.\n");
 
 			if ((effective_priority(owner) == old_max_eff_prio) ||
-				//(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){
 				(litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){
 				new_prio = new_max_eff_prio;
 			}
@@ -234,7 +224,6 @@ int fifo_mutex_lock(struct litmus_lock* l)
 	__add_wait_queue_tail_exclusive(&mutex->wait, &wait);
 
 	/* check if we need to activate priority inheritance */
-	//if (edf_higher_prio(t, mutex->hp_waiter)) {
 	if (litmus->compare(t, mutex->hp_waiter)) {
 
 		struct task_struct *old_max_eff_prio;
@@ -260,7 +249,6 @@ int fifo_mutex_lock(struct litmus_lock* l)
 		TRACE_TASK(t, "is new hp_waiter.\n");
 
 		if ((effective_priority(owner) == old_max_eff_prio) ||
-			//(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){
 			(litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){
 			new_prio = new_max_eff_prio;
 		}
@@ -368,8 +356,6 @@ int fifo_mutex_unlock(struct litmus_lock* l)
 		 (effective_priority(t) == old_max_eff_prio)) )
 	{
 		// old_max_eff_prio > new_max_eff_prio
-
-		//if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) {
 		if(litmus->__compare(new_max_eff_prio, BASE, t, EFFECTIVE)) {
 			TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n",
 				new_max_eff_prio->comm, new_max_eff_prio->pid,
@@ -395,10 +381,7 @@ int fifo_mutex_unlock(struct litmus_lock* l)
 
 	/* check if there are jobs waiting for this resource */
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
-	__waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next);
-	if(dgl_wait) {
-		next = dgl_wait->task;
-	}
+	next = __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait);
 #else
 	next = __waitqueue_remove_first(&mutex->wait);
 #endif
@@ -434,7 +417,6 @@ int fifo_mutex_unlock(struct litmus_lock* l)
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	if(dgl_wait) {
 		select_next_lock_if_primary(l, dgl_wait);
-		//wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining);
 		--(dgl_wait->nr_remaining);
 		wake_up_task = (dgl_wait->nr_remaining == 0);
 	}
@@ -476,7 +458,6 @@ int fifo_mutex_unlock(struct litmus_lock* l)
 	{
 		if(dgl_wait && tsk_rt(next)->blocked_lock) {
 			BUG_ON(wake_up_task);
-			//if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) {
 			if(litmus->__compare(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) {
 				litmus->nested_increase_prio(next,
 					l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock.
@@ -557,7 +538,6 @@ void fifo_mutex_propagate_increase_inheritance(struct litmus_lock* l,
 
 	old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
 
-	//if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) {
 	if((t != mutex->hp_waiter) && litmus->compare(t, mutex->hp_waiter)) {
 		TRACE_TASK(t, "is new highest-prio waiter by propagation.\n");
 		mutex->hp_waiter = t;
@@ -580,7 +560,6 @@ void fifo_mutex_propagate_increase_inheritance(struct litmus_lock* l,
 	if(new_max_eff_prio != old_max_eff_prio) {
 		// new_max_eff_prio > old_max_eff_prio holds.
 		if ((effective_priority(owner) == old_max_eff_prio) ||
-			//(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) {
 			(litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))) {
 			TRACE_CUR("Propagating inheritance to holder of lock %d.\n",
 				l->ident);
@@ -675,7 +654,6 @@ void fifo_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
 		TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n",
 			l->ident);
 
-		//if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) {
 		if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) {
 			TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n",
 				(new_max_eff_prio) ? new_max_eff_prio->comm : "null",
diff --git a/litmus/locking.c b/litmus/locking.c
index 4fe572c28aea..0b5e162c0c02 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -234,8 +234,13 @@ void print_hp_waiters(struct binheap_node* n, int depth)
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 
+struct prioq_mutex;
+
 void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/)
 {
+	int start = dgl_wait->last_primary;
+	extern void __dump_prioq_lock_info(struct prioq_mutex *mutex);
+
 	/*
 	 We pick the next lock in reverse order. This causes inheritance propagation
 	 from locks received earlier to flow in the same direction as regular nested
@@ -244,30 +249,56 @@ void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/)
 
 	BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock);
 
-	//WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock);
+	// note reverse order
+	for(dgl_wait->last_primary = (dgl_wait->last_primary != 0) ? dgl_wait->last_primary - 1 : dgl_wait->size-1;
+		dgl_wait->last_primary != start;
+		dgl_wait->last_primary = (dgl_wait->last_primary != 0) ? dgl_wait->last_primary - 1 : dgl_wait->size-1)
+	{
+
+		struct litmus_lock *l = dgl_wait->locks[dgl_wait->last_primary];
+
+		if(!l->ops->is_owner(l, dgl_wait->task) &&
+		   l->ops->get_owner(l)) {
+
+			tsk_rt(dgl_wait->task)->blocked_lock =
+				dgl_wait->locks[dgl_wait->last_primary];
+			mb();
+
+			TRACE_TASK(dgl_wait->task, "New blocked lock is %d\n", l->ident);
+
+			l->ops->enable_priority(l, dgl_wait);
 
-	if (dgl_wait->last_primary == 0) {
-		/* loop around */
-		dgl_wait->last_primary = dgl_wait->size;
-	}
+			return;
+		}
+	}
 
+	// There was no one to push on. This can happen if the blocked task is
+	// behind a task that is idling a prioq-mutex.
+
 	// note reverse order
-	for(dgl_wait->last_primary = dgl_wait->last_primary - 1;
-		dgl_wait->last_primary >= 0;
-		--(dgl_wait->last_primary)){
-		if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner(
-			dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) {
+	dgl_wait->last_primary = start;
+	for(dgl_wait->last_primary = (dgl_wait->last_primary != 0) ? dgl_wait->last_primary - 1 : dgl_wait->size-1;
+		dgl_wait->last_primary != start;
+		dgl_wait->last_primary = (dgl_wait->last_primary != 0) ? dgl_wait->last_primary - 1 : dgl_wait->size-1)
+	{
+
+		struct litmus_lock *l = dgl_wait->locks[dgl_wait->last_primary];
+
+		if(!l->ops->is_owner(l, dgl_wait->task)) {
 
 			tsk_rt(dgl_wait->task)->blocked_lock =
 				dgl_wait->locks[dgl_wait->last_primary];
 			mb();
 
-			TRACE_CUR("New blocked lock is %d\n",
-				dgl_wait->locks[dgl_wait->last_primary]->ident);
+			TRACE_TASK(dgl_wait->task, "New blocked lock is %d\n", l->ident);
 
-			break;
+			l->ops->enable_priority(l, dgl_wait);
+
+			return;
 		}
 	}
+
+	BUG();
 }
 
 int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key)
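
Note: the rewritten scan is circular. Starting just below last_primary, it wraps
from index 0 around to size-1 and stops when it comes back to the starting slot,
so every lock except the one it started from is visited exactly once; the first
pass only considers locks that are actually held (get_owner() != NULL), and the
fallback pass drops that requirement before giving up with BUG(). A standalone
sketch of the traversal order, assuming size >= 1:

    /* Sketch only: visit every index except 'start' in reverse circular order. */
    int i = (start != 0) ? start - 1 : size - 1;
    while (i != start) {
            /* visit dgl_wait->locks[i] here */
            i = (i != 0) ? i - 1 : size - 1;
    }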
@@ -277,14 +308,13 @@ int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key)
 	return 1;
 }
 
-void __waitqueue_dgl_remove_first(wait_queue_head_t *wq,
-				dgl_wait_state_t** dgl_wait,
-				struct task_struct **task)
+struct task_struct* __waitqueue_dgl_remove_first(wait_queue_head_t *wq,
+				dgl_wait_state_t** dgl_wait)
 {
 	wait_queue_t *q;
+	struct task_struct *task = NULL;
 
 	*dgl_wait = NULL;
-	*task = NULL;
 
 	if (waitqueue_active(wq)) {
 		q = list_entry(wq->task_list.next,
@@ -292,13 +322,15 @@ void __waitqueue_dgl_remove_first(wait_queue_head_t *wq,
 
 		if(q->func == dgl_wake_up) {
 			*dgl_wait = (dgl_wait_state_t*) q->private;
+			task = (*dgl_wait)->task;
 		}
 		else {
-			*task = (struct task_struct*) q->private;
+			task = (struct task_struct*) q->private;
 		}
 
 		__remove_wait_queue(wq, q);
 	}
+	return task;
 }
 
 void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait)
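
Note: returning the dequeued task directly removes the dgl_wait-then-task dance
from every caller. A sketch of the new calling convention (cf. the
fifo_mutex_unlock() hunk above):

    dgl_wait_state_t *dgl_wait = NULL;
    struct task_struct *next;

    next = __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait);
    /* next is NULL if the queue was empty; if the head was a DGL request,
     * dgl_wait is also set and next == dgl_wait->task. */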
@@ -354,7 +386,7 @@ int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t
 
 		l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]);
 
-		BUG_ON(dgl_wait->task != *(l->nest.owner_ptr));
+		BUG_ON(!(l->ops->is_owner(l, dgl_wait->task)));
 	}
 
 	return 0; /* success */
@@ -456,15 +488,108 @@ all_acquired:
 	return 0;
 }
 
+
+
+static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
+{
+	int i;
+	unsigned long irqflags; //, dummyflags;
+	raw_spinlock_t *dgl_lock;
+	struct litmus_lock *l;
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
+	snprintf_dgl(dglstr, sizeof(dglstr), dgl_wait->locks, dgl_wait->size);
+	TRACE_CUR("Atomic locking DGL with size %d: %s\n", dgl_wait->size, dglstr);
+#endif
+
+	dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
+
+	BUG_ON(dgl_wait->task != current);
+
+	raw_spin_lock_irqsave(dgl_lock, irqflags);
+
+
+	dgl_wait->nr_remaining = dgl_wait->size;
+
+	for(i = 0; i < dgl_wait->size; ++i) {
+		struct litmus_lock *l = dgl_wait->locks[i];
+		l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i]); // this should be a forced enqueue if atomic DGLs are needed.
+	}
+
+	if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) {
+		/* Failed to acquire all locks at once.
+		 * Pick a lock to push on and suspend. */
+		TRACE_CUR("Could not atomically acquire all locks.\n");
+
+
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+		// KLUDGE: don't count this suspension as time in the gpu
+		// critical section
+		if(tsk_rt(dgl_wait->task)->held_gpus) {
+			tsk_rt(dgl_wait->task)->suspend_gpu_tracker_on_block = 1;
+		}
+#endif
+		// we are not the owner of any lock, so push on the last one in the DGL
+		// by default.
+
+		l = dgl_wait->locks[dgl_wait->size - 1];
+
+		TRACE_CUR("Activating priority inheritance on lock %d\n",
+			l->ident);
+
+		TS_DGL_LOCK_SUSPEND;
+
+		l->ops->enable_priority(l, dgl_wait);
+		dgl_wait->last_primary = dgl_wait->size - 1;
+
+		TRACE_CUR("Suspending for lock %d\n", l->ident);
+
+		raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
+
+		suspend_for_lock(); // suspend!!!
+
+		TS_DGL_LOCK_RESUME;
+
+		TRACE_CUR("Woken up from DGL suspension.\n");
+
+		goto all_acquired; // we should hold all locks when we wake up.
+	}
+	raw_spin_unlock_irqrestore(dgl_lock, irqflags);
+
+all_acquired:
+
+	dgl_wait->nr_remaining = 0;
+
+	// SANITY CHECK FOR TESTING
+	for(i = 0; i < dgl_wait->size; ++i) {
+		struct litmus_lock *l = dgl_wait->locks[i];
+		BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
+	}
+
+	TRACE_CUR("Acquired entire DGL\n");
+
+	return 0;
+}
+
+
+
+
 static int supports_dgl(struct litmus_lock *l)
 {
 	struct litmus_lock_ops* ops = l->ops;
-
 	return (ops->dgl_lock &&
 			ops->is_owner &&
+			ops->get_owner &&
 			ops->enable_priority);
 }
 
+static int needs_atomic_dgl(struct litmus_lock *l)
+{
+	struct litmus_lock_ops* ops = l->ops;
+	return (ops->dgl_quick_lock != NULL);
+}
+
 asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 {
 	struct task_struct *t = current;
@@ -472,6 +597,8 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 	int dgl_ods[MAX_DGL_SIZE];
 	int i;
 
+	int num_need_atomic = 0;
+
 	dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held.
 
 	if(dgl_size > MAX_DGL_SIZE || dgl_size < 1)
@@ -503,6 +630,10 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 					dgl_wait_state.locks[i]->ident);
 				goto out;
 			}
+
+			if (needs_atomic_dgl(dgl_wait_state.locks[i])) {
+				++num_need_atomic;
+			}
 		}
 		else {
 			TRACE_CUR("Invalid lock identifier\n");
@@ -510,11 +641,19 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 		}
 	}
 
+	if (num_need_atomic && num_need_atomic != dgl_size) {
+		TRACE_CUR("All locks in DGL must support atomic acquire if any one does.\n");
+		goto out;
+	}
+
 	dgl_wait_state.task = t;
 	dgl_wait_state.size = dgl_size;
 
 	TS_DGL_LOCK_START;
-	err = do_litmus_dgl_lock(&dgl_wait_state);
+	if (!num_need_atomic)
+		err = do_litmus_dgl_lock(&dgl_wait_state);
+	else
+		err = do_litmus_dgl_atomic_lock(&dgl_wait_state);
 
 	/* Note: task may have been suspended or preempted in between! Take
 	 * this into account when computing overheads. */
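
Note: from userspace nothing changes syntactically; one sys_litmus_dgl_lock()
call still covers the whole group, and the kernel now routes it to
do_litmus_dgl_atomic_lock() when every lock in the group provides
dgl_quick_lock(). A hedged sketch, assuming liblitmus-style wrappers
litmus_dgl_lock()/litmus_dgl_unlock() (the wrapper names are an assumption,
not part of this patch):

    /* Userspace sketch: lock two resources as one dynamic group. */
    int dgl[2] = { od_a, od_b };  /* object descriptors of the two locks */

    litmus_dgl_lock(dgl, 2);      /* returns once ALL locks are held */
    /* ... critical section using both resources ... */
    litmus_dgl_unlock(dgl, 2);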
diff --git a/litmus/prioq_lock.c b/litmus/prioq_lock.c
index 0091e4c1901e..ff6419ba1a13 100644
--- a/litmus/prioq_lock.c
+++ b/litmus/prioq_lock.c
@@ -12,10 +12,10 @@
 #include <litmus/gpu_affinity.h>
 #endif
 
-static void __attribute__((unused))
-__dump_lock_info(struct prioq_mutex *mutex)
+void __attribute__((unused))
+__dump_prioq_lock_info(struct prioq_mutex *mutex)
 {
 #ifdef CONFIG_SCHED_DEBUG_TRACE
 	TRACE_CUR("%s (mutex: %p):\n", mutex->litmus_lock.name, mutex);
 	TRACE_CUR("owner: %s/%d (inh: %s/%d)\n",
 		(mutex->owner) ?
@@ -48,12 +48,12 @@ __dump_lock_info(struct prioq_mutex *mutex)
 		int enabled = 1;
 #endif
 		q = list_entry(pos, wait_queue_t, task_list);
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 		if(q->func == dgl_wake_up) {
 			dgl_wait = (dgl_wait_state_t*) q->private;
 			blocked_task = dgl_wait->task;
 
 			if(tsk_rt(blocked_task)->blocked_lock != &mutex->litmus_lock)
 				enabled = 0;
 		}
@@ -92,14 +92,14 @@ static void __add_wait_queue_sorted(wait_queue_head_t *q, wait_queue_t *add_node)
 	struct task_struct *queued_task;
 	struct task_struct *add_task;
 	struct list_head *pos;
 
 	if (list_empty(pq)) {
 		list_add_tail(&add_node->task_list, pq);
 		return;
 	}
 
 	add_task = get_queued_task(add_node);
 
 	/* less priority than tail? if so, go to tail */
 	q_node = list_entry(pq->prev, wait_queue_t, task_list);
 	queued_task = get_queued_task(q_node);
@@ -107,7 +107,7 @@ static void __add_wait_queue_sorted(wait_queue_head_t *q, wait_queue_t *add_node)
 		list_add_tail(&add_node->task_list, pq);
 		return;
 	}
 
 	/* belongs at head or between nodes */
 	list_for_each(pos, pq) {
 		q_node = list_entry(pos, wait_queue_t, task_list);
@@ -117,8 +117,9 @@ static void __add_wait_queue_sorted(wait_queue_head_t *q, wait_queue_t *add_node)
 			return;
 		}
 	}
 
-	BUG();
+	WARN_ON(1);
+	list_add_tail(&add_node->task_list, pq);
 }
 
 static inline void __add_wait_queue_sorted_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
@@ -132,7 +133,7 @@ static void __prioq_increase_pos(struct prioq_mutex *mutex, struct task_struct *t)
 	wait_queue_t *q;
 	struct list_head *pos;
 	struct task_struct *queued;
 
 	/* TODO: Make this efficient instead of remove/add */
 	list_for_each(pos, &mutex->wait.task_list) {
 		q = list_entry(pos, wait_queue_t, task_list);
@@ -143,16 +144,17 @@ static void __prioq_increase_pos(struct prioq_mutex *mutex, struct task_struct *t)
 			return;
 		}
 	}
 
 	BUG();
 }
 
+
 static void __prioq_decrease_pos(struct prioq_mutex *mutex, struct task_struct *t)
 {
 	wait_queue_t *q;
 	struct list_head *pos;
 	struct task_struct *queued;
 
 	/* TODO: Make this efficient instead of remove/add */
 	list_for_each(pos, &mutex->wait.task_list) {
 		q = list_entry(pos, wait_queue_t, task_list);
@@ -163,10 +165,143 @@ static void __prioq_decrease_pos(struct prioq_mutex *mutex, struct task_struct *t)
 			return;
 		}
 	}
 
 	BUG();
 }
 
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struct *t)
+{
+// TODO:
+// (1) Increase position for 't' in all of its DGLs.
+// (2) Check to see if 't' can take the DGLs atomically
+// (3) If it can take the DGLs, do so.
+// (4) Cleanup?
+// (5) Wake up 't'
+
+
+	wait_queue_t *q;
+	struct list_head *pos;
+	struct task_struct *queued;
+	int i;
+	int ret = 0;
+
+	list_for_each(pos, &mutex->wait.task_list) {
+		q = list_entry(pos, wait_queue_t, task_list);
+		if(q->func == dgl_wake_up) {
+			// we're looking at a dgl request
+			dgl_wait_state_t *dgl_wait = (dgl_wait_state_t*) q->private;
+			queued = dgl_wait->task;
+
+			if (queued == t) // is it the one we're looking for?
+			{
+				// reposition on the other mutexes
+				for(i = 0; i < dgl_wait->size; ++i) {
+					struct prioq_mutex *pm = (struct prioq_mutex *) dgl_wait->locks[i];
+					if (pm != mutex)
+						__prioq_increase_pos(pm, t);
+				}
+				// reposition on this mutex
+				__remove_wait_queue(&mutex->wait, q);
+				__add_wait_queue_sorted(&mutex->wait, q);
+
+
+				if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) {
+					/* it can't take the lock. do nothing. */
+				}
+				else {
+					TRACE_CUR("%s/%d can take its entire DGL atomically via inheritance!\n",
+						dgl_wait->task->comm, dgl_wait->task->pid);
+
+					/* we took the lock! we've already been removed from mutex->wait.task_list */
+
+					TRACE_TASK(t, "waking up since it is no longer blocked.\n");
+
+					tsk_rt(t)->blocked_lock = NULL;
+					mb();
+
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+					// re-enable tracking
+					if(tsk_rt(t)->held_gpus) {
+						tsk_rt(t)->suspend_gpu_tracker_on_block = 0;
+					}
+#endif
+					wake_up_process(t);
+					ret = 1;
+				}
+				break;
+			}
+		}
+		else {
+			// not a dgl request.
+			queued = (struct task_struct*) q->private;
+			if (queued == t) { // is this the one we're looking for?
+				// if so, reposition it.
+				__remove_wait_queue(&mutex->wait, q);
+				__add_wait_queue_sorted(&mutex->wait, q);
+				break;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static void __prioq_dgl_decrease_pos(struct prioq_mutex *mutex, struct task_struct *t)
+{
+	// TODO:
+	// (1) Decrease position for 't' in all of its DGLs.
+	// (2) Check to see if 't' can take the DGLs atomically
+	// (3) If it can take the DGLs, do so.
+	// (4) Cleanup?
+	// (5) Wake up 't'
+
+
+	wait_queue_t *q;
+	struct list_head *pos;
+	struct task_struct *queued;
+	int i;
+
+	list_for_each(pos, &mutex->wait.task_list) {
+		q = list_entry(pos, wait_queue_t, task_list);
+		if(q->func == dgl_wake_up) {
+			// we're looking at a dgl request
+			dgl_wait_state_t *dgl_wait = (dgl_wait_state_t*) q->private;
+			queued = dgl_wait->task;
+
+			if (queued == t) // is it the one we're looking for?
+			{
+				// reposition on the other mutexes
+				for(i = 0; i < dgl_wait->size; ++i) {
+					struct prioq_mutex *pm = (struct prioq_mutex *)dgl_wait->locks[i];
+					if (pm != mutex)
+						__prioq_decrease_pos(pm, t);
+				}
+				// reposition on this mutex
+				__remove_wait_queue(&mutex->wait, q);
+				__add_wait_queue_sorted(&mutex->wait, q);
+				return;
+			}
+		}
+		else {
+			// not a dgl request.
+			queued = (struct task_struct*) q->private;
+			if (queued == t) { // is this the one we're looking for?
+				// if so, reposition it.
+				__remove_wait_queue(&mutex->wait, q);
+				__add_wait_queue_sorted(&mutex->wait, q);
+				return;
+			}
+		}
+	}
+
+	BUG();
+}
+#endif
+
+
+
+
 
 /* caller is responsible for locking */
 static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mutex,
@@ -182,7 +317,10 @@ static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mutex,
 		queued = get_queued_task(q);
 
 		/* Compare task prios, find high prio task. */
-		if (queued && queued != skip && litmus->compare(queued, found)) {
+		if (queued &&
+			(queued != skip) &&
+			(tsk_rt(queued)->blocked_lock == &mutex->litmus_lock) &&
+			litmus->compare(queued, found)) {
 			found = queued;
 		}
 	}
@@ -198,6 +336,12 @@ int prioq_mutex_is_owner(struct litmus_lock *l, struct task_struct *t)
 	return(mutex->owner == t);
 }
 
+struct task_struct* prioq_mutex_get_owner(struct litmus_lock *l)
+{
+	struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
+	return(mutex->owner);
+}
+
 // return 1 if resource was immediately acquired.
 // Assumes mutex->lock is held.
 // Must set task state to TASK_UNINTERRUPTIBLE if task blocks.
@@ -211,31 +355,16 @@ int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait,
 
 	BUG_ON(t != current);
 
-	if (mutex->owner) {
-		TRACE_TASK(t, "Enqueuing on lock %d (held by %s/%d).\n",
-			l->ident, mutex->owner->comm, mutex->owner->pid);
 
-		init_dgl_waitqueue_entry(wq_node, dgl_wait);
+	init_dgl_waitqueue_entry(wq_node, dgl_wait);
 
-		set_task_state(t, TASK_UNINTERRUPTIBLE);
-		__add_wait_queue_sorted_exclusive(&mutex->wait, wq_node);
-	} else {
-		TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident);
-
-		/* it's ours now */
-		mutex->owner = t;
-
-		raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
-		binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks,
-			struct nested_info, hp_binheap_node);
-		raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
-
-		acquired_immediatly = 1;
-	}
+	set_task_state(t, TASK_UNINTERRUPTIBLE);
+	__add_wait_queue_sorted_exclusive(&mutex->wait, wq_node);
 
 	return acquired_immediatly;
 }
 
+
 void prioq_mutex_enable_priority(struct litmus_lock *l,
 				dgl_wait_state_t* dgl_wait)
 {
@@ -244,11 +373,23 @@ void prioq_mutex_enable_priority(struct litmus_lock *l,
 	struct task_struct *owner = mutex->owner;
 	unsigned long flags = 0; // these are unused under DGL coarse-grain locking
 
+	/**************************************
+	 * This code looks like it supports fine-grain locking, but it does not!
+	 * TODO: Guarantee that mutex->lock is held by the caller to support fine-grain locking.
+	 **************************************/
+
 	BUG_ON(owner == t);
 
 	tsk_rt(t)->blocked_lock = l;
 	mb();
 
+	TRACE_TASK(t, "Enabling prio on lock %d. I am %s/%d : cur hp_waiter is %s/%d.\n",
+		l->ident,
+		(t) ? t->comm : "null",
+		(t) ? t->pid : 0,
+		(mutex->hp_waiter) ? mutex->hp_waiter->comm : "null",
+		(mutex->hp_waiter) ? mutex->hp_waiter->pid : 0);
+
 	if (litmus->compare(t, mutex->hp_waiter)) {
 		struct task_struct *old_max_eff_prio;
 		struct task_struct *new_max_eff_prio;
@@ -260,6 +401,12 @@ void prioq_mutex_enable_priority(struct litmus_lock *l,
 		else
 			TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");
 
+
+		if (!owner) {
+			TRACE_TASK(t, "Enabling priority, but this lock %d is idle.\n", l->ident);
+			goto out;
+		}
+
 		raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 
 		old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
@@ -289,26 +436,55 @@ void prioq_mutex_enable_priority(struct litmus_lock *l,
 			raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 			unlock_fine_irqrestore(&mutex->lock, flags);
 		}
+
+		return;
 	}
-	else {
-		TRACE_TASK(t, "no change in hp_waiter.\n");
-		unlock_fine_irqrestore(&mutex->lock, flags);
-	}
+
+	TRACE_TASK(t, "no change in hp_waiter.\n");
+
+out:
+	unlock_fine_irqrestore(&mutex->lock, flags);
 }
 
 static void select_next_lock_if_primary(struct litmus_lock *l,
 				dgl_wait_state_t *dgl_wait)
 {
-	if(tsk_rt(dgl_wait->task)->blocked_lock == l) {
+	struct task_struct *t = dgl_wait->task;
+
+	if(tsk_rt(t)->blocked_lock == l) {
+		struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
+
 		TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n",
-			l->ident, dgl_wait->task->comm, dgl_wait->task->pid);
-		tsk_rt(dgl_wait->task)->blocked_lock = NULL;
+			l->ident, t->comm, t->pid);
+
+		tsk_rt(t)->blocked_lock = NULL;
 		mb();
+
+
+		/* determine new hp_waiter if necessary */
+		if (t == mutex->hp_waiter) {
+
+			TRACE_TASK(t, "Deciding to not be hp waiter on lock %d any more.\n", l->ident);
+			/* next has the highest priority --- it doesn't need to
+			 * inherit. However, we need to make sure that the
+			 * next-highest priority in the queue is reflected in
+			 * hp_waiter. */
+			mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t);
+			l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
+				effective_priority(mutex->hp_waiter) :
+				NULL;
+
+			if (mutex->hp_waiter)
+				TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
+			else
+				TRACE("no further waiters\n");
+		}
+
 		select_next_lock(dgl_wait /*, l*/); // pick the next lock to be blocked on
 	}
 	else {
 		TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n",
-			l->ident, dgl_wait->task->comm, dgl_wait->task->pid);
+			l->ident, t->comm, t->pid);
 	}
 }
 #endif
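
Note: the hp_waiter hand-off added here repeats an idiom that also appears in
prioq_mutex_dgl_quick_lock() and prioq_mutex_unlock(). A sketch of what a shared
helper could look like (hypothetical; the patch open-codes it each time):

    /* Hypothetical refactor: recompute hp_waiter and the priority it donates,
     * skipping task 'skip'. */
    static void prioq_refresh_hp_waiter(struct prioq_mutex *mutex,
                                        struct litmus_lock *l,
                                        struct task_struct *skip)
    {
            mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, skip);
            l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
                    effective_priority(mutex->hp_waiter) : NULL;
    }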
@@ -316,13 +492,30 @@ static void select_next_lock_if_primary(struct litmus_lock *l,
 
 
 
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 
 int prioq_mutex_dgl_can_quick_lock(struct litmus_lock *l, struct task_struct *t)
 {
 	struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
 
-	if(!mutex->owner && mutex->hp_waiter == t) {
+	if(!mutex->owner) {
 		wait_queue_t *front = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
 		struct task_struct *at_front = get_queued_task(front);
 		if(t == at_front) {
@@ -336,40 +529,68 @@ void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_lock,
 				struct task_struct* t, wait_queue_t *q)
 {
 	struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
 
 	BUG_ON(mutex->owner);
-	BUG_ON(mutex->hp_waiter != t);
 	BUG_ON(t != get_queued_task(list_entry(mutex->wait.task_list.next, wait_queue_t, task_list)));
 
 
 	mutex->owner = t;
 
 	if (l != cur_lock) {
 		/* we have to update the state of the other lock for it */
 		__remove_wait_queue(&mutex->wait, q);
 
 		mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t);
 		l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
 			effective_priority(mutex->hp_waiter) :
 			NULL;
 
 		if (mutex->hp_waiter)
 			TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
 		else
 			TRACE("no further waiters\n");
 
 		raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
 		binheap_add(&l->nest.hp_binheap_node,
 			&tsk_rt(t)->hp_blocked_tasks,
 			struct nested_info, hp_binheap_node);
 
 		raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
 	}
+	else {
+		/* the unlock call that triggered the quick_lock call will handle
+		 * the acquire of cur_lock.
+		 */
+	}
 }
 #endif
 
 
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
 int prioq_mutex_lock(struct litmus_lock* l)
 {
 	struct task_struct *t = current;
@@ -394,9 +615,12 @@ int prioq_mutex_lock(struct litmus_lock* l)
 
 	/* block if there is an owner, or if hp_waiter is blocked for DGL and
 	 * prio(t) < prio(hp_waiter) */
-	if (mutex->owner) {
+	if (mutex->owner ||
+		(waitqueue_active(&mutex->wait) && litmus->compare(mutex->hp_waiter, t))) {
 		TRACE_TASK(t, "Blocking on lock %d (held by %s/%d).\n",
-			l->ident, mutex->owner->comm, mutex->owner->pid);
+			l->ident,
+			(mutex->owner) ? mutex->owner->comm : "null",
+			(mutex->owner) ? mutex->owner->pid : 0);
 
 #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
 		// KLUDGE: don't count this suspension as time in the critical gpu
@@ -421,7 +645,9 @@ int prioq_mutex_lock(struct litmus_lock* l)
 		__add_wait_queue_sorted_exclusive(&mutex->wait, &wait);
 
 		/* check if we need to activate priority inheritance */
-		if (litmus->compare(t, mutex->hp_waiter)) {
+		/* We can't be the hp waiter if there is no owner - task waiting for
+		 * the full DGL must be the hp_waiter. */
+		if (owner && litmus->compare(t, mutex->hp_waiter)) {
 
 			struct task_struct *old_max_eff_prio;
 			struct task_struct *new_max_eff_prio;
@@ -437,10 +663,10 @@ int prioq_mutex_lock(struct litmus_lock* l)
 
 			old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
 			mutex->hp_waiter = t;
 
 			TRACE_TASK(t, "prioq_mutex %d state after enqueue in priority queue\n", l->ident);
-			__dump_lock_info(mutex);
+			__dump_prioq_lock_info(mutex);
 
 			l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
 			binheap_decrease(&l->nest.hp_binheap_node,
 				&tsk_rt(owner)->hp_blocked_tasks);
@@ -471,8 +697,8 @@ int prioq_mutex_lock(struct litmus_lock* l)
 			TRACE_TASK(t, "no change in hp_waiter.\n");
 
 			TRACE_TASK(t, "prioq_mutex %d state after enqueue in priority queue\n", l->ident);
-			__dump_lock_info(mutex);
+			__dump_prioq_lock_info(mutex);
 
 			unlock_fine_irqrestore(&mutex->lock, flags);
 		}
 
@@ -525,8 +751,6 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 
 	struct task_struct *old_max_eff_prio;
 
-	int wake_up_task = 1;
-
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	dgl_wait_state_t *dgl_wait = NULL;
 	raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
@@ -584,14 +808,40 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
 
-	/* check if there are jobs waiting for this resource */
+
+
+	mutex->owner = NULL;
+
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
-	__waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next);
-	if(dgl_wait) {
-		next = dgl_wait->task;
+	if(waitqueue_active(&mutex->wait)) {
+		/* Priority queue-based locks must be _acquired_ atomically under DGLs
+		 * in order to avoid deadlock. We leave this lock idle momentarily if the
+		 * DGL waiter can't acquire all locks at once.
+		 */
+		wait_queue_t *q = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
+		if(q->func == dgl_wake_up) {
+			dgl_wait = (dgl_wait_state_t*) q->private;
+
+			TRACE_CUR("Checking to see if DGL waiter %s/%d can take its locks\n",
+				dgl_wait->task->comm, dgl_wait->task->pid);
+
+			if(__attempt_atomic_dgl_acquire(l, dgl_wait)) {
+				/* failed. can't take this lock yet. we remain at the head of the
+				 * prioq; allow hp requests in the future to go ahead of us. */
+				select_next_lock_if_primary(l, dgl_wait);
+				goto out;
+			}
+			else {
+				TRACE_CUR("%s/%d can take its entire DGL atomically.\n",
+					dgl_wait->task->comm, dgl_wait->task->pid);
+			}
+		}
+
+		/* remove the first */
+		next = __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait);
 	}
 #else
-
+	/* check if there are jobs waiting for this resource */
 	next = __waitqueue_remove_first(&mutex->wait);
 #endif
 	if (next) {
@@ -623,14 +873,6 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 				&tsk_rt(next)->hp_blocked_tasks,
 				struct nested_info, hp_binheap_node);
 
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
-			if(dgl_wait) {
-				select_next_lock_if_primary(l, dgl_wait);
-				--(dgl_wait->nr_remaining);
-				wake_up_task = (dgl_wait->nr_remaining == 0);
-			}
-#endif
-
 			raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
 		}
 		else {
@@ -678,28 +920,19 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 #endif
 	}
 
-		if(wake_up_task) {
-			TRACE_TASK(next, "waking up since it is no longer blocked.\n");
+		TRACE_TASK(next, "waking up since it is no longer blocked.\n");
 
-			tsk_rt(next)->blocked_lock = NULL;
-			mb();
+		tsk_rt(next)->blocked_lock = NULL;
+		mb();
 
-#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
-			// re-enable tracking
-			if(tsk_rt(next)->held_gpus) {
-				tsk_rt(next)->suspend_gpu_tracker_on_block = 0;
-			}
-#endif
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+		// re-enable tracking
+		if(tsk_rt(next)->held_gpus) {
+			tsk_rt(next)->suspend_gpu_tracker_on_block = 0;
+		}
+#endif
 
-			wake_up_process(next);
-		}
-		else {
-			TRACE_TASK(next, "is still blocked.\n");
-		}
-	}
-	else {
-		/* becomes available */
-		mutex->owner = NULL;
+		wake_up_process(next);
 	}
 
 	unlock_fine_irqrestore(&mutex->lock, flags);
@@ -726,14 +959,34 @@ void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l,
 	lock_fine(&mutex->lock);
 	unlock_fine(to_unlock);
 
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	{
+		int woke_up = __prioq_dgl_increase_pos(mutex, t);
+		if (woke_up) {
+			/* t got the DGL. it is not blocked anywhere. just return. */
+			unlock_fine_irqrestore(&mutex->lock, irqflags);
+			return;
+		}
+	}
+#else
 	__prioq_increase_pos(mutex, t);
-
+#endif
+
 	if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked
 		struct task_struct *owner = mutex->owner;
 
 		struct task_struct *old_max_eff_prio;
 		struct task_struct *new_max_eff_prio;
 
+		if (!owner) {
+			TRACE_TASK(t, "Owner on PRIOQ lock %d is null. Don't propagate.\n", l->ident);
+			if(t == mutex->hp_waiter) {
+				// reflect the changed prio.
+				l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
+			}
+			return;
+		}
+
 		raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 
 		old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
@@ -741,17 +994,17 @@ void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l,
 		if((t != mutex->hp_waiter) && litmus->compare(t, mutex->hp_waiter)) {
 			TRACE_TASK(t, "is new highest-prio waiter by propagation.\n");
 			mutex->hp_waiter = t;
 
 			TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident);
-			__dump_lock_info(mutex);
+			__dump_prioq_lock_info(mutex);
 		}
 		else {
 			TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident);
-			__dump_lock_info(mutex);
+			__dump_prioq_lock_info(mutex);
 		}
 
 		if(t == mutex->hp_waiter) {
-			// reflect the decreased priority in the heap node.
+			// reflect the increased priority in the heap node.
 			l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
 
 			BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node));
@@ -791,10 +1044,10 @@ void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l,
 	}
 	else {
 		struct litmus_lock *still_blocked;
 
 		TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident);
-		__dump_lock_info(mutex);
+		__dump_prioq_lock_info(mutex);
 
 		still_blocked = tsk_rt(t)->blocked_lock;
 
 		TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident);
@@ -836,8 +1089,12 @@ void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
 	lock_fine(&mutex->lock);
 	unlock_fine(to_unlock);
 
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	__prioq_dgl_decrease_pos(mutex, t);
+#else
 	__prioq_decrease_pos(mutex, t);
-
+#endif
+
 	if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked
 		if(t == mutex->hp_waiter) {
 			struct task_struct *owner = mutex->owner;
@@ -845,16 +1102,23 @@ void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
 			struct task_struct *old_max_eff_prio;
 			struct task_struct *new_max_eff_prio;
 
+			if (!owner) {
+				TRACE_TASK(t, "Owner on PRIOQ lock %d is null. Don't propagate.\n", l->ident);
+				// reflect the changed prio.
+				l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
+				return;
+			}
+
 			raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 
 			old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
 
 			binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
 			mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, NULL);
 
 			TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident);
-			__dump_lock_info(mutex);
+			__dump_prioq_lock_info(mutex);
 
 			l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
 				effective_priority(mutex->hp_waiter) : NULL;
 			binheap_add(&l->nest.hp_binheap_node,
@@ -904,18 +1168,18 @@ void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
 		}
 		else {
 			TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident);
-			__dump_lock_info(mutex);
+			__dump_prioq_lock_info(mutex);
 
 			TRACE_TASK(t, "is not hp_waiter. No propagation.\n");
 			unlock_fine_irqrestore(&mutex->lock, irqflags);
 		}
 	}
 	else {
 		struct litmus_lock *still_blocked;
 
 		TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident);
-		__dump_lock_info(mutex);
+		__dump_prioq_lock_info(mutex);
 
 		still_blocked = tsk_rt(t)->blocked_lock;
 
 		TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident);
@@ -1153,8 +1417,7 @@ struct litmus_lock* prioq_mutex_new(struct litmus_lock_ops* ops)
 #endif
 
 	((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter;
-	((struct litmus_lock*)mutex)->nest.owner_ptr = &mutex->owner;
-
+
 	((struct litmus_lock*)mutex)->proc = &prioq_proc_ops;
 
 	return &mutex->litmus_lock;
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 61d682d9f415..9b6d25f11c93 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -1542,7 +1542,11 @@ static struct litmus_lock_ops cedf_fifo_mutex_lock_ops = {
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	.dgl_lock = fifo_mutex_dgl_lock,
 	.is_owner = fifo_mutex_is_owner,
+	.get_owner = fifo_mutex_get_owner,
 	.enable_priority = fifo_mutex_enable_priority,
+
+	.dgl_can_quick_lock = NULL,
+	.dgl_quick_lock = NULL,
 #endif
 };
 
@@ -1558,15 +1562,16 @@ static struct litmus_lock_ops cedf_prioq_mutex_lock_ops = {
 	.unlock = prioq_mutex_unlock,
 	.close = prioq_mutex_close,
 	.deallocate = prioq_mutex_free,
 
 	.propagate_increase_inheritance = prioq_mutex_propagate_increase_inheritance,
 	.propagate_decrease_inheritance = prioq_mutex_propagate_decrease_inheritance,
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	.dgl_lock = prioq_mutex_dgl_lock,
 	.is_owner = prioq_mutex_is_owner,
+	.get_owner = prioq_mutex_get_owner,
 	.enable_priority = prioq_mutex_enable_priority,
 
 	.dgl_can_quick_lock = prioq_mutex_dgl_can_quick_lock,
 	.dgl_quick_lock = prioq_mutex_dgl_quick_lock,
 #endif
@@ -1637,7 +1642,7 @@ static long cedf_allocate_lock(struct litmus_lock **lock, int type,
 		case PRIOQ_MUTEX:
 			*lock = cedf_new_prioq_mutex();
 			break;
 
 		case IKGLP_SEM:
 			*lock = cedf_new_ikglp(args);
 			break;
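
Note: taken together, the plugin wiring above defines the contract a lock type
must meet after this patch: supports_dgl() requires dgl_lock, is_owner,
get_owner and enable_priority, while a non-NULL dgl_quick_lock opts the type
into the atomic-acquire path (needs_atomic_dgl()). An illustrative ops table
for a hypothetical lock type (the mylock_* names are placeholders):

    static struct litmus_lock_ops mylock_ops = {
            /* ... lock/unlock/close/deallocate and inheritance hooks ... */
    #ifdef CONFIG_LITMUS_DGL_SUPPORT
            .dgl_lock           = mylock_dgl_lock,        /* required by supports_dgl() */
            .is_owner           = mylock_is_owner,        /* required */
            .get_owner          = mylock_get_owner,       /* required as of this patch */
            .enable_priority    = mylock_enable_priority, /* required */

            /* leave both NULL for the incremental DGL path (like the FIFO ops
             * above); define both for the atomic path (like the PRIOQ ops). */
            .dgl_can_quick_lock = NULL,
            .dgl_quick_lock     = NULL,
    #endif
    };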