-rw-r--r--  include/litmus/locking.h    |  16
-rw-r--r--  include/litmus/prioq_lock.h |   1
-rw-r--r--  litmus/ikglp_lock.c         |   4
-rw-r--r--  litmus/locking.c            |  92
-rw-r--r--  litmus/prioq_lock.c         | 151
-rw-r--r--  litmus/sched_cedf.c         |  40
6 files changed, 152 insertions(+), 152 deletions(-)
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index b1024e397f67..fc437811d2b6 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -11,7 +11,6 @@ struct nested_info
 	struct litmus_lock *lock;
 	struct task_struct *hp_waiter_eff_prio;
 	struct task_struct **hp_waiter_ptr;
-//	struct task_struct **owner_ptr;
 	struct binheap_node hp_binheap_node;
 };
 
@@ -134,24 +133,31 @@ struct litmus_lock_ops {
 	/* The lock is no longer being referenced (mandatory method). */
 	lock_free_t deallocate;
 
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
 	void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
 #endif
-
-
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l);
 	int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
 	int (*is_owner)(struct litmus_lock *l, struct task_struct *t);
 	struct task_struct* (*get_owner)(struct litmus_lock *l);
 	void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
 
 	int (*dgl_can_quick_lock)(struct litmus_lock *l, struct task_struct *t);
 	void (*dgl_quick_lock)(struct litmus_lock *l, struct litmus_lock *cur_lock,
 			struct task_struct* t, wait_queue_t *q);
 #endif
+
+	/* all flags at the end */
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	int supports_nesting:1;
+#endif
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	int supports_dgl:1;
+	int requires_atomic_dgl:1;
+#endif
 };
 
 
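
The locking.h change above replaces runtime capability probing with static per-lock flags. A minimal sketch of how a plugin is expected to fill them in and how callers test them (the ops table below is a hypothetical example; the field names are those introduced by this diff):

	static struct litmus_lock_ops example_mutex_lock_ops = {
		/* ... lock/unlock/deallocate and the other methods ... */
	#ifdef CONFIG_LITMUS_DGL_SUPPORT
		.supports_dgl        = 1,  /* may take part in a DGL request */
		.requires_atomic_dgl = 0,  /* no all-or-nothing acquisition required */
	#endif
	#ifdef CONFIG_LITMUS_NESTED_LOCKING
		.supports_nesting    = 1,  /* participates in nested priority propagation */
	#endif
	};

	/* callers now test the flag directly instead of probing function pointers, e.g.: */
	if (!l->ops->supports_dgl)
		return -EINVAL;  /* lock cannot be part of a DGL */
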
diff --git a/include/litmus/prioq_lock.h b/include/litmus/prioq_lock.h
index 5c135ef0bdc6..1b0a591ef1a6 100644
--- a/include/litmus/prioq_lock.h
+++ b/include/litmus/prioq_lock.h
@@ -34,6 +34,7 @@ static inline struct prioq_mutex* prioq_mutex_from_lock(struct litmus_lock* lock
 int prioq_mutex_is_owner(struct litmus_lock *l, struct task_struct *t);
 struct task_struct* prioq_mutex_get_owner(struct litmus_lock *l);
 int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
+int prioq_mutex_dgl_unlock(struct litmus_lock *l);
 void prioq_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
 void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_lock,
 				struct task_struct* t, wait_queue_t *q);
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index 3d79e41b42df..3fd760799a75 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -1401,7 +1401,9 @@ int ikglp_unlock(struct litmus_lock* l)
 					struct nested_info, hp_binheap_node);
 			++count;
 		}
-		litmus->decrease_prio(t, NULL);
+		if (count) {
+			litmus->decrease_prio(t, NULL);
+		}
 		WARN_ON(count > 2); // should not be greater than 2. only local fq inh and donation can be possible.
 	}
 	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
diff --git a/litmus/locking.c b/litmus/locking.c
index 4b8382cd77d1..eddc67a4d36a 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -365,18 +365,6 @@ static void snprintf_dgl(char* buf, size_t bsz, struct litmus_lock* dgl_locks[],
 #endif
 
 
-static int failed_owner(struct litmus_lock *cur_lock, struct task_struct *t)
-{
-	struct task_struct *cur_owner = cur_lock->ops->get_owner(cur_lock);
-	printk(KERN_EMERG "lock %d expected owner %s/%d but got %s/%d.\n",
-			cur_lock->ident,
-			(t) ? t->comm : "null",
-			(t) ? t->pid : 0,
-			(cur_owner) ? cur_owner->comm : "null",
-			(cur_owner) ? cur_owner->pid : 0);
-	BUG();
-}
-
 /* only valid when locks are prioq locks!!!
  * THE BIG DGL LOCK MUST BE HELD! */
 int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait)
@@ -395,12 +383,8 @@ int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t
 	/* take the locks */
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
-
 		l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]);
-
-		if(!(l->ops->is_owner(l, dgl_wait->task)))
-			failed_owner(l, dgl_wait->task);
-		//BUG_ON(!(l->ops->is_owner(l, dgl_wait->task)));
+		BUG_ON(!(l->ops->is_owner(l, dgl_wait->task)));
 	}
 
 	return 0; /* success */
@@ -510,6 +494,7 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 	unsigned long irqflags; //, dummyflags;
 	raw_spinlock_t *dgl_lock;
 	struct litmus_lock *l;
+	struct task_struct *t = current;
 
 #ifdef CONFIG_SCHED_DEBUG_TRACE
 	char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
@@ -519,7 +504,7 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 
 	dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
 
-	BUG_ON(dgl_wait->task != current);
+	BUG_ON(dgl_wait->task != t);
 
 	raw_spin_lock_irqsave(dgl_lock, irqflags);
 
@@ -528,7 +513,8 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
-		l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i]); // this should be a forced enqueue if atomic DGLs are needed.
+		// this should be a forced enqueue if atomic DGLs are needed.
+		l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i]);
 	}
 
 	if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) {
@@ -536,27 +522,26 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 	 * Pick a lock to push on and suspend. */
 	TRACE_CUR("Could not atomically acquire all locks.\n");
 
+	/* we set the uninterruptible state here since
+	 * __attempt_atomic_dgl_acquire() may actually succeed. */
+	set_task_state(t, TASK_UNINTERRUPTIBLE);
 
 #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
 	// KLUDGE: don't count this suspension as time in the critical gpu
 	// critical section
-	if(tsk_rt(dgl_wait->task)->held_gpus) {
-		tsk_rt(dgl_wait->task)->suspend_gpu_tracker_on_block = 1;
+	if(tsk_rt(t)->held_gpus) {
+		tsk_rt(t)->suspend_gpu_tracker_on_block = 1;
 	}
 #endif
-	// we are not the owner of any lock, so push on the last one in the DGL
-	// by default.
 
-	l = dgl_wait->locks[dgl_wait->size - 1];
+	// select a lock to push priority on
+	dgl_wait->last_primary = 0; // default
+	select_next_lock(dgl_wait); // may change value of last_primary
 
-	TRACE_CUR("Activating priority inheritance on lock %d\n",
-			l->ident);
+	l = dgl_wait->locks[dgl_wait->last_primary];
 
 	TS_DGL_LOCK_SUSPEND;
 
-	l->ops->enable_priority(l, dgl_wait);
-	dgl_wait->last_primary = dgl_wait->size - 1;
-
 	TRACE_CUR("Suspending for lock %d\n", l->ident);
 
 	raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
@@ -578,9 +563,7 @@ all_acquired:
 	// SANITY CHECK FOR TESTING
 	for(i = 0; i < dgl_wait->size; ++i) {
 		struct litmus_lock *l = dgl_wait->locks[i];
-		if(!(l->ops->is_owner(l, dgl_wait->task)))
-			failed_owner(l, dgl_wait->task);
-		//BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
+		BUG_ON(!l->ops->is_owner(l, dgl_wait->task));
 	}
 
 	TRACE_CUR("Acquired entire DGL\n");
@@ -589,23 +572,6 @@ all_acquired:
 }
 
 
-
-
-static int supports_dgl(struct litmus_lock *l)
-{
-	struct litmus_lock_ops* ops = l->ops;
-	return (ops->dgl_lock &&
-			ops->is_owner &&
-			ops->get_owner &&
-			ops->enable_priority);
-}
-
-static int needs_atomic_dgl(struct litmus_lock *l)
-{
-	struct litmus_lock_ops* ops = l->ops;
-	return (ops->dgl_quick_lock != NULL);
-}
-
 asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 {
 	struct task_struct *t = current;
@@ -641,13 +607,13 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
 		struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]);
 		if(entry && is_lock(entry)) {
 			dgl_wait_state.locks[i] = get_lock(entry);
-			if(!supports_dgl(dgl_wait_state.locks[i])) {
+			if(!dgl_wait_state.locks[i]->ops->supports_dgl) {
 				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
 						dgl_wait_state.locks[i]->ident);
 				goto out;
 			}
 
-			if (needs_atomic_dgl(dgl_wait_state.locks[i])) {
+			if(dgl_wait_state.locks[i]->ops->requires_atomic_dgl) {
 				++num_need_atomic;
 			}
 		}
@@ -686,9 +652,13 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
 	long err = 0;
 
 #ifdef CONFIG_SCHED_DEBUG_TRACE
-	char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
-	snprintf_dgl(dglstr, sizeof(dglstr), dgl_locks, dgl_size);
-	TRACE_CUR("Unlocking a DGL with size %d: %s\n", dgl_size, dglstr);
+	{
+		char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
+		snprintf_dgl(dglstr, sizeof(dglstr), dgl_locks, dgl_size);
+		TRACE_CUR("Unlocking a DGL with size %d: %s\n",
+				dgl_size,
+				dglstr);
+	}
 #endif
 
 	for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order
@@ -740,7 +710,7 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
 		entry = get_entry_for_od(dgl_ods[i]);
 		if(entry && is_lock(entry)) {
 			dgl_locks[i] = get_lock(entry);
-			if(!supports_dgl(dgl_locks[i])) {
+			if(!dgl_locks[i]->ops->supports_dgl) {
 				TRACE_CUR("Lock %d does not support all required DGL operations.\n",
 						dgl_locks[i]->ident);
 				goto out;
@@ -852,19 +822,19 @@ void suspend_for_lock(void)
 		tsk_rt(t)->suspend_gpu_tracker_on_block = 1;
 	}
 #endif
 
 	schedule();
 
 
 	/* TODO: Move the following to wake_up_for_lock()? */
 
 #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
 	// re-enable tracking
 	if(tsk_rt(t)->held_gpus) {
 		tsk_rt(t)->suspend_gpu_tracker_on_block = 0;
 	}
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
 	if (gpu_restore) {
 		/* restore our state */
@@ -883,9 +853,9 @@ void suspend_for_lock(void)
 int wake_up_for_lock(struct task_struct* t)
 {
 	int ret;
 
 	ret = wake_up_process(t);
 
 	return ret;
 }
 
diff --git a/litmus/prioq_lock.c b/litmus/prioq_lock.c
index faf8c15df542..142f56fe9099 100644
--- a/litmus/prioq_lock.c
+++ b/litmus/prioq_lock.c
@@ -165,12 +165,12 @@ static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mute
 	wait_queue_t *q;
 	struct list_head *pos;
 	struct task_struct *queued = NULL, *found = NULL;
 
 	/* list in sorted order. higher-prio tasks likely at the front. */
 	list_for_each(pos, &mutex->wait.task_list) {
 		q = list_entry(pos, wait_queue_t, task_list);
 		queued = get_queued_task(q);
 
 		/* Compare task prios, find high prio task. */
 		if (queued &&
 			(queued != skip) &&
@@ -195,12 +195,12 @@ static int ___prioq_dgl_acquire_via_inheritance(struct prioq_mutex *mutex, struc
 	struct litmus_lock *l;
 	BUG_ON(mutex->owner != NULL);
 	BUG_ON(list_empty(&mutex->wait.task_list));
 
 	l = &mutex->litmus_lock;
 
 	if (dgl_wait) {
 		BUG_ON(t != dgl_wait->task);
 
 		/* we're a part of a DGL */
 		if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) {
 			TRACE_CUR("%s/%d cannot take entire DGL via inheritance.\n",
@@ -218,50 +218,50 @@ static int ___prioq_dgl_acquire_via_inheritance(struct prioq_mutex *mutex, struc
 		/* we're a regular singular request. we can always take the lock if
 		 * there is no mutex owner. */
 		wait_queue_t *first;
 
 		TRACE_CUR("%s/%d can take it's singular lock via inheritance!\n",
 				t->comm, t->pid);
 
 		first = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
 
 		BUG_ON(get_queued_task(first) != t);
 
 		__remove_wait_queue(&mutex->wait, first); /* remove the blocked task */
 
 		/* update/cleanup the state of the lock */
 
 		mutex->owner = t; /* take ownership!!! */
 
 		mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t);
 		l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
 			effective_priority(mutex->hp_waiter) : NULL;
 
 		if (mutex->hp_waiter)
 			TRACE_CUR("%s/%d is new highest-prio waiter\n",
 					mutex->hp_waiter->comm, mutex->hp_waiter->pid);
 		else
 			TRACE_CUR("no further waiters\n");
 
 		raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
 		binheap_add(&l->nest.hp_binheap_node,
 				&tsk_rt(t)->hp_blocked_tasks,
 				struct nested_info, hp_binheap_node);
 
 		raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
 	}
 
 	if (t) {
 		BUG_ON(mutex->owner != t);
 
 		TRACE_CUR("%s/%d waking up since it is no longer blocked.\n", t->comm, t->pid);
 
 		tsk_rt(t)->blocked_lock = NULL;
 		mb();
 
 		wake_up_for_lock(t);
 	}
 
 	return (t != NULL);
 }
 
@@ -276,7 +276,7 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc
 // (1) Increase position for 't' for all locks it is waiting.
 // (2) Check to see if 't' can take the lock, DGL or singular lock.
 // (3) If it can, do so and wake up 't'.
 
 	struct list_head *pos;
 	struct task_struct *new_head;
 	struct task_struct *cur_head = NULL;
@@ -284,32 +284,32 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc
 	int woke_up = 0;
 	int found = 0;
 
 
 	BUG_ON(list_empty(&mutex->wait.task_list));
 
 	/* note the task at the head of the queue */
 	if(mutex->owner == NULL) {
 		cur_head = get_head_task(mutex);
 	}
 
 	list_for_each(pos, &mutex->wait.task_list) {
 		dgl_wait_state_t *temp_dgl_state;
 		wait_queue_t *q = list_entry(pos, wait_queue_t, task_list);
 		struct task_struct *queued = get_queued_task_and_dgl_wait(q, &temp_dgl_state);
 
 		if (queued == t) {
 
 			TRACE_CUR("found %s/%d in prioq of lock %d\n",
 					t->comm, t->pid,
 					mutex->litmus_lock.ident);
 
 			if(temp_dgl_state) { /* it's a DGL request */
 				int i;
 				dgl_wait = temp_dgl_state;
 
 				TRACE_CUR("found request for %s/%d is a DGL request of size %d.\n",
 						t->comm, t->pid, dgl_wait->size);
 
 				// reposition on the other mutexes
 				for(i = 0; i < dgl_wait->size; ++i) {
 					// assume they're all PRIOQ_MUTEX
@@ -318,7 +318,7 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc
 					__prioq_increase_pos(pm, t);
 				}
 			}
 
 			// reposition on this mutex
 			__remove_wait_queue(&mutex->wait, q);
 			__add_wait_queue_sorted(&mutex->wait, q);
@@ -326,24 +326,24 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc
 			break;
 		}
 	}
 
 	BUG_ON(!found);
 
 	if (mutex->owner == NULL) {
 		/* who is the new head? */
 		new_head = get_head_task(mutex);
 
 		/* is the prioq mutex idle? */
 		if(cur_head != new_head) {
 			/* the new head might be able to take the lock */
 
 			BUG_ON(new_head != t); /* the new head must be this task since our prio increased */
 
 			TRACE_CUR("Change in prioq head on idle prioq mutex %d: old = %s/%d new = %s/%d\n",
 					mutex->litmus_lock.ident,
 					cur_head->comm, cur_head->pid,
 					new_head->comm, new_head->pid);
 
 			woke_up = ___prioq_dgl_acquire_via_inheritance(mutex, t, dgl_wait);
 		}
 	}
@@ -358,9 +358,9 @@ static int ___prioq_dgl_decrease_pos_and_check_acquire(struct prioq_mutex *mutex
 	struct task_struct *cur_head = NULL;
 	int woke_up = 0;
 	int found = 1;
 
 	BUG_ON(list_empty(&mutex->wait.task_list));
 
 	/* find the position of t in mutex's wait q if it's not provided */
 	if (q == NULL) {
 		found = 0;
@@ -375,21 +375,21 @@ static int ___prioq_dgl_decrease_pos_and_check_acquire(struct prioq_mutex *mutex
 
 	BUG_ON(!q);
 	BUG_ON(!found);
 
 	if(mutex->owner == NULL) {
 		cur_head = get_head_task(mutex);
 	}
 
 	// update the position
 	__remove_wait_queue(&mutex->wait, q);
 	__add_wait_queue_sorted(&mutex->wait, q);
 
 	if(mutex->owner == NULL) {
 		// get a reference to dgl_wait of the new head is a DGL request
 		dgl_wait_state_t *dgl_wait;
 		q = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
 		new_head = get_queued_task_and_dgl_wait(q, &dgl_wait);
 
 		/* is the prioq mutex idle and did the head change? */
 		if(cur_head != new_head) {
 			/* the new head might be able to take the lock */
@@ -397,7 +397,7 @@ static int ___prioq_dgl_decrease_pos_and_check_acquire(struct prioq_mutex *mutex
 					mutex->litmus_lock.ident,
 					cur_head->comm, cur_head->pid,
 					new_head->comm, new_head->pid);
 
 			woke_up = ___prioq_dgl_acquire_via_inheritance(mutex, new_head, dgl_wait);
 		}
 	}
@@ -410,28 +410,28 @@ static void __prioq_dgl_decrease_pos(struct prioq_mutex *mutex, struct task_stru
 // (2) For every lock upon which 't' was the head AND that lock is idle:
 // (3) Can the new head take the lock?
 // (4) If it can, do so and wake up the new head.
 
 	struct list_head *pos;
 
 	BUG_ON(list_empty(&mutex->wait.task_list));
 
 	list_for_each(pos, &mutex->wait.task_list) {
 		dgl_wait_state_t *dgl_wait;
 		wait_queue_t *q = list_entry(pos, wait_queue_t, task_list);
 		struct task_struct *queued = get_queued_task_and_dgl_wait(q, &dgl_wait);
 
 		if (queued == t) {
 			TRACE_CUR("found %s/%d in prioq of lock %d\n",
 					t->comm, t->pid,
 					mutex->litmus_lock.ident);
 
 			if (dgl_wait) {
 				// reposition on all mutexes and check for wakeup
 				int i;
 
 				TRACE_CUR("found request for %s/%d is a DGL request of size %d.\n",
 						t->comm, t->pid, dgl_wait->size);
 
 				for(i = 0; i < dgl_wait->size; ++i) {
 					// assume they're all PRIOQ_MUTEX
 					struct prioq_mutex *pm = (struct prioq_mutex *) dgl_wait->locks[i];
@@ -442,12 +442,12 @@ static void __prioq_dgl_decrease_pos(struct prioq_mutex *mutex, struct task_stru
 				}
 			}
 			else {
 				___prioq_dgl_decrease_pos_and_check_acquire(mutex, t, q);
 			}
 			return;
 		}
 	}
 
 	BUG();
 }
 
@@ -481,7 +481,7 @@ int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait,
 
 	init_dgl_waitqueue_entry(wq_node, dgl_wait);
 
-	set_task_state(t, TASK_UNINTERRUPTIBLE);
+	//set_task_state(t, TASK_UNINTERRUPTIBLE); /* done in do_litmus_dgl_atomic_lock() only if needed */
 	__add_wait_queue_sorted_exclusive(&mutex->wait, wq_node);
 
 	return acquired_immediatly;
@@ -494,7 +494,8 @@ void prioq_mutex_enable_priority(struct litmus_lock *l,
 	struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
 	struct task_struct *t = dgl_wait->task;
 	struct task_struct *owner = mutex->owner;
-	unsigned long flags = 0; // these are unused under DGL coarse-grain locking
+	unsigned long flags;
+	local_save_flags(flags); // needed for coarse-grain DGLs?
 
 	/**************************************
 	 * This code looks like it supports fine-grain locking, but it does not!
@@ -597,7 +598,7 @@ static void select_next_lock_if_primary(struct litmus_lock *l,
 		effective_priority(mutex->hp_waiter) :
 		NULL;
 
 
 	if (mutex->hp_waiter)
 		TRACE_CUR("%s/%d is new highest-prio waiter\n",
 				mutex->hp_waiter->comm, mutex->hp_waiter->pid);
@@ -822,30 +823,32 @@ int prioq_mutex_lock(struct litmus_lock* l)
 }
 
 
-
 int prioq_mutex_unlock(struct litmus_lock* l)
 {
+	int err = 0;
 	struct task_struct *t = current, *next = NULL;
+	struct task_struct *old_max_eff_prio;
 	struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
 	unsigned long flags;
 
-	struct task_struct *old_max_eff_prio;
-
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
+	raw_spinlock_t *dgl_lock;
 	dgl_wait_state_t *dgl_wait = NULL;
-	raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
 #endif
 
-	int err = 0;
-
 	if (mutex->owner != t) {
 		err = -EINVAL;
 		return err;
 	}
 
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	dgl_lock = litmus->get_dgl_spinlock(current);
+#endif
+
 	lock_global_irqsave(dgl_lock, flags);
 	lock_fine_irqsave(&mutex->lock, flags);
 
+
 	raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
 	TRACE_TASK(t, "Freeing lock %d\n", l->ident);
@@ -855,13 +858,13 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 
 	if(tsk_rt(t)->inh_task){
 		struct task_struct *new_max_eff_prio =
 			top_priority(&tsk_rt(t)->hp_blocked_tasks);
 
 		if((new_max_eff_prio == NULL) ||
 			/* there was a change in eff prio */
 			( (new_max_eff_prio != old_max_eff_prio) &&
 			/* and owner had the old eff prio */
 			(effective_priority(t) == old_max_eff_prio)) )
 		{
 			// old_max_eff_prio > new_max_eff_prio
 
@@ -888,8 +891,6 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
 
-
-
 	mutex->owner = NULL;
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
@@ -900,11 +901,11 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 		 */
 		wait_queue_t *q = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
 		get_queued_task_and_dgl_wait(q, &dgl_wait);
 
 		if (dgl_wait) {
 			TRACE_CUR("Checking to see if DGL waiter %s/%d can take its locks\n",
 					dgl_wait->task->comm, dgl_wait->task->pid);
 
 			if(__attempt_atomic_dgl_acquire(l, dgl_wait)) {
 				/* failed. can't take this lock yet. we remain at head of prioq
 				 * allow hp requests in the future to go ahead of us. */
@@ -919,7 +920,7 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 
 		/* remove the first */
 		next = __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait);
 
 		BUG_ON(dgl_wait && (next != dgl_wait->task));
 	}
 #else
@@ -935,7 +936,7 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 		if (next == mutex->hp_waiter) {
 
 			TRACE_CUR("%s/%d was highest-prio waiter\n", next->comm, next->pid);
 
 			/* next has the highest priority --- it doesn't need to
 			 * inherit. However, we need to make sure that the
 			 * next-highest priority in the queue is reflected in
@@ -945,13 +946,13 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 				effective_priority(mutex->hp_waiter) :
 				NULL;
 
 
 			if (mutex->hp_waiter)
 				TRACE_CUR("%s/%d is new highest-prio waiter\n",
 						mutex->hp_waiter->comm, mutex->hp_waiter->pid);
 			else
 				TRACE_CUR("no further waiters\n");
 
 
 			raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
 
@@ -1019,8 +1020,8 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 out:
 #endif
-	unlock_global_irqrestore(dgl_lock, flags);
 
+	unlock_global_irqrestore(dgl_lock, flags);
 	TRACE_TASK(t, "-- Freed lock %d --\n", l->ident);
 
 	return err;
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 40daf8e16d74..69f30188f3ba 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -1316,6 +1316,14 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 {
 	int success = 1;
 
+	if (prio_inh == tsk_rt(t)->inh_task) {
+		/* relationship already established. */
+		TRACE_TASK(t, "already inherits priority from %s/%d\n",
+				(prio_inh) ? prio_inh->comm : "(null)",
+				(prio_inh) ? prio_inh->pid : 0);
+		goto out;
+	}
+
 	if (prio_inh && (effective_priority(prio_inh) != prio_inh)) {
 		TRACE_TASK(t, "Inheriting from %s/%d instead of the eff_prio = %s/%d!\n",
 				prio_inh->comm, prio_inh->pid,
@@ -1337,14 +1345,6 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 #endif
 	}
 
-	if (prio_inh == tsk_rt(t)->inh_task) {
-		/* relationship already established. */
-		TRACE_TASK(t, "already inherits priority from %s/%d\n",
-				(prio_inh) ? prio_inh->comm : "(null)",
-				(prio_inh) ? prio_inh->pid : 0);
-		goto out;
-	}
-
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) {
 #endif
@@ -1469,7 +1469,7 @@ static void nested_increase_priority_inheritance(struct task_struct* t,
 
 
 	if(blocked_lock) {
-		if(blocked_lock->ops->propagate_increase_inheritance) {
+		if(blocked_lock->ops->supports_nesting) {
 			TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n",
 					blocked_lock->ident);
 
@@ -1506,7 +1506,7 @@ static void nested_decrease_priority_inheritance(struct task_struct* t,
 	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap.
 
 	if(blocked_lock) {
-		if(blocked_lock->ops->propagate_decrease_inheritance) {
+		if(blocked_lock->ops->supports_nesting) {
 			TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n",
 					blocked_lock->ident);
 
@@ -1547,7 +1547,11 @@ static struct litmus_lock_ops cedf_fifo_mutex_lock_ops = {
 
 	.dgl_can_quick_lock = NULL,
 	.dgl_quick_lock = NULL,
+
+	.supports_dgl = 1,
+	.requires_atomic_dgl = 0,
 #endif
+	.supports_nesting = 1,
 };
 
 static struct litmus_lock* cedf_new_fifo_mutex(void)
@@ -1574,7 +1578,11 @@ static struct litmus_lock_ops cedf_prioq_mutex_lock_ops = {
 
 	.dgl_can_quick_lock = prioq_mutex_dgl_can_quick_lock,
 	.dgl_quick_lock = prioq_mutex_dgl_quick_lock,
+
+	.supports_dgl = 1,
+	.requires_atomic_dgl = 1,
 #endif
+	.supports_nesting = 1,
 };
 
 static struct litmus_lock* cedf_new_prioq_mutex(void)
@@ -1593,6 +1601,12 @@ static struct litmus_lock_ops cedf_ikglp_lock_ops = {
 	// ikglp can only be an outer-most lock.
 	.propagate_increase_inheritance = NULL,
 	.propagate_decrease_inheritance = NULL,
+
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	.supports_dgl = 0,
+	.requires_atomic_dgl = 0,
+#endif
+	.supports_nesting = 0,
 };
 
 static struct litmus_lock* cedf_new_ikglp(void* __user arg)
@@ -1617,6 +1631,12 @@ static struct litmus_lock_ops cedf_kfmlp_lock_ops = {
 	// kfmlp can only be an outer-most lock.
 	.propagate_increase_inheritance = NULL,
 	.propagate_decrease_inheritance = NULL,
+
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	.supports_dgl = 0,
+	.requires_atomic_dgl = 0,
+#endif
+	.supports_nesting = 0,
 };
 
 
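
Taken together, the sched_cedf.c ops tables and the litmus/locking.c changes mean the DGL system calls consult these per-lock flags on every request. A rough sketch of the flow, assuming the functions shown in this diff (the final dispatch on num_need_atomic is an assumption; that code lies outside the visible hunks):

	/* inside sys_litmus_dgl_lock(), after resolving the object descriptors */
	for (i = 0; i < dgl_size; ++i) {
		struct litmus_lock *l = dgl_wait_state.locks[i];
		if (!l->ops->supports_dgl)
			goto out;                /* reject the whole DGL request */
		if (l->ops->requires_atomic_dgl)
			++num_need_atomic;       /* e.g. PRIOQ mutexes (requires_atomic_dgl = 1) */
	}
	if (num_need_atomic)
		err = do_litmus_dgl_atomic_lock(&dgl_wait_state);  /* all-or-nothing path */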