author	Glenn Elliott <gelliott@cs.unc.edu>	2011-03-31 10:47:01 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2011-07-18 09:50:46 -0400
commit	26c0ddabb1efb8a7360bbd423561d2196b61424c (patch)
tree	9f7b7b1d769fdfe56894808107d1fdde480743af /litmus
parent	4df6011f5e571630cd99cd29b36bc8a701520540 (diff)
Improve FMLP queue management.
The next owner of an FMLP-protected resource is now dequeued from the FMLP FIFO queue by unlock() (when the previous owner frees the resource) instead of having the next owner dequeue itself immediately after it has been woken up. This simplifies the code a little and reduces potential spinlock contention, since the woken task no longer has to re-acquire the queue's spinlock just to remove itself.
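For illustration, a minimal user-space model of the new hand-off (hypothetical names: struct fmlp_sem, fifo_remove_first(), fmlp_unlock(); the real code uses wait_queue_t and lives in litmus/locking.c, shown in the diff below). This is a sketch of the idea, not the kernel implementation: the unlock path dequeues the next task and transfers ownership in a single step.

/* Hypothetical user-space sketch of dequeue-in-unlock; not kernel code. */
#include <stdio.h>
#include <stddef.h>

struct task {
	const char *name;
	struct task *next;
};

struct fmlp_sem {
	struct task *owner;
	struct task *head;	/* FIFO of blocked tasks */
	struct task *tail;
};

/* Pop the FIFO head; the caller is assumed to hold the semaphore's
 * spinlock (omitted in this model), mirroring __waitqueue_remove_first(). */
static struct task *fifo_remove_first(struct fmlp_sem *sem)
{
	struct task *t = sem->head;

	if (t) {
		sem->head = t->next;
		if (!sem->head)
			sem->tail = NULL;
	}
	return t;
}

/* unlock(): the previous owner dequeues the next task and hands it the
 * resource directly; the woken task never touches the queue itself. */
static void fmlp_unlock(struct fmlp_sem *sem)
{
	struct task *next = fifo_remove_first(sem);

	sem->owner = next;	/* NULL if no one was waiting */
	if (next)
		printf("waking %s as the new owner\n", next->name);
}

int main(void)
{
	struct task b = { "B", NULL };
	struct task a = { "A", &b };
	struct fmlp_sem sem = { .owner = NULL, .head = &a, .tail = &b };

	fmlp_unlock(&sem);	/* A becomes owner */
	fmlp_unlock(&sem);	/* B becomes owner */
	return 0;
}

Running the sketch hands the resource to A and then to B, in FIFO order, with no queue manipulation on the woken task's side.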
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/locking.c	12
-rw-r--r--	litmus/sched_gsn_edf.c	4
-rw-r--r--	litmus/sched_psn_edf.c	6
3 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/litmus/locking.c b/litmus/locking.c
index 728b56835cf7..2693f1aca859 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -107,16 +107,18 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 	return err;
 }
 
-struct task_struct* waitqueue_first(wait_queue_head_t *wq)
+struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq)
 {
-	wait_queue_t *q;
+	wait_queue_t* q;
+	struct task_struct* t = NULL;
 
 	if (waitqueue_active(wq)) {
 		q = list_entry(wq->task_list.next,
 			       wait_queue_t, task_list);
-		return (struct task_struct*) q->private;
-	} else
-		return NULL;
+		t = (struct task_struct*) q->private;
+		__remove_wait_queue(wq, q);
+	}
+	return(t);
 }
 
 
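Design note: per the usual kernel naming convention, the double-underscore prefix in __waitqueue_remove_first() signals that the caller is expected to already hold the lock protecting the wait queue. Both FMLP unlock paths below call the helper from inside the semaphore's critical section, which is what lets the dequeue move out of the woken task's lock path.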
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 770004544edd..dfec80e351aa 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -783,8 +783,6 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 		 * ->owner. We can thus check it without acquiring the spin
 		 * lock. */
 		BUG_ON(sem->owner != t);
-
-		remove_wait_queue(&sem->wait, &wait);
 	} else {
 		/* it's ours now */
 		sem->owner = t;
@@ -810,7 +808,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 	}
 
 	/* check if there are jobs waiting for this resource */
-	next = waitqueue_first(&sem->wait);
+	next = __waitqueue_remove_first(&sem->wait);
 	if (next) {
 		/* next becomes the resouce holder */
 		sem->owner = next;
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 9e62b0504b20..8e4a22dd8d6a 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -441,10 +441,6 @@ int psnedf_fmlp_lock(struct litmus_lock* l)
 		 * ->owner. We can thus check it without acquiring the spin
 		 * lock. */
 		BUG_ON(sem->owner != t);
-
-		/* FIXME: could we punt the dequeuing to the previous job,
-		 * which is holding the spinlock anyway? */
-		remove_wait_queue(&sem->wait, &wait);
 	} else {
 		/* it's ours now */
 		sem->owner = t;
@@ -477,7 +473,7 @@ int psnedf_fmlp_unlock(struct litmus_lock* l)
 	unboost_priority(t);
 
 	/* check if there are jobs waiting for this resource */
-	next = waitqueue_first(&sem->wait);
+	next = __waitqueue_remove_first(&sem->wait);
 	if (next) {
 		/* boost next job */
 		boost_priority(next);