author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-06-10 13:02:23 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-06-10 13:02:23 -0400
commit    5980b2330359d002e4e2278327ff80307504b848
tree      617e1f1b4f9d99c3f34d97a3866be7c1a397039e
parent    b7723f10b088450c82a84275371d48020309e5f7
Use a non-sleeping lock to protect fmlp locking operations
The wait_queue_head_t lock used to protect the fmlp_down and fmlp_up operations in the vanilla kernel becomes a sleeping lock under PreemptRT, but we call the FMLP operations from atomic contexts. This commit replaces the wait-queue lock with a raw_spin_lock.
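
For context, a minimal sketch of the pattern this commit applies, assuming PreemptRT lock semantics: the fields mirror the pi_semaphore change below, while pi_sem_sketch and its helpers are illustrative names, not part of the patch.

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/atomic.h>

/* Sketch of the pattern: pair the wait queue with a dedicated
 * raw_spinlock_t instead of relying on the lock embedded in
 * wait_queue_head_t, which sleeps under PreemptRT.
 */
struct pi_sem_sketch {
	atomic_t count;
	int sleepers;
	raw_spinlock_t wait_lock;	/* raw: never sleeps, atomic-safe */
	wait_queue_head_t wait;
};

static void pi_sem_sketch_init(struct pi_sem_sketch *sem)
{
	atomic_set(&sem->count, 1);
	sem->sleepers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	init_waitqueue_head(&sem->wait);
}

static void pi_sem_sketch_critical(struct pi_sem_sketch *sem)
{
	unsigned long flags;

	/* Safe from atomic context even on PreemptRT, because a
	 * raw_spinlock_t always spins rather than sleeping.
	 */
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	/* ... examine count/sleepers, manipulate the wait queue ... */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}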
-rw-r--r--  include/litmus/sched_plugin.h |  5 +++++
-rw-r--r--  litmus/fmlp.c                 | 11 ++++++-----
2 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 9c1c9f28ba79..c44108991053 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -11,6 +11,11 @@
 struct pi_semaphore {
 	atomic_t count;
 	int sleepers;
+	/* in PreemptRT the spinlock in the wait_queue_head_t
+	 * is a sleeping spinlock; we use it in atomic context,
+	 * so a non-sleeping raw_spinlock_t is needed.
+	 */
+	raw_spinlock_t wait_lock;
 	wait_queue_head_t wait;
 	struct {
 		/* highest-prio holder/waiter */
diff --git a/litmus/fmlp.c b/litmus/fmlp.c
index 03fa7358d5eb..7f3ae0b3392a 100644
--- a/litmus/fmlp.c
+++ b/litmus/fmlp.c
@@ -30,6 +30,7 @@ static void* create_fmlp_semaphore(void)
 		return NULL;
 	atomic_set(&sem->count, 1);
 	sem->sleepers = 0;
+	raw_spin_lock_init(&sem->wait_lock);
 	init_waitqueue_head(&sem->wait);
 	sem->hp.task = NULL;
 	sem->holder = NULL;
@@ -142,7 +143,7 @@ static int do_fmlp_down(struct pi_semaphore* sem)
 
 	pair.tsk = tsk;
 	pair.sem = sem;
-	spin_lock_irqsave(&sem->wait.lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (atomic_dec_return(&sem->count) < 0 ||
 	    waitqueue_active(&sem->wait)) {
@@ -154,7 +155,7 @@ static int do_fmlp_down(struct pi_semaphore* sem)
 		litmus->pi_block(sem, tsk);
 
 		/* release lock before sleeping */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 		TS_PI_DOWN_END;
 		preempt_enable_no_resched();
@@ -186,7 +187,7 @@ static int do_fmlp_down(struct pi_semaphore* sem)
 		sem->hp.cpu_task[get_partition(tsk)] = tsk;
 
 		litmus->inherit_priority(sem, tsk);
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	}
 	return suspended;
 }
@@ -195,7 +196,7 @@ static void do_fmlp_up(struct pi_semaphore* sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->wait.lock, flags);
+	raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
 	TRACE_CUR("releases PI lock %p\n", sem);
 	litmus->return_priority(sem);
@@ -204,7 +205,7 @@ static void do_fmlp_up(struct pi_semaphore* sem)
 	/* there is a task queued */
 	wake_up_locked(&sem->wait);
 
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 asmlinkage long sys_fmlp_down(int sem_od)
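
Design note: under PreemptRT a plain spinlock_t, including the lock embedded in wait_queue_head_t, is converted into a sleeping rt_mutex-based lock, whereas raw_spinlock_t keeps the classic non-sleeping, busy-waiting semantics. Because the FMLP paths above run with preemption disabled, only the raw variant may be taken there; the patch therefore serializes all wait-queue manipulation, including the wake_up_locked() call, under the new wait_lock instead of sem->wait.lock.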