author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-29 23:45:13 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-29 23:57:07 -0400
commit    a66246f9e973a68fb9955a2fa7663a2e02afbd30
tree      ebdf77a3cf491c0d0b77af3d9622f33013af5856
parent    6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e
Change most LitmusRT spinlock_t to raw_spinlock_t
Adapt to the new spinlock schema (tglx, 2009-12-17):

  spinlock      - the weakest one, which might sleep in RT
  raw_spinlock  - a spinlock which always spins, even on RT
  arch_spinlock - the hardware-level, architecture-dependent implementation

Most probably, all the spinlocks changed by this commit will be true
spinning locks (raw_spinlock) in PreemptRT, so hopefully few changes
will be needed when porting Litmus to PreemptRT.

There are a couple of spinlocks that the kernel still defines as
spinlock_t (and that are therefore unchanged in this commit) that might
cause us trouble:

- the wait_queue_t lock is defined as spinlock_t; it is used in:
  * fmlp.c -- sem->wait.lock
  * sync.c -- ts_release.wait.lock
- the rwlock_t used in the FIFO implementation in sched_trace.c
  * this probably needs to be changed to something that always spins
    in RT, at the expense of increased locking time.

This commit also fixes warnings and errors due to the need to include
slab.h when using kmalloc() and friends.

This commit does not compile.
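The conversion applied in the diff below is mechanical: the lock type, its initializer, and every lock/unlock call move to their raw_ counterparts. A minimal sketch of the pattern follows; the example_domain struct and function names are made up for illustration, while raw_spinlock_t, DEFINE_RAW_SPINLOCK(), raw_spin_lock_init(), and raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() are the actual kernel primitives.

#include <linux/spinlock.h>

/* Hypothetical container; stands in for rt_domain_t, cedf_domain_t, etc. */
struct example_domain {
	raw_spinlock_t lock;			/* was: spinlock_t lock; */
};

/* Statically defined locks switch their initializer as well. */
static DEFINE_RAW_SPINLOCK(example_global_lock);	/* was: DEFINE_SPINLOCK() */

static void example_domain_init(struct example_domain *dom)
{
	raw_spin_lock_init(&dom->lock);		/* was: spin_lock_init() */
}

static void example_critical_section(struct example_domain *dom)
{
	unsigned long flags;

	/* A raw spinlock always spins and never sleeps, even under PREEMPT_RT. */
	raw_spin_lock_irqsave(&dom->lock, flags);	/* was: spin_lock_irqsave() */
	/* ... manipulate scheduler state ... */
	raw_spin_unlock_irqrestore(&dom->lock, flags);
}

The same substitution is applied to every lock touched below; only the wait-queue and rwlock cases listed above remain on the sleeping spinlock_t/rwlock_t primitives for now.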
-rw-r--r--  include/litmus/fdso.h       |  1
-rw-r--r--  include/litmus/rt_domain.h  | 26
-rw-r--r--  kernel/hrtimer.c            |  8
-rw-r--r--  litmus/ftdev.c              |  1
-rw-r--r--  litmus/litmus.c             | 10
-rw-r--r--  litmus/rt_domain.c          | 18
-rw-r--r--  litmus/sched_cedf.c         | 25
-rw-r--r--  litmus/sched_gsn_edf.c      | 36
-rw-r--r--  litmus/sched_pfair.c        | 25
-rw-r--r--  litmus/sched_plugin.c       | 14
-rw-r--r--  litmus/sched_psn_edf.c      | 24
11 files changed, 96 insertions(+), 92 deletions(-)
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index 286e10f86de0..61f1b5baf42c 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -10,6 +10,7 @@
 #include <asm/atomic.h>

 #include <linux/fs.h>
+#include <linux/slab.h>

 #define MAX_OBJECT_DESCRIPTORS 32

diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index b452be1d2256..9bf980713474 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -22,16 +22,16 @@ struct release_queue {

 typedef struct _rt_domain {
 	/* runnable rt tasks are in here */
-	spinlock_t		ready_lock;
+	raw_spinlock_t		ready_lock;
 	struct bheap		ready_queue;

 	/* real-time tasks waiting for release are in here */
-	spinlock_t		release_lock;
+	raw_spinlock_t		release_lock;
 	struct release_queue	release_queue;
 	int			release_master;

 	/* for moving tasks to the release queue */
-	spinlock_t		tobe_lock;
+	raw_spinlock_t		tobe_lock;
 	struct list_head	tobe_released;

 	/* how do we check if we need to kick another CPU? */
@@ -109,17 +109,17 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 {
 	unsigned long flags;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	__add_ready(rt, new);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }

 static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	__merge_ready(rt, tasks);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }

 static inline struct task_struct* take_ready(rt_domain_t* rt)
@@ -127,9 +127,9 @@ static inline struct task_struct* take_ready(rt_domain_t* rt)
 	unsigned long flags;
 	struct task_struct* ret;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	ret = __take_ready(rt);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }

@@ -138,9 +138,9 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 {
 	unsigned long flags;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->tobe_lock, flags);
+	raw_spin_lock_irqsave(&rt->tobe_lock, flags);
 	__add_release(rt, task);
-	spin_unlock_irqrestore(&rt->tobe_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
 }

 static inline int __jobs_pending(rt_domain_t* rt)
@@ -153,9 +153,9 @@ static inline int jobs_pending(rt_domain_t* rt)
 	unsigned long flags;
 	int ret;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	ret = !bheap_empty(&rt->ready_queue);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }

diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c0b440b1f6ee..02e5097bf319 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1052,9 +1052,9 @@ void hrtimer_pull(void)
 	struct hrtimer_start_on_info *info;
 	struct list_head *pos, *safe, list;

-	spin_lock(&base->lock);
+	raw_spin_lock(&base->lock);
 	list_replace_init(&base->to_pull, &list);
-	spin_unlock(&base->lock);
+	raw_spin_unlock(&base->lock);

 	list_for_each_safe(pos, safe, &list) {
 		info = list_entry(pos, struct hrtimer_start_on_info, list);
@@ -1108,10 +1108,10 @@ int hrtimer_start_on(int cpu, struct hrtimer_start_on_info* info,
 	} else {
 		TRACE("hrtimer_start_on: pulling to remote CPU\n");
 		base = &per_cpu(hrtimer_bases, cpu);
-		spin_lock_irqsave(&base->lock, flags);
+		raw_spin_lock_irqsave(&base->lock, flags);
 		was_empty = list_empty(&base->to_pull);
 		list_add(&info->list, &base->to_pull);
-		spin_unlock_irqrestore(&base->lock, flags);
+		raw_spin_unlock_irqrestore(&base->lock, flags);
 		if (was_empty)
 			/* only send IPI if other no else
 			 * has done so already
diff --git a/litmus/ftdev.c b/litmus/ftdev.c
index 8b2d74d816a2..51dafaebf8a6 100644
--- a/litmus/ftdev.c
+++ b/litmus/ftdev.c
@@ -1,5 +1,6 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/cdev.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
diff --git a/litmus/litmus.c b/litmus/litmus.c
index e43596a5104c..99714d06eed5 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -23,7 +23,7 @@

 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(task_transition_lock);
+static DEFINE_RAW_SPINLOCK(task_transition_lock);
 /* synchronize plugin switching */
 atomic_t cannot_use_plugin = ATOMIC_INIT(0);

@@ -323,7 +323,7 @@ long litmus_admit_task(struct task_struct* tsk)
 	INIT_LIST_HEAD(&tsk_rt(tsk)->list);

 	/* avoid scheduler plugin changing underneath us */
-	spin_lock_irqsave(&task_transition_lock, flags);
+	raw_spin_lock_irqsave(&task_transition_lock, flags);

 	/* allocate heap node for this task */
 	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
@@ -350,7 +350,7 @@ long litmus_admit_task(struct task_struct* tsk)
 	}

out_unlock:
-	spin_unlock_irqrestore(&task_transition_lock, flags);
+	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
out:
 	return retval;
 }
@@ -396,7 +396,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	smp_call_function(synch_on_plugin_switch, NULL, 0);

 	/* stop task transitions */
-	spin_lock_irqsave(&task_transition_lock, flags);
+	raw_spin_lock_irqsave(&task_transition_lock, flags);

 	/* don't switch if there are active real-time tasks */
 	if (atomic_read(&rt_task_count) == 0) {
@@ -414,7 +414,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	} else
 		ret = -EBUSY;
out:
-	spin_unlock_irqrestore(&task_transition_lock, flags);
+	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
 	atomic_set(&cannot_use_plugin, 0);
 	return ret;
 }
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 609ff0f82abb..8d5db6050723 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -53,11 +53,11 @@ static enum hrtimer_restart on_release_timer(struct hrtimer *timer)

 	rh = container_of(timer, struct release_heap, timer);

-	spin_lock_irqsave(&rh->dom->release_lock, flags);
+	raw_spin_lock_irqsave(&rh->dom->release_lock, flags);
 	TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock);
 	/* remove from release queue */
 	list_del(&rh->list);
-	spin_unlock_irqrestore(&rh->dom->release_lock, flags);
+	raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags);
 	TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock);

 	/* call release callback */
@@ -185,20 +185,20 @@ static void arm_release_timer(rt_domain_t *_rt)
 		list_del(pos);

 		/* put into release heap while holding release_lock */
-		spin_lock(&rt->release_lock);
+		raw_spin_lock(&rt->release_lock);
 		TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);

 		rh = get_release_heap(rt, t, 0);
 		if (!rh) {
 			/* need to use our own, but drop lock first */
-			spin_unlock(&rt->release_lock);
+			raw_spin_unlock(&rt->release_lock);
 			TRACE_TASK(t, "Dropped release_lock 0x%p\n",
 				   &rt->release_lock);

 			reinit_release_heap(t);
 			TRACE_TASK(t, "release_heap ready\n");

-			spin_lock(&rt->release_lock);
+			raw_spin_lock(&rt->release_lock);
 			TRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
 				   &rt->release_lock);

@@ -207,7 +207,7 @@ static void arm_release_timer(rt_domain_t *_rt)
 		bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
 		TRACE_TASK(t, "arm_release_timer(): added to release heap\n");

-		spin_unlock(&rt->release_lock);
+		raw_spin_unlock(&rt->release_lock);
 		TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);

 		/* To avoid arming the timer multiple times, we only let the
@@ -258,9 +258,9 @@ void rt_domain_init(rt_domain_t *rt,
 	for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
 		INIT_LIST_HEAD(&rt->release_queue.slot[i]);

-	spin_lock_init(&rt->ready_lock);
-	spin_lock_init(&rt->release_lock);
-	spin_lock_init(&rt->tobe_lock);
+	raw_spin_lock_init(&rt->ready_lock);
+	raw_spin_lock_init(&rt->release_lock);
+	raw_spin_lock_init(&rt->tobe_lock);

 	rt->check_resched = check;
 	rt->release_jobs = release;
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index da44b451c9ad..118fbd14fe25 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/slab.h>

 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -281,12 +282,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
 	unsigned long flags;

-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);

 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);

-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }

 /* caller holds cedf_lock */
@@ -367,7 +368,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;

-	spin_lock(&cluster->lock);
+	raw_spin_lock(&cluster->lock);
 	clear_will_schedule();

 	/* sanity checking */
@@ -448,7 +449,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;

-	spin_unlock(&cluster->lock);
+	raw_spin_unlock(&cluster->lock);

#ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("cedf_lock released, next=0x%p\n", next);
@@ -490,7 +491,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);

-	spin_lock_irqsave(&cluster->domain.ready_lock, flags);
+	raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags);

 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -507,7 +508,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 		t->rt_param.linked_on = NO_CPU;

 	cedf_job_arrival(t);
-	spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
+	raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
 }

 static void cedf_task_wake_up(struct task_struct *task)
@@ -520,7 +521,7 @@ static void cedf_task_wake_up(struct task_struct *task)

 	cluster = task_cpu_cluster(task);

-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -543,7 +544,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	cedf_job_arrival(task);
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }

 static void cedf_task_block(struct task_struct *t)
@@ -556,9 +557,9 @@ static void cedf_task_block(struct task_struct *t)
 	cluster = task_cpu_cluster(t);

 	/* unlink if necessary */
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);

 	BUG_ON(!is_realtime(t));
 }
@@ -570,13 +571,13 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_domain_t *cluster = task_cpu_cluster(t);

 	/* unlink if necessary */
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);

 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index b9310dd6f75c..7424c183d8b2 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -297,12 +297,12 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);

 	__merge_ready(rt, tasks);
 	check_for_preemptions();

-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }

 /* caller holds gsnedf_lock */
@@ -388,7 +388,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (gsnedf.release_master == entry->cpu)
 		return NULL;

-	spin_lock(&gsnedf_lock);
+	raw_spin_lock(&gsnedf_lock);
 	clear_will_schedule();

 	/* sanity checking */
@@ -469,7 +469,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;

-	spin_unlock(&gsnedf_lock);
+	raw_spin_unlock(&gsnedf_lock);

#ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("gsnedf_lock released, next=0x%p\n", next);
@@ -507,7 +507,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)

 	TRACE("gsn edf: task new %d\n", t->pid);

-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);

 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -530,7 +530,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 		t->rt_param.linked_on = NO_CPU;

 	gsnedf_job_arrival(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }

 static void gsnedf_task_wake_up(struct task_struct *task)
@@ -540,7 +540,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)

 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -563,7 +563,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	gsnedf_job_arrival(task);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }

 static void gsnedf_task_block(struct task_struct *t)
@@ -573,9 +573,9 @@ static void gsnedf_task_block(struct task_struct *t)
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());

 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

 	BUG_ON(!is_realtime(t));
 }
@@ -586,13 +586,13 @@ static void gsnedf_task_exit(struct task_struct * t)
 	unsigned long flags;

 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);

 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -628,7 +628,7 @@ static void update_queue_position(struct task_struct *holder)
 			 gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
-		spin_lock(&gsnedf.release_lock);
+		raw_spin_lock(&gsnedf.release_lock);
 		if (is_queued(holder)) {
 			TRACE_TASK(holder, "%s: is queued\n",
 				   __FUNCTION__);
@@ -646,7 +646,7 @@ static void update_queue_position(struct task_struct *holder)
 			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
 				   __FUNCTION__);
 		}
-		spin_unlock(&gsnedf.release_lock);
+		raw_spin_unlock(&gsnedf.release_lock);

 		/* If holder was enqueued in a release heap, then the following
 		 * preemption check is pointless, but we can't easily detect
@@ -680,7 +680,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 	if (edf_higher_prio(new_waiter, sem->hp.task)) {
 		TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
 		/* called with IRQs disabled */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 		/* store new highest-priority task */
 		sem->hp.task = new_waiter;
 		if (sem->holder) {
@@ -692,7 +692,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 			sem->holder->rt_param.inh_task = new_waiter;
 			update_queue_position(sem->holder);
 		}
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}

 	return 0;
@@ -738,7 +738,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)

 	if (t->rt_param.inh_task) {
 		/* interrupts already disabled by PI code */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);

 		/* Reset inh_task to NULL. */
 		t->rt_param.inh_task = NULL;
@@ -746,7 +746,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 		/* Check if rescheduling is necessary */
 		unlink(t);
 		gsnedf_job_arrival(t);
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}

 	return ret;
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 2ea39223e7f0..ea77d3295290 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -12,6 +12,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/list.h>
+#include <linux/slab.h>

 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -415,7 +416,7 @@ static void schedule_next_quantum(quanta_t time)
 	/* called with interrupts disabled */
 	PTRACE("--- Q %lu at %llu PRE-SPIN\n",
 	       time, litmus_clock());
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 	PTRACE("<<< Q %lu at %llu\n",
 	       time, litmus_clock());

@@ -448,7 +449,7 @@ static void schedule_next_quantum(quanta_t time)
 	}
 	PTRACE(">>> Q %lu at %llu\n",
 	       time, litmus_clock());
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 }

 static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state)
@@ -564,7 +565,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 	int blocks;
 	struct task_struct* next = NULL;

-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);

 	blocks = is_realtime(prev) && !is_running(prev);

@@ -577,7 +578,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 		tsk_rt(next)->scheduled_on = state->cpu;
 	}

-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);

 	if (next)
 		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
@@ -594,7 +595,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)

 	TRACE("pfair: task new %d state:%d\n", t->pid, t->state);

-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	if (running)
 		t->rt_param.scheduled_on = task_cpu(t);
 	else
@@ -605,7 +606,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 	pfair_add_release(t);
 	check_preempt(t);

-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }

 static void pfair_task_wake_up(struct task_struct *t)
@@ -616,7 +617,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
 		   litmus_clock(), cur_release(t), pfair_time);

-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);

 	/* It is a little unclear how to deal with Pfair
 	 * tasks that block for a while and then wake. For now,
@@ -637,7 +638,7 @@ static void pfair_task_wake_up(struct task_struct *t)

 	check_preempt(t);

-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
 }

@@ -661,12 +662,12 @@ static void pfair_task_exit(struct task_struct * t)
 	 * might not be the same as the CPU that the PFAIR scheduler
 	 * has chosen for it.
 	 */
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);

 	TRACE_TASK(t, "RIP, state:%d\n", t->state);
 	drop_all_references(t);

-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);

 	kfree(t->rt_param.pfair);
 	t->rt_param.pfair = NULL;
@@ -680,7 +681,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)

 	BUG_ON(!is_realtime(task));

-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	release_at(task, start);
 	release = time2quanta(start, CEIL);

@@ -698,7 +699,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 	 */
 	tsk_pfair(task)->sporadic_release = 0;

-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }

 static void init_subtask(struct subtask* sub, unsigned long i,
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 3767b30e610a..3543b7baff53 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -187,7 +187,7 @@ struct sched_plugin *litmus = &linux_sched_plugin;

 /* the list of registered scheduling plugins */
 static LIST_HEAD(sched_plugins);
-static DEFINE_SPINLOCK(sched_plugins_lock);
+static DEFINE_RAW_SPINLOCK(sched_plugins_lock);

 #define CHECK(func) {\
 	if (!plugin->func) \
@@ -220,9 +220,9 @@ int register_sched_plugin(struct sched_plugin* plugin)
 	if (!plugin->release_at)
 		plugin->release_at = release_at;

-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_add(&plugin->list, &sched_plugins);
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);

 	return 0;
 }
@@ -234,7 +234,7 @@ struct sched_plugin* find_sched_plugin(const char* name)
 	struct list_head *pos;
 	struct sched_plugin *plugin;

-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_for_each(pos, &sched_plugins) {
 		plugin = list_entry(pos, struct sched_plugin, list);
 		if (!strcmp(plugin->plugin_name, name))
@@ -243,7 +243,7 @@ struct sched_plugin* find_sched_plugin(const char* name)
 	plugin = NULL;

out_unlock:
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 	return plugin;
 }

@@ -253,13 +253,13 @@ int print_sched_plugins(char* buf, int max)
 	struct list_head *pos;
 	struct sched_plugin *plugin;

-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_for_each(pos, &sched_plugins) {
 		plugin = list_entry(pos, struct sched_plugin, list);
 		count += snprintf(buf + count, max - count, "%s\n", plugin->plugin_name);
 		if (max - count <= 0)
 			break;
 	}
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 	return count;
 }
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 7f71ecfaaaae..7a548bf5162e 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -131,7 +131,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt,
 	    np, exists, blocks, resched;

-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);

 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
@@ -201,7 +201,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	}

 	pedf->scheduled = next;
-	spin_unlock(&pedf->slock);
+	raw_spin_unlock(&pedf->slock);

 	return next;
 }
@@ -224,7 +224,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pedf->scheduled);
@@ -234,7 +234,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 		/* maybe we have to reschedule */
 		preempt(pedf);
 	}
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }

 static void psnedf_task_wake_up(struct task_struct *task)
@@ -245,7 +245,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	lt_t now;

 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	BUG_ON(is_queued(task));
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
@@ -270,7 +270,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	if (pedf->scheduled != task)
 		requeue(task, edf);

-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }

@@ -289,7 +289,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	rt_domain_t* edf;

-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	if (is_queued(t)) {
 		/* dequeue */
 		edf = task_edf(t);
@@ -301,7 +301,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP, now reschedule\n");

 	preempt(pedf);
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }

#ifdef CONFIG_FMLP
@@ -321,7 +321,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		edf = task_edf(new_waiter);

 		/* interrupts already disabled */
-		spin_lock(&pedf->slock);
+		raw_spin_lock(&pedf->slock);

 		/* store new highest-priority task */
 		sem->hp.cpu_task[cpu] = new_waiter;
@@ -346,7 +346,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		if (edf_preemption_needed(edf, current))
 			preempt(pedf);

-		spin_unlock(&pedf->slock);
+		raw_spin_unlock(&pedf->slock);
 	}

 	return 0;
@@ -413,7 +413,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	/* Always check for delayed preemptions that might have become
 	 * necessary due to non-preemptive execution.
 	 */
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);

 	/* Reset inh_task to NULL. */
 	current->rt_param.inh_task = NULL;
@@ -422,7 +422,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	if (edf_preemption_needed(edf, current))
 		preempt(pedf);

-	spin_unlock(&pedf->slock);
+	raw_spin_unlock(&pedf->slock);


 	return ret;