author     Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-21 14:26:22 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-21 14:26:22 -0400
commit     816a3cbda0cfdb3781da19e8d532c2f78ca49e18 (patch)
tree       3945cc3c0a2819e880601ecb9aae3bfef8fe33c9
parent     95a10f2e4716eedd9bfbc559f824598c5346102d (diff)
Change API: spinlock_t -> raw_spinlock_t
Adapt to the new spinlock schema (tglx, 2009-12-17):

  spinlock      - the weakest one, which might sleep in RT
  raw_spinlock  - a spinlock which always spins, even on RT
  arch_spinlock - the hardware-level, architecture-dependent implementation

Planning for a future port to PreemptRT, probably all of the spinlocks
changed in this patch should be true spinning locks (raw_spinlock).

There are a couple of spinlocks that the kernel still defines as
spinlock_t (therefore no changes are reported in this commit) that might
cause problems for us:

- the wait_queue_t lock is defined as spinlock_t; it is used in:
  * fmlp.c -- sem->wait.lock
  * sync.c -- ts_release.wait.lock

- rwlock_t is used in the FIFO implementation in sched_trace.c
  * this probably needs to be changed to something that always spins
    in RT, at the expense of increased locking time.
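For reference, the following is a minimal sketch of the conversion pattern applied throughout this patch, using the stock raw_spinlock_t API from <linux/spinlock.h> (DEFINE_RAW_SPINLOCK, raw_spin_lock_init, raw_spin_lock_irqsave/raw_spin_unlock_irqrestore). The example_domain structure and function names below are illustrative only and do not appear in the patch:

#include <linux/spinlock.h>

/* A lock taken from scheduling/IRQ context must keep spinning even on
 * PREEMPT_RT, so it is declared raw_spinlock_t instead of spinlock_t. */
static DEFINE_RAW_SPINLOCK(example_lock);	/* was: DEFINE_SPINLOCK() */

struct example_domain {				/* hypothetical, for illustration */
	raw_spinlock_t ready_lock;		/* was: spinlock_t */
};

static void example_domain_init(struct example_domain *dom)
{
	raw_spin_lock_init(&dom->ready_lock);	/* was: spin_lock_init() */
}

static void example_critical_section(struct example_domain *dom)
{
	unsigned long flags;

	/* was: spin_lock_irqsave()/spin_unlock_irqrestore() */
	raw_spin_lock_irqsave(&dom->ready_lock, flags);
	/* ... manipulate the ready queue ... */
	raw_spin_unlock_irqrestore(&dom->ready_lock, flags);
}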
-rw-r--r--   include/litmus/fdso.h        1
-rw-r--r--   include/litmus/rt_domain.h  26
-rw-r--r--   kernel/hrtimer.c             8
-rw-r--r--   litmus/ftdev.c               1
-rw-r--r--   litmus/litmus.c             10
-rw-r--r--   litmus/rt_domain.c          18
-rw-r--r--   litmus/sched_cedf.c         25
-rw-r--r--   litmus/sched_gsn_edf.c      36
-rw-r--r--   litmus/sched_pfair.c        25
-rw-r--r--   litmus/sched_plugin.c       14
-rw-r--r--   litmus/sched_psn_edf.c      24
11 files changed, 96 insertions, 92 deletions
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index 286e10f86de0..61f1b5baf42c 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -10,6 +10,7 @@
 #include <asm/atomic.h>
 
 #include <linux/fs.h>
+#include <linux/slab.h>
 
 #define MAX_OBJECT_DESCRIPTORS 32
 
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index b452be1d2256..9bf980713474 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -22,16 +22,16 @@ struct release_queue {
 
 typedef struct _rt_domain {
 	/* runnable rt tasks are in here */
-	spinlock_t			ready_lock;
+	raw_spinlock_t			ready_lock;
 	struct bheap			ready_queue;
 
 	/* real-time tasks waiting for release are in here */
-	spinlock_t			release_lock;
+	raw_spinlock_t			release_lock;
 	struct release_queue		release_queue;
 	int				release_master;
 
 	/* for moving tasks to the release queue */
-	spinlock_t			tobe_lock;
+	raw_spinlock_t			tobe_lock;
 	struct list_head		tobe_released;
 
 	/* how do we check if we need to kick another CPU? */
@@ -109,17 +109,17 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 {
 	unsigned long flags;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	__add_ready(rt, new);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
 static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	__merge_ready(rt, tasks);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
 static inline struct task_struct* take_ready(rt_domain_t* rt)
@@ -127,9 +127,9 @@ static inline struct task_struct* take_ready(rt_domain_t* rt)
 	unsigned long flags;
 	struct task_struct* ret;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	ret = __take_ready(rt);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
 
@@ -138,9 +138,9 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 {
 	unsigned long flags;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->tobe_lock, flags);
+	raw_spin_lock_irqsave(&rt->tobe_lock, flags);
 	__add_release(rt, task);
-	spin_unlock_irqrestore(&rt->tobe_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
 }
 
 static inline int __jobs_pending(rt_domain_t* rt)
@@ -153,9 +153,9 @@ static inline int jobs_pending(rt_domain_t* rt)
 	unsigned long flags;
 	int ret;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	ret = !bheap_empty(&rt->ready_queue);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
 
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c0b440b1f6ee..02e5097bf319 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1052,9 +1052,9 @@ void hrtimer_pull(void)
 	struct hrtimer_start_on_info *info;
 	struct list_head *pos, *safe, list;
 
-	spin_lock(&base->lock);
+	raw_spin_lock(&base->lock);
 	list_replace_init(&base->to_pull, &list);
-	spin_unlock(&base->lock);
+	raw_spin_unlock(&base->lock);
 
 	list_for_each_safe(pos, safe, &list) {
 		info = list_entry(pos, struct hrtimer_start_on_info, list);
@@ -1108,10 +1108,10 @@ int hrtimer_start_on(int cpu, struct hrtimer_start_on_info* info,
 	} else {
 		TRACE("hrtimer_start_on: pulling to remote CPU\n");
 		base = &per_cpu(hrtimer_bases, cpu);
-		spin_lock_irqsave(&base->lock, flags);
+		raw_spin_lock_irqsave(&base->lock, flags);
 		was_empty = list_empty(&base->to_pull);
 		list_add(&info->list, &base->to_pull);
-		spin_unlock_irqrestore(&base->lock, flags);
+		raw_spin_unlock_irqrestore(&base->lock, flags);
 		if (was_empty)
 			/* only send IPI if other no else
 			 * has done so already
diff --git a/litmus/ftdev.c b/litmus/ftdev.c
index 8b2d74d816a2..51dafaebf8a6 100644
--- a/litmus/ftdev.c
+++ b/litmus/ftdev.c
@@ -1,5 +1,6 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/cdev.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 5bf848386e1c..b71fc819eb51 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -23,7 +23,7 @@
 
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(task_transition_lock);
+static DEFINE_RAW_SPINLOCK(task_transition_lock);
 /* synchronize plugin switching */
 atomic_t cannot_use_plugin = ATOMIC_INIT(0);
 
@@ -330,7 +330,7 @@ long litmus_admit_task(struct task_struct* tsk)
 	INIT_LIST_HEAD(&tsk_rt(tsk)->list);
 
 	/* avoid scheduler plugin changing underneath us */
-	spin_lock_irqsave(&task_transition_lock, flags);
+	raw_spin_lock_irqsave(&task_transition_lock, flags);
 
 	/* allocate heap node for this task */
 	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
@@ -357,7 +357,7 @@ long litmus_admit_task(struct task_struct* tsk)
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&task_transition_lock, flags);
+	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
 out:
 	return retval;
 }
@@ -403,7 +403,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	smp_call_function(synch_on_plugin_switch, NULL, 0);
 
 	/* stop task transitions */
-	spin_lock_irqsave(&task_transition_lock, flags);
+	raw_spin_lock_irqsave(&task_transition_lock, flags);
 
 	/* don't switch if there are active real-time tasks */
 	if (atomic_read(&rt_task_count) == 0) {
@@ -421,7 +421,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	} else
 		ret = -EBUSY;
out:
-	spin_unlock_irqrestore(&task_transition_lock, flags);
+	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
 	atomic_set(&cannot_use_plugin, 0);
 	return ret;
 }
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 609ff0f82abb..8d5db6050723 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -53,11 +53,11 @@ static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
 
 	rh = container_of(timer, struct release_heap, timer);
 
-	spin_lock_irqsave(&rh->dom->release_lock, flags);
+	raw_spin_lock_irqsave(&rh->dom->release_lock, flags);
 	TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock);
 	/* remove from release queue */
 	list_del(&rh->list);
-	spin_unlock_irqrestore(&rh->dom->release_lock, flags);
+	raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags);
 	TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock);
 
 	/* call release callback */
@@ -185,20 +185,20 @@ static void arm_release_timer(rt_domain_t *_rt)
 		list_del(pos);
 
 		/* put into release heap while holding release_lock */
-		spin_lock(&rt->release_lock);
+		raw_spin_lock(&rt->release_lock);
 		TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);
 
 		rh = get_release_heap(rt, t, 0);
 		if (!rh) {
 			/* need to use our own, but drop lock first */
-			spin_unlock(&rt->release_lock);
+			raw_spin_unlock(&rt->release_lock);
 			TRACE_TASK(t, "Dropped release_lock 0x%p\n",
 				   &rt->release_lock);
 
 			reinit_release_heap(t);
 			TRACE_TASK(t, "release_heap ready\n");
 
-			spin_lock(&rt->release_lock);
+			raw_spin_lock(&rt->release_lock);
 			TRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
 				   &rt->release_lock);
 
@@ -207,7 +207,7 @@ static void arm_release_timer(rt_domain_t *_rt)
 		bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
 		TRACE_TASK(t, "arm_release_timer(): added to release heap\n");
 
-		spin_unlock(&rt->release_lock);
+		raw_spin_unlock(&rt->release_lock);
 		TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);
 
 		/* To avoid arming the timer multiple times, we only let the
@@ -258,9 +258,9 @@ void rt_domain_init(rt_domain_t *rt,
 	for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
 		INIT_LIST_HEAD(&rt->release_queue.slot[i]);
 
-	spin_lock_init(&rt->ready_lock);
-	spin_lock_init(&rt->release_lock);
-	spin_lock_init(&rt->tobe_lock);
+	raw_spin_lock_init(&rt->ready_lock);
+	raw_spin_lock_init(&rt->release_lock);
+	raw_spin_lock_init(&rt->tobe_lock);
 
 	rt->check_resched = check;
 	rt->release_jobs = release;
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index e57a11afda16..f5b77080cc4f 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -28,6 +28,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -285,12 +286,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
 	unsigned long flags;
 
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 
 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);
 
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }
 
 /* caller holds cedf_lock */
@@ -371,7 +372,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 
-	spin_lock(&cluster->lock);
+	raw_spin_lock(&cluster->lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -454,7 +455,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
-	spin_unlock(&cluster->lock);
+	raw_spin_unlock(&cluster->lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("cedf_lock released, next=0x%p\n", next);
@@ -496,7 +497,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
 
-	spin_lock_irqsave(&cluster->domain.ready_lock, flags);
+	raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -513,7 +514,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	cedf_job_arrival(t);
-	spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
+	raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
@@ -526,7 +527,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 
 	cluster = task_cpu_cluster(task);
 
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -549,7 +550,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	cedf_job_arrival(task);
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -562,9 +563,9 @@ static void cedf_task_block(struct task_struct *t)
 	cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -576,13 +577,13 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_domain_t *cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6137c74729cb..c0c63eba70ce 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -297,12 +297,12 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 	__merge_ready(rt, tasks);
 	check_for_preemptions();
 
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 /* caller holds gsnedf_lock */
@@ -388,7 +388,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (gsnedf.release_master == entry->cpu)
 		return NULL;
 
-	spin_lock(&gsnedf_lock);
+	raw_spin_lock(&gsnedf_lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -471,7 +471,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
-	spin_unlock(&gsnedf_lock);
+	raw_spin_unlock(&gsnedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("gsnedf_lock released, next=0x%p\n", next);
@@ -509,7 +509,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("gsn edf: task new %d\n", t->pid);
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -532,7 +532,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	gsnedf_job_arrival(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_wake_up(struct task_struct *task)
@@ -542,7 +542,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -565,7 +565,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	gsnedf_job_arrival(task);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_block(struct task_struct *t)
@@ -575,9 +575,9 @@ static void gsnedf_task_block(struct task_struct *t)
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -588,13 +588,13 @@ static void gsnedf_task_exit(struct task_struct * t)
 	unsigned long flags;
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -630,7 +630,7 @@ static void update_queue_position(struct task_struct *holder)
 			   gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
-		spin_lock(&gsnedf.release_lock);
+		raw_spin_lock(&gsnedf.release_lock);
 		if (is_queued(holder)) {
 			TRACE_TASK(holder, "%s: is queued\n",
 				   __FUNCTION__);
@@ -648,7 +648,7 @@ static void update_queue_position(struct task_struct *holder)
 			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
 				   __FUNCTION__);
 		}
-		spin_unlock(&gsnedf.release_lock);
+		raw_spin_unlock(&gsnedf.release_lock);
 
 		/* If holder was enqueued in a release heap, then the following
 		 * preemption check is pointless, but we can't easily detect
@@ -682,7 +682,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 	if (edf_higher_prio(new_waiter, sem->hp.task)) {
 		TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
 		/* called with IRQs disabled */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 		/* store new highest-priority task */
 		sem->hp.task = new_waiter;
 		if (sem->holder) {
@@ -694,7 +694,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 			sem->holder->rt_param.inh_task = new_waiter;
 			update_queue_position(sem->holder);
 		}
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}
 
 	return 0;
@@ -740,7 +740,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 
 	if (t->rt_param.inh_task) {
 		/* interrupts already disabled by PI code */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 
 		/* Reset inh_task to NULL. */
 		t->rt_param.inh_task = NULL;
@@ -748,7 +748,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 		/* Check if rescheduling is necessary */
 		unlink(t);
 		gsnedf_job_arrival(t);
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}
 
 	return ret;
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 2ea39223e7f0..ea77d3295290 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -12,6 +12,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -415,7 +416,7 @@ static void schedule_next_quantum(quanta_t time)
 	/* called with interrupts disabled */
 	PTRACE("--- Q %lu at %llu PRE-SPIN\n",
 	       time, litmus_clock());
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 	PTRACE("<<< Q %lu at %llu\n",
 	       time, litmus_clock());
 
@@ -448,7 +449,7 @@ static void schedule_next_quantum(quanta_t time)
 	}
 	PTRACE(">>> Q %lu at %llu\n",
 	       time, litmus_clock());
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 }
 
 static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state)
@@ -564,7 +565,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 	int blocks;
 	struct task_struct* next = NULL;
 
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 
 	blocks = is_realtime(prev) && !is_running(prev);
 
@@ -577,7 +578,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 		tsk_rt(next)->scheduled_on = state->cpu;
 	}
 
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 
 	if (next)
 		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
@@ -594,7 +595,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("pfair: task new %d state:%d\n", t->pid, t->state);
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	if (running)
 		t->rt_param.scheduled_on = task_cpu(t);
 	else
@@ -605,7 +606,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 	pfair_add_release(t);
 	check_preempt(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }
 
 static void pfair_task_wake_up(struct task_struct *t)
@@ -616,7 +617,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
 		   litmus_clock(), cur_release(t), pfair_time);
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 
 	/* It is a little unclear how to deal with Pfair
 	 * tasks that block for a while and then wake. For now,
@@ -637,7 +638,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 
 	check_preempt(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
 }
 
@@ -661,12 +662,12 @@ static void pfair_task_exit(struct task_struct * t)
 	 * might not be the same as the CPU that the PFAIR scheduler
 	 * has chosen for it.
 	 */
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 
 	TRACE_TASK(t, "RIP, state:%d\n", t->state);
 	drop_all_references(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 
 	kfree(t->rt_param.pfair);
 	t->rt_param.pfair = NULL;
@@ -680,7 +681,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 
 	BUG_ON(!is_realtime(task));
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	release_at(task, start);
 	release = time2quanta(start, CEIL);
 
@@ -698,7 +699,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 	 */
 	tsk_pfair(task)->sporadic_release = 0;
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }
 
 static void init_subtask(struct subtask* sub, unsigned long i,
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 3767b30e610a..3543b7baff53 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -187,7 +187,7 @@ struct sched_plugin *litmus = &linux_sched_plugin;
 
 /* the list of registered scheduling plugins */
 static LIST_HEAD(sched_plugins);
-static DEFINE_SPINLOCK(sched_plugins_lock);
+static DEFINE_RAW_SPINLOCK(sched_plugins_lock);
 
 #define CHECK(func) {\
 	if (!plugin->func) \
@@ -220,9 +220,9 @@ int register_sched_plugin(struct sched_plugin* plugin)
 	if (!plugin->release_at)
 		plugin->release_at = release_at;
 
-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_add(&plugin->list, &sched_plugins);
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 
 	return 0;
 }
@@ -234,7 +234,7 @@ struct sched_plugin* find_sched_plugin(const char* name)
 	struct list_head *pos;
 	struct sched_plugin *plugin;
 
-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_for_each(pos, &sched_plugins) {
 		plugin = list_entry(pos, struct sched_plugin, list);
 		if (!strcmp(plugin->plugin_name, name))
@@ -243,7 +243,7 @@ struct sched_plugin* find_sched_plugin(const char* name)
 	plugin = NULL;
 
out_unlock:
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 	return plugin;
 }
 
@@ -253,13 +253,13 @@ int print_sched_plugins(char* buf, int max)
 	struct list_head *pos;
 	struct sched_plugin *plugin;
 
-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_for_each(pos, &sched_plugins) {
 		plugin = list_entry(pos, struct sched_plugin, list);
 		count += snprintf(buf + count, max - count, "%s\n", plugin->plugin_name);
 		if (max - count <= 0)
 			break;
 	}
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 	return count;
 }
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index af0b30cb8b89..e50b27391d21 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -131,7 +131,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt,
 		np, exists, blocks, resched;
 
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
@@ -203,7 +203,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	}
 
 	pedf->scheduled = next;
-	spin_unlock(&pedf->slock);
+	raw_spin_unlock(&pedf->slock);
 
 	return next;
 }
@@ -226,7 +226,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pedf->scheduled);
@@ -236,7 +236,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 		/* maybe we have to reschedule */
 		preempt(pedf);
 	}
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void psnedf_task_wake_up(struct task_struct *task)
@@ -247,7 +247,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	BUG_ON(is_queued(task));
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
@@ -272,7 +272,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	if (pedf->scheduled != task)
 		requeue(task, edf);
 
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -291,7 +291,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	rt_domain_t* edf;
 
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	if (is_queued(t)) {
 		/* dequeue */
 		edf = task_edf(t);
@@ -303,7 +303,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
 	preempt(pedf);
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #ifdef CONFIG_FMLP
@@ -323,7 +323,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		edf = task_edf(new_waiter);
 
 		/* interrupts already disabled */
-		spin_lock(&pedf->slock);
+		raw_spin_lock(&pedf->slock);
 
 		/* store new highest-priority task */
 		sem->hp.cpu_task[cpu] = new_waiter;
@@ -348,7 +348,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		if (edf_preemption_needed(edf, current))
 			preempt(pedf);
 
-		spin_unlock(&pedf->slock);
+		raw_spin_unlock(&pedf->slock);
 	}
 
 	return 0;
@@ -415,7 +415,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	/* Always check for delayed preemptions that might have become
 	 * necessary due to non-preemptive execution.
 	 */
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);
 
 	/* Reset inh_task to NULL. */
 	current->rt_param.inh_task = NULL;
@@ -424,7 +424,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	if (edf_preemption_needed(edf, current))
 		preempt(pedf);
 
-	spin_unlock(&pedf->slock);
+	raw_spin_unlock(&pedf->slock);
 
 
 	return ret;