author     Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-29 23:45:13 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-29 23:57:07 -0400
commit     a66246f9e973a68fb9955a2fa7663a2e02afbd30 (patch)
tree       ebdf77a3cf491c0d0b77af3d9622f33013af5856 /litmus
parent     6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e (diff)
Change most LitmusRT spinlock_t into raw_spinlock_t
Adapt to the new spinlock naming scheme (tglx, 2009-12-17):

  spinlock      - the weakest one, which might sleep in RT
  raw_spinlock  - a spinlock which always spins, even on RT
  arch_spinlock - the hardware-level, architecture-dependent implementation

The conversion pattern applied throughout this commit is sketched below.
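For illustration, a minimal sketch of the conversion pattern used throughout
this commit; the lock and function names are made up, only the spinlock API
calls mirror the actual changes:

#include <linux/spinlock.h>

/* Before: static DEFINE_SPINLOCK(demo_lock); -- may sleep under RT.
 * After: a raw spinlock, which always spins: */
static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_critical_section(void)
{
	unsigned long flags;

	/* IRQ-safe variant, as used for task_transition_lock in litmus/litmus.c */
	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... short, bounded critical section ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);

	/* plain variant, as used for rt->release_lock in litmus/rt_domain.c */
	raw_spin_lock(&demo_lock);
	/* ... */
	raw_spin_unlock(&demo_lock);
}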
----
Most probably, all of the spinlocks changed by this commit will remain true
spinning locks (raw_spinlock) in PreemptRT, so hopefully we'll need few
changes when porting LITMUS^RT to PreemptRT.
There are a couple of locks that the kernel itself still defines as
spinlock_t (and that are therefore not touched by this commit) that might
cause us trouble:

- the wait-queue lock (the spinlock_t embedded in wait_queue_head_t); it is
  locked directly in:
  * fmlp.c -- sem->wait.lock
  * sync.c -- ts_release.wait.lock
- the rwlock_t used in the FIFO implementation in sched_trace.c
  * this probably needs to be changed to something that always spins in RT,
    at the expense of increased locking time.

A sketch of how these embedded wait-queue locks are used follows this list.
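To make the wait-queue concern concrete, here is a minimal sketch of the
pattern fmlp.c and sync.c rely on (the struct and function names below are
hypothetical, not taken from the LITMUS^RT sources): they take the spinlock_t
embedded in a wait_queue_head_t directly, and that lock is not converted by
this commit:

#include <linux/wait.h>
#include <linux/spinlock.h>

/* Hypothetical semaphore-like object mirroring the sem->wait.lock usage. */
struct demo_sem {
	wait_queue_head_t wait;	/* embeds a spinlock_t member named 'lock' */
};

static void demo_update_waiters(struct demo_sem *sem)
{
	unsigned long flags;

	/* This acquires the wait queue's own spinlock_t; under PreemptRT
	 * it may become a sleeping lock, which is the concern above. */
	spin_lock_irqsave(&sem->wait.lock, flags);
	/* ... inspect or reorder the waiters under the lock ... */
	spin_unlock_irqrestore(&sem->wait.lock, flags);
}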
----
This commit also fixes warnings and errors caused by the fact that
<linux/slab.h> must now be included explicitly when using kmalloc() and
friends (see the example below).
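As an illustration (the helper below is hypothetical; in this commit the new
#include lines go into litmus/ftdev.c, litmus/sched_cedf.c, and
litmus/sched_pfair.c):

#include <linux/slab.h>	/* declares kmalloc()/kfree() */

static void *demo_alloc(void)
{
	/* Without the explicit slab.h include, this use of kmalloc()
	 * triggers the build warnings/errors mentioned above. */
	return kmalloc(64, GFP_ATOMIC);
}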
----
This commit does not compile.
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/ftdev.c         |  1
-rw-r--r--  litmus/litmus.c        | 10
-rw-r--r--  litmus/rt_domain.c     | 18
-rw-r--r--  litmus/sched_cedf.c    | 25
-rw-r--r--  litmus/sched_gsn_edf.c | 36
-rw-r--r--  litmus/sched_pfair.c   | 25
-rw-r--r--  litmus/sched_plugin.c  | 14
-rw-r--r--  litmus/sched_psn_edf.c | 24
8 files changed, 78 insertions, 75 deletions
diff --git a/litmus/ftdev.c b/litmus/ftdev.c
index 8b2d74d816a2..51dafaebf8a6 100644
--- a/litmus/ftdev.c
+++ b/litmus/ftdev.c
@@ -1,5 +1,6 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/cdev.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
diff --git a/litmus/litmus.c b/litmus/litmus.c
index e43596a5104c..99714d06eed5 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -23,7 +23,7 @@
 
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(task_transition_lock);
+static DEFINE_RAW_SPINLOCK(task_transition_lock);
 /* synchronize plugin switching */
 atomic_t cannot_use_plugin = ATOMIC_INIT(0);
 
@@ -323,7 +323,7 @@ long litmus_admit_task(struct task_struct* tsk)
 	INIT_LIST_HEAD(&tsk_rt(tsk)->list);
 
 	/* avoid scheduler plugin changing underneath us */
-	spin_lock_irqsave(&task_transition_lock, flags);
+	raw_spin_lock_irqsave(&task_transition_lock, flags);
 
 	/* allocate heap node for this task */
 	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
@@ -350,7 +350,7 @@ long litmus_admit_task(struct task_struct* tsk)
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&task_transition_lock, flags);
+	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
 out:
 	return retval;
 }
@@ -396,7 +396,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	smp_call_function(synch_on_plugin_switch, NULL, 0);
 
 	/* stop task transitions */
-	spin_lock_irqsave(&task_transition_lock, flags);
+	raw_spin_lock_irqsave(&task_transition_lock, flags);
 
 	/* don't switch if there are active real-time tasks */
 	if (atomic_read(&rt_task_count) == 0) {
@@ -414,7 +414,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	} else
 		ret = -EBUSY;
 out:
-	spin_unlock_irqrestore(&task_transition_lock, flags);
+	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
 	atomic_set(&cannot_use_plugin, 0);
 	return ret;
 }
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 609ff0f82abb..8d5db6050723 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -53,11 +53,11 @@ static enum hrtimer_restart on_release_timer(struct hrtimer *timer)
 
 	rh = container_of(timer, struct release_heap, timer);
 
-	spin_lock_irqsave(&rh->dom->release_lock, flags);
+	raw_spin_lock_irqsave(&rh->dom->release_lock, flags);
 	TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock);
 	/* remove from release queue */
 	list_del(&rh->list);
-	spin_unlock_irqrestore(&rh->dom->release_lock, flags);
+	raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags);
 	TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock);
 
 	/* call release callback */
@@ -185,20 +185,20 @@ static void arm_release_timer(rt_domain_t *_rt)
 		list_del(pos);
 
 		/* put into release heap while holding release_lock */
-		spin_lock(&rt->release_lock);
+		raw_spin_lock(&rt->release_lock);
 		TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock);
 
 		rh = get_release_heap(rt, t, 0);
 		if (!rh) {
 			/* need to use our own, but drop lock first */
-			spin_unlock(&rt->release_lock);
+			raw_spin_unlock(&rt->release_lock);
 			TRACE_TASK(t, "Dropped release_lock 0x%p\n",
 				   &rt->release_lock);
 
 			reinit_release_heap(t);
 			TRACE_TASK(t, "release_heap ready\n");
 
-			spin_lock(&rt->release_lock);
+			raw_spin_lock(&rt->release_lock);
 			TRACE_TASK(t, "Re-acquired release_lock 0x%p\n",
 				   &rt->release_lock);
 
@@ -207,7 +207,7 @@ static void arm_release_timer(rt_domain_t *_rt)
 		bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node);
 		TRACE_TASK(t, "arm_release_timer(): added to release heap\n");
 
-		spin_unlock(&rt->release_lock);
+		raw_spin_unlock(&rt->release_lock);
 		TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock);
 
 		/* To avoid arming the timer multiple times, we only let the
@@ -258,9 +258,9 @@ void rt_domain_init(rt_domain_t *rt,
 	for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
 		INIT_LIST_HEAD(&rt->release_queue.slot[i]);
 
-	spin_lock_init(&rt->ready_lock);
-	spin_lock_init(&rt->release_lock);
-	spin_lock_init(&rt->tobe_lock);
+	raw_spin_lock_init(&rt->ready_lock);
+	raw_spin_lock_init(&rt->release_lock);
+	raw_spin_lock_init(&rt->tobe_lock);
 
 	rt->check_resched = check;
 	rt->release_jobs = release;
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index da44b451c9ad..118fbd14fe25 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -281,12 +282,12 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
 	unsigned long flags;
 
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 
 	__merge_ready(&cluster->domain, tasks);
 	check_for_preemptions(cluster);
 
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }
 
 /* caller holds cedf_lock */
@@ -367,7 +368,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt, np, exists, blocks;
 	struct task_struct* next = NULL;
 
-	spin_lock(&cluster->lock);
+	raw_spin_lock(&cluster->lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -448,7 +449,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
-	spin_unlock(&cluster->lock);
+	raw_spin_unlock(&cluster->lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("cedf_lock released, next=0x%p\n", next);
@@ -490,7 +491,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* the cluster doesn't change even if t is running */
 	cluster = task_cpu_cluster(t);
 
-	spin_lock_irqsave(&cluster->domain.ready_lock, flags);
+	raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -507,7 +508,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	cedf_job_arrival(t);
-	spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
+	raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags);
 }
 
 static void cedf_task_wake_up(struct task_struct *task)
@@ -520,7 +521,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 
 	cluster = task_cpu_cluster(task);
 
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -543,7 +544,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	cedf_job_arrival(task);
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 }
 
 static void cedf_task_block(struct task_struct *t)
@@ -556,9 +557,9 @@ static void cedf_task_block(struct task_struct *t)
 	cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -570,13 +571,13 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_domain_t *cluster = task_cpu_cluster(t);
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&cluster->lock, flags);
+	raw_spin_lock_irqsave(&cluster->lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&cluster->lock, flags);
+	raw_spin_unlock_irqrestore(&cluster->lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index b9310dd6f75c..7424c183d8b2 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -297,12 +297,12 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 	__merge_ready(rt, tasks);
 	check_for_preemptions();
 
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 /* caller holds gsnedf_lock */
@@ -388,7 +388,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (gsnedf.release_master == entry->cpu)
 		return NULL;
 
-	spin_lock(&gsnedf_lock);
+	raw_spin_lock(&gsnedf_lock);
 	clear_will_schedule();
 
 	/* sanity checking */
@@ -469,7 +469,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
-	spin_unlock(&gsnedf_lock);
+	raw_spin_unlock(&gsnedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE("gsnedf_lock released, next=0x%p\n", next);
@@ -507,7 +507,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("gsn edf: task new %d\n", t->pid);
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 	/* setup job params */
 	release_at(t, litmus_clock());
@@ -530,7 +530,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.linked_on = NO_CPU;
 
 	gsnedf_job_arrival(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_wake_up(struct task_struct *task)
@@ -540,7 +540,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
 	 * a semaphore, it should never be treated as a new job release.
@@ -563,7 +563,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 		}
 	}
 	gsnedf_job_arrival(task);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 }
 
 static void gsnedf_task_block(struct task_struct *t)
@@ -573,9 +573,9 @@ static void gsnedf_task_block(struct task_struct *t)
 	TRACE_TASK(t, "block at %llu\n", litmus_clock());
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 }
@@ -586,13 +586,13 @@ static void gsnedf_task_exit(struct task_struct * t)
 	unsigned long flags;
 
 	/* unlink if necessary */
-	spin_lock_irqsave(&gsnedf_lock, flags);
+	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
 		tsk_rt(t)->scheduled_on = NO_CPU;
 	}
-	spin_unlock_irqrestore(&gsnedf_lock, flags);
+	raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
@@ -628,7 +628,7 @@ static void update_queue_position(struct task_struct *holder)
 			 gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
 	} else {
 		/* holder may be queued: first stop queue changes */
-		spin_lock(&gsnedf.release_lock);
+		raw_spin_lock(&gsnedf.release_lock);
 		if (is_queued(holder)) {
 			TRACE_TASK(holder, "%s: is queued\n",
 				   __FUNCTION__);
@@ -646,7 +646,7 @@ static void update_queue_position(struct task_struct *holder)
 			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
 				   __FUNCTION__);
 		}
-		spin_unlock(&gsnedf.release_lock);
+		raw_spin_unlock(&gsnedf.release_lock);
 
 		/* If holder was enqueued in a release heap, then the following
 		 * preemption check is pointless, but we can't easily detect
@@ -680,7 +680,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 	if (edf_higher_prio(new_waiter, sem->hp.task)) {
 		TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
 		/* called with IRQs disabled */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 		/* store new highest-priority task */
 		sem->hp.task = new_waiter;
 		if (sem->holder) {
@@ -692,7 +692,7 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 			sem->holder->rt_param.inh_task = new_waiter;
 			update_queue_position(sem->holder);
 		}
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}
 
 	return 0;
@@ -738,7 +738,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 
 	if (t->rt_param.inh_task) {
 		/* interrupts already disabled by PI code */
-		spin_lock(&gsnedf_lock);
+		raw_spin_lock(&gsnedf_lock);
 
 		/* Reset inh_task to NULL. */
 		t->rt_param.inh_task = NULL;
@@ -746,7 +746,7 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
 		/* Check if rescheduling is necessary */
 		unlink(t);
 		gsnedf_job_arrival(t);
-		spin_unlock(&gsnedf_lock);
+		raw_spin_unlock(&gsnedf_lock);
 	}
 
 	return ret;
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 2ea39223e7f0..ea77d3295290 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -12,6 +12,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
@@ -415,7 +416,7 @@ static void schedule_next_quantum(quanta_t time)
 	/* called with interrupts disabled */
 	PTRACE("--- Q %lu at %llu PRE-SPIN\n",
 	       time, litmus_clock());
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 	PTRACE("<<< Q %lu at %llu\n",
 	       time, litmus_clock());
 
@@ -448,7 +449,7 @@ static void schedule_next_quantum(quanta_t time)
 	}
 	PTRACE(">>> Q %lu at %llu\n",
 	       time, litmus_clock());
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 }
 
 static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state)
@@ -564,7 +565,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 	int blocks;
 	struct task_struct* next = NULL;
 
-	spin_lock(&pfair_lock);
+	raw_spin_lock(&pfair_lock);
 
 	blocks = is_realtime(prev) && !is_running(prev);
 
@@ -577,7 +578,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 		tsk_rt(next)->scheduled_on = state->cpu;
 	}
 
-	spin_unlock(&pfair_lock);
+	raw_spin_unlock(&pfair_lock);
 
 	if (next)
 		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
@@ -594,7 +595,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 
 	TRACE("pfair: task new %d state:%d\n", t->pid, t->state);
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	if (running)
 		t->rt_param.scheduled_on = task_cpu(t);
 	else
@@ -605,7 +606,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 	pfair_add_release(t);
 	check_preempt(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }
 
 static void pfair_task_wake_up(struct task_struct *t)
@@ -616,7 +617,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
 		   litmus_clock(), cur_release(t), pfair_time);
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 
 	/* It is a little unclear how to deal with Pfair
 	 * tasks that block for a while and then wake. For now,
@@ -637,7 +638,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 
 	check_preempt(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
 }
 
@@ -661,12 +662,12 @@ static void pfair_task_exit(struct task_struct * t)
 	 * might not be the same as the CPU that the PFAIR scheduler
	 * has chosen for it.
	 */
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 
 	TRACE_TASK(t, "RIP, state:%d\n", t->state);
 	drop_all_references(t);
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 
 	kfree(t->rt_param.pfair);
 	t->rt_param.pfair = NULL;
@@ -680,7 +681,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 
 	BUG_ON(!is_realtime(task));
 
-	spin_lock_irqsave(&pfair_lock, flags);
+	raw_spin_lock_irqsave(&pfair_lock, flags);
 	release_at(task, start);
 	release = time2quanta(start, CEIL);
 
@@ -698,7 +699,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 	 */
 	tsk_pfair(task)->sporadic_release = 0;
 
-	spin_unlock_irqrestore(&pfair_lock, flags);
+	raw_spin_unlock_irqrestore(&pfair_lock, flags);
 }
 
 static void init_subtask(struct subtask* sub, unsigned long i,
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 3767b30e610a..3543b7baff53 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -187,7 +187,7 @@ struct sched_plugin *litmus = &linux_sched_plugin;
 
 /* the list of registered scheduling plugins */
 static LIST_HEAD(sched_plugins);
-static DEFINE_SPINLOCK(sched_plugins_lock);
+static DEFINE_RAW_SPINLOCK(sched_plugins_lock);
 
 #define CHECK(func) {\
 	if (!plugin->func) \
@@ -220,9 +220,9 @@ int register_sched_plugin(struct sched_plugin* plugin)
 	if (!plugin->release_at)
 		plugin->release_at = release_at;
 
-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_add(&plugin->list, &sched_plugins);
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 
 	return 0;
 }
@@ -234,7 +234,7 @@ struct sched_plugin* find_sched_plugin(const char* name)
 	struct list_head *pos;
 	struct sched_plugin *plugin;
 
-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_for_each(pos, &sched_plugins) {
 		plugin = list_entry(pos, struct sched_plugin, list);
 		if (!strcmp(plugin->plugin_name, name))
@@ -243,7 +243,7 @@ struct sched_plugin* find_sched_plugin(const char* name)
 	plugin = NULL;
 
 out_unlock:
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 	return plugin;
 }
 
@@ -253,13 +253,13 @@ int print_sched_plugins(char* buf, int max)
 	struct list_head *pos;
 	struct sched_plugin *plugin;
 
-	spin_lock(&sched_plugins_lock);
+	raw_spin_lock(&sched_plugins_lock);
 	list_for_each(pos, &sched_plugins) {
 		plugin = list_entry(pos, struct sched_plugin, list);
 		count += snprintf(buf + count, max - count, "%s\n", plugin->plugin_name);
 		if (max - count <= 0)
 			break;
 	}
-	spin_unlock(&sched_plugins_lock);
+	raw_spin_unlock(&sched_plugins_lock);
 	return count;
 }
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 7f71ecfaaaae..7a548bf5162e 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -131,7 +131,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	int out_of_time, sleep, preempt,
 	    np, exists, blocks, resched;
 
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);
 
 	/* sanity checking
 	 * differently from gedf, when a task exits (dead)
@@ -201,7 +201,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	}
 
 	pedf->scheduled = next;
-	spin_unlock(&pedf->slock);
+	raw_spin_unlock(&pedf->slock);
 
 	return next;
 }
@@ -224,7 +224,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* The task should be running in the queue, otherwise signal
 	 * code will try to wake it up with fatal consequences.
 	 */
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	if (running) {
 		/* there shouldn't be anything else running at the time */
 		BUG_ON(pedf->scheduled);
@@ -234,7 +234,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
 		/* maybe we have to reschedule */
 		preempt(pedf);
 	}
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void psnedf_task_wake_up(struct task_struct *task)
@@ -245,7 +245,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	lt_t now;
 
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	BUG_ON(is_queued(task));
 	/* We need to take suspensions because of semaphores into
 	 * account! If a job resumes after being suspended due to acquiring
@@ -270,7 +270,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 	if (pedf->scheduled != task)
 		requeue(task, edf);
 
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 	TRACE_TASK(task, "wake up done\n");
 }
 
@@ -289,7 +289,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	psnedf_domain_t* pedf = task_pedf(t);
 	rt_domain_t* edf;
 
-	spin_lock_irqsave(&pedf->slock, flags);
+	raw_spin_lock_irqsave(&pedf->slock, flags);
 	if (is_queued(t)) {
 		/* dequeue */
 		edf = task_edf(t);
@@ -301,7 +301,7 @@ static void psnedf_task_exit(struct task_struct * t)
 	TRACE_TASK(t, "RIP, now reschedule\n");
 
 	preempt(pedf);
-	spin_unlock_irqrestore(&pedf->slock, flags);
+	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 #ifdef CONFIG_FMLP
@@ -321,7 +321,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 	edf = task_edf(new_waiter);
 
 	/* interrupts already disabled */
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);
 
 	/* store new highest-priority task */
 	sem->hp.cpu_task[cpu] = new_waiter;
@@ -346,7 +346,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 		if (edf_preemption_needed(edf, current))
 			preempt(pedf);
 
-		spin_unlock(&pedf->slock);
+		raw_spin_unlock(&pedf->slock);
 	}
 
 	return 0;
@@ -413,7 +413,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	/* Always check for delayed preemptions that might have become
 	 * necessary due to non-preemptive execution.
 	 */
-	spin_lock(&pedf->slock);
+	raw_spin_lock(&pedf->slock);
 
 	/* Reset inh_task to NULL. */
 	current->rt_param.inh_task = NULL;
@@ -422,7 +422,7 @@ static long psnedf_return_priority(struct pi_semaphore *sem)
 	if (edf_preemption_needed(edf, current))
 		preempt(pedf);
 
-	spin_unlock(&pedf->slock);
+	raw_spin_unlock(&pedf->slock);
 
 
 	return ret;