author     Thomas Gleixner <tglx@linutronix.de>    2009-11-17 08:54:03 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2009-12-14 17:55:33 -0500
commit     1d615482547584b9a8bb6316a58fed6ce90dd9ff
tree       21dae4b70acb3ce0bdaeeaee1dbd970be41d26e2 /kernel
parent     fe841226bd954fba4fd79f037a876053fe9c3217
sched: Convert pi_lock to raw_spinlock
Convert locks which cannot be sleeping locks on preempt-rt to
raw_spinlocks: in this patch, the task_struct pi_lock.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
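
On PREEMPT_RT, an ordinary spinlock_t is substituted by a sleeping,
rtmutex-based lock. pi_lock protects the priority-inheritance state that
the rtmutex and scheduler code themselves rely on, so it must remain a
true spinning lock; raw_spinlock_t keeps the non-sleeping behaviour on
all configurations. A minimal sketch of the distinction (the struct and
function names below are illustrative, not part of this patch):

    #include <linux/spinlock.h>

    struct my_pi_ctx {                      /* hypothetical stand-in */
            raw_spinlock_t pi_lock;         /* always spins, even on -rt */
            spinlock_t     other_lock;      /* may sleep on -rt */
    };

    static void touch_pi_state(struct my_pi_ctx *ctx)
    {
            unsigned long flags;

            /* Disables interrupts and busy-waits; never schedules. */
            raw_spin_lock_irqsave(&ctx->pi_lock, flags);
            /* ... modify PI state the scheduler depends on ... */
            raw_spin_unlock_irqrestore(&ctx->pi_lock, flags);
    }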
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c          |  2 +-
-rw-r--r--  kernel/fork.c          |  4 ++--
-rw-r--r--  kernel/futex.c         | 38 +++++++++++++++++++-------------------
-rw-r--r--  kernel/rtmutex-debug.c |  4 ++--
-rw-r--r--  kernel/rtmutex.c       | 58 +++++++++++++++++++++++-----------------------------------
-rw-r--r--  kernel/sched.c         | 12 ++++++------
6 files changed, 59 insertions(+), 59 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 6f50ef55a6f3..5962d7ccf243 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -933,7 +933,7 @@ NORET_TYPE void do_exit(long code)
 	 * an exiting task cleaning up the robust pi futexes.
 	 */
 	smp_mb();
-	spin_unlock_wait(&tsk->pi_lock);
+	raw_spin_unlock_wait(&tsk->pi_lock);
 
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
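
Note that raw_spin_unlock_wait() does not acquire the lock: together
with the preceding smp_mb() it merely waits until any concurrent holder
of the exiting task's pi_lock has dropped it. A simplified sketch of its
behaviour (the real version is arch-specific and adds memory barriers):

    static inline void my_unlock_wait(raw_spinlock_t *lock)
    {
            while (raw_spin_is_locked(lock))
                    cpu_relax();    /* spin without taking the lock */
    }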
diff --git a/kernel/fork.c b/kernel/fork.c
index 1415dc4598ae..9bd91447e052 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -939,9 +939,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 
 static void rt_mutex_init_task(struct task_struct *p)
 {
-	spin_lock_init(&p->pi_lock);
+	raw_spin_lock_init(&p->pi_lock);
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init(&p->pi_waiters, &p->pi_lock);
+	plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
 	p->pi_blocked_on = NULL;
 #endif
 }
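
The plist head records a pointer to the lock protecting it so that, with
CONFIG_DEBUG_PI_LIST, list operations can assert the lock is held. Since
pi_lock is now a raw_spinlock_t, the raw-aware initializer
plist_head_init_raw() has to be used. A sketch of the resulting init
pattern ("struct pi_owner" is a hypothetical stand-in for task_struct):

    #include <linux/plist.h>
    #include <linux/spinlock.h>

    struct pi_owner {
            raw_spinlock_t    pi_lock;
            struct plist_head pi_waiters;
    };

    static void init_pi_owner(struct pi_owner *p)
    {
            raw_spin_lock_init(&p->pi_lock);
            plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
    }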
diff --git a/kernel/futex.c b/kernel/futex.c
index 6af474df17bb..320b369d20b5 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -403,9 +403,9 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 	 * and has cleaned up the pi_state already
 	 */
 	if (pi_state->owner) {
-		spin_lock_irq(&pi_state->owner->pi_lock);
+		raw_spin_lock_irq(&pi_state->owner->pi_lock);
 		list_del_init(&pi_state->list);
-		spin_unlock_irq(&pi_state->owner->pi_lock);
+		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 
 		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
 	}
@@ -470,18 +470,18 @@ void exit_pi_state_list(struct task_struct *curr)
 	 * pi_state_list anymore, but we have to be careful
 	 * versus waiters unqueueing themselves:
 	 */
-	spin_lock_irq(&curr->pi_lock);
+	raw_spin_lock_irq(&curr->pi_lock);
 	while (!list_empty(head)) {
 
 		next = head->next;
 		pi_state = list_entry(next, struct futex_pi_state, list);
 		key = pi_state->key;
 		hb = hash_futex(&key);
-		spin_unlock_irq(&curr->pi_lock);
+		raw_spin_unlock_irq(&curr->pi_lock);
 
 		spin_lock(&hb->lock);
 
-		spin_lock_irq(&curr->pi_lock);
+		raw_spin_lock_irq(&curr->pi_lock);
 		/*
 		 * We dropped the pi-lock, so re-check whether this
 		 * task still owns the PI-state:
@@ -495,15 +495,15 @@ void exit_pi_state_list(struct task_struct *curr)
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
 		pi_state->owner = NULL;
-		spin_unlock_irq(&curr->pi_lock);
+		raw_spin_unlock_irq(&curr->pi_lock);
 
 		rt_mutex_unlock(&pi_state->pi_mutex);
 
 		spin_unlock(&hb->lock);
 
-		spin_lock_irq(&curr->pi_lock);
+		raw_spin_lock_irq(&curr->pi_lock);
 	}
-	spin_unlock_irq(&curr->pi_lock);
+	raw_spin_unlock_irq(&curr->pi_lock);
 }
 
 static int
@@ -558,7 +558,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	 * change of the task flags, we do this protected by
 	 * p->pi_lock:
 	 */
-	spin_lock_irq(&p->pi_lock);
+	raw_spin_lock_irq(&p->pi_lock);
 	if (unlikely(p->flags & PF_EXITING)) {
 		/*
 		 * The task is on the way out. When PF_EXITPIDONE is
@@ -567,7 +567,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 		 */
 		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
 
-		spin_unlock_irq(&p->pi_lock);
+		raw_spin_unlock_irq(&p->pi_lock);
 		put_task_struct(p);
 		return ret;
 	}
@@ -586,7 +586,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &p->pi_state_list);
 	pi_state->owner = p;
-	spin_unlock_irq(&p->pi_lock);
+	raw_spin_unlock_irq(&p->pi_lock);
 
 	put_task_struct(p);
 
@@ -794,16 +794,16 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 		}
 	}
 
-	spin_lock_irq(&pi_state->owner->pi_lock);
+	raw_spin_lock_irq(&pi_state->owner->pi_lock);
 	WARN_ON(list_empty(&pi_state->list));
 	list_del_init(&pi_state->list);
-	spin_unlock_irq(&pi_state->owner->pi_lock);
+	raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 
-	spin_lock_irq(&new_owner->pi_lock);
+	raw_spin_lock_irq(&new_owner->pi_lock);
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &new_owner->pi_state_list);
 	pi_state->owner = new_owner;
-	spin_unlock_irq(&new_owner->pi_lock);
+	raw_spin_unlock_irq(&new_owner->pi_lock);
 
 	spin_unlock(&pi_state->pi_mutex.wait_lock);
 	rt_mutex_unlock(&pi_state->pi_mutex);
@@ -1529,18 +1529,18 @@ retry:
 	 * itself.
 	 */
 	if (pi_state->owner != NULL) {
-		spin_lock_irq(&pi_state->owner->pi_lock);
+		raw_spin_lock_irq(&pi_state->owner->pi_lock);
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
-		spin_unlock_irq(&pi_state->owner->pi_lock);
+		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 	}
 
 	pi_state->owner = newowner;
 
-	spin_lock_irq(&newowner->pi_lock);
+	raw_spin_lock_irq(&newowner->pi_lock);
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &newowner->pi_state_list);
-	spin_unlock_irq(&newowner->pi_lock);
+	raw_spin_unlock_irq(&newowner->pi_lock);
 	return 0;
 
 	/*
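
The drop/retake dance in exit_pi_state_list() exists because hb->lock
stays a spinlock_t: on preempt-rt it may sleep, so it must never be
acquired while the raw pi_lock is held. The inner lock is therefore
released first, the outer lock taken, the inner one retaken, and the
state re-validated. Schematically (condensed from the hunks above):

    raw_spin_lock_irq(&curr->pi_lock);
    while (!list_empty(head)) {
            /* ... pick pi_state, compute hb ... */
            raw_spin_unlock_irq(&curr->pi_lock);  /* drop inner (raw) lock */
            spin_lock(&hb->lock);                 /* outer lock, may sleep on -rt */
            raw_spin_lock_irq(&curr->pi_lock);    /* retake inner lock */
            /* re-check ownership: it may have changed while unlocked */
    }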
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 5fcb4fe645e2..ddabb54bb5c8 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -37,8 +37,8 @@ do { \
 	if (rt_trace_on) { \
 		rt_trace_on = 0; \
 		console_verbose(); \
-		if (spin_is_locked(&current->pi_lock)) \
-			spin_unlock(&current->pi_lock); \
+		if (raw_spin_is_locked(&current->pi_lock)) \
+			raw_spin_unlock(&current->pi_lock); \
 	} \
 } while (0)
 
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 29bd4baf9e75..d33da470f9da 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -138,9 +138,9 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	__rt_mutex_adjust_prio(task);
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 }
 
 /*
@@ -195,7 +195,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	/*
 	 * Task can not go away as we did a get_task() before !
 	 */
-	spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	waiter = task->pi_blocked_on;
 	/*
@@ -232,7 +232,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 
 	lock = waiter->lock;
 	if (!spin_trylock(&lock->wait_lock)) {
-		spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		cpu_relax();
 		goto retry;
 	}
@@ -253,13 +253,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	plist_add(&waiter->list_entry, &lock->wait_list);
 
 	/* Release the task */
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 	put_task_struct(task);
 
 	/* Grab the next task */
 	task = rt_mutex_owner(lock);
 	get_task_struct(task);
-	spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	if (waiter == rt_mutex_top_waiter(lock)) {
 		/* Boost the owner */
@@ -277,7 +277,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		__rt_mutex_adjust_prio(task);
 	}
 
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	top_waiter = rt_mutex_top_waiter(lock);
 	spin_unlock(&lock->wait_lock);
@@ -288,7 +288,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		goto again;
 
 out_unlock_pi:
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
 	put_task_struct(task);
 
@@ -313,9 +313,9 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
 	if (pendowner == task)
 		return 1;
 
-	spin_lock_irqsave(&pendowner->pi_lock, flags);
+	raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
 	if (task->prio >= pendowner->prio) {
-		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
 		return 0;
 	}
 
@@ -325,7 +325,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
 	 * priority.
 	 */
 	if (likely(!rt_mutex_has_waiters(lock))) {
-		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
 		return 1;
 	}
 
@@ -333,7 +333,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
 	next = rt_mutex_top_waiter(lock);
 	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
 	__rt_mutex_adjust_prio(pendowner);
-	spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
 
 	/*
 	 * We are going to steal the lock and a waiter was
@@ -350,10 +350,10 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
 	 * might be task:
 	 */
 	if (likely(next->task != task)) {
-		spin_lock_irqsave(&task->pi_lock, flags);
+		raw_spin_lock_irqsave(&task->pi_lock, flags);
 		plist_add(&next->pi_list_entry, &task->pi_waiters);
 		__rt_mutex_adjust_prio(task);
-		spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 	}
 	return 1;
 }
@@ -420,7 +420,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	unsigned long flags;
 	int chain_walk = 0, res;
 
-	spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
 	waiter->lock = lock;
@@ -434,17 +434,17 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
 	task->pi_blocked_on = waiter;
 
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	if (waiter == rt_mutex_top_waiter(lock)) {
-		spin_lock_irqsave(&owner->pi_lock, flags);
+		raw_spin_lock_irqsave(&owner->pi_lock, flags);
 		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
 		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
 
 		__rt_mutex_adjust_prio(owner);
 		if (owner->pi_blocked_on)
 			chain_walk = 1;
-		spin_unlock_irqrestore(&owner->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 	}
 	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
 		chain_walk = 1;
@@ -483,7 +483,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 	struct task_struct *pendowner;
 	unsigned long flags;
 
-	spin_lock_irqsave(&current->pi_lock, flags);
+	raw_spin_lock_irqsave(&current->pi_lock, flags);
 
 	waiter = rt_mutex_top_waiter(lock);
 	plist_del(&waiter->list_entry, &lock->wait_list);
@@ -500,7 +500,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 
 	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
 
-	spin_unlock_irqrestore(&current->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
 	/*
 	 * Clear the pi_blocked_on variable and enqueue a possible
@@ -509,7 +509,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 	 * waiter with higher priority than pending-owner->normal_prio
 	 * is blocked on the unboosted (pending) owner.
 	 */
-	spin_lock_irqsave(&pendowner->pi_lock, flags);
+	raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
 
 	WARN_ON(!pendowner->pi_blocked_on);
 	WARN_ON(pendowner->pi_blocked_on != waiter);
@@ -523,7 +523,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 		next = rt_mutex_top_waiter(lock);
 		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
 	}
-	spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
 
 	wake_up_process(pendowner);
 }
@@ -541,15 +541,15 @@ static void remove_waiter(struct rt_mutex *lock,
 	unsigned long flags;
 	int chain_walk = 0;
 
-	spin_lock_irqsave(&current->pi_lock, flags);
+	raw_spin_lock_irqsave(&current->pi_lock, flags);
 	plist_del(&waiter->list_entry, &lock->wait_list);
 	waiter->task = NULL;
 	current->pi_blocked_on = NULL;
-	spin_unlock_irqrestore(&current->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
 	if (first && owner != current) {
 
-		spin_lock_irqsave(&owner->pi_lock, flags);
+		raw_spin_lock_irqsave(&owner->pi_lock, flags);
 
 		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
 
@@ -564,7 +564,7 @@ static void remove_waiter(struct rt_mutex *lock,
 		if (owner->pi_blocked_on)
 			chain_walk = 1;
 
-		spin_unlock_irqrestore(&owner->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 	}
 
 	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
@@ -592,15 +592,15 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	struct rt_mutex_waiter *waiter;
 	unsigned long flags;
 
-	spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	waiter = task->pi_blocked_on;
 	if (!waiter || waiter->list_entry.prio == task->prio) {
-		spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		return;
 	}
 
-	spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(task);
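
Within rt_mutex_adjust_prio_chain() the pi_lock/wait_lock ordering is
resolved with a trylock: blocking on wait_lock while holding pi_lock
could deadlock against a path that holds wait_lock and wants pi_lock, so
the chain walk backs off and retries instead. The pattern, condensed
from the @@ -232 hunk above:

    retry:
            raw_spin_lock_irqsave(&task->pi_lock, flags);
            /* ... */
            if (!spin_trylock(&lock->wait_lock)) {
                    raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                    cpu_relax();    /* let the lock holder make progress */
                    goto retry;
            }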
diff --git a/kernel/sched.c b/kernel/sched.c
index 01c5016e57f1..18cceeecce35 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6323,7 +6323,7 @@ recheck:
 	 * make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
 	 */
-	spin_lock_irqsave(&p->pi_lock, flags);
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	/*
 	 * To be able to change p->policy safely, the apropriate
 	 * runqueue lock must be held.
@@ -6333,7 +6333,7 @@ recheck:
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
 		__task_rq_unlock(rq);
-		spin_unlock_irqrestore(&p->pi_lock, flags);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		goto recheck;
 	}
 	update_rq_clock(rq);
@@ -6357,7 +6357,7 @@ recheck:
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
 	__task_rq_unlock(rq);
-	spin_unlock_irqrestore(&p->pi_lock, flags);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 	rt_mutex_adjust_pi(p);
 
@@ -9624,7 +9624,7 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+	plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
 #endif
 
 	/*
@@ -9749,13 +9749,13 @@ void normalize_rt_tasks(void)
 			continue;
 		}
 
-		spin_lock(&p->pi_lock);
+		raw_spin_lock(&p->pi_lock);
 		rq = __task_rq_lock(p);
 
 		normalize_task(rq, p);
 
 		__task_rq_unlock(rq);
-		spin_unlock(&p->pi_lock);
+		raw_spin_unlock(&p->pi_lock);
 	} while_each_thread(g, p);
 
 	read_unlock_irqrestore(&tasklist_lock, flags);
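
In sched_setscheduler() the nesting is the reverse of the futex path:
pi_lock is the outer lock, taken before the runqueue lock, so that no
PI waiters can arrive or leave while the task's priority and policy
change; the PI chain is re-adjusted only once both locks are dropped.
Condensed from the hunks above:

    raw_spin_lock_irqsave(&p->pi_lock, flags);
    rq = __task_rq_lock(p);             /* inner: runqueue lock */
    /* ... safely switch p->policy and p->prio ... */
    __task_rq_unlock(rq);
    raw_spin_unlock_irqrestore(&p->pi_lock, flags);

    rt_mutex_adjust_pi(p);              /* chain walk with no locks held */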