author     Steven Rostedt <rostedt@goodmis.org>    2006-09-29 04:59:44 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-09-29 12:18:09 -0400
commit     db630637b2f192bea2ba1c000e9cbe4e542a03ea
tree       87866cf839510cc2e434730bc1f33fe4264f25a1
parent     c24c95a085c6b52c11c2f5afecc38b0ca143cdae
[PATCH] clean up and remove some extra spinlocks from rtmutex
Oleg brought up some interesting points about grabbing the pi_lock for some
protections. In that discussion, I realized that there are some places
where the pi_lock is being grabbed when it really isn't necessary. This
patch also does a little bit of cleanup.
This patch basically does three things:
1) renames the "boost" variable to "chain_walk", since it is also used in
   the debugging case, where nothing is going to be boosted. The new name
   better describes what the test is going to do if it succeeds.
2) moves get_task_struct to just before the unlocking of the wait_lock.
   This removes duplicate code and makes it a little easier to read. The
   owner won't go away while either the pi_lock or the wait_lock is held.
3) removes the pi_lock taking and the owner-blocked check completely from
   the debugging case. Grabbing the lock, doing the check, and then
   releasing the lock is just too full of races. It's just as good to go
   ahead and call the chain-walk function (rt_mutex_adjust_prio_chain),
   since after releasing the lock the owner can block anyway, and we would
   have missed that. For the debug case, we really do want to do the chain
   walk to test for deadlocks anyway. (A stand-alone sketch of the
   resulting flow appears after the sign-offs below.)
[oleg@tv-sign.ru: more of the same]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Esben Nielsen <nielsen.esben@googlemail.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
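
For illustration, here is a stand-alone, user-space sketch of the locking
pattern that task_blocks_on_rt_mutex() ends up with after this patch: the
owner's pi_lock is taken only for the pi_blocked_on test, the debug
deadlock case simply forces the chain walk with no extra locking, and the
single get_task_struct() reference is taken under wait_lock just before
that lock is dropped. This is a mock, not kernel code: pthread mutexes
stand in for the spinlocks, an atomic counter stands in for the task
refcount, and every struct and helper name below is invented for the
example; only the control flow follows the patch.

/*
 * User-space mock of the pattern described above.  All names are
 * illustrative; pthread mutexes stand in for the kernel spinlocks.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	atomic_int refcount;
	bool pi_blocked_on;		/* "the owner is itself blocked" */
	pthread_mutex_t pi_lock;
};

struct toy_rt_mutex {
	pthread_mutex_t wait_lock;
	struct toy_task *owner;
};

static void get_task_struct(struct toy_task *t) { atomic_fetch_add(&t->refcount, 1); }
static void put_task_struct(struct toy_task *t) { atomic_fetch_sub(&t->refcount, 1); }

/* Stand-in for rt_mutex_adjust_prio_chain(): drops the reference it was handed. */
static void toy_adjust_prio_chain(struct toy_task *owner)
{
	put_task_struct(owner);
}

/* Called with lock->wait_lock held, mirroring task_blocks_on_rt_mutex(). */
static int toy_task_blocks_on_rt_mutex(struct toy_rt_mutex *lock, bool detect_deadlock)
{
	struct toy_task *owner = lock->owner;
	int chain_walk = 0;

	/* The owner's pi_lock is needed only for the pi_blocked_on test ... */
	pthread_mutex_lock(&owner->pi_lock);
	if (owner->pi_blocked_on)
		chain_walk = 1;
	pthread_mutex_unlock(&owner->pi_lock);

	/* ... the debug case just forces the walk, with no extra locking. */
	if (detect_deadlock)
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner cannot go away while wait_lock is still held, so the
	 * single reference is taken here, right before the lock is dropped.
	 */
	get_task_struct(owner);
	pthread_mutex_unlock(&lock->wait_lock);

	toy_adjust_prio_chain(owner);		/* reference dropped in there */

	pthread_mutex_lock(&lock->wait_lock);	/* caller expects wait_lock held on return */
	return 0;
}

int main(void)
{
	struct toy_task owner = {
		.refcount = 1,
		.pi_blocked_on = true,
		.pi_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	struct toy_rt_mutex lock = {
		.wait_lock = PTHREAD_MUTEX_INITIALIZER,
		.owner = &owner,
	};

	pthread_mutex_lock(&lock.wait_lock);
	toy_task_blocks_on_rt_mutex(&lock, false);
	pthread_mutex_unlock(&lock.wait_lock);

	printf("owner refcount back to %d\n", atomic_load(&owner.refcount));
	return 0;
}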
-rw-r--r--   kernel/rtmutex.c   51
1 file changed, 25 insertions, 26 deletions
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 3e13a1e5856..4ab17da46fd 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -251,6 +251,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 
 	/* Grab the next task */
 	task = rt_mutex_owner(lock);
+	get_task_struct(task);
 	spin_lock_irqsave(&task->pi_lock, flags);
 
 	if (waiter == rt_mutex_top_waiter(lock)) {
@@ -269,7 +270,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		__rt_mutex_adjust_prio(task);
 	}
 
-	get_task_struct(task);
 	spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	top_waiter = rt_mutex_top_waiter(lock);
@@ -409,7 +409,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *top_waiter = waiter;
 	unsigned long flags;
-	int boost = 0, res;
+	int chain_walk = 0, res;
 
 	spin_lock_irqsave(&current->pi_lock, flags);
 	__rt_mutex_adjust_prio(current);
@@ -433,25 +433,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
 
 		__rt_mutex_adjust_prio(owner);
-		if (owner->pi_blocked_on) {
-			boost = 1;
-			/* gets dropped in rt_mutex_adjust_prio_chain()! */
-			get_task_struct(owner);
-		}
-		spin_unlock_irqrestore(&owner->pi_lock, flags);
-	}
-	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
-		spin_lock_irqsave(&owner->pi_lock, flags);
-		if (owner->pi_blocked_on) {
-			boost = 1;
-			/* gets dropped in rt_mutex_adjust_prio_chain()! */
-			get_task_struct(owner);
-		}
+		if (owner->pi_blocked_on)
+			chain_walk = 1;
 		spin_unlock_irqrestore(&owner->pi_lock, flags);
 	}
-	if (!boost)
+	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
+		chain_walk = 1;
+
+	if (!chain_walk)
 		return 0;
 
+	/*
+	 * The owner can't disappear while holding a lock,
+	 * so the owner struct is protected by wait_lock.
+	 * Gets dropped in rt_mutex_adjust_prio_chain()!
+	 */
+	get_task_struct(owner);
+
 	spin_unlock(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
@@ -532,7 +530,7 @@ static void remove_waiter(struct rt_mutex *lock,
 	int first = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
 	unsigned long flags;
-	int boost = 0;
+	int chain_walk = 0;
 
 	spin_lock_irqsave(&current->pi_lock, flags);
 	plist_del(&waiter->list_entry, &lock->wait_list);
@@ -554,19 +552,20 @@ static void remove_waiter(struct rt_mutex *lock,
 		}
 		__rt_mutex_adjust_prio(owner);
 
-		if (owner->pi_blocked_on) {
-			boost = 1;
-			/* gets dropped in rt_mutex_adjust_prio_chain()! */
-			get_task_struct(owner);
-		}
+		if (owner->pi_blocked_on)
+			chain_walk = 1;
+
 		spin_unlock_irqrestore(&owner->pi_lock, flags);
 	}
 
 	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
 
-	if (!boost)
+	if (!chain_walk)
 		return;
 
+	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+	get_task_struct(owner);
+
 	spin_unlock(&lock->wait_lock);
 
 	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
@@ -592,10 +591,10 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 		return;
 	}
 
-	/* gets dropped in rt_mutex_adjust_prio_chain()! */
-	get_task_struct(task);
 	spin_unlock_irqrestore(&task->pi_lock, flags);
 
+	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+	get_task_struct(task);
 	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
 }
 