author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2008-08-11 03:30:22 -0400
committer  Ingo Molnar <mingo@elte.hu>               2008-08-11 03:30:22 -0400
commit     1b12bbc747560ea68bcc132c3d05699e52271da0 (patch)
tree       0e0fe5b7fe07d411251eebdd053e9e7793820248 /kernel/sched.c
parent     64aa348edc617dea17bbd01ddee4e47886d5ec8c (diff)
lockdep: re-annotate scheduler runqueues
Instead of using a per-rq lock class, use the regular nesting operations.
However, take extra care with double_lock_balance(): it can release the
already-held rq->lock and re-acquire it, thereby changing its nesting
subclass.
So what can happen is:

	spin_lock(rq->lock);		// this rq, subclass 0

	double_lock_balance(rq, other_rq);
	//	release rq->lock
	//	acquire other_rq->lock, subclass 0
	//	acquire rq->lock, subclass 1

	spin_unlock(other_rq->lock);

leaving you with rq->lock held in subclass 1.
So a subsequent double_lock_balance() call can try to nest a subclass 1
lock while already holding a subclass 1 lock.
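For reference, the subclass-1 acquisition comes from the lock-ordering
path inside double_lock_balance(). A condensed sketch of that path (the
assumed shape of the function around this commit; the irqs_disabled()
sanity check and sparse annotations are omitted):

	static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
	{
		int ret = 0;

		if (unlikely(!spin_trylock(&busiest->lock))) {
			if (busiest < this_rq) {
				/* enforce lock ordering: lower address first */
				spin_unlock(&this_rq->lock);
				spin_lock(&busiest->lock);
				/* re-acquired while busiest->lock is held, so
				 * lockdep now tracks this_rq->lock in subclass 1 */
				spin_lock_nested(&this_rq->lock,
						 SINGLE_DEPTH_NESTING);
				ret = 1;
			} else
				spin_lock_nested(&busiest->lock,
						 SINGLE_DEPTH_NESTING);
		}
		return ret;
	}

The return value tells the caller whether this_rq->lock was dropped and
re-taken along the way.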
Fix this by introducing double_unlock_balance(), which releases the other
rq's lock and also resets the subclass of this rq's lock back to 0.
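As an illustration (a sketch of the intended usage pattern, not code
copied from the patch), every balancing section is now bracketed by the
two helpers so the subclass annotation is always restored:

	spin_lock(&this_rq->lock);		/* this_rq->lock, subclass 0 */

	double_lock_balance(this_rq, busiest);	/* may re-take this_rq->lock
						 * in subclass 1 */

	/* ... move tasks between the two runqueues ... */

	double_unlock_balance(this_rq, busiest);/* busiest->lock released;
						 * this_rq->lock annotated
						 * back to subclass 0 */

	spin_unlock(&this_rq->lock);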
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')

 -rw-r--r--  kernel/sched.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 655f1db26b12..9b2b6a85577d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2812,6 +2812,13 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3636,7 +3643,7 @@ redo:
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3751,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ