author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-08-11 03:30:22 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-11 03:30:22 -0400
commit		1b12bbc747560ea68bcc132c3d05699e52271da0 (patch)
tree		0e0fe5b7fe07d411251eebdd053e9e7793820248
parent		64aa348edc617dea17bbd01ddee4e47886d5ec8c (diff)
lockdep: re-annotate scheduler runqueues
Instead of using a per-rq lock class, use the regular nesting operations.
However, take extra care with double_lock_balance() as it can release the
already held rq->lock (and therefore change its nesting class).
So what can happen is:
  spin_lock(rq->lock);		// this rq, subclass 0
  double_lock_balance(rq, other_rq);
    // release rq->lock
    // acquire other_rq->lock, subclass 0
    // acquire rq->lock, subclass 1
  spin_unlock(other_rq->lock);
This leaves rq->lock held in subclass 1, so a subsequent
double_lock_balance() call can try to nest a subclass 1 lock while
already holding a subclass 1 lock, which lockdep reports as possible
recursive locking.
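For context, a minimal sketch of the double_lock_balance() ordering
logic that produces this drift (simplified from the scheduler code of
this period, not quoted from this patch): the two rq locks are ordered
by address, and on the slow path this rq's lock is dropped and re-taken
nested inside the other one, i.e. as subclass 1:

  static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
  	__releases(this_rq->lock)
  	__acquires(busiest->lock)
  	__acquires(this_rq->lock)
  {
  	int ret = 0;

  	if (unlikely(!spin_trylock(&busiest->lock))) {
  		if (busiest < this_rq) {
  			/* Honour the global rq lock order: drop ours first. */
  			spin_unlock(&this_rq->lock);
  			spin_lock(&busiest->lock);
  			/* Re-taken nested inside busiest->lock: subclass 1. */
  			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
  			ret = 1;
  		} else
  			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
  	}
  	return ret;
  }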
Fix this by introducing double_unlock_balance(), which releases the
other rq's lock and also resets the subclass of this rq's lock to 0.
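The resulting caller pattern (hypothetical caller, not part of this
patch) confines the subclass drift to the window between the two
balance calls:

  spin_lock(&this_rq->lock);		/* this_rq->lock, subclass 0 */
  double_lock_balance(this_rq, busiest);	/* may re-take this_rq->lock
  					 * as subclass 1 (see above) */
  /* ... migrate tasks between the two runqueues ... */
  double_unlock_balance(this_rq, busiest);	/* drops busiest->lock, resets
  					 * this_rq->lock to subclass 0 */
  spin_unlock(&this_rq->lock);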
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 kernel/sched.c    | 11 +++++++++--
 kernel/sched_rt.c |  8 +++++---
 2 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 655f1db26b12..9b2b6a85577d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2812,6 +2812,13 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3636,7 +3643,7 @@ redo:
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3751,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 908c04f9dad0..6163e4cf885b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			break;
 
 		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
 		lowest_rq = NULL;
 	}
 
@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 
 	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
 	ret = 1;
 out:
@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 		}
  skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}
 
 	return ret;