author		Peter Zijlstra <peterz@infradead.org>	2008-07-30 07:26:57 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-01 04:46:48 -0400
commit		5e710e37bde120bb069f691bee68e69ef4393173 (patch)
tree		f622aa07d6e1fb5851bd814a98945c7fb1ff9cd2
parent		e0fdace10e75dac67d906213b780ff1b1a4cc360 (diff)
lockdep: change scheduler annotation
While thinking about David's graph walk lockdep patch it _finally_ dawned on me
that there is no reason we have a lock class per cpu ...

Sorry for being dense :-/

The below changes the annotation from a lock class per cpu to a single nested
lock, as the scheduler never holds more than 2 rq locks at a time anyway.

If there was code requiring holding all rq locks this would not work and the
original annotation would be the only option, but that not being the case, this
is a much lighter one.

Compiles and boots on a 2-way x86_64.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
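For readers unfamiliar with the annotation, a minimal standalone sketch of the
pattern the patch switches to follows: both locks share a single lock class,
the two locks are taken in address order, and the second acquisition is marked
with spin_lock_nested()/SINGLE_DEPTH_NESTING so lockdep accepts it instead of
reporting recursion on the same class. The struct and helper names below are
illustrative only and are not part of kernel/sched.c.

/*
 * Illustrative sketch only -- 'struct box' and lock_pair() are made-up
 * names, not part of this patch.  The pattern is the one the patch
 * adopts: one lock class for all instances, ordering by address, and
 * the second acquisition annotated as nested.
 */
#include <linux/spinlock.h>

struct box {
	spinlock_t lock;
};

/* Caller guarantees a != b, as double_rq_lock() does for rq1/rq2. */
static void lock_pair(struct box *a, struct box *b)
{
	if (a < b) {
		spin_lock(&a->lock);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&b->lock);
		spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
	}
}

static void unlock_pair(struct box *a, struct box *b)
{
	spin_unlock(&a->lock);
	spin_unlock(&b->lock);
}

This only stays correct because no scheduler path ever holds more than two rq
locks at once; code that needed to hold all rq locks would require per-lock
classes again, as the message above notes.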
 kernel/sched.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0236958addcb..655f1db26b12 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -600,7 +600,6 @@ struct rq {
 	/* BKL stats */
 	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	} else {
 		if (rq1 < rq2) {
 			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
 			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -2805,10 +2804,10 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
@@ -7998,7 +7997,6 @@ void __init sched_init(void)
 
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);