about summary refs log tree commit diff stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
author: Jonathan Corbet <corbet@lwn.net> 2011-04-22 13:19:10 -0400
committer: Ingo Molnar <mingo@elte.hu> 2011-04-24 07:18:38 -0400
commit625f2a378e5a10f45fdc37932fc9f8a21676de9e (patch)
tree1bf966a8e65463cdcc313c559533f032657c9dcf /kernel/sched.c
parentd3bf52e998056a6002b2aecfe1d25486376382ac (diff)
sched: Get rid of lock_depth
Neil Brown pointed out that lock_depth somehow escaped the BKL
removal work.  Let's get rid of it now.

Note that the perf scripting utilities still have a bunch of code
for dealing with common_lock_depth in tracepoints; I have left that
in place in case anybody wants to use that code with older kernels.

Suggested-by: Neil Brown <neilb@suse.de>
Signed-off-by: Jonathan Corbet <corbet@lwn.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110422111910.456c0e84@bike.lwn.net
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  11
1 file changed, 1 insertion(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8cb0a5769a16..9cde2dd229c9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4121,12 +4121,6 @@ static inline void schedule_debug(struct task_struct *prev)
 		profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
 	schedstat_inc(this_rq(), sched_count);
-#ifdef CONFIG_SCHEDSTATS
-	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), rq_sched_info.bkl_count);
-		schedstat_inc(prev, sched_info.bkl_count);
-	}
-#endif
 }
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
@@ -5852,11 +5846,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT)
-	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
 	task_thread_info(idle)->preempt_count = 0;
-#endif
+
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */