 include/linux/hardirq.h  |   6 +-
 include/linux/smp_lock.h |  14 +-
 kernel/Kconfig.preempt   |   4 --
 kernel/sched.c           |  19 +++--
 lib/kernel_lock.c        | 123 +----
 5 files changed, 5 insertions(+), 161 deletions(-)
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8d302298a161..2961ec788046 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -72,11 +72,7 @@
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
 
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-#endif
+#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 
 #ifdef CONFIG_PREEMPT
 # define PREEMPT_CHECK_OFFSET	1
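
Note: with the spinlock flavour of the BKL gone, holding the kernel lock no longer contributes to preempt_count(), so in_atomic() collapses to the plain preempt_count() test on every configuration. A minimal user-space model of the surviving check follows; the PREEMPT_ACTIVE value is illustrative, not the real per-architecture constant.

#include <stdio.h>

/* Illustrative stand-in; the real PREEMPT_ACTIVE is per-architecture. */
#define PREEMPT_ACTIVE	0x10000000u

static unsigned int preempt_count;

static int in_atomic(void)
{
	/* Any nesting besides the PREEMPT_ACTIVE marker means atomic context. */
	return (preempt_count & ~PREEMPT_ACTIVE) != 0;
}

int main(void)
{
	preempt_count = 0;
	printf("task context:        in_atomic() = %d\n", in_atomic());
	preempt_count = PREEMPT_ACTIVE;		/* marker alone is masked out */
	printf("being preempted:     in_atomic() = %d\n", in_atomic());
	preempt_count = PREEMPT_ACTIVE | 1;	/* plus one preempt_disable() */
	printf("preemption disabled: in_atomic() = %d\n", in_atomic());
	return 0;
}

The PREEMPT_ACTIVE bit is masked out because it only marks a task in the middle of being preempted, not a context that is forbidden to sleep.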
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 58962c51dee1..aab3a4cff4e1 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -17,22 +17,10 @@ extern void __lockfunc __release_kernel_lock(void);
 		__release_kernel_lock();	\
 } while (0)
 
-/*
- * Non-SMP kernels will never block on the kernel lock,
- * so we are better off returning a constant zero from
- * reacquire_kernel_lock() so that the compiler can see
- * it at compile-time.
- */
-#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
-# define return_value_on_smp return
-#else
-# define return_value_on_smp
-#endif
-
 static inline int reacquire_kernel_lock(struct task_struct *task)
 {
 	if (unlikely(task->lock_depth >= 0))
-		return_value_on_smp __reacquire_kernel_lock();
+		return __reacquire_kernel_lock();
 	return 0;
 }
 
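
Note: the deleted return_value_on_smp macro let one wrapper body serve two configurations: with the spinlock BKL on SMP, __reacquire_kernel_lock() could fail and its status had to be propagated; in every other configuration it could not fail, the call was treated as void-like, and the wrapper fell through to a constant 0 the compiler could see. With only the semaphore implementation left, the call always returns an int and the plain `return` works everywhere. A toy, compilable reconstruction of the deleted trick (all names and values here are illustrative):

#include <stdio.h>

#define TOY_SMP 1	/* flip to 0 to model the fall-through variant */

#if TOY_SMP
# define return_value_on_smp return
static int __reacquire(void) { return -11; }	/* may fail: -EAGAIN */
#else
# define return_value_on_smp
static int __reacquire(void) { return 0; }	/* cannot fail */
#endif

static int reacquire(int lock_depth)
{
	if (lock_depth >= 0)
		return_value_on_smp __reacquire();
	return 0;	/* constant zero, visible at compile time */
}

int main(void)
{
	printf("held: %d, not held: %d\n", reacquire(0), reacquire(-1));
	return 0;
}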
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 4420ef427f83..0669b70fa6a3 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -52,10 +52,6 @@ config PREEMPT
 
 endchoice
 
-config PREEMPT_BKL
-	def_bool y
-	depends on SMP || PREEMPT
-
 config RCU_TRACE
 	bool "Enable tracing for RCU - currently stats in debugfs"
 	select DEBUG_FS
diff --git a/kernel/sched.c b/kernel/sched.c
index 22712b2e058a..629614ad0358 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3955,10 +3955,9 @@ EXPORT_SYMBOL(schedule);
 asmlinkage void __sched preempt_schedule(void)
 {
 	struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
 	struct task_struct *task = current;
 	int saved_lock_depth;
-#endif
+
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
 	 * we do not want to preempt the current task. Just return..
@@ -3974,14 +3973,10 @@ asmlinkage void __sched preempt_schedule(void)
 		 * clear ->lock_depth so that schedule() doesnt
 		 * auto-release the semaphore:
 		 */
-#ifdef CONFIG_PREEMPT_BKL
 		saved_lock_depth = task->lock_depth;
 		task->lock_depth = -1;
-#endif
 		schedule();
-#ifdef CONFIG_PREEMPT_BKL
 		task->lock_depth = saved_lock_depth;
-#endif
 		sub_preempt_count(PREEMPT_ACTIVE);
 
 		/*
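
Note: the hunks above make the save/clear/restore of ->lock_depth unconditional. A task that is involuntarily preempted must keep holding the BKL semaphore, so preempt_schedule() hides its lock_depth before calling schedule(), which otherwise auto-releases the lock for any task with lock_depth >= 0. A compilable user-space model of that interaction (a pthread mutex stands in for the kernel semaphore; all names are simplified stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kernel_sem = PTHREAD_MUTEX_INITIALIZER;
static int lock_depth = -1;		/* stands in for task->lock_depth */

static void model_schedule(void)
{
	if (lock_depth >= 0)		/* release_kernel_lock() */
		pthread_mutex_unlock(&kernel_sem);
	/* ... context switch happens here ... */
	if (lock_depth >= 0)		/* reacquire_kernel_lock() */
		pthread_mutex_lock(&kernel_sem);
}

static void model_preempt_schedule(void)
{
	int saved_lock_depth = lock_depth;

	lock_depth = -1;	/* hide the BKL from model_schedule() */
	model_schedule();	/* semaphore stays held across the switch */
	lock_depth = saved_lock_depth;
}

int main(void)
{
	pthread_mutex_lock(&kernel_sem);	/* lock_kernel() */
	lock_depth = 0;
	model_preempt_schedule();
	printf("still holding the BKL, depth %d\n", lock_depth);
	pthread_mutex_unlock(&kernel_sem);
	return 0;
}

Restoring saved_lock_depth afterwards preserves the recursion count, so a later unlock_kernel() still releases at the right depth.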
@@ -4002,10 +3997,9 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
 	struct thread_info *ti = current_thread_info();
-#ifdef CONFIG_PREEMPT_BKL
 	struct task_struct *task = current;
 	int saved_lock_depth;
-#endif
+
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
@@ -4017,16 +4011,12 @@ asmlinkage void __sched preempt_schedule_irq(void)
 		 * clear ->lock_depth so that schedule() doesnt
 		 * auto-release the semaphore:
 		 */
-#ifdef CONFIG_PREEMPT_BKL
 		saved_lock_depth = task->lock_depth;
 		task->lock_depth = -1;
-#endif
 		local_irq_enable();
 		schedule();
 		local_irq_disable();
-#ifdef CONFIG_PREEMPT_BKL
 		task->lock_depth = saved_lock_depth;
-#endif
 		sub_preempt_count(PREEMPT_ACTIVE);
 
 		/*
@@ -5241,11 +5231,8 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
-#else
 	task_thread_info(idle)->preempt_count = 0;
-#endif
+
 	/*
 	 * The idle tasks have their own, simple scheduling class:
 	 */
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index f73e2f8c308f..812dbf00844b 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 
-#ifdef CONFIG_PREEMPT_BKL
 /*
  * The 'big kernel semaphore'
  *
@@ -86,128 +85,6 @@ void __lockfunc unlock_kernel(void)
 	up(&kernel_sem);
 }
 
-#else
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - _raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-	while (!_raw_spin_trylock(&kernel_flag)) {
-		if (test_thread_flag(TIF_NEED_RESCHED))
-			return -EAGAIN;
-		cpu_relax();
-	}
-	preempt_disable();
-	return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-	preempt_disable();
-	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
-		/*
-		 * If preemption was disabled even before this
-		 * was called, there's nothing we can be polite
-		 * about - just spin.
-		 */
-		if (preempt_count() > 1) {
-			_raw_spin_lock(&kernel_flag);
-			return;
-		}
-
-		/*
-		 * Otherwise, let's wait for the kernel lock
-		 * with preemption enabled..
-		 */
-		do {
-			preempt_enable();
-			while (spin_is_locked(&kernel_flag))
-				cpu_relax();
-			preempt_disable();
-		} while (!_raw_spin_trylock(&kernel_flag));
-	}
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-	_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-	/*
-	 * the BKL is not covered by lockdep, so we open-code the
-	 * unlocking sequence (and thus avoid the dep-chain ops):
-	 */
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc lock_kernel(void)
-{
-	int depth = current->lock_depth+1;
-	if (likely(!depth))
-		__lock_kernel();
-	current->lock_depth = depth;
-}
-
-void __lockfunc unlock_kernel(void)
-{
-	BUG_ON(current->lock_depth < 0);
-	if (likely(--current->lock_depth < 0))
-		__unlock_kernel();
-}
-
-#endif
-
 EXPORT_SYMBOL(lock_kernel);
 EXPORT_SYMBOL(unlock_kernel);
 
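Note: after this patch, lib/kernel_lock.c contains only the semaphore-based BKL. lock_kernel()/unlock_kernel() keep a per-task recursion count in ->lock_depth and touch kernel_sem only at the outermost level, the same depth bookkeeping visible in the deleted spinlock variant above. A compilable user-space model of that recursion logic (a pthread mutex stands in for kernel_sem; illustrative only):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kernel_sem = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_depth = -1;	/* -1 == not held, like task->lock_depth */

static void lock_kernel(void)
{
	int depth = lock_depth + 1;

	if (depth == 0)			/* outermost acquisition only */
		pthread_mutex_lock(&kernel_sem);
	lock_depth = depth;
}

static void unlock_kernel(void)
{
	assert(lock_depth >= 0);	/* BUG_ON() in the kernel */
	if (--lock_depth < 0)		/* released the outermost level */
		pthread_mutex_unlock(&kernel_sem);
}

int main(void)
{
	lock_kernel();
	lock_kernel();			/* recursive: no deadlock */
	unlock_kernel();
	unlock_kernel();
	puts("recursive BKL model ok");
	return 0;
}

Because the remaining implementation is a semaphore, the BKL is now always preemptible and may be held across schedule(), which is what makes the lock_depth save/restore in kernel/sched.c sufficient.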