author	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:33 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:33 -0500
commit	6478d8800b75253b2a934ddcb734e13ade023ad0 (patch)
tree	df4017269b8755735578445c0a8a9e8b3b2615e9 /lib
parent	58b8a73ab8becfcaea84abc2a06038281efa4c8a (diff)
sched: remove the !PREEMPT_BKL code
remove the !PREEMPT_BKL code.
this removes 160 lines of legacy code.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'lib')
-rw-r--r--	lib/kernel_lock.c	123
1 files changed, 0 insertions, 123 deletions
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index f73e2f8c308f..812dbf00844b 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 
-#ifdef CONFIG_PREEMPT_BKL
 /*
  * The 'big kernel semaphore'
  *
@@ -86,128 +85,6 @@ void __lockfunc unlock_kernel(void)
 	up(&kernel_sem);
 }
 
-#else
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - _raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-	while (!_raw_spin_trylock(&kernel_flag)) {
-		if (test_thread_flag(TIF_NEED_RESCHED))
-			return -EAGAIN;
-		cpu_relax();
-	}
-	preempt_disable();
-	return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-	preempt_disable();
-	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
-		/*
-		 * If preemption was disabled even before this
-		 * was called, there's nothing we can be polite
-		 * about - just spin.
-		 */
-		if (preempt_count() > 1) {
-			_raw_spin_lock(&kernel_flag);
-			return;
-		}
-
-		/*
-		 * Otherwise, let's wait for the kernel lock
-		 * with preemption enabled..
-		 */
-		do {
-			preempt_enable();
-			while (spin_is_locked(&kernel_flag))
-				cpu_relax();
-			preempt_disable();
-		} while (!_raw_spin_trylock(&kernel_flag));
-	}
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-	_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-	/*
-	 * the BKL is not covered by lockdep, so we open-code the
-	 * unlocking sequence (and thus avoid the dep-chain ops):
-	 */
-	_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc lock_kernel(void)
-{
-	int depth = current->lock_depth+1;
-	if (likely(!depth))
-		__lock_kernel();
-	current->lock_depth = depth;
-}
-
-void __lockfunc unlock_kernel(void)
-{
-	BUG_ON(current->lock_depth < 0);
-	if (likely(--current->lock_depth < 0))
-		__unlock_kernel();
-}
-
-#endif
-
 EXPORT_SYMBOL(lock_kernel);
 EXPORT_SYMBOL(unlock_kernel);
 
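Background note (not part of the commit): the code removed above is the spinlock flavour of the Big Kernel Lock, and the recursion bookkeeping it does through current->lock_depth is the same scheme the surviving semaphore flavour keeps. The sketch below models only that nesting logic in ordinary userspace C, with a pthread mutex standing in for the kernel lock and a hypothetical main() as the caller; it illustrates the behaviour and is not kernel code.

/*
 * Userspace model of the BKL recursion bookkeeping: only the
 * outermost lock_kernel() takes the underlying lock, and only the
 * matching outermost unlock_kernel() releases it.  lock_depth == -1
 * means "this task does not hold the BKL" (kernel: current->lock_depth).
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t kernel_flag = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_depth = -1;	/* per-task nesting counter */

static void lock_kernel(void)
{
	int depth = lock_depth + 1;

	if (depth == 0)			/* outermost call: actually take the lock */
		pthread_mutex_lock(&kernel_flag);
	lock_depth = depth;
}

static void unlock_kernel(void)
{
	assert(lock_depth >= 0);	/* kernel: BUG_ON(current->lock_depth < 0) */

	if (--lock_depth < 0)		/* outermost unlock: actually release */
		pthread_mutex_unlock(&kernel_flag);
}

int main(void)
{
	lock_kernel();
	lock_kernel();			/* nested call: depth 0 -> 1, no deadlock */
	unlock_kernel();
	unlock_kernel();		/* depth drops back to -1, lock released */
	printf("lock_depth after release: %d\n", lock_depth);
	return 0;
}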