path: root/lib/kernel_lock.c
author    Arnd Bergmann <arnd@arndb.de>    2011-01-25 16:52:22 -0500
committer Arnd Bergmann <arnd@arndb.de>    2011-03-05 04:56:00 -0500
commit    4ba8216cd90560bc402f52076f64d8546e8aefcb (patch)
tree      f64c272085c833b36755b5552a726f21eed3d142 /lib/kernel_lock.c
parent    ae7eb8979ccfa5e9e888101b9c940f20bd0f4115 (diff)
BKL: That's all, folks
This removes the implementation of the big kernel lock, at last. A lot
of people have worked on this in the past, so the credit for this patch
should be with everyone who participated in the hunt. The names on the
Cc list are the people who were the most active in this, according to
the recorded git history, in alphabetical order.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Alan Cox <alan@linux.intel.com>
Cc: Alessio Igor Bogani <abogani@texware.it>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Hendry <andrew.hendry@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Hans Verkuil <hverkuil@xs4all.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Jan Blunck <jblunck@infradead.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Oliver Neukum <oliver@neukum.org>
Cc: Paul Menage <menage@google.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'lib/kernel_lock.c')
-rw-r--r--  lib/kernel_lock.c  136
1 file changed, 0 insertions(+), 136 deletions(-)
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
deleted file mode 100644
index d80e1226586..00000000000
--- a/lib/kernel_lock.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * lib/kernel_lock.c
- *
- * This is the traditional BKL - big kernel lock. Largely
- * relegated to obsolescence, but used by various less
- * important (or lazy) subsystems.
- */
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
-#include <linux/smp_lock.h>
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - do_raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-	while (!do_raw_spin_trylock(&kernel_flag)) {
-		if (need_resched())
-			return -EAGAIN;
-		cpu_relax();
-	}
-	preempt_disable();
-	return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-	do_raw_spin_unlock(&kernel_flag);
-	preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * do_raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-	preempt_disable();
-	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
-		/*
-		 * If preemption was disabled even before this
-		 * was called, there's nothing we can be polite
-		 * about - just spin.
-		 */
-		if (preempt_count() > 1) {
-			do_raw_spin_lock(&kernel_flag);
-			return;
-		}
-
-		/*
-		 * Otherwise, let's wait for the kernel lock
-		 * with preemption enabled..
-		 */
-		do {
-			preempt_enable();
-			while (raw_spin_is_locked(&kernel_flag))
-				cpu_relax();
-			preempt_disable();
-		} while (!do_raw_spin_trylock(&kernel_flag));
-	}
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-	do_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-	/*
-	 * the BKL is not covered by lockdep, so we open-code the
-	 * unlocking sequence (and thus avoid the dep-chain ops):
-	 */
-	do_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc _lock_kernel(const char *func, const char *file, int line)
-{
-	int depth = current->lock_depth + 1;
-
-	if (likely(!depth)) {
-		might_sleep();
-		__lock_kernel();
-	}
-	current->lock_depth = depth;
-}
-
-void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
-{
-	BUG_ON(current->lock_depth < 0);
-	if (likely(--current->lock_depth < 0))
-		__unlock_kernel();
-}
-
-EXPORT_SYMBOL(_lock_kernel);
-EXPORT_SYMBOL(_unlock_kernel);
-
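For context, a minimal sketch of how legacy code used the interface deleted above; this is not part of the commit, and the function names legacy_helper() and legacy_ioctl_body() are hypothetical. It illustrates the two properties the removed comments describe: lock_kernel()/unlock_kernel() (from <linux/smp_lock.h>, wrapping _lock_kernel()/_unlock_kernel()) nest via current->lock_depth, and the scheduler transparently drops and reacquires kernel_flag around schedule() via __release_kernel_lock()/__reacquire_kernel_lock().

/*
 * Hypothetical pre-removal usage sketch, assuming a kernel that still
 * provides <linux/smp_lock.h>.
 */
#include <linux/smp_lock.h>

static void legacy_helper(void)
{
	lock_kernel();		/* nested: only bumps current->lock_depth */
	/* ... touch state protected by the BKL ... */
	unlock_kernel();	/* depth drops back; the lock stays held */
}

static int legacy_ioctl_body(void)
{
	lock_kernel();		/* lock_depth -1 -> 0: actually takes kernel_flag */
	legacy_helper();	/* recursive acquisition is legal */
	/* sleeping here is fine: the BKL is released over schedule() */
	unlock_kernel();	/* lock_depth back to -1: kernel_flag released */
	return 0;
}

With this commit, callers like the sketch above no longer compile; they had already been converted to mutexes or subsystem-specific locks before the BKL itself could be deleted.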