author		Linus Torvalds <torvalds@linux-foundation.org>	2011-03-16 20:21:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-16 20:21:00 -0400
commit		f74b9444192c60603020c61d7915b72893137edc (patch)
tree		8b1d16d373234038c2b045c9ceb3c33b93059e8a /lib
parent		7a6362800cb7d1d618a697a650c7aaed3eb39320 (diff)
parent		4ba8216cd90560bc402f52076f64d8546e8aefcb (diff)
Merge branch 'config' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/bkl
* 'config' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/bkl:
BKL: That's all, folks
fs/locks.c: Remove stale FIXME left over from BKL conversion
ipx: remove the BKL
appletalk: remove the BKL
x25: remove the BKL
ufs: remove the BKL
hpfs: remove the BKL
drivers: remove extraneous includes of smp_lock.h
tracing: don't trace the BKL
adfs: remove the big kernel lock
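Every conversion in the list above follows the same basic pattern: code that serialized itself through the global lock_kernel()/unlock_kernel() pair either drops the calls outright (where nothing shared actually needed them) or switches to a private per-subsystem lock. A minimal sketch of that pattern, assuming a hypothetical subsystem (the my_subsys_* names are illustrative, not taken from these patches):

/* Sketch of the usual BKL-removal conversion; my_subsys_* is hypothetical. */
#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_subsys_mutex);	/* private lock that replaces the BKL */
static int my_subsys_users;		/* state the BKL used to protect */

static long my_subsys_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* Before the conversion this was lock_kernel(); ... unlock_kernel(); */
	mutex_lock(&my_subsys_mutex);
	my_subsys_users++;
	mutex_unlock(&my_subsys_mutex);
	return 0;
}

The win is that contention becomes local: two unrelated subsystems no longer block each other on one global spinlock.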
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	9
-rw-r--r--	lib/Makefile		1
-rw-r--r--	lib/kernel_lock.c	143
3 files changed, 0 insertions, 153 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2b97418c67e2..6f440d82b58d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -470,15 +470,6 @@ config DEBUG_MUTEXES
 	  This feature allows mutex semantics violations to be detected and
 	  reported.
 
-config BKL
-	bool "Big Kernel Lock" if (SMP || PREEMPT)
-	default y
-	help
-	  This is the traditional lock that is used in old code instead
-	  of proper locking. All drivers that use the BKL should depend
-	  on this symbol.
-	  Say Y here unless you are working on removing the BKL.
-
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index b73ba01a818a..ef7ed71a6ffd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
-obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
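Note the symbol in the removed line: kernel_lock.o was built only when CONFIG_LOCK_KERNEL was set, so the lock_kernel()/unlock_kernel() wrappers had to fall back to no-ops when the BKL was configured out. A rough, reconstructed sketch of how include/linux/smp_lock.h dispatched to the _lock_kernel()/_unlock_kernel() functions deleted below (not copied verbatim from the header, though the signatures match the definitions in lib/kernel_lock.c):

/* Reconstructed sketch of the smp_lock.h wrappers; not the verbatim header. */
#ifdef CONFIG_LOCK_KERNEL

extern void __lockfunc _lock_kernel(const char *func, const char *file, int line);
extern void __lockfunc _unlock_kernel(const char *func, const char *file, int line);

#define lock_kernel()	_lock_kernel(__func__, __FILE__, __LINE__)
#define unlock_kernel()	_unlock_kernel(__func__, __FILE__, __LINE__)

#else

/* BKL configured out: the calls compile away entirely. */
#define lock_kernel()	do { } while (0)
#define unlock_kernel()	do { } while (0)

#endif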
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
deleted file mode 100644
index b135d04aa48a..000000000000
--- a/lib/kernel_lock.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * lib/kernel_lock.c
- *
- * This is the traditional BKL - big kernel lock. Largely
- * relegated to obsolescence, but used by various less
- * important (or lazy) subsystems.
- */
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
-#include <linux/smp_lock.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/bkl.h>
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - do_raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-	while (!do_raw_spin_trylock(&kernel_flag)) {
-		if (need_resched())
-			return -EAGAIN;
-		cpu_relax();
-	}
-	preempt_disable();
-	return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-	do_raw_spin_unlock(&kernel_flag);
-	preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * do_raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-	preempt_disable();
-	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
-		/*
-		 * If preemption was disabled even before this
-		 * was called, there's nothing we can be polite
-		 * about - just spin.
-		 */
-		if (preempt_count() > 1) {
-			do_raw_spin_lock(&kernel_flag);
-			return;
-		}
-
-		/*
-		 * Otherwise, let's wait for the kernel lock
-		 * with preemption enabled..
-		 */
-		do {
-			preempt_enable();
-			while (raw_spin_is_locked(&kernel_flag))
-				cpu_relax();
-			preempt_disable();
-		} while (!do_raw_spin_trylock(&kernel_flag));
-	}
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-	do_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-	/*
-	 * the BKL is not covered by lockdep, so we open-code the
-	 * unlocking sequence (and thus avoid the dep-chain ops):
-	 */
-	do_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc _lock_kernel(const char *func, const char *file, int line)
-{
-	int depth = current->lock_depth + 1;
-
-	trace_lock_kernel(func, file, line);
-
-	if (likely(!depth)) {
-		might_sleep();
-		__lock_kernel();
-	}
-	current->lock_depth = depth;
-}
-
-void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
-{
-	BUG_ON(current->lock_depth < 0);
-	if (likely(--current->lock_depth < 0))
-		__unlock_kernel();
-
-	trace_unlock_kernel(func, file, line);
-}
-
-EXPORT_SYMBOL(_lock_kernel);
-EXPORT_SYMBOL(_unlock_kernel);
-
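The deleted file's own comments capture the two properties that made the BKL unlike an ordinary spinlock: it nested per task through current->lock_depth, and the scheduler transparently dropped and reacquired it around schedule() via __release_kernel_lock()/__reacquire_kernel_lock(). A hedged illustration of the calling pattern this allowed before this patch (legacy_outer()/legacy_inner() are hypothetical callers, not code from the tree):

/* Illustration of the nesting semantics of _lock_kernel()/_unlock_kernel(). */
#include <linux/smp_lock.h>	/* header also removed by this series */

static void legacy_inner(void)
{
	lock_kernel();		/* already held: only current->lock_depth is bumped */
	/* ... poke at legacy shared state ... */
	unlock_kernel();	/* depth drops back to 0; kernel_flag stays held */
}

static void legacy_outer(void)
{
	lock_kernel();		/* depth was -1: this actually takes kernel_flag */
	legacy_inner();		/* recursive acquisition is legal with the BKL */
	/*
	 * If the task sleeps here, the scheduler releases kernel_flag via
	 * __release_kernel_lock() and takes it back with
	 * __reacquire_kernel_lock() before the task runs again.
	 */
	unlock_kernel();	/* depth goes below 0: kernel_flag is released */
}

Those implicit semantics are exactly what made the lock hard to reason about, and why the series above replaces each use with an explicit, local lock before this file can be deleted.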