Diffstat:
 include/linux/hardirq.h  |   9 +--------
 include/linux/smp_lock.h |  65 ----------------
 init/Kconfig             |   5 -----
 kernel/sched.c           |   9 +--------
 lib/Kconfig.debug        |   9 ---------
 lib/Makefile             |   1 -
 lib/kernel_lock.c        | 136 --------------------------------
 7 files changed, 2 insertions(+), 232 deletions(-)
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 32f9fd6619b4..ba362171e8ae 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -93,13 +93,6 @@
  */
 #define in_nmi()	(preempt_count() & NMI_MASK)
 
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
-# include <linux/sched.h>
-# define PREEMPT_INATOMIC_BASE (current->lock_depth >= 0)
-#else
-# define PREEMPT_INATOMIC_BASE 0
-#endif
-
 #if defined(CONFIG_PREEMPT)
 # define PREEMPT_CHECK_OFFSET 1
 #else
@@ -113,7 +106,7 @@
  * used in the general case to determine whether sleeping is possible.
  * Do not use in_atomic() in driver code.
  */
-#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)
+#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 
 /*
  * Check whether we were atomic before we did preempt_disable():
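
Context for this hunk: under CONFIG_PREEMPT, lock_kernel() raised preempt_count(), yet a BKL holder could still sleep because the scheduler transparently dropped and re-took the lock around schedule(). PREEMPT_INATOMIC_BASE therefore made in_atomic() treat "BKL held" as the non-atomic baseline; with the BKL gone, the baseline is plain 0. A self-contained userspace model of the old and new expansions (illustrative only; the variables mimic preempt_count() and current->lock_depth, nothing below is kernel code):

#include <stdio.h>

#define PREEMPT_ACTIVE 0x10000000

static int preempt_count_v = 0; /* models preempt_count() */
static int lock_depth = -1;     /* models current->lock_depth */

/* Old definition: a held BKL (lock_depth >= 0) is the baseline. */
static int in_atomic_old(void)
{
	int base = (lock_depth >= 0); /* PREEMPT_INATOMIC_BASE */
	return (preempt_count_v & ~PREEMPT_ACTIVE) != base;
}

/* New definition: any elevated count means atomic context. */
static int in_atomic_new(void)
{
	return (preempt_count_v & ~PREEMPT_ACTIVE) != 0;
}

int main(void)
{
	/* lock_kernel() effectively did: lock_depth = 0, count++ */
	lock_depth = 0;
	preempt_count_v = 1;
	printf("BKL held: old=%d new=%d\n", in_atomic_old(), in_atomic_new());
	return 0;
}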
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
deleted file mode 100644
index 3a1988202731..000000000000
--- a/include/linux/smp_lock.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef __LINUX_SMPLOCK_H
-#define __LINUX_SMPLOCK_H
-
-#ifdef CONFIG_LOCK_KERNEL
-#include <linux/sched.h>
-
-extern int __lockfunc __reacquire_kernel_lock(void);
-extern void __lockfunc __release_kernel_lock(void);
-
-/*
- * Release/re-acquire global kernel lock for the scheduler
- */
-#define release_kernel_lock(tsk) do {		\
-	if (unlikely((tsk)->lock_depth >= 0))	\
-		__release_kernel_lock();	\
-} while (0)
-
-static inline int reacquire_kernel_lock(struct task_struct *task)
-{
-	if (unlikely(task->lock_depth >= 0))
-		return __reacquire_kernel_lock();
-	return 0;
-}
-
-extern void __lockfunc
-_lock_kernel(const char *func, const char *file, int line)
-__acquires(kernel_lock);
-
-extern void __lockfunc
-_unlock_kernel(const char *func, const char *file, int line)
-__releases(kernel_lock);
-
-#define lock_kernel() do {				\
-	_lock_kernel(__func__, __FILE__, __LINE__);	\
-} while (0)
-
-#define unlock_kernel() do {				\
-	_unlock_kernel(__func__, __FILE__, __LINE__);	\
-} while (0)
-
-/*
- * Various legacy drivers don't really need the BKL in a specific
- * function, but they *do* need to know that the BKL became available.
- * This function just avoids wrapping a bunch of lock/unlock pairs
- * around code which doesn't really need it.
- */
-static inline void cycle_kernel_lock(void)
-{
-	lock_kernel();
-	unlock_kernel();
-}
-
-#else
-
-#ifdef CONFIG_BKL /* provoke build bug if not set */
-#define lock_kernel()
-#define unlock_kernel()
-#define cycle_kernel_lock()			do { } while(0)
-#endif /* CONFIG_BKL */
-
-#define release_kernel_lock(task)		do { } while(0)
-#define reacquire_kernel_lock(task)		0
-
-#endif /* CONFIG_LOCK_KERNEL */
-#endif /* __LINUX_SMPLOCK_H */
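
With the header deleted, any straggling lock_kernel()/unlock_kernel() user now fails to compile, which is the intended tripwire. For reference, the pushdown conversions that preceded this removal typically swapped the BKL for a subsystem-local mutex; a hedged sketch of that pattern (hypothetical driver code, foo_mutex and foo_ioctl_locked are made-up names, not part of this patch):

#include <linux/mutex.h>

static DEFINE_MUTEX(foo_mutex);		/* hypothetical private lock */

static long foo_ioctl_locked(unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&foo_mutex);		/* was: lock_kernel();   */
	ret = 0;			/* ... device-specific work ... */
	mutex_unlock(&foo_mutex);	/* was: unlock_kernel(); */
	return ret;
}

Unlike the BKL, such a mutex is neither recursive nor dropped over schedule(), so converted callers must not nest the lock or rely on it being released while they sleep.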
diff --git a/init/Kconfig b/init/Kconfig
index be788c0957d4..a88d1c919a4d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -69,11 +69,6 @@ config BROKEN_ON_SMP
 	depends on BROKEN || !SMP
 	default y
 
-config LOCK_KERNEL
-	bool
-	depends on (SMP || PREEMPT) && BKL
-	default y
-
 config INIT_ENV_ARG_LIMIT
 	int
 	default 32 if !UML
diff --git a/kernel/sched.c b/kernel/sched.c
index 18d38e4ec7ba..827c170c6017 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -32,7 +32,6 @@
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
-#include <linux/smp_lock.h>
 #include <asm/mmu_context.h>
 #include <linux/interrupt.h>
 #include <linux/capability.h>
@@ -3945,9 +3944,6 @@ need_resched:
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 
-	release_kernel_lock(prev);
-need_resched_nonpreemptible:
-
 	schedule_debug(prev);
 
 	if (sched_feat(HRTICK))
@@ -4010,9 +4006,6 @@ need_resched_nonpreemptible:
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(prev)))
-		goto need_resched_nonpreemptible;
-
 	preempt_enable_no_resched();
 	if (need_resched())
 		goto need_resched;
@@ -8074,7 +8067,7 @@ static inline int preempt_count_equals(int preempt_offset)
 {
 	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
 
-	return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
+	return (nested == preempt_offset);
 }
 
 void __might_sleep(const char *file, int line, int preempt_offset)
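
These hunks delete the scheduler's BKL hand-off: release_kernel_lock(prev) dropped the lock before the context switch, reacquire_kernel_lock(prev) re-took it afterwards, and a failed trylock (it returned -EAGAIN once need_resched() was set; see lib/kernel_lock.c below) restarted the switch at need_resched_nonpreemptible, whose label also disappears here. A userspace model of that retry shape (illustrative only; the flag stands in for another CPU holding kernel_flag):

#include <stdbool.h>
#include <stdio.h>

static bool bkl_contended = true; /* pretend another CPU holds the BKL */

/* Models __reacquire_kernel_lock(): fail once, then succeed. */
static int reacquire_kernel_lock_model(void)
{
	if (bkl_contended) {
		bkl_contended = false;	/* next attempt succeeds */
		return -1;		/* -EAGAIN in the real code */
	}
	return 0;
}

int main(void)
{
	int passes = 0;

need_resched_nonpreemptible:		/* the label this patch removes */
	passes++;
	/* ... pick the next task and context-switch here ... */
	if (reacquire_kernel_lock_model())
		goto need_resched_nonpreemptible;
	printf("switch completed after %d pass(es)\n", passes);
	return 0;
}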
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2b97418c67e2..6f440d82b58d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -470,15 +470,6 @@ config DEBUG_MUTEXES
 	  This feature allows mutex semantics violations to be detected and
 	  reported.
 
-config BKL
-	bool "Big Kernel Lock" if (SMP || PREEMPT)
-	default y
-	help
-	  This is the traditional lock that is used in old code instead
-	  of proper locking. All drivers that use the BKL should depend
-	  on this symbol.
-	  Say Y here unless you are working on removing the BKL.
-
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index cbb774f7d41d..de6c609bb4e4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
-obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_BTREE) += btree.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
deleted file mode 100644
index d80e12265862..000000000000
--- a/lib/kernel_lock.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * lib/kernel_lock.c
- *
- * This is the traditional BKL - big kernel lock. Largely
- * relegated to obsolescence, but used by various less
- * important (or lazy) subsystems.
- */
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/semaphore.h>
-#include <linux/smp_lock.h>
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - do_raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
-	while (!do_raw_spin_trylock(&kernel_flag)) {
-		if (need_resched())
-			return -EAGAIN;
-		cpu_relax();
-	}
-	preempt_disable();
-	return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
-	do_raw_spin_unlock(&kernel_flag);
-	preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * do_raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
-	preempt_disable();
-	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
-		/*
-		 * If preemption was disabled even before this
-		 * was called, there's nothing we can be polite
-		 * about - just spin.
-		 */
-		if (preempt_count() > 1) {
-			do_raw_spin_lock(&kernel_flag);
-			return;
-		}
-
-		/*
-		 * Otherwise, let's wait for the kernel lock
-		 * with preemption enabled..
-		 */
-		do {
-			preempt_enable();
-			while (raw_spin_is_locked(&kernel_flag))
-				cpu_relax();
-			preempt_disable();
-		} while (!do_raw_spin_trylock(&kernel_flag));
-	}
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
-	do_raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
-	/*
-	 * the BKL is not covered by lockdep, so we open-code the
-	 * unlocking sequence (and thus avoid the dep-chain ops):
-	 */
-	do_raw_spin_unlock(&kernel_flag);
-	preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc _lock_kernel(const char *func, const char *file, int line)
-{
-	int depth = current->lock_depth + 1;
-
-	if (likely(!depth)) {
-		might_sleep();
-		__lock_kernel();
-	}
-	current->lock_depth = depth;
-}
-
-void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
-{
-	BUG_ON(current->lock_depth < 0);
-	if (likely(--current->lock_depth < 0))
-		__unlock_kernel();
-}
-
-EXPORT_SYMBOL(_lock_kernel);
-EXPORT_SYMBOL(_unlock_kernel);
-
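
What made the removed lock re-entrant is the lock_depth bookkeeping in _lock_kernel()/_unlock_kernel() above: only the outermost acquisition took kernel_flag, and only the matching outermost release dropped it. A self-contained userspace model of that depth counting (illustrative only; plain ints stand in for the task struct field and the spinlock):

#include <assert.h>
#include <stdio.h>

static int lock_depth = -1;	/* models current->lock_depth */
static int spinlock_held;	/* models kernel_flag */

static void lock_kernel_model(void)
{
	int depth = lock_depth + 1;

	if (depth == 0)		/* outermost acquisition only */
		spinlock_held = 1;
	lock_depth = depth;
}

static void unlock_kernel_model(void)
{
	assert(lock_depth >= 0);	/* BUG_ON() in the original */
	if (--lock_depth < 0)		/* last unlock drops the lock */
		spinlock_held = 0;
}

int main(void)
{
	lock_kernel_model();
	lock_kernel_model();		/* nested call: no deadlock */
	unlock_kernel_model();
	printf("held after inner unlock: %d\n", spinlock_held);	/* 1 */
	unlock_kernel_model();
	printf("held after outer unlock: %d\n", spinlock_held);	/* 0 */
	return 0;
}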