-rw-r--r--  arch/s390/Kconfig                 |  28
-rw-r--r--  arch/s390/include/asm/spinlock.h  |  29
-rw-r--r--  include/linux/sched.h             |  10
-rw-r--r--  include/linux/spinlock.h          |   6
-rw-r--r--  include/linux/spinlock_api_smp.h  |  75
-rw-r--r--  init/Kconfig                      |   1
-rw-r--r--  kernel/Kconfig.locks              | 202
-rw-r--r--  kernel/mutex.c                    |   4
-rw-r--r--  kernel/sched.c                    |   2
-rw-r--r--  kernel/spinlock.c                 | 310
10 files changed, 422 insertions, 245 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 43c0acad7160..16c673096a22 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -95,6 +95,34 @@ config S390
 	select HAVE_ARCH_TRACEHOOK
 	select INIT_ALL_POSSIBLE
 	select HAVE_PERF_EVENTS
+	select ARCH_INLINE_SPIN_TRYLOCK
+	select ARCH_INLINE_SPIN_TRYLOCK_BH
+	select ARCH_INLINE_SPIN_LOCK
+	select ARCH_INLINE_SPIN_LOCK_BH
+	select ARCH_INLINE_SPIN_LOCK_IRQ
+	select ARCH_INLINE_SPIN_LOCK_IRQSAVE
+	select ARCH_INLINE_SPIN_UNLOCK
+	select ARCH_INLINE_SPIN_UNLOCK_BH
+	select ARCH_INLINE_SPIN_UNLOCK_IRQ
+	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
+	select ARCH_INLINE_READ_TRYLOCK
+	select ARCH_INLINE_READ_LOCK
+	select ARCH_INLINE_READ_LOCK_BH
+	select ARCH_INLINE_READ_LOCK_IRQ
+	select ARCH_INLINE_READ_LOCK_IRQSAVE
+	select ARCH_INLINE_READ_UNLOCK
+	select ARCH_INLINE_READ_UNLOCK_BH
+	select ARCH_INLINE_READ_UNLOCK_IRQ
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE
+	select ARCH_INLINE_WRITE_TRYLOCK
+	select ARCH_INLINE_WRITE_LOCK
+	select ARCH_INLINE_WRITE_LOCK_BH
+	select ARCH_INLINE_WRITE_LOCK_IRQ
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE
+	select ARCH_INLINE_WRITE_UNLOCK
+	select ARCH_INLINE_WRITE_UNLOCK_BH
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 
 config SCHED_OMIT_FRAME_POINTER
 	bool
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 41ce6861174e..c9af0d19c7ab 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -191,33 +191,4 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
 
-#define __always_inline__spin_lock
-#define __always_inline__read_lock
-#define __always_inline__write_lock
-#define __always_inline__spin_lock_bh
-#define __always_inline__read_lock_bh
-#define __always_inline__write_lock_bh
-#define __always_inline__spin_lock_irq
-#define __always_inline__read_lock_irq
-#define __always_inline__write_lock_irq
-#define __always_inline__spin_lock_irqsave
-#define __always_inline__read_lock_irqsave
-#define __always_inline__write_lock_irqsave
-#define __always_inline__spin_trylock
-#define __always_inline__read_trylock
-#define __always_inline__write_trylock
-#define __always_inline__spin_trylock_bh
-#define __always_inline__spin_unlock
-#define __always_inline__read_unlock
-#define __always_inline__write_unlock
-#define __always_inline__spin_unlock_bh
-#define __always_inline__read_unlock_bh
-#define __always_inline__write_unlock_bh
-#define __always_inline__spin_unlock_irq
-#define __always_inline__read_unlock_irq
-#define __always_inline__write_unlock_irq
-#define __always_inline__spin_unlock_irqrestore
-#define __always_inline__read_unlock_irqrestore
-#define __always_inline__write_unlock_irqrestore
-
 #endif /* __ASM_SPINLOCK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 75e6e60bf583..49be8f7c05f6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1421,17 +1421,17 @@ struct task_struct {
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
-	int hardirqs_enabled;
 	unsigned long hardirq_enable_ip;
-	unsigned int hardirq_enable_event;
 	unsigned long hardirq_disable_ip;
+	unsigned int hardirq_enable_event;
 	unsigned int hardirq_disable_event;
-	int softirqs_enabled;
+	int hardirqs_enabled;
+	int hardirq_context;
 	unsigned long softirq_disable_ip;
-	unsigned int softirq_disable_event;
 	unsigned long softirq_enable_ip;
+	unsigned int softirq_disable_event;
 	unsigned int softirq_enable_event;
-	int hardirq_context;
+	int softirqs_enabled;
 	int softirq_context;
 #endif
 #ifdef CONFIG_LOCKDEP
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index f0ca7a7a1757..71dccfeb0d88 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -79,8 +79,6 @@
  */
 #include <linux/spinlock_types.h>
 
-extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
-
 /*
  * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
  */
@@ -102,7 +100,7 @@ do { \
 
 #else
 # define spin_lock_init(lock)					\
-	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+	do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -116,7 +114,7 @@ do { \
 } while (0)
 #else
 # define rwlock_init(lock)					\
-	do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+	do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
 #define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
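
Note (illustrative, not part of the patch; the structure and function names below are invented): spin_lock_init() is normally used on locks embedded in other objects, and the non-DEBUG_SPINLOCK variant changed above now hands the lock expression itself to the initializer. A rough usage sketch:

#include <linux/spinlock.h>

/* Hypothetical structure embedding a lock, for illustration only. */
struct example_dev {
	spinlock_t lock;
};

static void example_dev_setup(struct example_dev *dev)
{
	/*
	 * With the change above, the !CONFIG_DEBUG_SPINLOCK path expands
	 * roughly to
	 *	*(&dev->lock) = __SPIN_LOCK_UNLOCKED(&dev->lock);
	 * i.e. the per-lock __SPIN_LOCK_UNLOCKED() initializer rather than
	 * the shared SPIN_LOCK_UNLOCKED value.
	 */
	spin_lock_init(&dev->lock);
}
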
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 7a7e18fc2415..8264a7f459bc 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,137 +60,118 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 							__releases(lock);
 
-/*
- * We inline the unlock functions in the nondebug case:
- */
-#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
-#define __always_inline__spin_unlock
-#define __always_inline__read_unlock
-#define __always_inline__write_unlock
-#define __always_inline__spin_unlock_irq
-#define __always_inline__read_unlock_irq
-#define __always_inline__write_unlock_irq
-#endif
-
-#ifndef CONFIG_DEBUG_SPINLOCK
-#ifndef CONFIG_GENERIC_LOCKBREAK
-
-#ifdef __always_inline__spin_lock
+#ifdef CONFIG_INLINE_SPIN_LOCK
 #define _spin_lock(lock) __spin_lock(lock)
 #endif
 
-#ifdef __always_inline__read_lock
+#ifdef CONFIG_INLINE_READ_LOCK
 #define _read_lock(lock) __read_lock(lock)
 #endif
 
-#ifdef __always_inline__write_lock
+#ifdef CONFIG_INLINE_WRITE_LOCK
 #define _write_lock(lock) __write_lock(lock)
 #endif
 
-#ifdef __always_inline__spin_lock_bh
+#ifdef CONFIG_INLINE_SPIN_LOCK_BH
 #define _spin_lock_bh(lock) __spin_lock_bh(lock)
 #endif
 
-#ifdef __always_inline__read_lock_bh
+#ifdef CONFIG_INLINE_READ_LOCK_BH
 #define _read_lock_bh(lock) __read_lock_bh(lock)
 #endif
 
-#ifdef __always_inline__write_lock_bh
+#ifdef CONFIG_INLINE_WRITE_LOCK_BH
 #define _write_lock_bh(lock) __write_lock_bh(lock)
 #endif
 
-#ifdef __always_inline__spin_lock_irq
+#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
 #define _spin_lock_irq(lock) __spin_lock_irq(lock)
 #endif
 
-#ifdef __always_inline__read_lock_irq
+#ifdef CONFIG_INLINE_READ_LOCK_IRQ
 #define _read_lock_irq(lock) __read_lock_irq(lock)
 #endif
 
-#ifdef __always_inline__write_lock_irq
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
 #define _write_lock_irq(lock) __write_lock_irq(lock)
 #endif
 
-#ifdef __always_inline__spin_lock_irqsave
+#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
 #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
 #endif
 
-#ifdef __always_inline__read_lock_irqsave
+#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
 #define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
 #endif
 
-#ifdef __always_inline__write_lock_irqsave
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
 #define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
 #endif
 
-#endif /* !CONFIG_GENERIC_LOCKBREAK */
-
-#ifdef __always_inline__spin_trylock
+#ifdef CONFIG_INLINE_SPIN_TRYLOCK
 #define _spin_trylock(lock) __spin_trylock(lock)
 #endif
 
-#ifdef __always_inline__read_trylock
+#ifdef CONFIG_INLINE_READ_TRYLOCK
 #define _read_trylock(lock) __read_trylock(lock)
 #endif
 
-#ifdef __always_inline__write_trylock
+#ifdef CONFIG_INLINE_WRITE_TRYLOCK
 #define _write_trylock(lock) __write_trylock(lock)
 #endif
 
-#ifdef __always_inline__spin_trylock_bh
+#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
 #define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
 #endif
 
-#ifdef __always_inline__spin_unlock
+#ifdef CONFIG_INLINE_SPIN_UNLOCK
 #define _spin_unlock(lock) __spin_unlock(lock)
 #endif
 
-#ifdef __always_inline__read_unlock
+#ifdef CONFIG_INLINE_READ_UNLOCK
 #define _read_unlock(lock) __read_unlock(lock)
 #endif
 
-#ifdef __always_inline__write_unlock
+#ifdef CONFIG_INLINE_WRITE_UNLOCK
 #define _write_unlock(lock) __write_unlock(lock)
 #endif
 
-#ifdef __always_inline__spin_unlock_bh
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
 #define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
 #endif
 
-#ifdef __always_inline__read_unlock_bh
+#ifdef CONFIG_INLINE_READ_UNLOCK_BH
 #define _read_unlock_bh(lock) __read_unlock_bh(lock)
 #endif
 
-#ifdef __always_inline__write_unlock_bh
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
 #define _write_unlock_bh(lock) __write_unlock_bh(lock)
 #endif
 
-#ifdef __always_inline__spin_unlock_irq
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
 #define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
 #endif
 
-#ifdef __always_inline__read_unlock_irq
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
 #define _read_unlock_irq(lock) __read_unlock_irq(lock)
 #endif
 
-#ifdef __always_inline__write_unlock_irq
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
 #define _write_unlock_irq(lock) __write_unlock_irq(lock)
 #endif
 
-#ifdef __always_inline__spin_unlock_irqrestore
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
 #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
 #endif
 
-#ifdef __always_inline__read_unlock_irqrestore
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
 #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
 #endif
 
-#ifdef __always_inline__write_unlock_irqrestore
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
 #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
 #endif
 
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
 static inline int __spin_trylock(spinlock_t *lock)
 {
 	preempt_disable();
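
Note (illustrative, not part of the patch; the lock and function below are made up): the net effect of the CONFIG_INLINE_* redirects above is decided entirely at preprocessing time, so a plain caller does not change at all:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

static void example_caller(void)
{
	/*
	 * spin_lock() maps to _spin_lock().  If CONFIG_INLINE_SPIN_LOCK is
	 * set (the arch selected ARCH_INLINE_SPIN_LOCK and neither
	 * DEBUG_SPINLOCK nor GENERIC_LOCKBREAK is enabled), _spin_lock is
	 * #defined to the static inline __spin_lock() above and the
	 * fastpath is emitted right here; otherwise this stays a call into
	 * the out-of-line definition in kernel/spinlock.c.
	 */
	spin_lock(&example_lock);
	/* ... critical section ... */
	spin_unlock(&example_lock);
}
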
diff --git a/init/Kconfig b/init/Kconfig
index eb4b33725db1..2e9a1457132c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1220,3 +1220,4 @@ source "block/Kconfig"
 config PREEMPT_NOTIFIERS
 	bool
 
+source "kernel/Kconfig.locks"
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
new file mode 100644
index 000000000000..88c92fb44618
--- /dev/null
+++ b/kernel/Kconfig.locks
@@ -0,0 +1,202 @@
+#
+# The ARCH_INLINE foo is necessary because select ignores "depends on"
+#
+config ARCH_INLINE_SPIN_TRYLOCK
+	bool
+
+config ARCH_INLINE_SPIN_TRYLOCK_BH
+	bool
+
+config ARCH_INLINE_SPIN_LOCK
+	bool
+
+config ARCH_INLINE_SPIN_LOCK_BH
+	bool
+
+config ARCH_INLINE_SPIN_LOCK_IRQ
+	bool
+
+config ARCH_INLINE_SPIN_LOCK_IRQSAVE
+	bool
+
+config ARCH_INLINE_SPIN_UNLOCK
+	bool
+
+config ARCH_INLINE_SPIN_UNLOCK_BH
+	bool
+
+config ARCH_INLINE_SPIN_UNLOCK_IRQ
+	bool
+
+config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
+	bool
+
+
+config ARCH_INLINE_READ_TRYLOCK
+	bool
+
+config ARCH_INLINE_READ_LOCK
+	bool
+
+config ARCH_INLINE_READ_LOCK_BH
+	bool
+
+config ARCH_INLINE_READ_LOCK_IRQ
+	bool
+
+config ARCH_INLINE_READ_LOCK_IRQSAVE
+	bool
+
+config ARCH_INLINE_READ_UNLOCK
+	bool
+
+config ARCH_INLINE_READ_UNLOCK_BH
+	bool
+
+config ARCH_INLINE_READ_UNLOCK_IRQ
+	bool
+
+config ARCH_INLINE_READ_UNLOCK_IRQRESTORE
+	bool
+
+
+config ARCH_INLINE_WRITE_TRYLOCK
+	bool
+
+config ARCH_INLINE_WRITE_LOCK
+	bool
+
+config ARCH_INLINE_WRITE_LOCK_BH
+	bool
+
+config ARCH_INLINE_WRITE_LOCK_IRQ
+	bool
+
+config ARCH_INLINE_WRITE_LOCK_IRQSAVE
+	bool
+
+config ARCH_INLINE_WRITE_UNLOCK
+	bool
+
+config ARCH_INLINE_WRITE_UNLOCK_BH
+	bool
+
+config ARCH_INLINE_WRITE_UNLOCK_IRQ
+	bool
+
+config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+	bool
+
+#
+# lock_* functions are inlined when:
+#   - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
+#
+# trylock_* functions are inlined when:
+#   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+#
+# unlock and unlock_irq functions are inlined when:
+#   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+#  or
+#   - DEBUG_SPINLOCK=n and PREEMPT=n
+#
+# unlock_bh and unlock_irqrestore functions are inlined when:
+#   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+#
+
+config INLINE_SPIN_TRYLOCK
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK
+
+config INLINE_SPIN_TRYLOCK_BH
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH
+
+config INLINE_SPIN_LOCK
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
+
+config INLINE_SPIN_LOCK_BH
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_SPIN_LOCK_BH
+
+config INLINE_SPIN_LOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_SPIN_LOCK_IRQ
+
+config INLINE_SPIN_LOCK_IRQSAVE
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_SPIN_LOCK_IRQSAVE
+
+config INLINE_SPIN_UNLOCK
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
+
+config INLINE_SPIN_UNLOCK_BH
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
+
+config INLINE_SPIN_UNLOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH)
+
+config INLINE_SPIN_UNLOCK_IRQRESTORE
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
+
+
+config INLINE_READ_TRYLOCK
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK
+
+config INLINE_READ_LOCK
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
+
+config INLINE_READ_LOCK_BH
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_READ_LOCK_BH
+
+config INLINE_READ_LOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_READ_LOCK_IRQ
+
+config INLINE_READ_LOCK_IRQSAVE
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_READ_LOCK_IRQSAVE
+
+config INLINE_READ_UNLOCK
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK)
+
+config INLINE_READ_UNLOCK_BH
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH
+
+config INLINE_READ_UNLOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_BH)
+
+config INLINE_READ_UNLOCK_IRQRESTORE
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE
+
+
+config INLINE_WRITE_TRYLOCK
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK
+
+config INLINE_WRITE_LOCK
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
+
+config INLINE_WRITE_LOCK_BH
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_WRITE_LOCK_BH
+
+config INLINE_WRITE_LOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_WRITE_LOCK_IRQ
+
+config INLINE_WRITE_LOCK_IRQSAVE
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_WRITE_LOCK_IRQSAVE
+
+config INLINE_WRITE_UNLOCK
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK)
+
+config INLINE_WRITE_UNLOCK_BH
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH
+
+config INLINE_WRITE_UNLOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH)
+
+config INLINE_WRITE_UNLOCK_IRQRESTORE
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+
+config MUTEX_SPIN_ON_OWNER
+	def_bool SMP && !DEBUG_MUTEXES && !HAVE_DEFAULT_NO_SPIN_MUTEXES
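
Note (paraphrase only; the real decisions are made by Kconfig, and the macro names below are illustrative stand-ins, not kernel symbols): the def_bool lines above simply encode the policy stated in the comment block, per function family. In cpp-like terms:

/* lock family: arch opt-in, plus no spinlock debugging and no lockbreak. */
#if !defined(DEBUG_SPINLOCK) && !defined(GENERIC_LOCKBREAK) && \
    defined(ARCH_INLINE_SPIN_LOCK)
#define INLINE_SPIN_LOCK 1
#endif

/* unlock/unlock_irq family: inlined whenever !PREEMPT, or on arch opt-in. */
#if !defined(DEBUG_SPINLOCK) && \
    (!defined(PREEMPT) || defined(ARCH_INLINE_SPIN_UNLOCK))
#define INLINE_SPIN_UNLOCK 1
#endif
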
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 947b3ad551f8..632f04c57d82 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -148,8 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	preempt_disable();
 	mutex_acquire(&lock->dep_map, subclass, 0, ip);
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
-    !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
+
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	/*
 	 * Optimistic spinning.
 	 *
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c11ae0a948d..ec0af1fcb195 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5481,7 +5481,7 @@ need_resched_nonpreemptible:
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5ddab730cb2f..41e042219ff6 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,145 +21,28 @@
 #include <linux/debug_locks.h>
 #include <linux/module.h>
 
-#ifndef _spin_trylock
-int __lockfunc _spin_trylock(spinlock_t *lock)
-{
-	return __spin_trylock(lock);
-}
-EXPORT_SYMBOL(_spin_trylock);
-#endif
-
-#ifndef _read_trylock
-int __lockfunc _read_trylock(rwlock_t *lock)
-{
-	return __read_trylock(lock);
-}
-EXPORT_SYMBOL(_read_trylock);
-#endif
-
-#ifndef _write_trylock
-int __lockfunc _write_trylock(rwlock_t *lock)
-{
-	return __write_trylock(lock);
-}
-EXPORT_SYMBOL(_write_trylock);
-#endif
-
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-
-#ifndef _read_lock
-void __lockfunc _read_lock(rwlock_t *lock)
-{
-	__read_lock(lock);
-}
-EXPORT_SYMBOL(_read_lock);
-#endif
-
-#ifndef _spin_lock_irqsave
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
-{
-	return __spin_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irqsave);
-#endif
-
-#ifndef _spin_lock_irq
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
-{
-	__spin_lock_irq(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irq);
-#endif
-
-#ifndef _spin_lock_bh
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
-{
-	__spin_lock_bh(lock);
-}
-EXPORT_SYMBOL(_spin_lock_bh);
-#endif
-
-#ifndef _read_lock_irqsave
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-{
-	return __read_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_read_lock_irqsave);
-#endif
-
-#ifndef _read_lock_irq
-void __lockfunc _read_lock_irq(rwlock_t *lock)
-{
-	__read_lock_irq(lock);
-}
-EXPORT_SYMBOL(_read_lock_irq);
-#endif
-
-#ifndef _read_lock_bh
-void __lockfunc _read_lock_bh(rwlock_t *lock)
-{
-	__read_lock_bh(lock);
-}
-EXPORT_SYMBOL(_read_lock_bh);
-#endif
-
-#ifndef _write_lock_irqsave
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-{
-	return __write_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_write_lock_irqsave);
-#endif
-
-#ifndef _write_lock_irq
-void __lockfunc _write_lock_irq(rwlock_t *lock)
-{
-	__write_lock_irq(lock);
-}
-EXPORT_SYMBOL(_write_lock_irq);
-#endif
-
-#ifndef _write_lock_bh
-void __lockfunc _write_lock_bh(rwlock_t *lock)
-{
-	__write_lock_bh(lock);
-}
-EXPORT_SYMBOL(_write_lock_bh);
-#endif
-
-#ifndef _spin_lock
-void __lockfunc _spin_lock(spinlock_t *lock)
-{
-	__spin_lock(lock);
-}
-EXPORT_SYMBOL(_spin_lock);
-#endif
-
-#ifndef _write_lock
-void __lockfunc _write_lock(rwlock_t *lock)
-{
-	__write_lock(lock);
-}
-EXPORT_SYMBOL(_write_lock);
-#endif
-
-#else /* CONFIG_PREEMPT: */
-
 /*
+ * The __lock_function inlines are taken from
+ * include/linux/spinlock_api_smp.h
+ */
+#else
+/*
+ * We build the __lock_function inlines here. They are too large for
+ * inlining all over the place, but here is only one user per function
+ * which embedds them into the calling _lock_function below.
+ *
  * This could be a long-held lock. We both prepare to spin for a long
  * time (making _this_ CPU preemptable if possible), and we also signal
  * towards that other CPU that it should break the lock ASAP.
- *
- * (We do this in a function because inlining it would be excessive.)
  */
-
 #define BUILD_LOCK_OPS(op, locktype)				\
-void __lockfunc _##op##_lock(locktype##_t *lock)		\
+void __lockfunc __##op##_lock(locktype##_t *lock)		\
 {								\
 	for (;;) {						\
 		preempt_disable();				\
@@ -175,9 +58,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \
 	(lock)->break_lock = 0;					\
 }								\
 								\
-EXPORT_SYMBOL(_##op##_lock);					\
-								\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
+unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock)	\
 {								\
 	unsigned long flags;					\
 								\
@@ -198,16 +79,12 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
 	return flags;						\
 }								\
 								\
-EXPORT_SYMBOL(_##op##_lock_irqsave);				\
-								\
-void __lockfunc _##op##_lock_irq(locktype##_t *lock)		\
+void __lockfunc __##op##_lock_irq(locktype##_t *lock)		\
 {								\
 	_##op##_lock_irqsave(lock);				\
 }								\
 								\
-EXPORT_SYMBOL(_##op##_lock_irq);				\
-								\
-void __lockfunc _##op##_lock_bh(locktype##_t *lock)		\
+void __lockfunc __##op##_lock_bh(locktype##_t *lock)		\
 {								\
 	unsigned long flags;					\
 								\
@@ -220,23 +97,21 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
 	local_bh_disable();					\
 	local_irq_restore(flags);				\
 }								\
-								\
-EXPORT_SYMBOL(_##op##_lock_bh)
 
 /*
  * Build preemption-friendly versions of the following
  * lock-spinning functions:
  *
- *         _[spin|read|write]_lock()
- *         _[spin|read|write]_lock_irq()
- *         _[spin|read|write]_lock_irqsave()
- *         _[spin|read|write]_lock_bh()
+ *         __[spin|read|write]_lock()
+ *         __[spin|read|write]_lock_irq()
+ *         __[spin|read|write]_lock_irqsave()
+ *         __[spin|read|write]_lock_bh()
  */
 BUILD_LOCK_OPS(spin, spinlock);
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
 
-#endif /* CONFIG_PREEMPT */
+#endif
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
@@ -248,7 +123,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 EXPORT_SYMBOL(_spin_lock_nested);
 
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
+						   int subclass)
 {
 	unsigned long flags;
 
@@ -272,7 +148,127 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
 
 #endif
 
-#ifndef _spin_unlock
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK
+int __lockfunc _spin_trylock(spinlock_t *lock)
+{
+	return __spin_trylock(lock);
+}
+EXPORT_SYMBOL(_spin_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_READ_TRYLOCK
+int __lockfunc _read_trylock(rwlock_t *lock)
+{
+	return __read_trylock(lock);
+}
+EXPORT_SYMBOL(_read_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_TRYLOCK
+int __lockfunc _write_trylock(rwlock_t *lock)
+{
+	return __write_trylock(lock);
+}
+EXPORT_SYMBOL(_write_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK
+void __lockfunc _read_lock(rwlock_t *lock)
+{
+	__read_lock(lock);
+}
+EXPORT_SYMBOL(_read_lock);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
+unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+{
+	return __spin_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_spin_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
+void __lockfunc _spin_lock_irq(spinlock_t *lock)
+{
+	__spin_lock_irq(lock);
+}
+EXPORT_SYMBOL(_spin_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK_BH
+void __lockfunc _spin_lock_bh(spinlock_t *lock)
+{
+	__spin_lock_bh(lock);
+}
+EXPORT_SYMBOL(_spin_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+{
+	return __read_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_read_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK_IRQ
+void __lockfunc _read_lock_irq(rwlock_t *lock)
+{
+	__read_lock_irq(lock);
+}
+EXPORT_SYMBOL(_read_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK_BH
+void __lockfunc _read_lock_bh(rwlock_t *lock)
+{
+	__read_lock_bh(lock);
+}
+EXPORT_SYMBOL(_read_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+{
+	return __write_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_write_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
+void __lockfunc _write_lock_irq(rwlock_t *lock)
+{
+	__write_lock_irq(lock);
+}
+EXPORT_SYMBOL(_write_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_BH
+void __lockfunc _write_lock_bh(rwlock_t *lock)
+{
+	__write_lock_bh(lock);
+}
+EXPORT_SYMBOL(_write_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK
+void __lockfunc _spin_lock(spinlock_t *lock)
+{
+	__spin_lock(lock);
+}
+EXPORT_SYMBOL(_spin_lock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK
+void __lockfunc _write_lock(rwlock_t *lock)
+{
+	__write_lock(lock);
+}
+EXPORT_SYMBOL(_write_lock);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_UNLOCK
 void __lockfunc _spin_unlock(spinlock_t *lock)
 {
 	__spin_unlock(lock);
@@ -280,7 +276,7 @@ void __lockfunc _spin_unlock(spinlock_t *lock)
 EXPORT_SYMBOL(_spin_unlock);
 #endif
 
-#ifndef _write_unlock
+#ifndef CONFIG_INLINE_WRITE_UNLOCK
 void __lockfunc _write_unlock(rwlock_t *lock)
 {
 	__write_unlock(lock);
@@ -288,7 +284,7 @@ void __lockfunc _write_unlock(rwlock_t *lock)
 EXPORT_SYMBOL(_write_unlock);
 #endif
 
-#ifndef _read_unlock
+#ifndef CONFIG_INLINE_READ_UNLOCK
 void __lockfunc _read_unlock(rwlock_t *lock)
 {
 	__read_unlock(lock);
@@ -296,7 +292,7 @@ void __lockfunc _read_unlock(rwlock_t *lock)
 EXPORT_SYMBOL(_read_unlock);
 #endif
 
-#ifndef _spin_unlock_irqrestore
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
 	__spin_unlock_irqrestore(lock, flags);
@@ -304,7 +300,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 EXPORT_SYMBOL(_spin_unlock_irqrestore);
 #endif
 
-#ifndef _spin_unlock_irq
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 {
 	__spin_unlock_irq(lock);
@@ -312,7 +308,7 @@ void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 EXPORT_SYMBOL(_spin_unlock_irq);
 #endif
 
-#ifndef _spin_unlock_bh
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
 	__spin_unlock_bh(lock);
@@ -320,7 +316,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 EXPORT_SYMBOL(_spin_unlock_bh);
 #endif
 
-#ifndef _read_unlock_irqrestore
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	__read_unlock_irqrestore(lock, flags);
@@ -328,7 +324,7 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 EXPORT_SYMBOL(_read_unlock_irqrestore);
 #endif
 
-#ifndef _read_unlock_irq
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
 void __lockfunc _read_unlock_irq(rwlock_t *lock)
 {
 	__read_unlock_irq(lock);
@@ -336,7 +332,7 @@ void __lockfunc _read_unlock_irq(rwlock_t *lock)
 EXPORT_SYMBOL(_read_unlock_irq);
 #endif
 
-#ifndef _read_unlock_bh
+#ifndef CONFIG_INLINE_READ_UNLOCK_BH
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
 	__read_unlock_bh(lock);
@@ -344,7 +340,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock)
 EXPORT_SYMBOL(_read_unlock_bh);
 #endif
 
-#ifndef _write_unlock_irqrestore
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	__write_unlock_irqrestore(lock, flags);
@@ -352,7 +348,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 EXPORT_SYMBOL(_write_unlock_irqrestore);
 #endif
 
-#ifndef _write_unlock_irq
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
 void __lockfunc _write_unlock_irq(rwlock_t *lock)
 {
 	__write_unlock_irq(lock);
@@ -360,7 +356,7 @@ void __lockfunc _write_unlock_irq(rwlock_t *lock)
 EXPORT_SYMBOL(_write_unlock_irq);
 #endif
 
-#ifndef _write_unlock_bh
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
 	__write_unlock_bh(lock);
@@ -368,7 +364,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock)
 EXPORT_SYMBOL(_write_unlock_bh);
 #endif
 
-#ifndef _spin_trylock_bh
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 {
 	return __spin_trylock_bh(lock);