author		Linus Torvalds <torvalds@linux-foundation.org>	2009-12-05 12:49:59 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-05 12:49:59 -0500
commit		3e72b810e30cdf4655279dd767eb798ac7a8fe5e
tree		a6c8daae5390b44750dfc4ca9bc984430dd16e74 /kernel
parent		9b269d4034c7855ac34f0985cc55ee29bd80e80a
parent		c08f782985eed9959438368e84ce1d7f2ed03d95
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
mutex: Fix missing conditions to build mutex_spin_on_owner()
mutex: Better control mutex adaptive spinning config
locking, task_struct: Reduce size on TRACE_IRQFLAGS and 64bit
locking: Use __[SPIN|RW]_LOCK_UNLOCKED in [spin|rw]_lock_init()
locking: Remove unused prototype
locking: Reduce ifdefs in kernel/spinlock.c
locking: Make inlining decision Kconfig based
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/Kconfig.locks	202
-rw-r--r--	kernel/mutex.c		  4
-rw-r--r--	kernel/sched.c		  2
-rw-r--r--	kernel/spinlock.c	310
4 files changed, 358 insertions(+), 160 deletions(-)
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
new file mode 100644
index 000000000000..88c92fb44618
--- /dev/null
+++ b/kernel/Kconfig.locks
@@ -0,0 +1,202 @@
+#
+# The ARCH_INLINE foo is necessary because select ignores "depends on"
+#
+config ARCH_INLINE_SPIN_TRYLOCK
+	bool
+
+config ARCH_INLINE_SPIN_TRYLOCK_BH
+	bool
+
+config ARCH_INLINE_SPIN_LOCK
+	bool
+
+config ARCH_INLINE_SPIN_LOCK_BH
+	bool
+
+config ARCH_INLINE_SPIN_LOCK_IRQ
+	bool
+
+config ARCH_INLINE_SPIN_LOCK_IRQSAVE
+	bool
+
+config ARCH_INLINE_SPIN_UNLOCK
+	bool
+
+config ARCH_INLINE_SPIN_UNLOCK_BH
+	bool
+
+config ARCH_INLINE_SPIN_UNLOCK_IRQ
+	bool
+
+config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
+	bool
+
+
+config ARCH_INLINE_READ_TRYLOCK
+	bool
+
+config ARCH_INLINE_READ_LOCK
+	bool
+
+config ARCH_INLINE_READ_LOCK_BH
+	bool
+
+config ARCH_INLINE_READ_LOCK_IRQ
+	bool
+
+config ARCH_INLINE_READ_LOCK_IRQSAVE
+	bool
+
+config ARCH_INLINE_READ_UNLOCK
+	bool
+
+config ARCH_INLINE_READ_UNLOCK_BH
+	bool
+
+config ARCH_INLINE_READ_UNLOCK_IRQ
+	bool
+
+config ARCH_INLINE_READ_UNLOCK_IRQRESTORE
+	bool
+
+
+config ARCH_INLINE_WRITE_TRYLOCK
+	bool
+
+config ARCH_INLINE_WRITE_LOCK
+	bool
+
+config ARCH_INLINE_WRITE_LOCK_BH
+	bool
+
+config ARCH_INLINE_WRITE_LOCK_IRQ
+	bool
+
+config ARCH_INLINE_WRITE_LOCK_IRQSAVE
+	bool
+
+config ARCH_INLINE_WRITE_UNLOCK
+	bool
+
+config ARCH_INLINE_WRITE_UNLOCK_BH
+	bool
+
+config ARCH_INLINE_WRITE_UNLOCK_IRQ
+	bool
+
+config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+	bool
+
+#
+# lock_* functions are inlined when:
+#   - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
+#
+# trylock_* functions are inlined when:
+#   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+#
+# unlock and unlock_irq functions are inlined when:
+#   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+#  or
+#   - DEBUG_SPINLOCK=n and PREEMPT=n
+#
+# unlock_bh and unlock_irqrestore functions are inlined when:
+#   - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
+#
+
+config INLINE_SPIN_TRYLOCK
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK
+
+config INLINE_SPIN_TRYLOCK_BH
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH
+
+config INLINE_SPIN_LOCK
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
+
+config INLINE_SPIN_LOCK_BH
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_SPIN_LOCK_BH
+
+config INLINE_SPIN_LOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_SPIN_LOCK_IRQ
+
+config INLINE_SPIN_LOCK_IRQSAVE
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_SPIN_LOCK_IRQSAVE
+
+config INLINE_SPIN_UNLOCK
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
+
+config INLINE_SPIN_UNLOCK_BH
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
+
+config INLINE_SPIN_UNLOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH)
+
+config INLINE_SPIN_UNLOCK_IRQRESTORE
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
+
+
+config INLINE_READ_TRYLOCK
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK
+
+config INLINE_READ_LOCK
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
+
+config INLINE_READ_LOCK_BH
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_READ_LOCK_BH
+
+config INLINE_READ_LOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_READ_LOCK_IRQ
+
+config INLINE_READ_LOCK_IRQSAVE
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_READ_LOCK_IRQSAVE
+
+config INLINE_READ_UNLOCK
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK)
+
+config INLINE_READ_UNLOCK_BH
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH
+
+config INLINE_READ_UNLOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_BH)
+
+config INLINE_READ_UNLOCK_IRQRESTORE
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE
+
+
+config INLINE_WRITE_TRYLOCK
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK
+
+config INLINE_WRITE_LOCK
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
+
+config INLINE_WRITE_LOCK_BH
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_WRITE_LOCK_BH
+
+config INLINE_WRITE_LOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_WRITE_LOCK_IRQ
+
+config INLINE_WRITE_LOCK_IRQSAVE
+	def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
+		 ARCH_INLINE_WRITE_LOCK_IRQSAVE
+
+config INLINE_WRITE_UNLOCK
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK)
+
+config INLINE_WRITE_UNLOCK_BH
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH
+
+config INLINE_WRITE_UNLOCK_IRQ
+	def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH)
+
+config INLINE_WRITE_UNLOCK_IRQRESTORE
+	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+
+config MUTEX_SPIN_ON_OWNER
+	def_bool SMP && !DEBUG_MUTEXES && !HAVE_DEFAULT_NO_SPIN_MUTEXES
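
Note that the ARCH_INLINE_* symbols above are silent bools: an architecture cannot enable them from a defconfig, it opts in by select-ing them, which is why they exist at all (as the header comment says, select ignores "depends on"). A minimal sketch of such an opt-in, as a hypothetical arch Kconfig fragment — not part of this commit, loosely modeled on what s390 is understood to have done around this time:

	# hypothetical arch/<arch>/Kconfig fragment (illustrative only)
	config MYARCH_INLINE_LOCKS
		def_bool y
		select ARCH_INLINE_SPIN_TRYLOCK
		select ARCH_INLINE_SPIN_LOCK
		select ARCH_INLINE_SPIN_UNLOCK
		select ARCH_INLINE_READ_LOCK
		select ARCH_INLINE_READ_UNLOCK
		select ARCH_INLINE_WRITE_LOCK
		select ARCH_INLINE_WRITE_UNLOCK
		# ...one select per operation whose lock code is cheap enough
		# on this architecture to be worth inlining at every call site
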
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 947b3ad551f8..632f04c57d82 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -148,8 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	preempt_disable();
 	mutex_acquire(&lock->dep_map, subclass, 0, ip);
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
-	!defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
+
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	/*
 	 * Optimistic spinning.
 	 *
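
This hunk is the payoff of the new MUTEX_SPIN_ON_OWNER symbol: the three-part preprocessor condition is now evaluated once by Kconfig (def_bool SMP && !DEBUG_MUTEXES && !HAVE_DEFAULT_NO_SPIN_MUTEXES in Kconfig.locks above), so every C use site shrinks to a single #ifdef. The same pattern in miniature, with hypothetical symbol names:

	# Kconfig: derive one symbol from the boolean expression
	config FEATURE_FAST_LOCK
		def_bool SMP && !DEBUG_LOCKS

	/* C: test the derived symbol instead of restating the expression */
	#ifdef CONFIG_FEATURE_FAST_LOCK
		/* fast-path code */
	#endif

Keeping the expression in one place means a future change to the condition (as happened here with HAVE_DEFAULT_NO_SPIN_MUTEXES) touches Kconfig once instead of every #if in the tree.
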
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c11ae0a948d..ec0af1fcb195 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5481,7 +5481,7 @@ need_resched_nonpreemptible:
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
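
For orientation, the function this #ifdef now guards, mutex_spin_on_owner(), implements the spinning policy: a waiter keeps spinning only while the lock owner is actually running on another CPU, since only a running owner can release the lock soon. A self-contained user-space sketch of that policy with hypothetical types (an analogy only — the kernel version additionally treats the owner pointer as speculative and unreliable, as the comment above warns):

	#include <stdatomic.h>

	struct demo_task  { atomic_int on_cpu; };	/* 1 while running */
	struct demo_mutex { _Atomic(struct demo_task *) owner; };

	/* Return 0 if the caller should stop spinning and sleep,
	 * nonzero if the owner changed or released and a retry makes sense. */
	static int demo_spin_on_owner(struct demo_mutex *lock,
				      struct demo_task *owner)
	{
		while (atomic_load(&lock->owner) == owner) {
			/* An owner that is scheduled out cannot release the
			 * lock soon; sleeping is then cheaper than spinning. */
			if (!atomic_load(&owner->on_cpu))
				return 0;
		}
		return 1;
	}
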
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5ddab730cb2f..41e042219ff6 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,145 +21,28 @@
 #include <linux/debug_locks.h>
 #include <linux/module.h>
 
-#ifndef _spin_trylock
-int __lockfunc _spin_trylock(spinlock_t *lock)
-{
-	return __spin_trylock(lock);
-}
-EXPORT_SYMBOL(_spin_trylock);
-#endif
-
-#ifndef _read_trylock
-int __lockfunc _read_trylock(rwlock_t *lock)
-{
-	return __read_trylock(lock);
-}
-EXPORT_SYMBOL(_read_trylock);
-#endif
-
-#ifndef _write_trylock
-int __lockfunc _write_trylock(rwlock_t *lock)
-{
-	return __write_trylock(lock);
-}
-EXPORT_SYMBOL(_write_trylock);
-#endif
-
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-
-#ifndef _read_lock
-void __lockfunc _read_lock(rwlock_t *lock)
-{
-	__read_lock(lock);
-}
-EXPORT_SYMBOL(_read_lock);
-#endif
-
-#ifndef _spin_lock_irqsave
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
-{
-	return __spin_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irqsave);
-#endif
-
-#ifndef _spin_lock_irq
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
-{
-	__spin_lock_irq(lock);
-}
-EXPORT_SYMBOL(_spin_lock_irq);
-#endif
-
-#ifndef _spin_lock_bh
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
-{
-	__spin_lock_bh(lock);
-}
-EXPORT_SYMBOL(_spin_lock_bh);
-#endif
-
-#ifndef _read_lock_irqsave
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
-{
-	return __read_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_read_lock_irqsave);
-#endif
-
-#ifndef _read_lock_irq
-void __lockfunc _read_lock_irq(rwlock_t *lock)
-{
-	__read_lock_irq(lock);
-}
-EXPORT_SYMBOL(_read_lock_irq);
-#endif
-
-#ifndef _read_lock_bh
-void __lockfunc _read_lock_bh(rwlock_t *lock)
-{
-	__read_lock_bh(lock);
-}
-EXPORT_SYMBOL(_read_lock_bh);
-#endif
-
-#ifndef _write_lock_irqsave
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
-{
-	return __write_lock_irqsave(lock);
-}
-EXPORT_SYMBOL(_write_lock_irqsave);
-#endif
-
-#ifndef _write_lock_irq
-void __lockfunc _write_lock_irq(rwlock_t *lock)
-{
-	__write_lock_irq(lock);
-}
-EXPORT_SYMBOL(_write_lock_irq);
-#endif
-
-#ifndef _write_lock_bh
-void __lockfunc _write_lock_bh(rwlock_t *lock)
-{
-	__write_lock_bh(lock);
-}
-EXPORT_SYMBOL(_write_lock_bh);
-#endif
-
-#ifndef _spin_lock
-void __lockfunc _spin_lock(spinlock_t *lock)
-{
-	__spin_lock(lock);
-}
-EXPORT_SYMBOL(_spin_lock);
-#endif
-
-#ifndef _write_lock
-void __lockfunc _write_lock(rwlock_t *lock)
-{
-	__write_lock(lock);
-}
-EXPORT_SYMBOL(_write_lock);
-#endif
-
-#else /* CONFIG_PREEMPT: */
-
 /*
+ * The __lock_function inlines are taken from
+ * include/linux/spinlock_api_smp.h
+ */
+#else
+/*
+ * We build the __lock_function inlines here. They are too large for
+ * inlining all over the place, but here is only one user per function
+ * which embedds them into the calling _lock_function below.
+ *
  * This could be a long-held lock. We both prepare to spin for a long
  * time (making _this_ CPU preemptable if possible), and we also signal
  * towards that other CPU that it should break the lock ASAP.
- *
- * (We do this in a function because inlining it would be excessive.)
  */
-
 #define BUILD_LOCK_OPS(op, locktype)				\
-void __lockfunc _##op##_lock(locktype##_t *lock)		\
+void __lockfunc __##op##_lock(locktype##_t *lock)		\
 {								\
 	for (;;) {						\
 		preempt_disable();				\
@@ -175,9 +58,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock)		\
 	(lock)->break_lock = 0;					\
 }								\
 								\
-EXPORT_SYMBOL(_##op##_lock);					\
-								\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
+unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
 {								\
 	unsigned long flags;					\
 								\
@@ -198,16 +79,12 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
 	return flags;						\
 }								\
 								\
-EXPORT_SYMBOL(_##op##_lock_irqsave);				\
-								\
-void __lockfunc _##op##_lock_irq(locktype##_t *lock)		\
+void __lockfunc __##op##_lock_irq(locktype##_t *lock)		\
 {								\
 	_##op##_lock_irqsave(lock);				\
 }								\
 								\
-EXPORT_SYMBOL(_##op##_lock_irq);				\
-								\
-void __lockfunc _##op##_lock_bh(locktype##_t *lock)		\
+void __lockfunc __##op##_lock_bh(locktype##_t *lock)		\
 {								\
 	unsigned long flags;					\
 								\
@@ -220,23 +97,21 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock)		\
 	local_bh_disable();					\
 	local_irq_restore(flags);				\
 }								\
-								\
-EXPORT_SYMBOL(_##op##_lock_bh)
 
 /*
  * Build preemption-friendly versions of the following
  * lock-spinning functions:
  *
- *         _[spin|read|write]_lock()
- *         _[spin|read|write]_lock_irq()
- *         _[spin|read|write]_lock_irqsave()
- *         _[spin|read|write]_lock_bh()
+ *         __[spin|read|write]_lock()
+ *         __[spin|read|write]_lock_irq()
+ *         __[spin|read|write]_lock_irqsave()
+ *         __[spin|read|write]_lock_bh()
  */
 BUILD_LOCK_OPS(spin, spinlock);
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
 
-#endif /* CONFIG_PREEMPT */
+#endif
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
@@ -248,7 +123,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 EXPORT_SYMBOL(_spin_lock_nested);
 
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
+						   int subclass)
 {
 	unsigned long flags;
 
@@ -272,7 +148,127 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
 
 #endif
 
-#ifndef _spin_unlock
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK
+int __lockfunc _spin_trylock(spinlock_t *lock)
+{
+	return __spin_trylock(lock);
+}
+EXPORT_SYMBOL(_spin_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_READ_TRYLOCK
+int __lockfunc _read_trylock(rwlock_t *lock)
+{
+	return __read_trylock(lock);
+}
+EXPORT_SYMBOL(_read_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_TRYLOCK
+int __lockfunc _write_trylock(rwlock_t *lock)
+{
+	return __write_trylock(lock);
+}
+EXPORT_SYMBOL(_write_trylock);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK
+void __lockfunc _read_lock(rwlock_t *lock)
+{
+	__read_lock(lock);
+}
+EXPORT_SYMBOL(_read_lock);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
+unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+{
+	return __spin_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_spin_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
+void __lockfunc _spin_lock_irq(spinlock_t *lock)
+{
+	__spin_lock_irq(lock);
+}
+EXPORT_SYMBOL(_spin_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK_BH
+void __lockfunc _spin_lock_bh(spinlock_t *lock)
+{
+	__spin_lock_bh(lock);
+}
+EXPORT_SYMBOL(_spin_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
+unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+{
+	return __read_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_read_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK_IRQ
+void __lockfunc _read_lock_irq(rwlock_t *lock)
+{
+	__read_lock_irq(lock);
+}
+EXPORT_SYMBOL(_read_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_READ_LOCK_BH
+void __lockfunc _read_lock_bh(rwlock_t *lock)
+{
+	__read_lock_bh(lock);
+}
+EXPORT_SYMBOL(_read_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+{
+	return __write_lock_irqsave(lock);
+}
+EXPORT_SYMBOL(_write_lock_irqsave);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
+void __lockfunc _write_lock_irq(rwlock_t *lock)
+{
+	__write_lock_irq(lock);
+}
+EXPORT_SYMBOL(_write_lock_irq);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK_BH
+void __lockfunc _write_lock_bh(rwlock_t *lock)
+{
+	__write_lock_bh(lock);
+}
+EXPORT_SYMBOL(_write_lock_bh);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_LOCK
+void __lockfunc _spin_lock(spinlock_t *lock)
+{
+	__spin_lock(lock);
+}
+EXPORT_SYMBOL(_spin_lock);
+#endif
+
+#ifndef CONFIG_INLINE_WRITE_LOCK
+void __lockfunc _write_lock(rwlock_t *lock)
+{
+	__write_lock(lock);
+}
+EXPORT_SYMBOL(_write_lock);
+#endif
+
+#ifndef CONFIG_INLINE_SPIN_UNLOCK
 void __lockfunc _spin_unlock(spinlock_t *lock)
 {
 	__spin_unlock(lock);
@@ -280,7 +276,7 @@ void __lockfunc _spin_unlock(spinlock_t *lock)
 EXPORT_SYMBOL(_spin_unlock);
 #endif
 
-#ifndef _write_unlock
+#ifndef CONFIG_INLINE_WRITE_UNLOCK
 void __lockfunc _write_unlock(rwlock_t *lock)
 {
 	__write_unlock(lock);
@@ -288,7 +284,7 @@ void __lockfunc _write_unlock(rwlock_t *lock)
 EXPORT_SYMBOL(_write_unlock);
 #endif
 
-#ifndef _read_unlock
+#ifndef CONFIG_INLINE_READ_UNLOCK
 void __lockfunc _read_unlock(rwlock_t *lock)
 {
 	__read_unlock(lock);
@@ -296,7 +292,7 @@ void __lockfunc _read_unlock(rwlock_t *lock)
 EXPORT_SYMBOL(_read_unlock);
 #endif
 
-#ifndef _spin_unlock_irqrestore
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
 	__spin_unlock_irqrestore(lock, flags);
@@ -304,7 +300,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 EXPORT_SYMBOL(_spin_unlock_irqrestore);
 #endif
 
-#ifndef _spin_unlock_irq
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 {
 	__spin_unlock_irq(lock);
@@ -312,7 +308,7 @@ void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 EXPORT_SYMBOL(_spin_unlock_irq);
 #endif
 
-#ifndef _spin_unlock_bh
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
 	__spin_unlock_bh(lock);
@@ -320,7 +316,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 EXPORT_SYMBOL(_spin_unlock_bh);
 #endif
 
-#ifndef _read_unlock_irqrestore
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	__read_unlock_irqrestore(lock, flags);
@@ -328,7 +324,7 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 EXPORT_SYMBOL(_read_unlock_irqrestore);
 #endif
 
-#ifndef _read_unlock_irq
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
 void __lockfunc _read_unlock_irq(rwlock_t *lock)
 {
 	__read_unlock_irq(lock);
@@ -336,7 +332,7 @@ void __lockfunc _read_unlock_irq(rwlock_t *lock)
 EXPORT_SYMBOL(_read_unlock_irq);
 #endif
 
-#ifndef _read_unlock_bh
+#ifndef CONFIG_INLINE_READ_UNLOCK_BH
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
 	__read_unlock_bh(lock);
@@ -344,7 +340,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock)
 EXPORT_SYMBOL(_read_unlock_bh);
 #endif
 
-#ifndef _write_unlock_irqrestore
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	__write_unlock_irqrestore(lock, flags);
@@ -352,7 +348,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 EXPORT_SYMBOL(_write_unlock_irqrestore);
 #endif
 
-#ifndef _write_unlock_irq
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
 void __lockfunc _write_unlock_irq(rwlock_t *lock)
 {
 	__write_unlock_irq(lock);
@@ -360,7 +356,7 @@ void __lockfunc _write_unlock_irq(rwlock_t *lock)
 EXPORT_SYMBOL(_write_unlock_irq);
 #endif
 
-#ifndef _write_unlock_bh
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
 	__write_unlock_bh(lock);
@@ -368,7 +364,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock)
 EXPORT_SYMBOL(_write_unlock_bh);
 #endif
 
-#ifndef _spin_trylock_bh
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 {
 	return __spin_trylock_bh(lock);
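
The other half of the mechanism lives in include/linux/spinlock_api_smp.h: when a CONFIG_INLINE_* symbol is set, the _lock entry point is #define-d straight to the corresponding __lock inline, so the out-of-line definition in kernel/spinlock.c above is compiled out and every call site gets the inlined body instead. An abridged sketch of the header side as of this series — recalled for context, not part of this diff:

	/* include/linux/spinlock_api_smp.h (abridged sketch) */
	#ifdef CONFIG_INLINE_SPIN_LOCK
	#define _spin_lock(lock) __spin_lock(lock)
	#endif

	static inline void __spin_lock(spinlock_t *lock)
	{
		preempt_disable();
		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
		LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
	}

Either way the caller uses _spin_lock(); the Kconfig decision only selects whether that resolves to a function call into kernel/spinlock.c or to the inline body, trading kernel text size against call overhead per architecture.
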