| author | Peter Zijlstra <peterz@infradead.org> | 2013-10-31 13:15:36 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-11-06 01:55:21 -0500 |
| commit | 60fc28746a7b61775ae28950ddf7a4ac15955639 (patch) | |
| tree | 427ff89f3beaeca421ca25bbfd7c882c0b0c736b /kernel/locking | |
| parent | 8eddac3f103736163f49255bcb109edadea167f6 (diff) | |
locking: Move the spinlock code to kernel/locking/
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-b81ol0z3mon45m51o131yc9j@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/Makefile | 4 |
| -rw-r--r-- | kernel/locking/spinlock.c | 399 |
| -rw-r--r-- | kernel/locking/spinlock_debug.c | 302 |

3 files changed, 705 insertions(+), 0 deletions(-)
```diff
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index c103599fc1ba..674d2152d10f 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -13,3 +13,7 @@ obj-$(CONFIG_LOCKDEP) += lockdep.o
 ifeq ($(CONFIG_PROC_FS),y)
 obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
 endif
+obj-$(CONFIG_SMP) += spinlock.o
+obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
```
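Listing `spinlock.o` under three different config symbols is idiomatic kbuild: make drops duplicate objects, so the file is built once if any of `CONFIG_SMP`, `CONFIG_PROVE_LOCKING` or `CONFIG_DEBUG_SPINLOCK` is set. On a UP kernel without lock debugging, nothing here is compiled at all, because the lock API collapses to preemption control in the headers. A rough sketch of the call path into this object, paraphrased from `include/linux/spinlock.h` (not part of this patch):

```c
/* Sketch, not part of the patch: how a caller reaches spinlock.o.
 * On SMP/debug builds, raw_spin_lock() expands to the out-of-line
 * _raw_spin_lock() defined below; on UP-nondebug builds it reduces
 * to preempt_disable() and no code from this file is linked in.
 */
static inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);	/* -> _raw_spin_lock(&lock->rlock) */
}
```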
```diff
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
new file mode 100644
index 000000000000..4b082b5cac9e
--- /dev/null
+++ b/kernel/locking/spinlock.c
@@ -0,0 +1,399 @@
```
```c
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * include/linux/spinlock_api_smp.h
 */
#else
#define raw_read_can_lock(l)	read_can_lock(l)
#define raw_write_can_lock(l)	write_can_lock(l)

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here is only one user per function
 * which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
			arch_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
			arch_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
{									\
	_raw_##op##_lock_irqsave(lock);					\
}									\
									\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
{									\
	unsigned long flags;						\
									\
	/*								\
	 * Careful: we must exclude softirqs too, hence the		\
	 * irq-disabling. We use the generic preemption-aware		\
	 * function:							\
	 */								\
	flags = _raw_##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

#endif
```
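For readers who don't want to expand the macro by hand, this is approximately what `BUILD_LOCK_OPS(spin, raw_spinlock)` generates for the plain lock operation (a mechanical hand-expansion of the macro above, whitespace aside):

```c
/* Hand expansion of BUILD_LOCK_OPS(spin, raw_spinlock), plain lock op:
 * trylock with preemption disabled, and if that fails, re-enable
 * preemption and spin "politely" until the lock looks takeable again.
 */
void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(do_raw_spin_trylock(lock)))
			break;
		preempt_enable();	/* let this CPU reschedule while it waits */

		if (!(lock)->break_lock)
			(lock)->break_lock = 1;	/* ask the owner to drop the lock ASAP */
		while (!raw_spin_can_lock(lock) && (lock)->break_lock)
			arch_spin_relax(&lock->raw_lock);
	}
	(lock)->break_lock = 0;
}
```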
```c
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
	__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
	return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
	__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
	__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
	__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
	__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
	__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
	return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
	__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
	return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
	__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
	__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
	__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
	__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
	__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
```
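Every block above follows the same pattern: unless the architecture opts in to inlining a particular operation (the `CONFIG_INLINE_*` symbols, selected via the corresponding `ARCH_INLINE_*` options in `kernel/Kconfig.locks`), the API entry point is emitted here exactly once, out of line, and exported. Callers are unaffected either way; a typical (purely illustrative) user of the irqsave variant, with `my_lock` and `my_count` invented for the example:

```c
/* Illustrative caller only: my_lock/my_count are made up for the example. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_count;

static void bump_count(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);	/* ends up in _raw_spin_lock_irqsave() */
	my_count++;
	spin_unlock_irqrestore(&my_lock, flags);
}
```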
```c
#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
						   int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
				do_raw_spin_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif
```
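The `_nested` and `nest_lock` variants exist purely to feed lockdep: they classify an acquisition under a subclass (or under an outer lock) so that legitimate nesting of two locks from the same lock class is not reported as a potential deadlock. A hypothetical double-lock helper in the usual style, with the subclass names invented for the example:

```c
/* Hypothetical example: take two locks of the same class in a stable
 * (address) order, telling lockdep the second is a distinct subclass.
 * Unlocking is via plain raw_spin_unlock() in the reverse order.
 */
enum { MY_SUBCLASS_FIRST, MY_SUBCLASS_SECOND };	/* invented subclasses */

static void my_double_lock(raw_spinlock_t *a, raw_spinlock_t *b)
{
	if (a > b)
		swap(a, b);			/* consistent ordering avoids ABBA */
	raw_spin_lock_nested(a, MY_SUBCLASS_FIRST);
	raw_spin_lock_nested(b, MY_SUBCLASS_SECOND);
}
```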
```c
notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
```
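`in_lock_functions()` is what the header comment about `profile_pc` refers to: when a profiling tick lands inside one of these `__lockfunc` routines, several architectures report the lock's caller instead, relying on the known shape of these stack frames. A hedged sketch modelled on x86's frame-pointer case (details genuinely vary per architecture):

```c
/* Sketch modelled on x86's profile_pc() with frame pointers: if the
 * sampled PC is inside the __lockfunc text section, report the caller
 * by reading the saved return address relative to the frame pointer.
 */
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (!user_mode(regs) && in_lock_functions(pc))
		return *(unsigned long *)(regs->bp + sizeof(long));
	return pc;
}
```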
```diff
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
new file mode 100644
index 000000000000..0374a596cffa
--- /dev/null
+++ b/kernel/locking/spinlock_debug.c
@@ -0,0 +1,302 @@
```
```c
/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, lock->magic,
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		lock->owner_cpu);
	dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
```
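These before/after hooks turn several classic locking bugs into immediate, attributable splats instead of silent hangs. For instance, a deliberately broken (invented) double acquisition on one CPU:

```c
/* Deliberately buggy example: with CONFIG_DEBUG_SPINLOCK this prints
 * "BUG: spinlock recursion on CPU#..." via spin_bug()/spin_dump()
 * instead of deadlocking the CPU with no diagnostics.
 */
static DEFINE_RAW_SPINLOCK(buggy_lock);

static void buggy_double_lock(void)
{
	raw_spin_lock(&buggy_lock);
	raw_spin_lock(&buggy_lock);	/* debug_spin_lock_before() fires here */
}
```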
```c
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock. Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock. If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
}

void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}
```
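The watchdog bound `loops_per_jiffy * HZ` is roughly one second of busy-waiting: `loops_per_jiffy` is calibrated at boot so that `__delay(loops_per_jiffy)` burns about one jiffy, and there are `HZ` jiffies per second, so `loops` iterations of `__delay(1)` come to about a second (ignoring the trylock overhead per iteration). In other words, `do_raw_spin_lock()` polls politely for around a second before declaring a suspected lockup, dumping the lock state and all-CPU backtraces, and then falling back to a plain `arch_spin_lock()` so that a genuinely slow owner can still hand the lock over.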
```c
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif

void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

#if 0		/* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif

void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}
```
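Note the asymmetry with the spinlock side: only write acquisitions record `owner`/`owner_cpu`, because an rwlock can legitimately be held by many readers at once, so there is no single owner to track and `do_raw_read_lock()` checks nothing beyond the magic. Likewise, the spinning watchdogs for readers and writers stay under `#if 0`: as their comments say, the trylock-based polling loop can itself livelock (the same hazard described in `__spin_lock_debug()`, aggravated for writers that must wait out a steady stream of readers), so rwlock acquisition calls the arch implementation directly.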
