diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-05 12:49:59 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-05 12:49:59 -0500 |
| commit | 3e72b810e30cdf4655279dd767eb798ac7a8fe5e (patch) | |
| tree | a6c8daae5390b44750dfc4ca9bc984430dd16e74 | |
| parent | 9b269d4034c7855ac34f0985cc55ee29bd80e80a (diff) | |
| parent | c08f782985eed9959438368e84ce1d7f2ed03d95 (diff) | |
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
mutex: Fix missing conditions to build mutex_spin_on_owner()
mutex: Better control mutex adaptive spinning config
locking, task_struct: Reduce size on TRACE_IRQFLAGS and 64bit
locking: Use __[SPIN|RW]_LOCK_UNLOCKED in [spin|rw]_lock_init()
locking: Remove unused prototype
locking: Reduce ifdefs in kernel/spinlock.c
locking: Make inlining decision Kconfig based
| -rw-r--r-- | arch/s390/Kconfig | 28 | ||||
| -rw-r--r-- | arch/s390/include/asm/spinlock.h | 29 | ||||
| -rw-r--r-- | include/linux/sched.h | 10 | ||||
| -rw-r--r-- | include/linux/spinlock.h | 6 | ||||
| -rw-r--r-- | include/linux/spinlock_api_smp.h | 75 | ||||
| -rw-r--r-- | init/Kconfig | 1 | ||||
| -rw-r--r-- | kernel/Kconfig.locks | 202 | ||||
| -rw-r--r-- | kernel/mutex.c | 4 | ||||
| -rw-r--r-- | kernel/sched.c | 2 | ||||
| -rw-r--r-- | kernel/spinlock.c | 310 |
10 files changed, 422 insertions, 245 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 43c0acad7160..16c673096a22 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
| @@ -95,6 +95,34 @@ config S390 | |||
| 95 | select HAVE_ARCH_TRACEHOOK | 95 | select HAVE_ARCH_TRACEHOOK |
| 96 | select INIT_ALL_POSSIBLE | 96 | select INIT_ALL_POSSIBLE |
| 97 | select HAVE_PERF_EVENTS | 97 | select HAVE_PERF_EVENTS |
| 98 | select ARCH_INLINE_SPIN_TRYLOCK | ||
| 99 | select ARCH_INLINE_SPIN_TRYLOCK_BH | ||
| 100 | select ARCH_INLINE_SPIN_LOCK | ||
| 101 | select ARCH_INLINE_SPIN_LOCK_BH | ||
| 102 | select ARCH_INLINE_SPIN_LOCK_IRQ | ||
| 103 | select ARCH_INLINE_SPIN_LOCK_IRQSAVE | ||
| 104 | select ARCH_INLINE_SPIN_UNLOCK | ||
| 105 | select ARCH_INLINE_SPIN_UNLOCK_BH | ||
| 106 | select ARCH_INLINE_SPIN_UNLOCK_IRQ | ||
| 107 | select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE | ||
| 108 | select ARCH_INLINE_READ_TRYLOCK | ||
| 109 | select ARCH_INLINE_READ_LOCK | ||
| 110 | select ARCH_INLINE_READ_LOCK_BH | ||
| 111 | select ARCH_INLINE_READ_LOCK_IRQ | ||
| 112 | select ARCH_INLINE_READ_LOCK_IRQSAVE | ||
| 113 | select ARCH_INLINE_READ_UNLOCK | ||
| 114 | select ARCH_INLINE_READ_UNLOCK_BH | ||
| 115 | select ARCH_INLINE_READ_UNLOCK_IRQ | ||
| 116 | select ARCH_INLINE_READ_UNLOCK_IRQRESTORE | ||
| 117 | select ARCH_INLINE_WRITE_TRYLOCK | ||
| 118 | select ARCH_INLINE_WRITE_LOCK | ||
| 119 | select ARCH_INLINE_WRITE_LOCK_BH | ||
| 120 | select ARCH_INLINE_WRITE_LOCK_IRQ | ||
| 121 | select ARCH_INLINE_WRITE_LOCK_IRQSAVE | ||
| 122 | select ARCH_INLINE_WRITE_UNLOCK | ||
| 123 | select ARCH_INLINE_WRITE_UNLOCK_BH | ||
| 124 | select ARCH_INLINE_WRITE_UNLOCK_IRQ | ||
| 125 | select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
| 98 | 126 | ||
| 99 | config SCHED_OMIT_FRAME_POINTER | 127 | config SCHED_OMIT_FRAME_POINTER |
| 100 | bool | 128 | bool |
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 41ce6861174e..c9af0d19c7ab 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h | |||
| @@ -191,33 +191,4 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) | |||
| 191 | #define _raw_read_relax(lock) cpu_relax() | 191 | #define _raw_read_relax(lock) cpu_relax() |
| 192 | #define _raw_write_relax(lock) cpu_relax() | 192 | #define _raw_write_relax(lock) cpu_relax() |
| 193 | 193 | ||
| 194 | #define __always_inline__spin_lock | ||
| 195 | #define __always_inline__read_lock | ||
| 196 | #define __always_inline__write_lock | ||
| 197 | #define __always_inline__spin_lock_bh | ||
| 198 | #define __always_inline__read_lock_bh | ||
| 199 | #define __always_inline__write_lock_bh | ||
| 200 | #define __always_inline__spin_lock_irq | ||
| 201 | #define __always_inline__read_lock_irq | ||
| 202 | #define __always_inline__write_lock_irq | ||
| 203 | #define __always_inline__spin_lock_irqsave | ||
| 204 | #define __always_inline__read_lock_irqsave | ||
| 205 | #define __always_inline__write_lock_irqsave | ||
| 206 | #define __always_inline__spin_trylock | ||
| 207 | #define __always_inline__read_trylock | ||
| 208 | #define __always_inline__write_trylock | ||
| 209 | #define __always_inline__spin_trylock_bh | ||
| 210 | #define __always_inline__spin_unlock | ||
| 211 | #define __always_inline__read_unlock | ||
| 212 | #define __always_inline__write_unlock | ||
| 213 | #define __always_inline__spin_unlock_bh | ||
| 214 | #define __always_inline__read_unlock_bh | ||
| 215 | #define __always_inline__write_unlock_bh | ||
| 216 | #define __always_inline__spin_unlock_irq | ||
| 217 | #define __always_inline__read_unlock_irq | ||
| 218 | #define __always_inline__write_unlock_irq | ||
| 219 | #define __always_inline__spin_unlock_irqrestore | ||
| 220 | #define __always_inline__read_unlock_irqrestore | ||
| 221 | #define __always_inline__write_unlock_irqrestore | ||
| 222 | |||
| 223 | #endif /* __ASM_SPINLOCK_H */ | 194 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 75e6e60bf583..49be8f7c05f6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1421,17 +1421,17 @@ struct task_struct { | |||
| 1421 | #endif | 1421 | #endif |
| 1422 | #ifdef CONFIG_TRACE_IRQFLAGS | 1422 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 1423 | unsigned int irq_events; | 1423 | unsigned int irq_events; |
| 1424 | int hardirqs_enabled; | ||
| 1425 | unsigned long hardirq_enable_ip; | 1424 | unsigned long hardirq_enable_ip; |
| 1426 | unsigned int hardirq_enable_event; | ||
| 1427 | unsigned long hardirq_disable_ip; | 1425 | unsigned long hardirq_disable_ip; |
| 1426 | unsigned int hardirq_enable_event; | ||
| 1428 | unsigned int hardirq_disable_event; | 1427 | unsigned int hardirq_disable_event; |
| 1429 | int softirqs_enabled; | 1428 | int hardirqs_enabled; |
| 1429 | int hardirq_context; | ||
| 1430 | unsigned long softirq_disable_ip; | 1430 | unsigned long softirq_disable_ip; |
| 1431 | unsigned int softirq_disable_event; | ||
| 1432 | unsigned long softirq_enable_ip; | 1431 | unsigned long softirq_enable_ip; |
| 1432 | unsigned int softirq_disable_event; | ||
| 1433 | unsigned int softirq_enable_event; | 1433 | unsigned int softirq_enable_event; |
| 1434 | int hardirq_context; | 1434 | int softirqs_enabled; |
| 1435 | int softirq_context; | 1435 | int softirq_context; |
| 1436 | #endif | 1436 | #endif |
| 1437 | #ifdef CONFIG_LOCKDEP | 1437 | #ifdef CONFIG_LOCKDEP |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index f0ca7a7a1757..71dccfeb0d88 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
| @@ -79,8 +79,6 @@ | |||
| 79 | */ | 79 | */ |
| 80 | #include <linux/spinlock_types.h> | 80 | #include <linux/spinlock_types.h> |
| 81 | 81 | ||
| 82 | extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock); | ||
| 83 | |||
| 84 | /* | 82 | /* |
| 85 | * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them): | 83 | * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them): |
| 86 | */ | 84 | */ |
| @@ -102,7 +100,7 @@ do { \ | |||
| 102 | 100 | ||
| 103 | #else | 101 | #else |
| 104 | # define spin_lock_init(lock) \ | 102 | # define spin_lock_init(lock) \ |
| 105 | do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) | 103 | do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) |
| 106 | #endif | 104 | #endif |
| 107 | 105 | ||
| 108 | #ifdef CONFIG_DEBUG_SPINLOCK | 106 | #ifdef CONFIG_DEBUG_SPINLOCK |
| @@ -116,7 +114,7 @@ do { \ | |||
| 116 | } while (0) | 114 | } while (0) |
| 117 | #else | 115 | #else |
| 118 | # define rwlock_init(lock) \ | 116 | # define rwlock_init(lock) \ |
| 119 | do { *(lock) = RW_LOCK_UNLOCKED; } while (0) | 117 | do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) |
| 120 | #endif | 118 | #endif |
| 121 | 119 | ||
| 122 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) | 120 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) |
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 7a7e18fc2415..8264a7f459bc 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h | |||
| @@ -60,137 +60,118 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
| 60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
| 61 | __releases(lock); | 61 | __releases(lock); |
| 62 | 62 | ||
| 63 | /* | 63 | #ifdef CONFIG_INLINE_SPIN_LOCK |
| 64 | * We inline the unlock functions in the nondebug case: | ||
| 65 | */ | ||
| 66 | #if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT) | ||
| 67 | #define __always_inline__spin_unlock | ||
| 68 | #define __always_inline__read_unlock | ||
| 69 | #define __always_inline__write_unlock | ||
| 70 | #define __always_inline__spin_unlock_irq | ||
| 71 | #define __always_inline__read_unlock_irq | ||
| 72 | #define __always_inline__write_unlock_irq | ||
| 73 | #endif | ||
| 74 | |||
| 75 | #ifndef CONFIG_DEBUG_SPINLOCK | ||
| 76 | #ifndef CONFIG_GENERIC_LOCKBREAK | ||
| 77 | |||
| 78 | #ifdef __always_inline__spin_lock | ||
| 79 | #define _spin_lock(lock) __spin_lock(lock) | 64 | #define _spin_lock(lock) __spin_lock(lock) |
| 80 | #endif | 65 | #endif |
| 81 | 66 | ||
| 82 | #ifdef __always_inline__read_lock | 67 | #ifdef CONFIG_INLINE_READ_LOCK |
| 83 | #define _read_lock(lock) __read_lock(lock) | 68 | #define _read_lock(lock) __read_lock(lock) |
| 84 | #endif | 69 | #endif |
| 85 | 70 | ||
| 86 | #ifdef __always_inline__write_lock | 71 | #ifdef CONFIG_INLINE_WRITE_LOCK |
| 87 | #define _write_lock(lock) __write_lock(lock) | 72 | #define _write_lock(lock) __write_lock(lock) |
| 88 | #endif | 73 | #endif |
| 89 | 74 | ||
| 90 | #ifdef __always_inline__spin_lock_bh | 75 | #ifdef CONFIG_INLINE_SPIN_LOCK_BH |
| 91 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) | 76 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) |
| 92 | #endif | 77 | #endif |
| 93 | 78 | ||
| 94 | #ifdef __always_inline__read_lock_bh | 79 | #ifdef CONFIG_INLINE_READ_LOCK_BH |
| 95 | #define _read_lock_bh(lock) __read_lock_bh(lock) | 80 | #define _read_lock_bh(lock) __read_lock_bh(lock) |
| 96 | #endif | 81 | #endif |
| 97 | 82 | ||
| 98 | #ifdef __always_inline__write_lock_bh | 83 | #ifdef CONFIG_INLINE_WRITE_LOCK_BH |
| 99 | #define _write_lock_bh(lock) __write_lock_bh(lock) | 84 | #define _write_lock_bh(lock) __write_lock_bh(lock) |
| 100 | #endif | 85 | #endif |
| 101 | 86 | ||
| 102 | #ifdef __always_inline__spin_lock_irq | 87 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ |
| 103 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) | 88 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) |
| 104 | #endif | 89 | #endif |
| 105 | 90 | ||
| 106 | #ifdef __always_inline__read_lock_irq | 91 | #ifdef CONFIG_INLINE_READ_LOCK_IRQ |
| 107 | #define _read_lock_irq(lock) __read_lock_irq(lock) | 92 | #define _read_lock_irq(lock) __read_lock_irq(lock) |
| 108 | #endif | 93 | #endif |
| 109 | 94 | ||
| 110 | #ifdef __always_inline__write_lock_irq | 95 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQ |
| 111 | #define _write_lock_irq(lock) __write_lock_irq(lock) | 96 | #define _write_lock_irq(lock) __write_lock_irq(lock) |
| 112 | #endif | 97 | #endif |
| 113 | 98 | ||
| 114 | #ifdef __always_inline__spin_lock_irqsave | 99 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE |
| 115 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) | 100 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) |
| 116 | #endif | 101 | #endif |
| 117 | 102 | ||
| 118 | #ifdef __always_inline__read_lock_irqsave | 103 | #ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE |
| 119 | #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) | 104 | #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) |
| 120 | #endif | 105 | #endif |
| 121 | 106 | ||
| 122 | #ifdef __always_inline__write_lock_irqsave | 107 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE |
| 123 | #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) | 108 | #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) |
| 124 | #endif | 109 | #endif |
| 125 | 110 | ||
| 126 | #endif /* !CONFIG_GENERIC_LOCKBREAK */ | 111 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK |
| 127 | |||
| 128 | #ifdef __always_inline__spin_trylock | ||
| 129 | #define _spin_trylock(lock) __spin_trylock(lock) | 112 | #define _spin_trylock(lock) __spin_trylock(lock) |
| 130 | #endif | 113 | #endif |
| 131 | 114 | ||
| 132 | #ifdef __always_inline__read_trylock | 115 | #ifdef CONFIG_INLINE_READ_TRYLOCK |
| 133 | #define _read_trylock(lock) __read_trylock(lock) | 116 | #define _read_trylock(lock) __read_trylock(lock) |
| 134 | #endif | 117 | #endif |
| 135 | 118 | ||
| 136 | #ifdef __always_inline__write_trylock | 119 | #ifdef CONFIG_INLINE_WRITE_TRYLOCK |
| 137 | #define _write_trylock(lock) __write_trylock(lock) | 120 | #define _write_trylock(lock) __write_trylock(lock) |
| 138 | #endif | 121 | #endif |
| 139 | 122 | ||
| 140 | #ifdef __always_inline__spin_trylock_bh | 123 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH |
| 141 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) | 124 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) |
| 142 | #endif | 125 | #endif |
| 143 | 126 | ||
| 144 | #ifdef __always_inline__spin_unlock | 127 | #ifdef CONFIG_INLINE_SPIN_UNLOCK |
| 145 | #define _spin_unlock(lock) __spin_unlock(lock) | 128 | #define _spin_unlock(lock) __spin_unlock(lock) |
| 146 | #endif | 129 | #endif |
| 147 | 130 | ||
| 148 | #ifdef __always_inline__read_unlock | 131 | #ifdef CONFIG_INLINE_READ_UNLOCK |
| 149 | #define _read_unlock(lock) __read_unlock(lock) | 132 | #define _read_unlock(lock) __read_unlock(lock) |
| 150 | #endif | 133 | #endif |
| 151 | 134 | ||
| 152 | #ifdef __always_inline__write_unlock | 135 | #ifdef CONFIG_INLINE_WRITE_UNLOCK |
| 153 | #define _write_unlock(lock) __write_unlock(lock) | 136 | #define _write_unlock(lock) __write_unlock(lock) |
| 154 | #endif | 137 | #endif |
| 155 | 138 | ||
| 156 | #ifdef __always_inline__spin_unlock_bh | 139 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH |
| 157 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) | 140 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) |
| 158 | #endif | 141 | #endif |
| 159 | 142 | ||
| 160 | #ifdef __always_inline__read_unlock_bh | 143 | #ifdef CONFIG_INLINE_READ_UNLOCK_BH |
| 161 | #define _read_unlock_bh(lock) __read_unlock_bh(lock) | 144 | #define _read_unlock_bh(lock) __read_unlock_bh(lock) |
| 162 | #endif | 145 | #endif |
| 163 | 146 | ||
| 164 | #ifdef __always_inline__write_unlock_bh | 147 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_BH |
| 165 | #define _write_unlock_bh(lock) __write_unlock_bh(lock) | 148 | #define _write_unlock_bh(lock) __write_unlock_bh(lock) |
| 166 | #endif | 149 | #endif |
| 167 | 150 | ||
| 168 | #ifdef __always_inline__spin_unlock_irq | 151 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ |
| 169 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) | 152 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) |
| 170 | #endif | 153 | #endif |
| 171 | 154 | ||
| 172 | #ifdef __always_inline__read_unlock_irq | 155 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQ |
| 173 | #define _read_unlock_irq(lock) __read_unlock_irq(lock) | 156 | #define _read_unlock_irq(lock) __read_unlock_irq(lock) |
| 174 | #endif | 157 | #endif |
| 175 | 158 | ||
| 176 | #ifdef __always_inline__write_unlock_irq | 159 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ |
| 177 | #define _write_unlock_irq(lock) __write_unlock_irq(lock) | 160 | #define _write_unlock_irq(lock) __write_unlock_irq(lock) |
| 178 | #endif | 161 | #endif |
| 179 | 162 | ||
| 180 | #ifdef __always_inline__spin_unlock_irqrestore | 163 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE |
| 181 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) | 164 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) |
| 182 | #endif | 165 | #endif |
| 183 | 166 | ||
| 184 | #ifdef __always_inline__read_unlock_irqrestore | 167 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE |
| 185 | #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) | 168 | #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) |
| 186 | #endif | 169 | #endif |
| 187 | 170 | ||
| 188 | #ifdef __always_inline__write_unlock_irqrestore | 171 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE |
| 189 | #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) | 172 | #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) |
| 190 | #endif | 173 | #endif |
| 191 | 174 | ||
| 192 | #endif /* CONFIG_DEBUG_SPINLOCK */ | ||
| 193 | |||
| 194 | static inline int __spin_trylock(spinlock_t *lock) | 175 | static inline int __spin_trylock(spinlock_t *lock) |
| 195 | { | 176 | { |
| 196 | preempt_disable(); | 177 | preempt_disable(); |
diff --git a/init/Kconfig b/init/Kconfig index eb4b33725db1..2e9a1457132c 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -1220,3 +1220,4 @@ source "block/Kconfig" | |||
| 1220 | config PREEMPT_NOTIFIERS | 1220 | config PREEMPT_NOTIFIERS |
| 1221 | bool | 1221 | bool |
| 1222 | 1222 | ||
| 1223 | source "kernel/Kconfig.locks" | ||
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks new file mode 100644 index 000000000000..88c92fb44618 --- /dev/null +++ b/kernel/Kconfig.locks | |||
| @@ -0,0 +1,202 @@ | |||
| 1 | # | ||
| 2 | # The ARCH_INLINE foo is necessary because select ignores "depends on" | ||
| 3 | # | ||
| 4 | config ARCH_INLINE_SPIN_TRYLOCK | ||
| 5 | bool | ||
| 6 | |||
| 7 | config ARCH_INLINE_SPIN_TRYLOCK_BH | ||
| 8 | bool | ||
| 9 | |||
| 10 | config ARCH_INLINE_SPIN_LOCK | ||
| 11 | bool | ||
| 12 | |||
| 13 | config ARCH_INLINE_SPIN_LOCK_BH | ||
| 14 | bool | ||
| 15 | |||
| 16 | config ARCH_INLINE_SPIN_LOCK_IRQ | ||
| 17 | bool | ||
| 18 | |||
| 19 | config ARCH_INLINE_SPIN_LOCK_IRQSAVE | ||
| 20 | bool | ||
| 21 | |||
| 22 | config ARCH_INLINE_SPIN_UNLOCK | ||
| 23 | bool | ||
| 24 | |||
| 25 | config ARCH_INLINE_SPIN_UNLOCK_BH | ||
| 26 | bool | ||
| 27 | |||
| 28 | config ARCH_INLINE_SPIN_UNLOCK_IRQ | ||
| 29 | bool | ||
| 30 | |||
| 31 | config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE | ||
| 32 | bool | ||
| 33 | |||
| 34 | |||
| 35 | config ARCH_INLINE_READ_TRYLOCK | ||
| 36 | bool | ||
| 37 | |||
| 38 | config ARCH_INLINE_READ_LOCK | ||
| 39 | bool | ||
| 40 | |||
| 41 | config ARCH_INLINE_READ_LOCK_BH | ||
| 42 | bool | ||
| 43 | |||
| 44 | config ARCH_INLINE_READ_LOCK_IRQ | ||
| 45 | bool | ||
| 46 | |||
| 47 | config ARCH_INLINE_READ_LOCK_IRQSAVE | ||
| 48 | bool | ||
| 49 | |||
| 50 | config ARCH_INLINE_READ_UNLOCK | ||
| 51 | bool | ||
| 52 | |||
| 53 | config ARCH_INLINE_READ_UNLOCK_BH | ||
| 54 | bool | ||
| 55 | |||
| 56 | config ARCH_INLINE_READ_UNLOCK_IRQ | ||
| 57 | bool | ||
| 58 | |||
| 59 | config ARCH_INLINE_READ_UNLOCK_IRQRESTORE | ||
| 60 | bool | ||
| 61 | |||
| 62 | |||
| 63 | config ARCH_INLINE_WRITE_TRYLOCK | ||
| 64 | bool | ||
| 65 | |||
| 66 | config ARCH_INLINE_WRITE_LOCK | ||
| 67 | bool | ||
| 68 | |||
| 69 | config ARCH_INLINE_WRITE_LOCK_BH | ||
| 70 | bool | ||
| 71 | |||
| 72 | config ARCH_INLINE_WRITE_LOCK_IRQ | ||
| 73 | bool | ||
| 74 | |||
| 75 | config ARCH_INLINE_WRITE_LOCK_IRQSAVE | ||
| 76 | bool | ||
| 77 | |||
| 78 | config ARCH_INLINE_WRITE_UNLOCK | ||
| 79 | bool | ||
| 80 | |||
| 81 | config ARCH_INLINE_WRITE_UNLOCK_BH | ||
| 82 | bool | ||
| 83 | |||
| 84 | config ARCH_INLINE_WRITE_UNLOCK_IRQ | ||
| 85 | bool | ||
| 86 | |||
| 87 | config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
| 88 | bool | ||
| 89 | |||
| 90 | # | ||
| 91 | # lock_* functions are inlined when: | ||
| 92 | # - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y | ||
| 93 | # | ||
| 94 | # trylock_* functions are inlined when: | ||
| 95 | # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y | ||
| 96 | # | ||
| 97 | # unlock and unlock_irq functions are inlined when: | ||
| 98 | # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y | ||
| 99 | # or | ||
| 100 | # - DEBUG_SPINLOCK=n and PREEMPT=n | ||
| 101 | # | ||
| 102 | # unlock_bh and unlock_irqrestore functions are inlined when: | ||
| 103 | # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y | ||
| 104 | # | ||
| 105 | |||
| 106 | config INLINE_SPIN_TRYLOCK | ||
| 107 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK | ||
| 108 | |||
| 109 | config INLINE_SPIN_TRYLOCK_BH | ||
| 110 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH | ||
| 111 | |||
| 112 | config INLINE_SPIN_LOCK | ||
| 113 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK | ||
| 114 | |||
| 115 | config INLINE_SPIN_LOCK_BH | ||
| 116 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
| 117 | ARCH_INLINE_SPIN_LOCK_BH | ||
| 118 | |||
| 119 | config INLINE_SPIN_LOCK_IRQ | ||
| 120 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
| 121 | ARCH_INLINE_SPIN_LOCK_IRQ | ||
| 122 | |||
| 123 | config INLINE_SPIN_LOCK_IRQSAVE | ||
| 124 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
| 125 | ARCH_INLINE_SPIN_LOCK_IRQSAVE | ||
| 126 | |||
| 127 | config INLINE_SPIN_UNLOCK | ||
| 128 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK) | ||
| 129 | |||
| 130 | config INLINE_SPIN_UNLOCK_BH | ||
| 131 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH | ||
| 132 | |||
| 133 | config INLINE_SPIN_UNLOCK_IRQ | ||
| 134 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_IRQ) | ||
| 135 | |||
| 136 | config INLINE_SPIN_UNLOCK_IRQRESTORE | ||
| 137 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE | ||
| 138 | |||
| 139 | |||
| 140 | config INLINE_READ_TRYLOCK | ||
| 141 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK | ||
| 142 | |||
| 143 | config INLINE_READ_LOCK | ||
| 144 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK | ||
| 145 | |||
| 146 | config INLINE_READ_LOCK_BH | ||
| 147 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
| 148 | ARCH_INLINE_READ_LOCK_BH | ||
| 149 | |||
| 150 | config INLINE_READ_LOCK_IRQ | ||
| 151 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
| 152 | ARCH_INLINE_READ_LOCK_IRQ | ||
| 153 | |||
| 154 | config INLINE_READ_LOCK_IRQSAVE | ||
| 155 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
| 156 | ARCH_INLINE_READ_LOCK_IRQSAVE | ||
| 157 | |||
| 158 | config INLINE_READ_UNLOCK | ||
| 159 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK) | ||
| 160 | |||
| 161 | config INLINE_READ_UNLOCK_BH | ||
| 162 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH | ||
| 163 | |||
| 164 | config INLINE_READ_UNLOCK_IRQ | ||
| 165 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_IRQ) | ||
| 166 | |||
| 167 | config INLINE_READ_UNLOCK_IRQRESTORE | ||
| 168 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE | ||
| 169 | |||
| 170 | |||
| 171 | config INLINE_WRITE_TRYLOCK | ||
| 172 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK | ||
| 173 | |||
| 174 | config INLINE_WRITE_LOCK | ||
| 175 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK | ||
| 176 | |||
| 177 | config INLINE_WRITE_LOCK_BH | ||
| 178 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
| 179 | ARCH_INLINE_WRITE_LOCK_BH | ||
| 180 | |||
| 181 | config INLINE_WRITE_LOCK_IRQ | ||
| 182 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
| 183 | ARCH_INLINE_WRITE_LOCK_IRQ | ||
| 184 | |||
| 185 | config INLINE_WRITE_LOCK_IRQSAVE | ||
| 186 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
| 187 | ARCH_INLINE_WRITE_LOCK_IRQSAVE | ||
| 188 | |||
| 189 | config INLINE_WRITE_UNLOCK | ||
| 190 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK) | ||
| 191 | |||
| 192 | config INLINE_WRITE_UNLOCK_BH | ||
| 193 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH | ||
| 194 | |||
| 195 | config INLINE_WRITE_UNLOCK_IRQ | ||
| 196 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_IRQ) | ||
| 197 | |||
| 198 | config INLINE_WRITE_UNLOCK_IRQRESTORE | ||
| 199 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
| 200 | |||
| 201 | config MUTEX_SPIN_ON_OWNER | ||
| 202 | def_bool SMP && !DEBUG_MUTEXES && !HAVE_DEFAULT_NO_SPIN_MUTEXES | ||
diff --git a/kernel/mutex.c b/kernel/mutex.c index 947b3ad551f8..632f04c57d82 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c | |||
| @@ -148,8 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
| 148 | 148 | ||
| 149 | preempt_disable(); | 149 | preempt_disable(); |
| 150 | mutex_acquire(&lock->dep_map, subclass, 0, ip); | 150 | mutex_acquire(&lock->dep_map, subclass, 0, ip); |
| 151 | #if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \ | 151 | |
| 152 | !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES) | 152 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
| 153 | /* | 153 | /* |
| 154 | * Optimistic spinning. | 154 | * Optimistic spinning. |
| 155 | * | 155 | * |
diff --git a/kernel/sched.c b/kernel/sched.c index 3c11ae0a948d..ec0af1fcb195 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -5481,7 +5481,7 @@ need_resched_nonpreemptible: | |||
| 5481 | } | 5481 | } |
| 5482 | EXPORT_SYMBOL(schedule); | 5482 | EXPORT_SYMBOL(schedule); |
| 5483 | 5483 | ||
| 5484 | #ifdef CONFIG_SMP | 5484 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
| 5485 | /* | 5485 | /* |
| 5486 | * Look out! "owner" is an entirely speculative pointer | 5486 | * Look out! "owner" is an entirely speculative pointer |
| 5487 | * access and not reliable. | 5487 | * access and not reliable. |
diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 5ddab730cb2f..41e042219ff6 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c | |||
| @@ -21,145 +21,28 @@ | |||
| 21 | #include <linux/debug_locks.h> | 21 | #include <linux/debug_locks.h> |
| 22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
| 23 | 23 | ||
| 24 | #ifndef _spin_trylock | ||
| 25 | int __lockfunc _spin_trylock(spinlock_t *lock) | ||
| 26 | { | ||
| 27 | return __spin_trylock(lock); | ||
| 28 | } | ||
| 29 | EXPORT_SYMBOL(_spin_trylock); | ||
| 30 | #endif | ||
| 31 | |||
| 32 | #ifndef _read_trylock | ||
| 33 | int __lockfunc _read_trylock(rwlock_t *lock) | ||
| 34 | { | ||
| 35 | return __read_trylock(lock); | ||
| 36 | } | ||
| 37 | EXPORT_SYMBOL(_read_trylock); | ||
| 38 | #endif | ||
| 39 | |||
| 40 | #ifndef _write_trylock | ||
| 41 | int __lockfunc _write_trylock(rwlock_t *lock) | ||
| 42 | { | ||
| 43 | return __write_trylock(lock); | ||
| 44 | } | ||
| 45 | EXPORT_SYMBOL(_write_trylock); | ||
| 46 | #endif | ||
| 47 | |||
| 48 | /* | 24 | /* |
| 49 | * If lockdep is enabled then we use the non-preemption spin-ops | 25 | * If lockdep is enabled then we use the non-preemption spin-ops |
| 50 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | 26 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are |
| 51 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): | 27 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): |
| 52 | */ | 28 | */ |
| 53 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | 29 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
| 54 | |||
| 55 | #ifndef _read_lock | ||
| 56 | void __lockfunc _read_lock(rwlock_t *lock) | ||
| 57 | { | ||
| 58 | __read_lock(lock); | ||
| 59 | } | ||
| 60 | EXPORT_SYMBOL(_read_lock); | ||
| 61 | #endif | ||
| 62 | |||
| 63 | #ifndef _spin_lock_irqsave | ||
| 64 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | ||
| 65 | { | ||
| 66 | return __spin_lock_irqsave(lock); | ||
| 67 | } | ||
| 68 | EXPORT_SYMBOL(_spin_lock_irqsave); | ||
| 69 | #endif | ||
| 70 | |||
| 71 | #ifndef _spin_lock_irq | ||
| 72 | void __lockfunc _spin_lock_irq(spinlock_t *lock) | ||
| 73 | { | ||
| 74 | __spin_lock_irq(lock); | ||
| 75 | } | ||
| 76 | EXPORT_SYMBOL(_spin_lock_irq); | ||
| 77 | #endif | ||
| 78 | |||
| 79 | #ifndef _spin_lock_bh | ||
| 80 | void __lockfunc _spin_lock_bh(spinlock_t *lock) | ||
| 81 | { | ||
| 82 | __spin_lock_bh(lock); | ||
| 83 | } | ||
| 84 | EXPORT_SYMBOL(_spin_lock_bh); | ||
| 85 | #endif | ||
| 86 | |||
| 87 | #ifndef _read_lock_irqsave | ||
| 88 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | ||
| 89 | { | ||
| 90 | return __read_lock_irqsave(lock); | ||
| 91 | } | ||
| 92 | EXPORT_SYMBOL(_read_lock_irqsave); | ||
| 93 | #endif | ||
| 94 | |||
| 95 | #ifndef _read_lock_irq | ||
| 96 | void __lockfunc _read_lock_irq(rwlock_t *lock) | ||
| 97 | { | ||
| 98 | __read_lock_irq(lock); | ||
| 99 | } | ||
| 100 | EXPORT_SYMBOL(_read_lock_irq); | ||
| 101 | #endif | ||
| 102 | |||
| 103 | #ifndef _read_lock_bh | ||
| 104 | void __lockfunc _read_lock_bh(rwlock_t *lock) | ||
| 105 | { | ||
| 106 | __read_lock_bh(lock); | ||
| 107 | } | ||
| 108 | EXPORT_SYMBOL(_read_lock_bh); | ||
| 109 | #endif | ||
| 110 | |||
| 111 | #ifndef _write_lock_irqsave | ||
| 112 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | ||
| 113 | { | ||
| 114 | return __write_lock_irqsave(lock); | ||
| 115 | } | ||
| 116 | EXPORT_SYMBOL(_write_lock_irqsave); | ||
| 117 | #endif | ||
| 118 | |||
| 119 | #ifndef _write_lock_irq | ||
| 120 | void __lockfunc _write_lock_irq(rwlock_t *lock) | ||
| 121 | { | ||
| 122 | __write_lock_irq(lock); | ||
| 123 | } | ||
| 124 | EXPORT_SYMBOL(_write_lock_irq); | ||
| 125 | #endif | ||
| 126 | |||
| 127 | #ifndef _write_lock_bh | ||
| 128 | void __lockfunc _write_lock_bh(rwlock_t *lock) | ||
| 129 | { | ||
| 130 | __write_lock_bh(lock); | ||
| 131 | } | ||
| 132 | EXPORT_SYMBOL(_write_lock_bh); | ||
| 133 | #endif | ||
| 134 | |||
| 135 | #ifndef _spin_lock | ||
| 136 | void __lockfunc _spin_lock(spinlock_t *lock) | ||
| 137 | { | ||
| 138 | __spin_lock(lock); | ||
| 139 | } | ||
| 140 | EXPORT_SYMBOL(_spin_lock); | ||
| 141 | #endif | ||
| 142 | |||
| 143 | #ifndef _write_lock | ||
| 144 | void __lockfunc _write_lock(rwlock_t *lock) | ||
| 145 | { | ||
| 146 | __write_lock(lock); | ||
| 147 | } | ||
| 148 | EXPORT_SYMBOL(_write_lock); | ||
| 149 | #endif | ||
| 150 | |||
| 151 | #else /* CONFIG_PREEMPT: */ | ||
| 152 | |||
| 153 | /* | 30 | /* |
| 31 | * The __lock_function inlines are taken from | ||
| 32 | * include/linux/spinlock_api_smp.h | ||
| 33 | */ | ||
| 34 | #else | ||
| 35 | /* | ||
| 36 | * We build the __lock_function inlines here. They are too large for | ||
| 37 | * inlining all over the place, but here is only one user per function | ||
| 38 | * which embeds them into the calling _lock_function below. | ||
| 39 | * | ||
| 154 | * This could be a long-held lock. We both prepare to spin for a long | 40 | * This could be a long-held lock. We both prepare to spin for a long |
| 155 | * time (making _this_ CPU preemptable if possible), and we also signal | 41 | * time (making _this_ CPU preemptable if possible), and we also signal |
| 156 | * towards that other CPU that it should break the lock ASAP. | 42 | * towards that other CPU that it should break the lock ASAP. |
| 157 | * | ||
| 158 | * (We do this in a function because inlining it would be excessive.) | ||
| 159 | */ | 43 | */ |
| 160 | |||
| 161 | #define BUILD_LOCK_OPS(op, locktype) \ | 44 | #define BUILD_LOCK_OPS(op, locktype) \ |
| 162 | void __lockfunc _##op##_lock(locktype##_t *lock) \ | 45 | void __lockfunc __##op##_lock(locktype##_t *lock) \ |
| 163 | { \ | 46 | { \ |
| 164 | for (;;) { \ | 47 | for (;;) { \ |
| 165 | preempt_disable(); \ | 48 | preempt_disable(); \ |
| @@ -175,9 +58,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \ | |||
| 175 | (lock)->break_lock = 0; \ | 58 | (lock)->break_lock = 0; \ |
| 176 | } \ | 59 | } \ |
| 177 | \ | 60 | \ |
| 178 | EXPORT_SYMBOL(_##op##_lock); \ | 61 | unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ |
| 179 | \ | ||
| 180 | unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ | ||
| 181 | { \ | 62 | { \ |
| 182 | unsigned long flags; \ | 63 | unsigned long flags; \ |
| 183 | \ | 64 | \ |
| @@ -198,16 +79,12 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ | |||
| 198 | return flags; \ | 79 | return flags; \ |
| 199 | } \ | 80 | } \ |
| 200 | \ | 81 | \ |
| 201 | EXPORT_SYMBOL(_##op##_lock_irqsave); \ | 82 | void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ |
| 202 | \ | ||
| 203 | void __lockfunc _##op##_lock_irq(locktype##_t *lock) \ | ||
| 204 | { \ | 83 | { \ |
| 205 | _##op##_lock_irqsave(lock); \ | 84 | _##op##_lock_irqsave(lock); \ |
| 206 | } \ | 85 | } \ |
| 207 | \ | 86 | \ |
| 208 | EXPORT_SYMBOL(_##op##_lock_irq); \ | 87 | void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ |
| 209 | \ | ||
| 210 | void __lockfunc _##op##_lock_bh(locktype##_t *lock) \ | ||
| 211 | { \ | 88 | { \ |
| 212 | unsigned long flags; \ | 89 | unsigned long flags; \ |
| 213 | \ | 90 | \ |
| @@ -220,23 +97,21 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \ | |||
| 220 | local_bh_disable(); \ | 97 | local_bh_disable(); \ |
| 221 | local_irq_restore(flags); \ | 98 | local_irq_restore(flags); \ |
| 222 | } \ | 99 | } \ |
| 223 | \ | ||
| 224 | EXPORT_SYMBOL(_##op##_lock_bh) | ||
| 225 | 100 | ||
| 226 | /* | 101 | /* |
| 227 | * Build preemption-friendly versions of the following | 102 | * Build preemption-friendly versions of the following |
| 228 | * lock-spinning functions: | 103 | * lock-spinning functions: |
| 229 | * | 104 | * |
| 230 | * _[spin|read|write]_lock() | 105 | * __[spin|read|write]_lock() |
| 231 | * _[spin|read|write]_lock_irq() | 106 | * __[spin|read|write]_lock_irq() |
| 232 | * _[spin|read|write]_lock_irqsave() | 107 | * __[spin|read|write]_lock_irqsave() |
| 233 | * _[spin|read|write]_lock_bh() | 108 | * __[spin|read|write]_lock_bh() |
| 234 | */ | 109 | */ |
| 235 | BUILD_LOCK_OPS(spin, spinlock); | 110 | BUILD_LOCK_OPS(spin, spinlock); |
| 236 | BUILD_LOCK_OPS(read, rwlock); | 111 | BUILD_LOCK_OPS(read, rwlock); |
| 237 | BUILD_LOCK_OPS(write, rwlock); | 112 | BUILD_LOCK_OPS(write, rwlock); |
| 238 | 113 | ||
| 239 | #endif /* CONFIG_PREEMPT */ | 114 | #endif |
| 240 | 115 | ||
| 241 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 116 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 242 | 117 | ||
| @@ -248,7 +123,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) | |||
| 248 | } | 123 | } |
| 249 | EXPORT_SYMBOL(_spin_lock_nested); | 124 | EXPORT_SYMBOL(_spin_lock_nested); |
| 250 | 125 | ||
| 251 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) | 126 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, |
| 127 | int subclass) | ||
| 252 | { | 128 | { |
| 253 | unsigned long flags; | 129 | unsigned long flags; |
| 254 | 130 | ||
| @@ -272,7 +148,127 @@ EXPORT_SYMBOL(_spin_lock_nest_lock); | |||
| 272 | 148 | ||
| 273 | #endif | 149 | #endif |
| 274 | 150 | ||
| 275 | #ifndef _spin_unlock | 151 | #ifndef CONFIG_INLINE_SPIN_TRYLOCK |
| 152 | int __lockfunc _spin_trylock(spinlock_t *lock) | ||
| 153 | { | ||
| 154 | return __spin_trylock(lock); | ||
| 155 | } | ||
| 156 | EXPORT_SYMBOL(_spin_trylock); | ||
| 157 | #endif | ||
| 158 | |||
| 159 | #ifndef CONFIG_INLINE_READ_TRYLOCK | ||
| 160 | int __lockfunc _read_trylock(rwlock_t *lock) | ||
| 161 | { | ||
| 162 | return __read_trylock(lock); | ||
| 163 | } | ||
| 164 | EXPORT_SYMBOL(_read_trylock); | ||
| 165 | #endif | ||
| 166 | |||
| 167 | #ifndef CONFIG_INLINE_WRITE_TRYLOCK | ||
| 168 | int __lockfunc _write_trylock(rwlock_t *lock) | ||
| 169 | { | ||
| 170 | return __write_trylock(lock); | ||
| 171 | } | ||
| 172 | EXPORT_SYMBOL(_write_trylock); | ||
| 173 | #endif | ||
| 174 | |||
| 175 | #ifndef CONFIG_INLINE_READ_LOCK | ||
| 176 | void __lockfunc _read_lock(rwlock_t *lock) | ||
| 177 | { | ||
| 178 | __read_lock(lock); | ||
| 179 | } | ||
| 180 | EXPORT_SYMBOL(_read_lock); | ||
| 181 | #endif | ||
| 182 | |||
| 183 | #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE | ||
| 184 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | ||
| 185 | { | ||
| 186 | return __spin_lock_irqsave(lock); | ||
| 187 | } | ||
| 188 | EXPORT_SYMBOL(_spin_lock_irqsave); | ||
| 189 | #endif | ||
| 190 | |||
| 191 | #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ | ||
| 192 | void __lockfunc _spin_lock_irq(spinlock_t *lock) | ||
| 193 | { | ||
| 194 | __spin_lock_irq(lock); | ||
| 195 | } | ||
| 196 | EXPORT_SYMBOL(_spin_lock_irq); | ||
| 197 | #endif | ||
| 198 | |||
| 199 | #ifndef CONFIG_INLINE_SPIN_LOCK_BH | ||
| 200 | void __lockfunc _spin_lock_bh(spinlock_t *lock) | ||
| 201 | { | ||
| 202 | __spin_lock_bh(lock); | ||
| 203 | } | ||
| 204 | EXPORT_SYMBOL(_spin_lock_bh); | ||
| 205 | #endif | ||
| 206 | |||
| 207 | #ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE | ||
| 208 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | ||
| 209 | { | ||
| 210 | return __read_lock_irqsave(lock); | ||
| 211 | } | ||
| 212 | EXPORT_SYMBOL(_read_lock_irqsave); | ||
| 213 | #endif | ||
| 214 | |||
| 215 | #ifndef CONFIG_INLINE_READ_LOCK_IRQ | ||
| 216 | void __lockfunc _read_lock_irq(rwlock_t *lock) | ||
| 217 | { | ||
| 218 | __read_lock_irq(lock); | ||
| 219 | } | ||
| 220 | EXPORT_SYMBOL(_read_lock_irq); | ||
| 221 | #endif | ||
| 222 | |||
| 223 | #ifndef CONFIG_INLINE_READ_LOCK_BH | ||
| 224 | void __lockfunc _read_lock_bh(rwlock_t *lock) | ||
| 225 | { | ||
| 226 | __read_lock_bh(lock); | ||
| 227 | } | ||
| 228 | EXPORT_SYMBOL(_read_lock_bh); | ||
| 229 | #endif | ||
| 230 | |||
| 231 | #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE | ||
| 232 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | ||
| 233 | { | ||
| 234 | return __write_lock_irqsave(lock); | ||
| 235 | } | ||
| 236 | EXPORT_SYMBOL(_write_lock_irqsave); | ||
| 237 | #endif | ||
| 238 | |||
| 239 | #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ | ||
| 240 | void __lockfunc _write_lock_irq(rwlock_t *lock) | ||
| 241 | { | ||
| 242 | __write_lock_irq(lock); | ||
| 243 | } | ||
| 244 | EXPORT_SYMBOL(_write_lock_irq); | ||
| 245 | #endif | ||
| 246 | |||
| 247 | #ifndef CONFIG_INLINE_WRITE_LOCK_BH | ||
| 248 | void __lockfunc _write_lock_bh(rwlock_t *lock) | ||
| 249 | { | ||
| 250 | __write_lock_bh(lock); | ||
| 251 | } | ||
| 252 | EXPORT_SYMBOL(_write_lock_bh); | ||
| 253 | #endif | ||
| 254 | |||
| 255 | #ifndef CONFIG_INLINE_SPIN_LOCK | ||
| 256 | void __lockfunc _spin_lock(spinlock_t *lock) | ||
| 257 | { | ||
| 258 | __spin_lock(lock); | ||
| 259 | } | ||
| 260 | EXPORT_SYMBOL(_spin_lock); | ||
| 261 | #endif | ||
| 262 | |||
| 263 | #ifndef CONFIG_INLINE_WRITE_LOCK | ||
| 264 | void __lockfunc _write_lock(rwlock_t *lock) | ||
| 265 | { | ||
| 266 | __write_lock(lock); | ||
| 267 | } | ||
| 268 | EXPORT_SYMBOL(_write_lock); | ||
| 269 | #endif | ||
| 270 | |||
| 271 | #ifndef CONFIG_INLINE_SPIN_UNLOCK | ||
| 276 | void __lockfunc _spin_unlock(spinlock_t *lock) | 272 | void __lockfunc _spin_unlock(spinlock_t *lock) |
| 277 | { | 273 | { |
| 278 | __spin_unlock(lock); | 274 | __spin_unlock(lock); |
| @@ -280,7 +276,7 @@ void __lockfunc _spin_unlock(spinlock_t *lock) | |||
| 280 | EXPORT_SYMBOL(_spin_unlock); | 276 | EXPORT_SYMBOL(_spin_unlock); |
| 281 | #endif | 277 | #endif |
| 282 | 278 | ||
| 283 | #ifndef _write_unlock | 279 | #ifndef CONFIG_INLINE_WRITE_UNLOCK |
| 284 | void __lockfunc _write_unlock(rwlock_t *lock) | 280 | void __lockfunc _write_unlock(rwlock_t *lock) |
| 285 | { | 281 | { |
| 286 | __write_unlock(lock); | 282 | __write_unlock(lock); |
| @@ -288,7 +284,7 @@ void __lockfunc _write_unlock(rwlock_t *lock) | |||
| 288 | EXPORT_SYMBOL(_write_unlock); | 284 | EXPORT_SYMBOL(_write_unlock); |
| 289 | #endif | 285 | #endif |
| 290 | 286 | ||
| 291 | #ifndef _read_unlock | 287 | #ifndef CONFIG_INLINE_READ_UNLOCK |
| 292 | void __lockfunc _read_unlock(rwlock_t *lock) | 288 | void __lockfunc _read_unlock(rwlock_t *lock) |
| 293 | { | 289 | { |
| 294 | __read_unlock(lock); | 290 | __read_unlock(lock); |
| @@ -296,7 +292,7 @@ void __lockfunc _read_unlock(rwlock_t *lock) | |||
| 296 | EXPORT_SYMBOL(_read_unlock); | 292 | EXPORT_SYMBOL(_read_unlock); |
| 297 | #endif | 293 | #endif |
| 298 | 294 | ||
| 299 | #ifndef _spin_unlock_irqrestore | 295 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE |
| 300 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | 296 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) |
| 301 | { | 297 | { |
| 302 | __spin_unlock_irqrestore(lock, flags); | 298 | __spin_unlock_irqrestore(lock, flags); |
| @@ -304,7 +300,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | |||
| 304 | EXPORT_SYMBOL(_spin_unlock_irqrestore); | 300 | EXPORT_SYMBOL(_spin_unlock_irqrestore); |
| 305 | #endif | 301 | #endif |
| 306 | 302 | ||
| 307 | #ifndef _spin_unlock_irq | 303 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ |
| 308 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) | 304 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) |
| 309 | { | 305 | { |
| 310 | __spin_unlock_irq(lock); | 306 | __spin_unlock_irq(lock); |
| @@ -312,7 +308,7 @@ void __lockfunc _spin_unlock_irq(spinlock_t *lock) | |||
| 312 | EXPORT_SYMBOL(_spin_unlock_irq); | 308 | EXPORT_SYMBOL(_spin_unlock_irq); |
| 313 | #endif | 309 | #endif |
| 314 | 310 | ||
| 315 | #ifndef _spin_unlock_bh | 311 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH |
| 316 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) | 312 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) |
| 317 | { | 313 | { |
| 318 | __spin_unlock_bh(lock); | 314 | __spin_unlock_bh(lock); |
| @@ -320,7 +316,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock) | |||
| 320 | EXPORT_SYMBOL(_spin_unlock_bh); | 316 | EXPORT_SYMBOL(_spin_unlock_bh); |
| 321 | #endif | 317 | #endif |
| 322 | 318 | ||
| 323 | #ifndef _read_unlock_irqrestore | 319 | #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE |
| 324 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 320 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
| 325 | { | 321 | { |
| 326 | __read_unlock_irqrestore(lock, flags); | 322 | __read_unlock_irqrestore(lock, flags); |
| @@ -328,7 +324,7 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
| 328 | EXPORT_SYMBOL(_read_unlock_irqrestore); | 324 | EXPORT_SYMBOL(_read_unlock_irqrestore); |
| 329 | #endif | 325 | #endif |
| 330 | 326 | ||
| 331 | #ifndef _read_unlock_irq | 327 | #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ |
| 332 | void __lockfunc _read_unlock_irq(rwlock_t *lock) | 328 | void __lockfunc _read_unlock_irq(rwlock_t *lock) |
| 333 | { | 329 | { |
| 334 | __read_unlock_irq(lock); | 330 | __read_unlock_irq(lock); |
| @@ -336,7 +332,7 @@ void __lockfunc _read_unlock_irq(rwlock_t *lock) | |||
| 336 | EXPORT_SYMBOL(_read_unlock_irq); | 332 | EXPORT_SYMBOL(_read_unlock_irq); |
| 337 | #endif | 333 | #endif |
| 338 | 334 | ||
| 339 | #ifndef _read_unlock_bh | 335 | #ifndef CONFIG_INLINE_READ_UNLOCK_BH |
| 340 | void __lockfunc _read_unlock_bh(rwlock_t *lock) | 336 | void __lockfunc _read_unlock_bh(rwlock_t *lock) |
| 341 | { | 337 | { |
| 342 | __read_unlock_bh(lock); | 338 | __read_unlock_bh(lock); |
| @@ -344,7 +340,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock) | |||
| 344 | EXPORT_SYMBOL(_read_unlock_bh); | 340 | EXPORT_SYMBOL(_read_unlock_bh); |
| 345 | #endif | 341 | #endif |
| 346 | 342 | ||
| 347 | #ifndef _write_unlock_irqrestore | 343 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE |
| 348 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 344 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
| 349 | { | 345 | { |
| 350 | __write_unlock_irqrestore(lock, flags); | 346 | __write_unlock_irqrestore(lock, flags); |
| @@ -352,7 +348,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
| 352 | EXPORT_SYMBOL(_write_unlock_irqrestore); | 348 | EXPORT_SYMBOL(_write_unlock_irqrestore); |
| 353 | #endif | 349 | #endif |
| 354 | 350 | ||
| 355 | #ifndef _write_unlock_irq | 351 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ |
| 356 | void __lockfunc _write_unlock_irq(rwlock_t *lock) | 352 | void __lockfunc _write_unlock_irq(rwlock_t *lock) |
| 357 | { | 353 | { |
| 358 | __write_unlock_irq(lock); | 354 | __write_unlock_irq(lock); |
| @@ -360,7 +356,7 @@ void __lockfunc _write_unlock_irq(rwlock_t *lock) | |||
| 360 | EXPORT_SYMBOL(_write_unlock_irq); | 356 | EXPORT_SYMBOL(_write_unlock_irq); |
| 361 | #endif | 357 | #endif |
| 362 | 358 | ||
| 363 | #ifndef _write_unlock_bh | 359 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH |
| 364 | void __lockfunc _write_unlock_bh(rwlock_t *lock) | 360 | void __lockfunc _write_unlock_bh(rwlock_t *lock) |
| 365 | { | 361 | { |
| 366 | __write_unlock_bh(lock); | 362 | __write_unlock_bh(lock); |
| @@ -368,7 +364,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock) | |||
| 368 | EXPORT_SYMBOL(_write_unlock_bh); | 364 | EXPORT_SYMBOL(_write_unlock_bh); |
| 369 | #endif | 365 | #endif |
| 370 | 366 | ||
| 371 | #ifndef _spin_trylock_bh | 367 | #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH |
| 372 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) | 368 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) |
| 373 | { | 369 | { |
| 374 | return __spin_trylock_bh(lock); | 370 | return __spin_trylock_bh(lock); |
