-rw-r--r--  include/linux/rwlock.h            |  50
-rw-r--r--  include/linux/rwlock_api_smp.h    | 113
-rw-r--r--  include/linux/spinlock.h          |  37
-rw-r--r--  include/linux/spinlock_api_smp.h  |  79
-rw-r--r--  include/linux/spinlock_api_up.h   |  64
-rw-r--r--  kernel/spinlock.c                 | 192
6 files changed, 274 insertions, 261 deletions
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index bd799bc6d086..71e0b00b6f2c 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
| @@ -38,7 +38,7 @@ do { \ | |||
| 38 | extern int do_raw_write_trylock(rwlock_t *lock); | 38 | extern int do_raw_write_trylock(rwlock_t *lock); |
| 39 | extern void do_raw_write_unlock(rwlock_t *lock); | 39 | extern void do_raw_write_unlock(rwlock_t *lock); |
| 40 | #else | 40 | #else |
| 41 | # define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) | 41 | # define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) |
| 42 | # define do_raw_read_lock_flags(lock, flags) \ | 42 | # define do_raw_read_lock_flags(lock, flags) \ |
| 43 | arch_read_lock_flags(&(lock)->raw_lock, *(flags)) | 43 | arch_read_lock_flags(&(lock)->raw_lock, *(flags)) |
| 44 | # define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) | 44 | # define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) |
| @@ -58,23 +58,23 @@ do { \ | |||
| 58 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various | 58 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various |
| 59 | * methods are defined as nops in the case they are not required. | 59 | * methods are defined as nops in the case they are not required. |
| 60 | */ | 60 | */ |
| 61 | #define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) | 61 | #define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock)) |
| 62 | #define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) | 62 | #define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock)) |
| 63 | 63 | ||
| 64 | #define write_lock(lock) _write_lock(lock) | 64 | #define write_lock(lock) _raw_write_lock(lock) |
| 65 | #define read_lock(lock) _read_lock(lock) | 65 | #define read_lock(lock) _raw_read_lock(lock) |
| 66 | 66 | ||
| 67 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 67 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
| 68 | 68 | ||
| 69 | #define read_lock_irqsave(lock, flags) \ | 69 | #define read_lock_irqsave(lock, flags) \ |
| 70 | do { \ | 70 | do { \ |
| 71 | typecheck(unsigned long, flags); \ | 71 | typecheck(unsigned long, flags); \ |
| 72 | flags = _read_lock_irqsave(lock); \ | 72 | flags = _raw_read_lock_irqsave(lock); \ |
| 73 | } while (0) | 73 | } while (0) |
| 74 | #define write_lock_irqsave(lock, flags) \ | 74 | #define write_lock_irqsave(lock, flags) \ |
| 75 | do { \ | 75 | do { \ |
| 76 | typecheck(unsigned long, flags); \ | 76 | typecheck(unsigned long, flags); \ |
| 77 | flags = _write_lock_irqsave(lock); \ | 77 | flags = _raw_write_lock_irqsave(lock); \ |
| 78 | } while (0) | 78 | } while (0) |
| 79 | 79 | ||
| 80 | #else | 80 | #else |
| @@ -82,38 +82,38 @@ do { \ | |||
| 82 | #define read_lock_irqsave(lock, flags) \ | 82 | #define read_lock_irqsave(lock, flags) \ |
| 83 | do { \ | 83 | do { \ |
| 84 | typecheck(unsigned long, flags); \ | 84 | typecheck(unsigned long, flags); \ |
| 85 | _read_lock_irqsave(lock, flags); \ | 85 | _raw_read_lock_irqsave(lock, flags); \ |
| 86 | } while (0) | 86 | } while (0) |
| 87 | #define write_lock_irqsave(lock, flags) \ | 87 | #define write_lock_irqsave(lock, flags) \ |
| 88 | do { \ | 88 | do { \ |
| 89 | typecheck(unsigned long, flags); \ | 89 | typecheck(unsigned long, flags); \ |
| 90 | _write_lock_irqsave(lock, flags); \ | 90 | _raw_write_lock_irqsave(lock, flags); \ |
| 91 | } while (0) | 91 | } while (0) |
| 92 | 92 | ||
| 93 | #endif | 93 | #endif |
| 94 | 94 | ||
| 95 | #define read_lock_irq(lock) _read_lock_irq(lock) | 95 | #define read_lock_irq(lock) _raw_read_lock_irq(lock) |
| 96 | #define read_lock_bh(lock) _read_lock_bh(lock) | 96 | #define read_lock_bh(lock) _raw_read_lock_bh(lock) |
| 97 | #define write_lock_irq(lock) _write_lock_irq(lock) | 97 | #define write_lock_irq(lock) _raw_write_lock_irq(lock) |
| 98 | #define write_lock_bh(lock) _write_lock_bh(lock) | 98 | #define write_lock_bh(lock) _raw_write_lock_bh(lock) |
| 99 | #define read_unlock(lock) _read_unlock(lock) | 99 | #define read_unlock(lock) _raw_read_unlock(lock) |
| 100 | #define write_unlock(lock) _write_unlock(lock) | 100 | #define write_unlock(lock) _raw_write_unlock(lock) |
| 101 | #define read_unlock_irq(lock) _read_unlock_irq(lock) | 101 | #define read_unlock_irq(lock) _raw_read_unlock_irq(lock) |
| 102 | #define write_unlock_irq(lock) _write_unlock_irq(lock) | 102 | #define write_unlock_irq(lock) _raw_write_unlock_irq(lock) |
| 103 | 103 | ||
| 104 | #define read_unlock_irqrestore(lock, flags) \ | 104 | #define read_unlock_irqrestore(lock, flags) \ |
| 105 | do { \ | 105 | do { \ |
| 106 | typecheck(unsigned long, flags); \ | 106 | typecheck(unsigned long, flags); \ |
| 107 | _read_unlock_irqrestore(lock, flags); \ | 107 | _raw_read_unlock_irqrestore(lock, flags); \ |
| 108 | } while (0) | 108 | } while (0) |
| 109 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | 109 | #define read_unlock_bh(lock) _raw_read_unlock_bh(lock) |
| 110 | 110 | ||
| 111 | #define write_unlock_irqrestore(lock, flags) \ | 111 | #define write_unlock_irqrestore(lock, flags) \ |
| 112 | do { \ | 112 | do { \ |
| 113 | typecheck(unsigned long, flags); \ | 113 | typecheck(unsigned long, flags); \ |
| 114 | _write_unlock_irqrestore(lock, flags); \ | 114 | _raw_write_unlock_irqrestore(lock, flags); \ |
| 115 | } while (0) | 115 | } while (0) |
| 116 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | 116 | #define write_unlock_bh(lock) _raw_write_unlock_bh(lock) |
| 117 | 117 | ||
| 118 | #define write_trylock_irqsave(lock, flags) \ | 118 | #define write_trylock_irqsave(lock, flags) \ |
| 119 | ({ \ | 119 | ({ \ |
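For context: the rwlock.h hunks above only retarget the public wrappers at the renamed _raw_* entry points, so callers do not change. A minimal, illustrative caller is sketched below (not part of this patch; the example_* identifiers are made up, while DEFINE_RWLOCK(), read_lock_irqsave() and read_unlock_irqrestore() come from the existing headers):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);     /* hypothetical lock, for illustration only */
static int example_value;

static int example_read_value(void)
{
        unsigned long flags;
        int v;

        /* after this patch, expands to _raw_read_lock_irqsave() */
        read_lock_irqsave(&example_lock, flags);
        v = example_value;
        read_unlock_irqrestore(&example_lock, flags);

        return v;
}

The typecheck() on flags and the __cond_lock() annotations on the trylock variants behave exactly as before; only the leaf functions they forward to carry the new _raw_ prefix.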
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index b3ba5ae6a8c4..9c9f0495d37c 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
| @@ -15,102 +15,106 @@ | |||
| 15 | * Released under the General Public License (GPL). | 15 | * Released under the General Public License (GPL). |
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); | 18 | void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock); |
| 19 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); | 19 | void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock); |
| 20 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); | 20 | void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock); |
| 21 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); | 21 | void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock); |
| 22 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); | 22 | void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock); |
| 23 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); | 23 | void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock); |
| 24 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | 24 | unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) |
| 25 | __acquires(lock); | 25 | __acquires(lock); |
| 26 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | 26 | unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) |
| 27 | __acquires(lock); | 27 | __acquires(lock); |
| 28 | int __lockfunc _read_trylock(rwlock_t *lock); | 28 | int __lockfunc _raw_read_trylock(rwlock_t *lock); |
| 29 | int __lockfunc _write_trylock(rwlock_t *lock); | 29 | int __lockfunc _raw_write_trylock(rwlock_t *lock); |
| 30 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock); | 30 | void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock); |
| 31 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock); | 31 | void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock); |
| 32 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock); | 32 | void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock); |
| 33 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock); | 33 | void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock); |
| 34 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock); | 34 | void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock); |
| 35 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock); | 35 | void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock); |
| 36 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 36 | void __lockfunc |
| | | 37 | _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
| 37 | __releases(lock); | 38 | __releases(lock); |
| 38 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 39 | void __lockfunc |
| | | 40 | _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
| 39 | __releases(lock); | 41 | __releases(lock); |
| 40 | 42 | ||
| 41 | #ifdef CONFIG_INLINE_READ_LOCK | 43 | #ifdef CONFIG_INLINE_READ_LOCK |
| 42 | #define _read_lock(lock) __read_lock(lock) | 44 | #define _raw_read_lock(lock) __raw_read_lock(lock) |
| 43 | #endif | 45 | #endif |
| 44 | 46 | ||
| 45 | #ifdef CONFIG_INLINE_WRITE_LOCK | 47 | #ifdef CONFIG_INLINE_WRITE_LOCK |
| 46 | #define _write_lock(lock) __write_lock(lock) | 48 | #define _raw_write_lock(lock) __raw_write_lock(lock) |
| 47 | #endif | 49 | #endif |
| 48 | 50 | ||
| 49 | #ifdef CONFIG_INLINE_READ_LOCK_BH | 51 | #ifdef CONFIG_INLINE_READ_LOCK_BH |
| 50 | #define _read_lock_bh(lock) __read_lock_bh(lock) | 52 | #define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock) |
| 51 | #endif | 53 | #endif |
| 52 | 54 | ||
| 53 | #ifdef CONFIG_INLINE_WRITE_LOCK_BH | 55 | #ifdef CONFIG_INLINE_WRITE_LOCK_BH |
| 54 | #define _write_lock_bh(lock) __write_lock_bh(lock) | 56 | #define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock) |
| 55 | #endif | 57 | #endif |
| 56 | 58 | ||
| 57 | #ifdef CONFIG_INLINE_READ_LOCK_IRQ | 59 | #ifdef CONFIG_INLINE_READ_LOCK_IRQ |
| 58 | #define _read_lock_irq(lock) __read_lock_irq(lock) | 60 | #define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock) |
| 59 | #endif | 61 | #endif |
| 60 | 62 | ||
| 61 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQ | 63 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQ |
| 62 | #define _write_lock_irq(lock) __write_lock_irq(lock) | 64 | #define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock) |
| 63 | #endif | 65 | #endif |
| 64 | 66 | ||
| 65 | #ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE | 67 | #ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE |
| 66 | #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) | 68 | #define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock) |
| 67 | #endif | 69 | #endif |
| 68 | 70 | ||
| 69 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE | 71 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE |
| 70 | #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) | 72 | #define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock) |
| 71 | #endif | 73 | #endif |
| 72 | 74 | ||
| 73 | #ifdef CONFIG_INLINE_READ_TRYLOCK | 75 | #ifdef CONFIG_INLINE_READ_TRYLOCK |
| 74 | #define _read_trylock(lock) __read_trylock(lock) | 76 | #define _raw_read_trylock(lock) __raw_read_trylock(lock) |
| 75 | #endif | 77 | #endif |
| 76 | 78 | ||
| 77 | #ifdef CONFIG_INLINE_WRITE_TRYLOCK | 79 | #ifdef CONFIG_INLINE_WRITE_TRYLOCK |
| 78 | #define _write_trylock(lock) __write_trylock(lock) | 80 | #define _raw_write_trylock(lock) __raw_write_trylock(lock) |
| 79 | #endif | 81 | #endif |
| 80 | 82 | ||
| 81 | #ifdef CONFIG_INLINE_READ_UNLOCK | 83 | #ifdef CONFIG_INLINE_READ_UNLOCK |
| 82 | #define _read_unlock(lock) __read_unlock(lock) | 84 | #define _raw_read_unlock(lock) __raw_read_unlock(lock) |
| 83 | #endif | 85 | #endif |
| 84 | 86 | ||
| 85 | #ifdef CONFIG_INLINE_WRITE_UNLOCK | 87 | #ifdef CONFIG_INLINE_WRITE_UNLOCK |
| 86 | #define _write_unlock(lock) __write_unlock(lock) | 88 | #define _raw_write_unlock(lock) __raw_write_unlock(lock) |
| 87 | #endif | 89 | #endif |
| 88 | 90 | ||
| 89 | #ifdef CONFIG_INLINE_READ_UNLOCK_BH | 91 | #ifdef CONFIG_INLINE_READ_UNLOCK_BH |
| 90 | #define _read_unlock_bh(lock) __read_unlock_bh(lock) | 92 | #define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock) |
| 91 | #endif | 93 | #endif |
| 92 | 94 | ||
| 93 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_BH | 95 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_BH |
| 94 | #define _write_unlock_bh(lock) __write_unlock_bh(lock) | 96 | #define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock) |
| 95 | #endif | 97 | #endif |
| 96 | 98 | ||
| 97 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQ | 99 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQ |
| 98 | #define _read_unlock_irq(lock) __read_unlock_irq(lock) | 100 | #define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock) |
| 99 | #endif | 101 | #endif |
| 100 | 102 | ||
| 101 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ | 103 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ |
| 102 | #define _write_unlock_irq(lock) __write_unlock_irq(lock) | 104 | #define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock) |
| 103 | #endif | 105 | #endif |
| 104 | 106 | ||
| 105 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE | 107 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE |
| 106 | #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) | 108 | #define _raw_read_unlock_irqrestore(lock, flags) \ |
| | | 109 | __raw_read_unlock_irqrestore(lock, flags) |
| 107 | #endif | 110 | #endif |
| 108 | 111 | ||
| 109 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE | 112 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE |
| 110 | #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) | 113 | #define _raw_write_unlock_irqrestore(lock, flags) \ |
| | | 114 | __raw_write_unlock_irqrestore(lock, flags) |
| 111 | #endif | 115 | #endif |
| 112 | 116 | ||
| 113 | static inline int __read_trylock(rwlock_t *lock) | 117 | static inline int __raw_read_trylock(rwlock_t *lock) |
| 114 | { | 118 | { |
| 115 | preempt_disable(); | 119 | preempt_disable(); |
| 116 | if (do_raw_read_trylock(lock)) { | 120 | if (do_raw_read_trylock(lock)) { |
| @@ -121,7 +125,7 @@ static inline int __read_trylock(rwlock_t *lock) | |||
| 121 | return 0; | 125 | return 0; |
| 122 | } | 126 | } |
| 123 | 127 | ||
| 124 | static inline int __write_trylock(rwlock_t *lock) | 128 | static inline int __raw_write_trylock(rwlock_t *lock) |
| 125 | { | 129 | { |
| 126 | preempt_disable(); | 130 | preempt_disable(); |
| 127 | if (do_raw_write_trylock(lock)) { | 131 | if (do_raw_write_trylock(lock)) { |
| @@ -139,14 +143,14 @@ static inline int __write_trylock(rwlock_t *lock) | |||
| 139 | */ | 143 | */ |
| 140 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | 144 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
| 141 | 145 | ||
| 142 | static inline void __read_lock(rwlock_t *lock) | 146 | static inline void __raw_read_lock(rwlock_t *lock) |
| 143 | { | 147 | { |
| 144 | preempt_disable(); | 148 | preempt_disable(); |
| 145 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | 149 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); |
| 146 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); | 150 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); |
| 147 | } | 151 | } |
| 148 | 152 | ||
| 149 | static inline unsigned long __read_lock_irqsave(rwlock_t *lock) | 153 | static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock) |
| 150 | { | 154 | { |
| 151 | unsigned long flags; | 155 | unsigned long flags; |
| 152 | 156 | ||
| @@ -158,7 +162,7 @@ static inline unsigned long __read_lock_irqsave(rwlock_t *lock) | |||
| 158 | return flags; | 162 | return flags; |
| 159 | } | 163 | } |
| 160 | 164 | ||
| 161 | static inline void __read_lock_irq(rwlock_t *lock) | 165 | static inline void __raw_read_lock_irq(rwlock_t *lock) |
| 162 | { | 166 | { |
| 163 | local_irq_disable(); | 167 | local_irq_disable(); |
| 164 | preempt_disable(); | 168 | preempt_disable(); |
| @@ -166,7 +170,7 @@ static inline void __read_lock_irq(rwlock_t *lock) | |||
| 166 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); | 170 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); |
| 167 | } | 171 | } |
| 168 | 172 | ||
| 169 | static inline void __read_lock_bh(rwlock_t *lock) | 173 | static inline void __raw_read_lock_bh(rwlock_t *lock) |
| 170 | { | 174 | { |
| 171 | local_bh_disable(); | 175 | local_bh_disable(); |
| 172 | preempt_disable(); | 176 | preempt_disable(); |
| @@ -174,7 +178,7 @@ static inline void __read_lock_bh(rwlock_t *lock) | |||
| 174 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); | 178 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); |
| 175 | } | 179 | } |
| 176 | 180 | ||
| 177 | static inline unsigned long __write_lock_irqsave(rwlock_t *lock) | 181 | static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock) |
| 178 | { | 182 | { |
| 179 | unsigned long flags; | 183 | unsigned long flags; |
| 180 | 184 | ||
| @@ -186,7 +190,7 @@ static inline unsigned long __write_lock_irqsave(rwlock_t *lock) | |||
| 186 | return flags; | 190 | return flags; |
| 187 | } | 191 | } |
| 188 | 192 | ||
| 189 | static inline void __write_lock_irq(rwlock_t *lock) | 193 | static inline void __raw_write_lock_irq(rwlock_t *lock) |
| 190 | { | 194 | { |
| 191 | local_irq_disable(); | 195 | local_irq_disable(); |
| 192 | preempt_disable(); | 196 | preempt_disable(); |
| @@ -194,7 +198,7 @@ static inline void __write_lock_irq(rwlock_t *lock) | |||
| 194 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); | 198 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); |
| 195 | } | 199 | } |
| 196 | 200 | ||
| 197 | static inline void __write_lock_bh(rwlock_t *lock) | 201 | static inline void __raw_write_lock_bh(rwlock_t *lock) |
| 198 | { | 202 | { |
| 199 | local_bh_disable(); | 203 | local_bh_disable(); |
| 200 | preempt_disable(); | 204 | preempt_disable(); |
| @@ -202,7 +206,7 @@ static inline void __write_lock_bh(rwlock_t *lock) | |||
| 202 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); | 206 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); |
| 203 | } | 207 | } |
| 204 | 208 | ||
| 205 | static inline void __write_lock(rwlock_t *lock) | 209 | static inline void __raw_write_lock(rwlock_t *lock) |
| 206 | { | 210 | { |
| 207 | preempt_disable(); | 211 | preempt_disable(); |
| 208 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | 212 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
| @@ -211,21 +215,22 @@ static inline void __write_lock(rwlock_t *lock) | |||
| 211 | 215 | ||
| 212 | #endif /* CONFIG_PREEMPT */ | 216 | #endif /* CONFIG_PREEMPT */ |
| 213 | 217 | ||
| 214 | static inline void __write_unlock(rwlock_t *lock) | 218 | static inline void __raw_write_unlock(rwlock_t *lock) |
| 215 | { | 219 | { |
| 216 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 220 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
| 217 | do_raw_write_unlock(lock); | 221 | do_raw_write_unlock(lock); |
| 218 | preempt_enable(); | 222 | preempt_enable(); |
| 219 | } | 223 | } |
| 220 | 224 | ||
| 221 | static inline void __read_unlock(rwlock_t *lock) | 225 | static inline void __raw_read_unlock(rwlock_t *lock) |
| 222 | { | 226 | { |
| 223 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 227 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
| 224 | do_raw_read_unlock(lock); | 228 | do_raw_read_unlock(lock); |
| 225 | preempt_enable(); | 229 | preempt_enable(); |
| 226 | } | 230 | } |
| 227 | 231 | ||
| 228 | static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 232 | static inline void |
| | | 233 | __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
| 229 | { | 234 | { |
| 230 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 235 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
| 231 | do_raw_read_unlock(lock); | 236 | do_raw_read_unlock(lock); |
| @@ -233,7 +238,7 @@ static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
| 233 | preempt_enable(); | 238 | preempt_enable(); |
| 234 | } | 239 | } |
| 235 | 240 | ||
| 236 | static inline void __read_unlock_irq(rwlock_t *lock) | 241 | static inline void __raw_read_unlock_irq(rwlock_t *lock) |
| 237 | { | 242 | { |
| 238 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 243 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
| 239 | do_raw_read_unlock(lock); | 244 | do_raw_read_unlock(lock); |
| @@ -241,7 +246,7 @@ static inline void __read_unlock_irq(rwlock_t *lock) | |||
| 241 | preempt_enable(); | 246 | preempt_enable(); |
| 242 | } | 247 | } |
| 243 | 248 | ||
| 244 | static inline void __read_unlock_bh(rwlock_t *lock) | 249 | static inline void __raw_read_unlock_bh(rwlock_t *lock) |
| 245 | { | 250 | { |
| 246 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 251 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
| 247 | do_raw_read_unlock(lock); | 252 | do_raw_read_unlock(lock); |
| @@ -249,7 +254,7 @@ static inline void __read_unlock_bh(rwlock_t *lock) | |||
| 249 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | 254 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); |
| 250 | } | 255 | } |
| 251 | 256 | ||
| 252 | static inline void __write_unlock_irqrestore(rwlock_t *lock, | 257 | static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, |
| 253 | unsigned long flags) | 258 | unsigned long flags) |
| 254 | { | 259 | { |
| 255 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 260 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
| @@ -258,7 +263,7 @@ static inline void __write_unlock_irqrestore(rwlock_t *lock, | |||
| 258 | preempt_enable(); | 263 | preempt_enable(); |
| 259 | } | 264 | } |
| 260 | 265 | ||
| 261 | static inline void __write_unlock_irq(rwlock_t *lock) | 266 | static inline void __raw_write_unlock_irq(rwlock_t *lock) |
| 262 | { | 267 | { |
| 263 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 268 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
| 264 | do_raw_write_unlock(lock); | 269 | do_raw_write_unlock(lock); |
| @@ -266,7 +271,7 @@ static inline void __write_unlock_irq(rwlock_t *lock) | |||
| 266 | preempt_enable(); | 271 | preempt_enable(); |
| 267 | } | 272 | } |
| 268 | 273 | ||
| 269 | static inline void __write_unlock_bh(rwlock_t *lock) | 274 | static inline void __raw_write_unlock_bh(rwlock_t *lock) |
| 270 | { | 275 | { |
| 271 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 276 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
| 272 | do_raw_write_unlock(lock); | 277 | do_raw_write_unlock(lock); |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0cbc58acf689..86088213334a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
| @@ -161,20 +161,22 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) | |||
| 161 | * various methods are defined as nops in the case they are not | 161 | * various methods are defined as nops in the case they are not |
| 162 | * required. | 162 | * required. |
| 163 | */ | 163 | */ |
| 164 | #define raw_spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) | 164 | #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) |
| 165 | 165 | ||
| 166 | #define raw_spin_lock(lock) _spin_lock(lock) | 166 | #define raw_spin_lock(lock) _raw_spin_lock(lock) |
| 167 | 167 | ||
| 168 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 168 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 169 | # define raw_spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) | 169 | # define raw_spin_lock_nested(lock, subclass) \ |
| | | 170 | _raw_spin_lock_nested(lock, subclass) |
| | | 171 | |
| 170 | # define raw_spin_lock_nest_lock(lock, nest_lock) \ | 172 | # define raw_spin_lock_nest_lock(lock, nest_lock) \ |
| 171 | do { \ | 173 | do { \ |
| 172 | typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ | 174 | typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ |
| 173 | _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ | 175 | _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ |
| 174 | } while (0) | 176 | } while (0) |
| 175 | #else | 177 | #else |
| 176 | # define raw_spin_lock_nested(lock, subclass) _spin_lock(lock) | 178 | # define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock) |
| 177 | # define raw_spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) | 179 | # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) |
| 178 | #endif | 180 | #endif |
| 179 | 181 | ||
| 180 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 182 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
| @@ -182,20 +184,20 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) | |||
| 182 | #define raw_spin_lock_irqsave(lock, flags) \ | 184 | #define raw_spin_lock_irqsave(lock, flags) \ |
| 183 | do { \ | 185 | do { \ |
| 184 | typecheck(unsigned long, flags); \ | 186 | typecheck(unsigned long, flags); \ |
| 185 | flags = _spin_lock_irqsave(lock); \ | 187 | flags = _raw_spin_lock_irqsave(lock); \ |
| 186 | } while (0) | 188 | } while (0) |
| 187 | 189 | ||
| 188 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 190 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 189 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ | 191 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
| 190 | do { \ | 192 | do { \ |
| 191 | typecheck(unsigned long, flags); \ | 193 | typecheck(unsigned long, flags); \ |
| 192 | flags = _spin_lock_irqsave_nested(lock, subclass); \ | 194 | flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ |
| 193 | } while (0) | 195 | } while (0) |
| 194 | #else | 196 | #else |
| 195 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ | 197 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
| 196 | do { \ | 198 | do { \ |
| 197 | typecheck(unsigned long, flags); \ | 199 | typecheck(unsigned long, flags); \ |
| 198 | flags = _spin_lock_irqsave(lock); \ | 200 | flags = _raw_spin_lock_irqsave(lock); \ |
| 199 | } while (0) | 201 | } while (0) |
| 200 | #endif | 202 | #endif |
| 201 | 203 | ||
| @@ -204,7 +206,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) | |||
| 204 | #define raw_spin_lock_irqsave(lock, flags) \ | 206 | #define raw_spin_lock_irqsave(lock, flags) \ |
| 205 | do { \ | 207 | do { \ |
| 206 | typecheck(unsigned long, flags); \ | 208 | typecheck(unsigned long, flags); \ |
| 207 | _spin_lock_irqsave(lock, flags); \ | 209 | _raw_spin_lock_irqsave(lock, flags); \ |
| 208 | } while (0) | 210 | } while (0) |
| 209 | 211 | ||
| 210 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ | 212 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
| @@ -212,19 +214,20 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) | |||
| 212 | 214 | ||
| 213 | #endif | 215 | #endif |
| 214 | 216 | ||
| 215 | #define raw_spin_lock_irq(lock) _spin_lock_irq(lock) | 217 | #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) |
| 216 | #define raw_spin_lock_bh(lock) _spin_lock_bh(lock) | 218 | #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) |
| 217 | #define raw_spin_unlock(lock) _spin_unlock(lock) | 219 | #define raw_spin_unlock(lock) _raw_spin_unlock(lock) |
| 218 | #define raw_spin_unlock_irq(lock) _spin_unlock_irq(lock) | 220 | #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) |
| 219 | 221 | ||
| 220 | #define raw_spin_unlock_irqrestore(lock, flags) \ | 222 | #define raw_spin_unlock_irqrestore(lock, flags) \ |
| 221 | do { \ | 223 | do { \ |
| 222 | typecheck(unsigned long, flags); \ | 224 | typecheck(unsigned long, flags); \ |
| 223 | _spin_unlock_irqrestore(lock, flags); \ | 225 | _raw_spin_unlock_irqrestore(lock, flags); \ |
| 224 | } while (0) | 226 | } while (0) |
| 225 | #define raw_spin_unlock_bh(lock) _spin_unlock_bh(lock) | 227 | #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) |
| 226 | 228 | ||
| 227 | #define raw_spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) | 229 | #define raw_spin_trylock_bh(lock) \ |
| | | 230 | __cond_lock(lock, _raw_spin_trylock_bh(lock)) |
| 228 | 231 | ||
| 229 | #define raw_spin_trylock_irq(lock) \ | 232 | #define raw_spin_trylock_irq(lock) \ |
| 230 | ({ \ | 233 | ({ \ |
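The same pattern holds on the spinlock side: the raw_spin_* wrappers keep their names and now forward to the _raw_spin_* functions declared in spinlock_api_smp.h (or to the UP stubs further down). A minimal, illustrative user follows (not part of this patch; the example_* names are invented, DEFINE_RAW_SPINLOCK() and the raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() wrappers already exist in the tree):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_raw_lock);   /* hypothetical */
static unsigned long example_count;

static void example_count_event(void)
{
        unsigned long flags;

        /* after this patch, expands to _raw_spin_lock_irqsave() */
        raw_spin_lock_irqsave(&example_raw_lock, flags);
        example_count++;
        raw_spin_unlock_irqrestore(&example_raw_lock, flags);
}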
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 1be1fc57fc4b..e253ccd7a604 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
| @@ -19,70 +19,71 @@ int in_lock_functions(unsigned long addr); | |||
| 19 | 19 | ||
| 20 | #define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x)) | 20 | #define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x)) |
| 21 | 21 | ||
| 22 | void __lockfunc _spin_lock(raw_spinlock_t *lock) __acquires(lock); | 22 | void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); |
| 23 | void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass) | 23 | void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) |
| 24 | __acquires(lock); | 24 | __acquires(lock); |
| 25 | void __lockfunc | 25 | void __lockfunc |
| 26 | _spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) | 26 | _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) |
| 27 | __acquires(lock); | 27 | __acquires(lock); |
| 28 | void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); | 28 | void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); |
| 29 | void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) __acquires(lock); | 29 | void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) |
| | | 30 | __acquires(lock); |
| 30 | | 31 | |
| 31 | unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock) | 32 | unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) |
| 32 | __acquires(lock); | 33 | __acquires(lock); |
| 33 | unsigned long __lockfunc | 34 | unsigned long __lockfunc |
| 34 | _spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) | 35 | _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) |
| 35 | __acquires(lock); | 36 | __acquires(lock); |
| 36 | int __lockfunc _spin_trylock(raw_spinlock_t *lock); | 37 | int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock); |
| 37 | int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock); | 38 | int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock); |
| 38 | void __lockfunc _spin_unlock(raw_spinlock_t *lock) __releases(lock); | 39 | void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); |
| 39 | void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); | 40 | void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); |
| 40 | void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); | 41 | void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); |
| 41 | void __lockfunc | 42 | void __lockfunc |
| 42 | _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) | 43 | _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) |
| 43 | __releases(lock); | 44 | __releases(lock); |
| 44 | 45 | ||
| 45 | #ifdef CONFIG_INLINE_SPIN_LOCK | 46 | #ifdef CONFIG_INLINE_SPIN_LOCK |
| 46 | #define _spin_lock(lock) __spin_lock(lock) | 47 | #define _raw_spin_lock(lock) __raw_spin_lock(lock) |
| 47 | #endif | 48 | #endif |
| 48 | 49 | ||
| 49 | #ifdef CONFIG_INLINE_SPIN_LOCK_BH | 50 | #ifdef CONFIG_INLINE_SPIN_LOCK_BH |
| 50 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) | 51 | #define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock) |
| 51 | #endif | 52 | #endif |
| 52 | 53 | ||
| 53 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ | 54 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ |
| 54 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) | 55 | #define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock) |
| 55 | #endif | 56 | #endif |
| 56 | 57 | ||
| 57 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE | 58 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE |
| 58 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) | 59 | #define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock) |
| 59 | #endif | 60 | #endif |
| 60 | 61 | ||
| 61 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK | 62 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK |
| 62 | #define _spin_trylock(lock) __spin_trylock(lock) | 63 | #define _raw_spin_trylock(lock) __raw_spin_trylock(lock) |
| 63 | #endif | 64 | #endif |
| 64 | 65 | ||
| 65 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH | 66 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH |
| 66 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) | 67 | #define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock) |
| 67 | #endif | 68 | #endif |
| 68 | 69 | ||
| 69 | #ifdef CONFIG_INLINE_SPIN_UNLOCK | 70 | #ifdef CONFIG_INLINE_SPIN_UNLOCK |
| 70 | #define _spin_unlock(lock) __spin_unlock(lock) | 71 | #define _raw_spin_unlock(lock) __raw_spin_unlock(lock) |
| 71 | #endif | 72 | #endif |
| 72 | 73 | ||
| 73 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH | 74 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH |
| 74 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) | 75 | #define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock) |
| 75 | #endif | 76 | #endif |
| 76 | 77 | ||
| 77 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ | 78 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ |
| 78 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) | 79 | #define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock) |
| 79 | #endif | 80 | #endif |
| 80 | 81 | ||
| 81 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE | 82 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE |
| 82 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) | 83 | #define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags) |
| 83 | #endif | 84 | #endif |
| 84 | 85 | ||
| 85 | static inline int __spin_trylock(raw_spinlock_t *lock) | 86 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
| 86 | { | 87 | { |
| 87 | preempt_disable(); | 88 | preempt_disable(); |
| 88 | if (do_raw_spin_trylock(lock)) { | 89 | if (do_raw_spin_trylock(lock)) { |
| @@ -100,7 +101,7 @@ static inline int __spin_trylock(raw_spinlock_t *lock) | |||
| 100 | */ | 101 | */ |
| 101 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | 102 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
| 102 | 103 | ||
| 103 | static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock) | 104 | static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) |
| 104 | { | 105 | { |
| 105 | unsigned long flags; | 106 | unsigned long flags; |
| 106 | 107 | ||
| @@ -120,7 +121,7 @@ static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock) | |||
| 120 | return flags; | 121 | return flags; |
| 121 | } | 122 | } |
| 122 | 123 | ||
| 123 | static inline void __spin_lock_irq(raw_spinlock_t *lock) | 124 | static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) |
| 124 | { | 125 | { |
| 125 | local_irq_disable(); | 126 | local_irq_disable(); |
| 126 | preempt_disable(); | 127 | preempt_disable(); |
| @@ -128,7 +129,7 @@ static inline void __spin_lock_irq(raw_spinlock_t *lock) | |||
| 128 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | 129 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
| 129 | } | 130 | } |
| 130 | 131 | ||
| 131 | static inline void __spin_lock_bh(raw_spinlock_t *lock) | 132 | static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) |
| 132 | { | 133 | { |
| 133 | local_bh_disable(); | 134 | local_bh_disable(); |
| 134 | preempt_disable(); | 135 | preempt_disable(); |
| @@ -136,7 +137,7 @@ static inline void __spin_lock_bh(raw_spinlock_t *lock) | |||
| 136 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | 137 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
| 137 | } | 138 | } |
| 138 | 139 | ||
| 139 | static inline void __spin_lock(raw_spinlock_t *lock) | 140 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
| 140 | { | 141 | { |
| 141 | preempt_disable(); | 142 | preempt_disable(); |
| 142 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | 143 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
| @@ -145,14 +146,14 @@ static inline void __spin_lock(raw_spinlock_t *lock) | |||
| 145 | 146 | ||
| 146 | #endif /* CONFIG_PREEMPT */ | 147 | #endif /* CONFIG_PREEMPT */ |
| 147 | 148 | ||
| 148 | static inline void __spin_unlock(raw_spinlock_t *lock) | 149 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
| 149 | { | 150 | { |
| 150 | spin_release(&lock->dep_map, 1, _RET_IP_); | 151 | spin_release(&lock->dep_map, 1, _RET_IP_); |
| 151 | do_raw_spin_unlock(lock); | 152 | do_raw_spin_unlock(lock); |
| 152 | preempt_enable(); | 153 | preempt_enable(); |
| 153 | } | 154 | } |
| 154 | 155 | ||
| 155 | static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock, | 156 | static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, |
| 156 | unsigned long flags) | 157 | unsigned long flags) |
| 157 | { | 158 | { |
| 158 | spin_release(&lock->dep_map, 1, _RET_IP_); | 159 | spin_release(&lock->dep_map, 1, _RET_IP_); |
| @@ -161,7 +162,7 @@ static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock, | |||
| 161 | preempt_enable(); | 162 | preempt_enable(); |
| 162 | } | 163 | } |
| 163 | 164 | ||
| 164 | static inline void __spin_unlock_irq(raw_spinlock_t *lock) | 165 | static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) |
| 165 | { | 166 | { |
| 166 | spin_release(&lock->dep_map, 1, _RET_IP_); | 167 | spin_release(&lock->dep_map, 1, _RET_IP_); |
| 167 | do_raw_spin_unlock(lock); | 168 | do_raw_spin_unlock(lock); |
| @@ -169,7 +170,7 @@ static inline void __spin_unlock_irq(raw_spinlock_t *lock) | |||
| 169 | preempt_enable(); | 170 | preempt_enable(); |
| 170 | } | 171 | } |
| 171 | 172 | ||
| 172 | static inline void __spin_unlock_bh(raw_spinlock_t *lock) | 173 | static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) |
| 173 | { | 174 | { |
| 174 | spin_release(&lock->dep_map, 1, _RET_IP_); | 175 | spin_release(&lock->dep_map, 1, _RET_IP_); |
| 175 | do_raw_spin_unlock(lock); | 176 | do_raw_spin_unlock(lock); |
| @@ -177,7 +178,7 @@ static inline void __spin_unlock_bh(raw_spinlock_t *lock) | |||
| 177 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | 178 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); |
| 178 | } | 179 | } |
| 179 | 180 | ||
| 180 | static inline int __spin_trylock_bh(raw_spinlock_t *lock) | 181 | static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) |
| 181 | { | 182 | { |
| 182 | local_bh_disable(); | 183 | local_bh_disable(); |
| 183 | preempt_disable(); | 184 | preempt_disable(); |
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 3a9e27adecf9..af1f47229e70 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
| @@ -40,7 +40,8 @@ | |||
| 40 | do { preempt_enable(); __release(lock); (void)(lock); } while (0) | 40 | do { preempt_enable(); __release(lock); (void)(lock); } while (0) |
| 41 | 41 | ||
| 42 | #define __UNLOCK_BH(lock) \ | 42 | #define __UNLOCK_BH(lock) \ |
| 43 | do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) | 43 | do { preempt_enable_no_resched(); local_bh_enable(); \ |
| | | 44 | __release(lock); (void)(lock); } while (0) |
| 44 | 45 | ||
| 45 | #define __UNLOCK_IRQ(lock) \ | 46 | #define __UNLOCK_IRQ(lock) \ |
| 46 | do { local_irq_enable(); __UNLOCK(lock); } while (0) | 47 | do { local_irq_enable(); __UNLOCK(lock); } while (0) |
| @@ -48,34 +49,37 @@ | |||
| 48 | #define __UNLOCK_IRQRESTORE(lock, flags) \ | 49 | #define __UNLOCK_IRQRESTORE(lock, flags) \ |
| 49 | do { local_irq_restore(flags); __UNLOCK(lock); } while (0) | 50 | do { local_irq_restore(flags); __UNLOCK(lock); } while (0) |
| 50 | 51 | ||
| 51 | #define _spin_lock(lock) __LOCK(lock) | 52 | #define _raw_spin_lock(lock) __LOCK(lock) |
| 52 | #define _spin_lock_nested(lock, subclass) __LOCK(lock) | 53 | #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) |
| 53 | #define _read_lock(lock) __LOCK(lock) | 54 | #define _raw_read_lock(lock) __LOCK(lock) |
| 54 | #define _write_lock(lock) __LOCK(lock) | 55 | #define _raw_write_lock(lock) __LOCK(lock) |
| 55 | #define _spin_lock_bh(lock) __LOCK_BH(lock) | 56 | #define _raw_spin_lock_bh(lock) __LOCK_BH(lock) |
| 56 | #define _read_lock_bh(lock) __LOCK_BH(lock) | 57 | #define _raw_read_lock_bh(lock) __LOCK_BH(lock) |
| 57 | #define _write_lock_bh(lock) __LOCK_BH(lock) | 58 | #define _raw_write_lock_bh(lock) __LOCK_BH(lock) |
| 58 | #define _spin_lock_irq(lock) __LOCK_IRQ(lock) | 59 | #define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock) |
| 59 | #define _read_lock_irq(lock) __LOCK_IRQ(lock) | 60 | #define _raw_read_lock_irq(lock) __LOCK_IRQ(lock) |
| 60 | #define _write_lock_irq(lock) __LOCK_IRQ(lock) | 61 | #define _raw_write_lock_irq(lock) __LOCK_IRQ(lock) |
| 61 | #define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | 62 | #define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
| 62 | #define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | 63 | #define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
| 63 | #define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | 64 | #define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
| 64 | #define _spin_trylock(lock) ({ __LOCK(lock); 1; }) | 65 | #define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; }) |
| 65 | #define _read_trylock(lock) ({ __LOCK(lock); 1; }) | 66 | #define _raw_read_trylock(lock) ({ __LOCK(lock); 1; }) |
| 66 | #define _write_trylock(lock) ({ __LOCK(lock); 1; }) | 67 | #define _raw_write_trylock(lock) ({ __LOCK(lock); 1; }) |
| 67 | #define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) | 68 | #define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) |
| 68 | #define _spin_unlock(lock) __UNLOCK(lock) | 69 | #define _raw_spin_unlock(lock) __UNLOCK(lock) |
| 69 | #define _read_unlock(lock) __UNLOCK(lock) | 70 | #define _raw_read_unlock(lock) __UNLOCK(lock) |
| 70 | #define _write_unlock(lock) __UNLOCK(lock) | 71 | #define _raw_write_unlock(lock) __UNLOCK(lock) |
| 71 | #define _spin_unlock_bh(lock) __UNLOCK_BH(lock) | 72 | #define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock) |
| 72 | #define _write_unlock_bh(lock) __UNLOCK_BH(lock) | 73 | #define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock) |
| 73 | #define _read_unlock_bh(lock) __UNLOCK_BH(lock) | 74 | #define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock) |
| 74 | #define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) | 75 | #define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock) |
| 75 | #define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) | 76 | #define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock) |
| 76 | #define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) | 77 | #define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock) |
| 77 | #define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | 78 | #define _raw_spin_unlock_irqrestore(lock, flags) \ |
| | | 79 | __UNLOCK_IRQRESTORE(lock, flags) |
| 78 | #define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | 80 | #define _raw_read_unlock_irqrestore(lock, flags) \ |
| | | 81 | __UNLOCK_IRQRESTORE(lock, flags) |
| 79 | #define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | 82 | #define _raw_write_unlock_irqrestore(lock, flags) \ |
| | | 83 | __UNLOCK_IRQRESTORE(lock, flags) |
| 80 | | 84 | |
| 81 | #endif /* __LINUX_SPINLOCK_API_UP_H */ | 85 | #endif /* __LINUX_SPINLOCK_API_UP_H */ |
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 795240b81224..be6517fb9c14 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
| @@ -44,7 +44,7 @@ | |||
| 44 | * towards that other CPU that it should break the lock ASAP. | 44 | * towards that other CPU that it should break the lock ASAP. |
| 45 | */ | 45 | */ |
| 46 | #define BUILD_LOCK_OPS(op, locktype) \ | 46 | #define BUILD_LOCK_OPS(op, locktype) \ |
| 47 | void __lockfunc __##op##_lock(locktype##_t *lock) \ | 47 | void __lockfunc __raw_##op##_lock(locktype##_t *lock) \ |
| 48 | { \ | 48 | { \ |
| 49 | for (;;) { \ | 49 | for (;;) { \ |
| 50 | preempt_disable(); \ | 50 | preempt_disable(); \ |
| @@ -60,7 +60,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \ | |||
| 60 | (lock)->break_lock = 0; \ | 60 | (lock)->break_lock = 0; \ |
| 61 | } \ | 61 | } \ |
| 62 | \ | 62 | \ |
| 63 | unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ | 63 | unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ |
| 64 | { \ | 64 | { \ |
| 65 | unsigned long flags; \ | 65 | unsigned long flags; \ |
| 66 | \ | 66 | \ |
| @@ -81,12 +81,12 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ | |||
| 81 | return flags; \ | 81 | return flags; \ |
| 82 | } \ | 82 | } \ |
| 83 | \ | 83 | \ |
| 84 | void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ | 84 | void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \ |
| 85 | { \ | 85 | { \ |
| 86 | _##op##_lock_irqsave(lock); \ | 86 | _raw_##op##_lock_irqsave(lock); \ |
| 87 | } \ | 87 | } \ |
| 88 | \ | 88 | \ |
| 89 | void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ | 89 | void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ |
| 90 | { \ | 90 | { \ |
| 91 | unsigned long flags; \ | 91 | unsigned long flags; \ |
| 92 | \ | 92 | \ |
| @@ -95,7 +95,7 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ | |||
| 95 | /* irq-disabling. We use the generic preemption-aware */ \ | 95 | /* irq-disabling. We use the generic preemption-aware */ \ |
| 96 | /* function: */ \ | 96 | /* function: */ \ |
| 97 | /**/ \ | 97 | /**/ \ |
| 98 | flags = _##op##_lock_irqsave(lock); \ | 98 | flags = _raw_##op##_lock_irqsave(lock); \ |
| 99 | local_bh_disable(); \ | 99 | local_bh_disable(); \ |
| 100 | local_irq_restore(flags); \ | 100 | local_irq_restore(flags); \ |
| 101 | } \ | 101 | } \ |
| @@ -116,240 +116,240 @@ BUILD_LOCK_OPS(write, rwlock); | |||
| 116 | #endif | 116 | #endif |
| 117 | 117 | ||
| 118 | #ifndef CONFIG_INLINE_SPIN_TRYLOCK | 118 | #ifndef CONFIG_INLINE_SPIN_TRYLOCK |
| 119 | int __lockfunc _spin_trylock(raw_spinlock_t *lock) | 119 | int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) |
| 120 | { | 120 | { |
| 121 | return __spin_trylock(lock); | 121 | return __raw_spin_trylock(lock); |
| 122 | } | 122 | } |
| 123 | EXPORT_SYMBOL(_spin_trylock); | 123 | EXPORT_SYMBOL(_raw_spin_trylock); |
| 124 | #endif | 124 | #endif |
| 125 | 125 | ||
| 126 | #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH | 126 | #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH |
| 127 | int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock) | 127 | int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) |
| 128 | { | 128 | { |
| 129 | return __spin_trylock_bh(lock); | 129 | return __raw_spin_trylock_bh(lock); |
| 130 | } | 130 | } |
| 131 | EXPORT_SYMBOL(_spin_trylock_bh); | 131 | EXPORT_SYMBOL(_raw_spin_trylock_bh); |
| 132 | #endif | 132 | #endif |
| 133 | 133 | ||
| 134 | #ifndef CONFIG_INLINE_SPIN_LOCK | 134 | #ifndef CONFIG_INLINE_SPIN_LOCK |
| 135 | void __lockfunc _spin_lock(raw_spinlock_t *lock) | 135 | void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) |
| 136 | { | 136 | { |
| 137 | __spin_lock(lock); | 137 | __raw_spin_lock(lock); |
| 138 | } | 138 | } |
| 139 | EXPORT_SYMBOL(_spin_lock); | 139 | EXPORT_SYMBOL(_raw_spin_lock); |
| 140 | #endif | 140 | #endif |
| 141 | 141 | ||
| 142 | #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE | 142 | #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE |
| 143 | unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock) | 143 | unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) |
| 144 | { | 144 | { |
| 145 | return __spin_lock_irqsave(lock); | 145 | return __raw_spin_lock_irqsave(lock); |
| 146 | } | 146 | } |
| 147 | EXPORT_SYMBOL(_spin_lock_irqsave); | 147 | EXPORT_SYMBOL(_raw_spin_lock_irqsave); |
| 148 | #endif | 148 | #endif |
| 149 | 149 | ||
| 150 | #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ | 150 | #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ |
| 151 | void __lockfunc _spin_lock_irq(raw_spinlock_t *lock) | 151 | void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) |
| 152 | { | 152 | { |
| 153 | __spin_lock_irq(lock); | 153 | __raw_spin_lock_irq(lock); |
| 154 | } | 154 | } |
| 155 | EXPORT_SYMBOL(_spin_lock_irq); | 155 | EXPORT_SYMBOL(_raw_spin_lock_irq); |
| 156 | #endif | 156 | #endif |
| 157 | 157 | ||
| 158 | #ifndef CONFIG_INLINE_SPIN_LOCK_BH | 158 | #ifndef CONFIG_INLINE_SPIN_LOCK_BH |
| 159 | void __lockfunc _spin_lock_bh(raw_spinlock_t *lock) | 159 | void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) |
| 160 | { | 160 | { |
| 161 | __spin_lock_bh(lock); | 161 | __raw_spin_lock_bh(lock); |
| 162 | } | 162 | } |
| 163 | EXPORT_SYMBOL(_spin_lock_bh); | 163 | EXPORT_SYMBOL(_raw_spin_lock_bh); |
| 164 | #endif | 164 | #endif |
| 165 | 165 | ||
| 166 | #ifndef CONFIG_INLINE_SPIN_UNLOCK | 166 | #ifndef CONFIG_INLINE_SPIN_UNLOCK |
| 167 | void __lockfunc _spin_unlock(raw_spinlock_t *lock) | 167 | void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) |
| 168 | { | 168 | { |
| 169 | __spin_unlock(lock); | 169 | __raw_spin_unlock(lock); |
| 170 | } | 170 | } |
| 171 | EXPORT_SYMBOL(_spin_unlock); | 171 | EXPORT_SYMBOL(_raw_spin_unlock); |
| 172 | #endif | 172 | #endif |
| 173 | 173 | ||
| 174 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE | 174 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE |
| 175 | void __lockfunc _spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) | 175 | void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) |
| 176 | { | 176 | { |
| 177 | __spin_unlock_irqrestore(lock, flags); | 177 | __raw_spin_unlock_irqrestore(lock, flags); |
| 178 | } | 178 | } |
| 179 | EXPORT_SYMBOL(_spin_unlock_irqrestore); | 179 | EXPORT_SYMBOL(_raw_spin_unlock_irqrestore); |
| 180 | #endif | 180 | #endif |
| 181 | 181 | ||
| 182 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ | 182 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ |
| 183 | void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock) | 183 | void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) |
| 184 | { | 184 | { |
| 185 | __spin_unlock_irq(lock); | 185 | __raw_spin_unlock_irq(lock); |
| 186 | } | 186 | } |
| 187 | EXPORT_SYMBOL(_spin_unlock_irq); | 187 | EXPORT_SYMBOL(_raw_spin_unlock_irq); |
| 188 | #endif | 188 | #endif |
| 189 | 189 | ||
| 190 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH | 190 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH |
| 191 | void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock) | 191 | void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) |
| 192 | { | 192 | { |
| 193 | __spin_unlock_bh(lock); | 193 | __raw_spin_unlock_bh(lock); |
| 194 | } | 194 | } |
| 195 | EXPORT_SYMBOL(_spin_unlock_bh); | 195 | EXPORT_SYMBOL(_raw_spin_unlock_bh); |
| 196 | #endif | 196 | #endif |
| 197 | 197 | ||
| 198 | #ifndef CONFIG_INLINE_READ_TRYLOCK | 198 | #ifndef CONFIG_INLINE_READ_TRYLOCK |
| 199 | int __lockfunc _read_trylock(rwlock_t *lock) | 199 | int __lockfunc _raw_read_trylock(rwlock_t *lock) |
| 200 | { | 200 | { |
| 201 | return __read_trylock(lock); | 201 | return __raw_read_trylock(lock); |
| 202 | } | 202 | } |
| 203 | EXPORT_SYMBOL(_read_trylock); | 203 | EXPORT_SYMBOL(_raw_read_trylock); |
| 204 | #endif | 204 | #endif |
| 205 | 205 | ||
| 206 | #ifndef CONFIG_INLINE_READ_LOCK | 206 | #ifndef CONFIG_INLINE_READ_LOCK |
| 207 | void __lockfunc _read_lock(rwlock_t *lock) | 207 | void __lockfunc _raw_read_lock(rwlock_t *lock) |
| 208 | { | 208 | { |
| 209 | __read_lock(lock); | 209 | __raw_read_lock(lock); |
| 210 | } | 210 | } |
| 211 | EXPORT_SYMBOL(_read_lock); | 211 | EXPORT_SYMBOL(_raw_read_lock); |
| 212 | #endif | 212 | #endif |
| 213 | 213 | ||
| 214 | #ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE | 214 | #ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE |
| 215 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | 215 | unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) |
| 216 | { | 216 | { |
| 217 | return __read_lock_irqsave(lock); | 217 | return __raw_read_lock_irqsave(lock); |
| 218 | } | 218 | } |
| 219 | EXPORT_SYMBOL(_read_lock_irqsave); | 219 | EXPORT_SYMBOL(_raw_read_lock_irqsave); |
| 220 | #endif | 220 | #endif |
| 221 | 221 | ||
| 222 | #ifndef CONFIG_INLINE_READ_LOCK_IRQ | 222 | #ifndef CONFIG_INLINE_READ_LOCK_IRQ |
| 223 | void __lockfunc _read_lock_irq(rwlock_t *lock) | 223 | void __lockfunc _raw_read_lock_irq(rwlock_t *lock) |
| 224 | { | 224 | { |
| 225 | __read_lock_irq(lock); | 225 | __raw_read_lock_irq(lock); |
| 226 | } | 226 | } |
| 227 | EXPORT_SYMBOL(_read_lock_irq); | 227 | EXPORT_SYMBOL(_raw_read_lock_irq); |
| 228 | #endif | 228 | #endif |
| 229 | 229 | ||
| 230 | #ifndef CONFIG_INLINE_READ_LOCK_BH | 230 | #ifndef CONFIG_INLINE_READ_LOCK_BH |
| 231 | void __lockfunc _read_lock_bh(rwlock_t *lock) | 231 | void __lockfunc _raw_read_lock_bh(rwlock_t *lock) |
| 232 | { | 232 | { |
| 233 | __read_lock_bh(lock); | 233 | __raw_read_lock_bh(lock); |
| 234 | } | 234 | } |
| 235 | EXPORT_SYMBOL(_read_lock_bh); | 235 | EXPORT_SYMBOL(_raw_read_lock_bh); |
| 236 | #endif | 236 | #endif |
| 237 | 237 | ||
| 238 | #ifndef CONFIG_INLINE_READ_UNLOCK | 238 | #ifndef CONFIG_INLINE_READ_UNLOCK |
| 239 | void __lockfunc _read_unlock(rwlock_t *lock) | 239 | void __lockfunc _raw_read_unlock(rwlock_t *lock) |
| 240 | { | 240 | { |
| 241 | __read_unlock(lock); | 241 | __raw_read_unlock(lock); |
| 242 | } | 242 | } |
| 243 | EXPORT_SYMBOL(_read_unlock); | 243 | EXPORT_SYMBOL(_raw_read_unlock); |
| 244 | #endif | 244 | #endif |
| 245 | 245 | ||
| 246 | #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE | 246 | #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE |
| 247 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 247 | void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
| 248 | { | 248 | { |
| 249 | __read_unlock_irqrestore(lock, flags); | 249 | __raw_read_unlock_irqrestore(lock, flags); |
| 250 | } | 250 | } |
| 251 | EXPORT_SYMBOL(_read_unlock_irqrestore); | 251 | EXPORT_SYMBOL(_raw_read_unlock_irqrestore); |
| 252 | #endif | 252 | #endif |
| 253 | 253 | ||
| 254 | #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ | 254 | #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ |
| 255 | void __lockfunc _read_unlock_irq(rwlock_t *lock) | 255 | void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) |
| 256 | { | 256 | { |
| 257 | __read_unlock_irq(lock); | 257 | __raw_read_unlock_irq(lock); |
| 258 | } | 258 | } |
| 259 | EXPORT_SYMBOL(_read_unlock_irq); | 259 | EXPORT_SYMBOL(_raw_read_unlock_irq); |
| 260 | #endif | 260 | #endif |
| 261 | 261 | ||
| 262 | #ifndef CONFIG_INLINE_READ_UNLOCK_BH | 262 | #ifndef CONFIG_INLINE_READ_UNLOCK_BH |
| 263 | void __lockfunc _read_unlock_bh(rwlock_t *lock) | 263 | void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) |
| 264 | { | 264 | { |
| 265 | __read_unlock_bh(lock); | 265 | __raw_read_unlock_bh(lock); |
| 266 | } | 266 | } |
| 267 | EXPORT_SYMBOL(_read_unlock_bh); | 267 | EXPORT_SYMBOL(_raw_read_unlock_bh); |
| 268 | #endif | 268 | #endif |
| 269 | 269 | ||
| 270 | #ifndef CONFIG_INLINE_WRITE_TRYLOCK | 270 | #ifndef CONFIG_INLINE_WRITE_TRYLOCK |
| 271 | int __lockfunc _write_trylock(rwlock_t *lock) | 271 | int __lockfunc _raw_write_trylock(rwlock_t *lock) |
| 272 | { | 272 | { |
| 273 | return __write_trylock(lock); | 273 | return __raw_write_trylock(lock); |
| 274 | } | 274 | } |
| 275 | EXPORT_SYMBOL(_write_trylock); | 275 | EXPORT_SYMBOL(_raw_write_trylock); |
| 276 | #endif | 276 | #endif |
| 277 | 277 | ||
| 278 | #ifndef CONFIG_INLINE_WRITE_LOCK | 278 | #ifndef CONFIG_INLINE_WRITE_LOCK |
| 279 | void __lockfunc _write_lock(rwlock_t *lock) | 279 | void __lockfunc _raw_write_lock(rwlock_t *lock) |
| 280 | { | 280 | { |
| 281 | __write_lock(lock); | 281 | __raw_write_lock(lock); |
| 282 | } | 282 | } |
| 283 | EXPORT_SYMBOL(_write_lock); | 283 | EXPORT_SYMBOL(_raw_write_lock); |
| 284 | #endif | 284 | #endif |
| 285 | 285 | ||
| 286 | #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE | 286 | #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE |
| 287 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | 287 | unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) |
| 288 | { | 288 | { |
| 289 | return __write_lock_irqsave(lock); | 289 | return __raw_write_lock_irqsave(lock); |
| 290 | } | 290 | } |
| 291 | EXPORT_SYMBOL(_write_lock_irqsave); | 291 | EXPORT_SYMBOL(_raw_write_lock_irqsave); |
| 292 | #endif | 292 | #endif |
| 293 | 293 | ||
| 294 | #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ | 294 | #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ |
| 295 | void __lockfunc _write_lock_irq(rwlock_t *lock) | 295 | void __lockfunc _raw_write_lock_irq(rwlock_t *lock) |
| 296 | { | 296 | { |
| 297 | __write_lock_irq(lock); | 297 | __raw_write_lock_irq(lock); |
| 298 | } | 298 | } |
| 299 | EXPORT_SYMBOL(_write_lock_irq); | 299 | EXPORT_SYMBOL(_raw_write_lock_irq); |
| 300 | #endif | 300 | #endif |
| 301 | 301 | ||
| 302 | #ifndef CONFIG_INLINE_WRITE_LOCK_BH | 302 | #ifndef CONFIG_INLINE_WRITE_LOCK_BH |
| 303 | void __lockfunc _write_lock_bh(rwlock_t *lock) | 303 | void __lockfunc _raw_write_lock_bh(rwlock_t *lock) |
| 304 | { | 304 | { |
| 305 | __write_lock_bh(lock); | 305 | __raw_write_lock_bh(lock); |
| 306 | } | 306 | } |
| 307 | EXPORT_SYMBOL(_write_lock_bh); | 307 | EXPORT_SYMBOL(_raw_write_lock_bh); |
| 308 | #endif | 308 | #endif |
| 309 | 309 | ||
| 310 | #ifndef CONFIG_INLINE_WRITE_UNLOCK | 310 | #ifndef CONFIG_INLINE_WRITE_UNLOCK |
| 311 | void __lockfunc _write_unlock(rwlock_t *lock) | 311 | void __lockfunc _raw_write_unlock(rwlock_t *lock) |
| 312 | { | 312 | { |
| 313 | __write_unlock(lock); | 313 | __raw_write_unlock(lock); |
| 314 | } | 314 | } |
| 315 | EXPORT_SYMBOL(_write_unlock); | 315 | EXPORT_SYMBOL(_raw_write_unlock); |
| 316 | #endif | 316 | #endif |
| 317 | 317 | ||
| 318 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE | 318 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE |
| 319 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 319 | void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
| 320 | { | 320 | { |
| 321 | __write_unlock_irqrestore(lock, flags); | 321 | __raw_write_unlock_irqrestore(lock, flags); |
| 322 | } | 322 | } |
| 323 | EXPORT_SYMBOL(_write_unlock_irqrestore); | 323 | EXPORT_SYMBOL(_raw_write_unlock_irqrestore); |
| 324 | #endif | 324 | #endif |
| 325 | 325 | ||
| 326 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ | 326 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ |
| 327 | void __lockfunc _write_unlock_irq(rwlock_t *lock) | 327 | void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) |
| 328 | { | 328 | { |
| 329 | __write_unlock_irq(lock); | 329 | __raw_write_unlock_irq(lock); |
| 330 | } | 330 | } |
| 331 | EXPORT_SYMBOL(_write_unlock_irq); | 331 | EXPORT_SYMBOL(_raw_write_unlock_irq); |
| 332 | #endif | 332 | #endif |
| 333 | 333 | ||
| 334 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH | 334 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH |
| 335 | void __lockfunc _write_unlock_bh(rwlock_t *lock) | 335 | void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) |
| 336 | { | 336 | { |
| 337 | __write_unlock_bh(lock); | 337 | __raw_write_unlock_bh(lock); |
| 338 | } | 338 | } |
| 339 | EXPORT_SYMBOL(_write_unlock_bh); | 339 | EXPORT_SYMBOL(_raw_write_unlock_bh); |
| 340 | #endif | 340 | #endif |
| 341 | 341 | ||
| 342 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 342 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 343 | 343 | ||
| 344 | void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass) | 344 | void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) |
| 345 | { | 345 | { |
| 346 | preempt_disable(); | 346 | preempt_disable(); |
| 347 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); | 347 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); |
| 348 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | 348 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
| 349 | } | 349 | } |
| 350 | EXPORT_SYMBOL(_spin_lock_nested); | 350 | EXPORT_SYMBOL(_raw_spin_lock_nested); |
| 351 | 351 | ||
| 352 | unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock, | 352 | unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, |
| 353 | int subclass) | 353 | int subclass) |
| 354 | { | 354 | { |
| 355 | unsigned long flags; | 355 | unsigned long flags; |
| @@ -361,16 +361,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock, | |||
| 361 | do_raw_spin_lock_flags, &flags); | 361 | do_raw_spin_lock_flags, &flags); |
| 362 | return flags; | 362 | return flags; |
| 363 | } | 363 | } |
| 364 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); | 364 | EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested); |
| 365 | 365 | ||
| 366 | void __lockfunc _spin_lock_nest_lock(raw_spinlock_t *lock, | 366 | void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, |
| 367 | struct lockdep_map *nest_lock) | 367 | struct lockdep_map *nest_lock) |
| 368 | { | 368 | { |
| 369 | preempt_disable(); | 369 | preempt_disable(); |
| 370 | spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); | 370 | spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); |
| 371 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | 371 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
| 372 | } | 372 | } |
| 373 | EXPORT_SYMBOL(_spin_lock_nest_lock); | 373 | EXPORT_SYMBOL(_raw_spin_lock_nest_lock); |
| 374 | 374 | ||
| 375 | #endif | 375 | #endif |
| 376 | 376 | ||
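To tie the pieces together: kernel/spinlock.c still provides the out-of-line, exported lock functions whenever the corresponding CONFIG_INLINE_* switch is off; only their names gained the _raw_ prefix. Below is a rough, hand-written sketch of the resulting call chain for an ordinary spinlock_t user (illustrative only; example_spinlock and example_critical_section are invented, DEFINE_SPINLOCK() and spin_lock_irqsave() are the existing public API):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_spinlock);       /* hypothetical */

static void example_critical_section(void)
{
        unsigned long flags;

        /*
         * spin_lock_irqsave() -> raw_spin_lock_irqsave()
         *   -> _raw_spin_lock_irqsave()   (this file, or inlined when
         *                                  CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y)
         *   -> __raw_spin_lock_irqsave()  (spinlock_api_smp.h)
         *   -> do_raw_spin_lock() / arch_spin_lock()
         */
        spin_lock_irqsave(&example_spinlock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&example_spinlock, flags);
}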
