-rw-r--r--  arch/alpha/include/asm/spinlock.h           | 20
-rw-r--r--  arch/arm/include/asm/spinlock.h             | 20
-rw-r--r--  arch/blackfin/include/asm/spinlock.h        | 40
-rw-r--r--  arch/cris/include/arch-v32/arch/spinlock.h  | 16
-rw-r--r--  arch/ia64/include/asm/spinlock.h            | 32
-rw-r--r--  arch/m32r/include/asm/spinlock.h            | 20
-rw-r--r--  arch/mips/include/asm/spinlock.h            | 42
-rw-r--r--  arch/parisc/include/asm/spinlock.h          | 20
-rw-r--r--  arch/powerpc/include/asm/spinlock.h         | 32
-rw-r--r--  arch/s390/include/asm/spinlock.h            | 20
-rw-r--r--  arch/s390/lib/spinlock.c                    | 12
-rw-r--r--  arch/sh/include/asm/spinlock.h              | 32
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h        | 32
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h        | 22
-rw-r--r--  arch/x86/include/asm/spinlock.h             | 20
-rw-r--r--  include/linux/rwlock.h                      | 20
-rw-r--r--  include/linux/spinlock_up.h                 | 16
-rw-r--r--  lib/spinlock_debug.c                        | 16
18 files changed, 216 insertions, 216 deletions
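
Every hunk below is the same mechanical rename: the architecture-level rwlock entry points drop their __raw_ prefix in favour of arch_, matching the existing arch_spin_*() names, with no functional change. A minimal sketch of the resulting layering, on a toy lock type with C11 atomics (illustrative only; the real generic wrappers live in include/linux/rwlock.h, which is in the diffstat above but whose hunks are not shown here):

#include <stdatomic.h>

/* Toy stand-in for the per-arch lock word; illustrative only. */
typedef struct { _Atomic int lock; } arch_rwlock_t;

/* Arch layer: after this patch it owns the arch_*() namespace. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
}

/* Generic layer: the __raw_*() names are now free for wrappers that
 * simply forward to the arch primitive (schematic; the real wrappers
 * also handle lockdep and preemption accounting). */
static inline void __raw_read_unlock(arch_rwlock_t *rw)
{
        arch_read_unlock(rw);
}
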
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index e8b2970f037b..d0faca1e992d 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 /***********************************************************/
 
-static inline int __raw_read_can_lock(arch_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
 {
         return (lock->lock & 1) == 0;
 }
 
-static inline int __raw_write_can_lock(arch_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
 {
         return lock->lock == 0;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
 {
         long regx;
 
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(arch_rwlock_t *lock)
         : "m" (*lock) : "memory");
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
 {
         long regx;
 
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(arch_rwlock_t *lock)
         : "m" (*lock) : "memory");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
 {
         long regx;
         int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(arch_rwlock_t * lock)
         return success;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
 {
         long regx;
         int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(arch_rwlock_t * lock)
         return success;
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
 {
         long regx;
         __asm__ __volatile__(
@@ -160,14 +160,14 @@ static inline void __raw_read_unlock(arch_rwlock_t * lock)
         : "m" (*lock) : "memory");
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
 {
         mb();
         lock->lock = 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)   cpu_relax()
 #define arch_read_relax(lock)   cpu_relax()
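
For reference, the Alpha lock word encodes a writer in bit 0 and counts readers in steps of two, which is exactly what the two can_lock tests above check. A hedged C11 restatement of that encoding (a sketch, not the kernel's LL/SC code):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { _Atomic long lock; } toy_rwlock_t;

/* Readers bump the word by 2, so bit 0 stays a pure writer flag. */
static bool toy_read_trylock(toy_rwlock_t *rw)
{
        long old = atomic_load_explicit(&rw->lock, memory_order_relaxed);

        if (old & 1)            /* a writer holds bit 0 */
                return false;
        return atomic_compare_exchange_strong(&rw->lock, &old, old + 2);
}

/* A writer needs the whole word idle: no readers, no writer. */
static bool toy_write_trylock(toy_rwlock_t *rw)
{
        long expected = 0;

        return atomic_compare_exchange_strong(&rw->lock, &expected, 1);
}
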
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index a8671d8bc7d4..c91c64cab922 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  * just write zero since the lock is exclusively held.
  */
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         unsigned long tmp;
 
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
         smp_mb();
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
         unsigned long tmp;
 
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
         }
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         smp_mb();
 
@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x)         ((x)->lock == 0)
+#define arch_write_can_lock(x)          ((x)->lock == 0)
 
 /*
  * Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
  * currently active.  However, we know we won't have any write
  * locks.
  */
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned long tmp, tmp2;
 
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
         smp_mb();
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned long tmp, tmp2;
 
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
         : "cc");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
         unsigned long tmp, tmp2 = 1;
 
@@ -215,10 +215,10 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x)          ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)           ((x)->lock < 0x80000000)
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)   cpu_relax()
 #define arch_read_relax(lock)   cpu_relax()
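
ARM keeps the opposite encoding from Alpha: readers count upward from zero and a writer parks 0x80000000 in the word, hence the arch_read_can_lock() test above. Behaviourally, the ldrex/strex read-lock loop amounts to the following compare-and-swap retry (a portable sketch only; the real code is the inline asm in the hunks above):

#include <stdatomic.h>

static void toy_read_lock(_Atomic unsigned int *lock)
{
        unsigned int old;

        do {
                old = atomic_load_explicit(lock, memory_order_relaxed);
                while (old & 0x80000000u)       /* writer active: wait it out */
                        old = atomic_load_explicit(lock, memory_order_relaxed);
        } while (!atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
                        memory_order_acquire, memory_order_relaxed));
}
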
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 7e1c56b0a571..1942ccfedbe0 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -17,12 +17,12 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
 asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_read_lock_asm(volatile int *ptr);
-asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_write_lock_asm(volatile int *ptr);
-asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
+asmlinkage void arch_read_lock_asm(volatile int *ptr);
+asmlinkage int arch_read_trylock_asm(volatile int *ptr);
+asmlinkage void arch_read_unlock_asm(volatile int *ptr);
+asmlinkage void arch_write_lock_asm(volatile int *ptr);
+asmlinkage int arch_write_trylock_asm(volatile int *ptr);
+asmlinkage void arch_write_unlock_asm(volatile int *ptr);
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
@@ -52,44 +52,44 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
                 cpu_relax();
 }
 
-static inline int __raw_read_can_lock(arch_rwlock_t *rw)
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
 {
         return __raw_uncached_fetch_asm(&rw->lock) > 0;
 }
 
-static inline int __raw_write_can_lock(arch_rwlock_t *rw)
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
 {
         return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-        __raw_read_lock_asm(&rw->lock);
+        arch_read_lock_asm(&rw->lock);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-        return __raw_read_trylock_asm(&rw->lock);
+        return arch_read_trylock_asm(&rw->lock);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-        __raw_read_unlock_asm(&rw->lock);
+        arch_read_unlock_asm(&rw->lock);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-        __raw_write_lock_asm(&rw->lock);
+        arch_write_lock_asm(&rw->lock);
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-        return __raw_write_trylock_asm(&rw->lock);
+        return arch_write_trylock_asm(&rw->lock);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-        __raw_write_unlock_asm(&rw->lock);
+        arch_write_unlock_asm(&rw->lock);
 }
 
 #define arch_spin_relax(lock)   cpu_relax()
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 1d7d3a8046cb..f171a6600fbc 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
  *
  */
 
-static inline int __raw_read_can_lock(arch_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
 {
         return (int)(x)->lock > 0;
 }
 
-static inline int __raw_write_can_lock(arch_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
 {
         return (x)->lock == RW_LOCK_BIAS;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         arch_spin_lock(&rw->slock);
         while (rw->lock == 0);
@@ -74,7 +74,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
         arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         arch_spin_lock(&rw->slock);
         while (rw->lock != RW_LOCK_BIAS);
@@ -82,14 +82,14 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
         arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         arch_spin_lock(&rw->slock);
         rw->lock++;
         arch_spin_unlock(&rw->slock);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         arch_spin_lock(&rw->slock);
         while (rw->lock != RW_LOCK_BIAS);
@@ -97,7 +97,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
         arch_spin_unlock(&rw->slock);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
         int ret = 0;
         arch_spin_lock(&rw->slock);
@@ -109,7 +109,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
         return ret;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
         int ret = 0;
         arch_spin_lock(&rw->slock);
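
CRISv32 builds its rwlock out of a plain counter guarded by the embedded spinlock: every operation is lock, inspect or adjust the counter, unlock. The same scheme in portable form, with a pthread mutex standing in for arch_spin_lock()/arch_spin_unlock() (a sketch under those assumptions, not kernel code):

#include <pthread.h>
#include <stdbool.h>

#define RW_LOCK_BIAS 0x01000000 /* conventional value; readers count down from it */

struct toy_rwlock {
        pthread_mutex_t slock;  /* stand-in for the guarding arch spinlock */
        int lock;               /* RW_LOCK_BIAS when free, minus 1 per reader */
};

static bool toy_read_trylock(struct toy_rwlock *rw)
{
        bool ret = false;

        pthread_mutex_lock(&rw->slock);
        if (rw->lock > 0) {     /* no writer, read capacity left */
                rw->lock--;
                ret = true;
        }
        pthread_mutex_unlock(&rw->slock);
        return ret;
}
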
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 6715b6a8ebc3..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -140,13 +140,13 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
         __ticket_spin_unlock_wait(lock);
 }
 
-#define __raw_read_can_lock(rw)         (*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw)        (*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw)          (*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)         (*(volatile int *)(rw) == 0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
         __asm__ __volatile__ (
                 "tbit.nz p6, p0 = %1,%2\n"
@@ -169,13 +169,13 @@ __raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
                 : "p6", "p7", "r2", "memory");
 }
 
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 
-#define __raw_read_lock(rw)                                                     \
+#define arch_read_lock(rw)                                                      \
 do {                                                                            \
         arch_rwlock_t *__read_lock_ptr = (rw);                                  \
                                                                                 \
@@ -188,7 +188,7 @@ do {                                                                            \
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_unlock(rw)                                   \
+#define arch_read_unlock(rw)                                    \
 do {                                                            \
         arch_rwlock_t *__read_lock_ptr = (rw);                  \
         ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
@@ -197,7 +197,7 @@ do {                                                            \
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
         __asm__ __volatile__ (
                 "tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
                 : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
-#define __raw_write_trylock(rw)                                                 \
+#define arch_write_trylock(rw)                                                  \
 ({                                                                              \
         register long result;                                                   \
                                                                                 \
@@ -235,7 +235,7 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
         (result == 0);                                                          \
 })
 
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
         u8 *y = (u8 *)x;
         barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
 
-#define __raw_write_lock(l)                                                     \
+#define arch_write_lock(l)                                                      \
 ({                                                                              \
         __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);               \
         __u32 *ia64_write_lock_ptr = (__u32 *) (l);                             \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
         } while (ia64_val);                                                     \
 })
 
-#define __raw_write_trylock(rw)                                 \
+#define arch_write_trylock(rw)                                  \
 ({                                                              \
         __u64 ia64_val;                                         \
         __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);          \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
         (ia64_val == 0);                                        \
 })
 
-static inline void __raw_write_unlock(arch_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
         barrier();
         x->write_lock = 0;
@@ -273,7 +273,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-static inline int __raw_read_trylock(arch_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
 {
         union {
                 arch_rwlock_t lock;
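
ia64 is the one architecture above where arch_read_lock_flags() is not just an alias: while the lock is contended, the asm loop re-enables interrupts from the saved flags and masks them again before retrying, so a long spin does not keep IRQs blocked. A behavioural sketch of that idea, with stubbed IRQ helpers so it stands alone (illustrative only):

#include <stdatomic.h>

/* No-op stand-ins for the psr.i manipulation the real asm performs. */
static void local_irq_restore(unsigned long flags) { (void)flags; }
static void local_irq_disable(void) { }

/* Negative lock word == writer held, matching arch_read_can_lock() above. */
static void toy_read_lock_flags(_Atomic int *lock, unsigned long flags)
{
        int old;

        for (;;) {
                old = atomic_load_explicit(lock, memory_order_relaxed);
                if (old >= 0 &&
                    atomic_compare_exchange_weak(lock, &old, old + 1))
                        return;                 /* reader registered */
                local_irq_restore(flags);       /* open the IRQ window */
                while (atomic_load_explicit(lock, memory_order_relaxed) < 0)
                        ;                       /* wait for the writer to go away */
                local_irq_disable();            /* close it before retrying */
        }
}
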
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index 1c76af8c8e1b..179a06489b10 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -140,15 +140,15 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned long tmp0, tmp1;
 
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
         );
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         unsigned long tmp0, tmp1, tmp2;
 
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
         );
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned long tmp0, tmp1;
 
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
         );
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         unsigned long tmp0, tmp1, tmp2;
 
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
         );
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
 {
         atomic_t *count = (atomic_t*)lock;
         if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *lock)
         return 0;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
 {
         atomic_t *count = (atomic_t *)lock;
         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -316,8 +316,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *lock)
         return 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)   cpu_relax()
 #define arch_read_relax(lock)   cpu_relax()
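
The m32r trylock pair above is the classic bias scheme in plain C: the lock word starts at RW_LOCK_BIAS, each reader subtracts 1, a writer subtracts the whole bias, and a failed attempt is undone by adding the amount back. The same logic with C11 atomics (atomic_fetch_sub returns the previous value, so "prev - 1" is the dec_return of the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

#define RW_LOCK_BIAS 0x01000000

static bool toy_read_trylock(_Atomic int *count)
{
        if (atomic_fetch_sub(count, 1) - 1 >= 0)
                return true;            /* stayed non-negative: read lock held */
        atomic_fetch_add(count, 1);     /* a writer is in; undo our decrement */
        return false;
}

static bool toy_write_trylock(_Atomic int *count)
{
        if (atomic_fetch_sub(count, RW_LOCK_BIAS) == RW_LOCK_BIAS)
                return true;            /* word hit zero: write lock held */
        atomic_fetch_add(count, RW_LOCK_BIAS);  /* readers or writer present; undo */
        return false;
}
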
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 7bf27c8a3364..21ef9efbde43 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -248,21 +248,21 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(rw)         ((rw)->lock >= 0)
+#define arch_read_can_lock(rw)          ((rw)->lock >= 0)
 
 /*
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(rw)        (!(rw)->lock)
+#define arch_write_can_lock(rw)         (!(rw)->lock)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned int tmp;
 
         if (R10000_LLSC_WAR) {
                 __asm__ __volatile__(
-                "       .set    noreorder       # __raw_read_lock       \n"
+                "       .set    noreorder       # arch_read_lock        \n"
                 "1:     ll      %1, %2                                  \n"
                 "       bltz    %1, 1b                                  \n"
                 "        addu   %1, 1                                   \n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
                 : "memory");
         } else {
                 __asm__ __volatile__(
-                "       .set    noreorder       # __raw_read_lock       \n"
+                "       .set    noreorder       # arch_read_lock        \n"
                 "1:     ll      %1, %2                                  \n"
                 "       bltz    %1, 2f                                  \n"
                 "        addu   %1, 1                                   \n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
 /* Note the use of sub, not subu which will make the kernel die with an
    overflow exception if we ever try to unlock an rwlock that is already
    unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned int tmp;
 
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
 
         if (R10000_LLSC_WAR) {
                 __asm__ __volatile__(
-                "1:     ll      %1, %2          # __raw_read_unlock     \n"
+                "1:     ll      %1, %2          # arch_read_unlock      \n"
                 "       sub     %1, 1                                   \n"
                 "       sc      %1, %0                                  \n"
                 "       beqzl   %1, 1b                                  \n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
                 : "memory");
         } else {
                 __asm__ __volatile__(
-                "       .set    noreorder       # __raw_read_unlock     \n"
+                "       .set    noreorder       # arch_read_unlock      \n"
                 "1:     ll      %1, %2                                  \n"
                 "       sub     %1, 1                                   \n"
                 "       sc      %1, %0                                  \n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
         }
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         unsigned int tmp;
 
         if (R10000_LLSC_WAR) {
                 __asm__ __volatile__(
-                "       .set    noreorder       # __raw_write_lock      \n"
+                "       .set    noreorder       # arch_write_lock       \n"
                 "1:     ll      %1, %2                                  \n"
                 "       bnez    %1, 1b                                  \n"
                 "        lui    %1, 0x8000                              \n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
                 : "memory");
         } else {
                 __asm__ __volatile__(
-                "       .set    noreorder       # __raw_write_lock      \n"
+                "       .set    noreorder       # arch_write_lock       \n"
                 "1:     ll      %1, %2                                  \n"
                 "       bnez    %1, 2f                                  \n"
                 "        lui    %1, 0x8000                              \n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
         smp_llsc_mb();
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         smp_mb();
 
         __asm__ __volatile__(
-        "                               # __raw_write_unlock    \n"
+        "                               # arch_write_unlock     \n"
         "       sw      $0, %0                                  \n"
         : "=m" (rw->lock)
         : "m" (rw->lock)
         : "memory");
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
         unsigned int tmp;
         int ret;
 
         if (R10000_LLSC_WAR) {
                 __asm__ __volatile__(
-                "       .set    noreorder       # __raw_read_trylock    \n"
+                "       .set    noreorder       # arch_read_trylock     \n"
                 "       li      %2, 0                                   \n"
                 "1:     ll      %1, %3                                  \n"
                 "       bltz    %1, 2f                                  \n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
                 : "memory");
         } else {
                 __asm__ __volatile__(
-                "       .set    noreorder       # __raw_read_trylock    \n"
+                "       .set    noreorder       # arch_read_trylock     \n"
                 "       li      %2, 0                                   \n"
                 "1:     ll      %1, %3                                  \n"
                 "       bltz    %1, 2f                                  \n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
         return ret;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
         unsigned int tmp;
         int ret;
 
         if (R10000_LLSC_WAR) {
                 __asm__ __volatile__(
-                "       .set    noreorder       # __raw_write_trylock   \n"
+                "       .set    noreorder       # arch_write_trylock    \n"
                 "       li      %2, 0                                   \n"
                 "1:     ll      %1, %3                                  \n"
                 "       bnez    %1, 2f                                  \n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
                 : "memory");
         } else {
                 __asm__ __volatile__(
-                "       .set    noreorder       # __raw_write_trylock   \n"
+                "       .set    noreorder       # arch_write_trylock    \n"
                 "       li      %2, 0                                   \n"
                 "1:     ll      %1, %3                                  \n"
                 "       bnez    %1, 2f                                  \n"
@@ -480,8 +480,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
         return ret;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)   cpu_relax()
 #define arch_read_relax(lock)   cpu_relax()
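
The comment before arch_read_unlock() above documents a deliberate trap: using sub rather than subu means an unbalanced unlock overflows and kills the kernel instead of silently corrupting the reader count. The equivalent defensive check in portable C (a sketch; the MIPS version gets it for free from the overflow exception):

#include <assert.h>
#include <stdatomic.h>

static void toy_read_unlock(_Atomic int *lock)
{
        int old = atomic_fetch_sub_explicit(lock, 1, memory_order_release);

        /* Mirrors the sub-vs-subu trick: unlocking with no readers (or
         * with a writer holding the word) is a bug, so fail loudly. */
        assert(old > 0);
}
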
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 1ff3a0a94a43..74036f436a3b 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(arch_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned long flags;
         local_irq_save(flags);
@@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned long flags;
         local_irq_save(flags);
@@ -93,7 +93,7 @@ static __inline__ void __raw_read_unlock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
 {
         unsigned long flags;
  retry:
@@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(arch_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
 {
         unsigned long flags;
 retry:
@@ -141,7 +141,7 @@ retry:
         local_irq_restore(flags);
 }
 
-static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
 {
         rw->counter = 0;
         arch_spin_unlock(&rw->lock);
@@ -149,7 +149,7 @@ static __inline__ void __raw_write_unlock(arch_rwlock_t *rw)
 
 /* Note that we have to ensure interrupts are disabled in case we're
  * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
 {
         unsigned long flags;
         int result = 0;
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(arch_rwlock_t *rw)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
 {
         return rw->counter >= 0;
 }
@@ -182,13 +182,13 @@ static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw)
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
 {
         return !rw->counter;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)   cpu_relax()
 #define arch_read_relax(lock)   cpu_relax()
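
The comments in the parisc hunks spell out why every path saves and restores interrupt state: the counter lives under an ordinary spinlock, and an interrupt handler taking the same read lock on this CPU must never find that spinlock already held by the code it interrupted. The read-lock path restated with stubs (a sketch; local_irq_save()/restore() are modelled as no-ops so it compiles standalone):

#include <pthread.h>

static unsigned long local_irq_save(void) { return 0; }                /* stub */
static void local_irq_restore(unsigned long flags) { (void)flags; }    /* stub */

struct toy_rwlock {
        pthread_mutex_t lock;   /* stand-in for the internal arch spinlock */
        int counter;            /* >= 0: reader count; negative: writer held */
};

static void toy_read_lock(struct toy_rwlock *rw)
{
        unsigned long flags = local_irq_save(); /* keep IRQ-context readers out */

        pthread_mutex_lock(&rw->lock);
        rw->counter++;                          /* account one more reader */
        pthread_mutex_unlock(&rw->lock);
        local_irq_restore(flags);
}
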
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 2fad2c07c593..764094cff681 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -166,8 +166,8 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
  * read-locks.
  */
 
-#define __raw_read_can_lock(rw)         ((rw)->lock >= 0)
-#define __raw_write_can_lock(rw)        (!(rw)->lock)
+#define arch_read_can_lock(rw)          ((rw)->lock >= 0)
+#define arch_write_can_lock(rw)         (!(rw)->lock)
 
 #ifdef CONFIG_PPC64
 #define __DO_SIGN_EXTEND        "extsw  %0,%0\n"
@@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static inline long arch_read_trylock(arch_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
 {
         long tmp;
 
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(arch_rwlock_t *rw)
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static inline long arch_write_trylock(arch_rwlock_t *rw)
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
 {
         long tmp, token;
 
@@ -225,10 +225,10 @@ static inline long arch_write_trylock(arch_rwlock_t *rw)
         return tmp;
 }
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         while (1) {
-                if (likely(arch_read_trylock(rw) > 0))
+                if (likely(__arch_read_trylock(rw) > 0))
                         break;
                 do {
                         HMT_low();
@@ -239,10 +239,10 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
         }
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         while (1) {
-                if (likely(arch_write_trylock(rw) == 0))
+                if (likely(__arch_write_trylock(rw) == 0))
                         break;
                 do {
                         HMT_low();
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
         }
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-        return arch_read_trylock(rw) > 0;
+        return __arch_read_trylock(rw) > 0;
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-        return arch_write_trylock(rw) == 0;
+        return __arch_write_trylock(rw) == 0;
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         long tmp;
 
@@ -280,15 +280,15 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
         : "cr0", "xer", "memory");
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         __asm__ __volatile__("# write_unlock\n\t"
                                 LWSYNC_ON_SMP: : :"memory");
         rw->lock = 0;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)   __spin_yield(lock)
 #define arch_read_relax(lock)   __rw_yield(lock)
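
powerpc is the one place the patch renames in the opposite direction as well: its internal helpers were already called arch_read_trylock()/arch_write_trylock(), so they gain a double underscore to make room for the new public names. The shape of that internal API, where the trylock reports the (biased) old lock value instead of a boolean, using a CAS in place of lwarx/stwcx. (a hedged sketch only):

#include <stdatomic.h>

/* Returns old value + 1: > 0 means the read lock was taken; <= 0 means
 * a writer is in (lock word negative) or the reservation was lost. */
static long toy_read_trylock_internal(_Atomic long *lock)
{
        long old = atomic_load_explicit(lock, memory_order_relaxed);

        if (old < 0)            /* writer holds the lock */
                return old + 1;
        if (atomic_compare_exchange_strong(lock, &old, old + 1))
                return old + 1; /* strictly positive: success */
        return 0;               /* raced; the caller simply tries again */
}

static void toy_read_lock(_Atomic long *lock)
{
        while (toy_read_trylock_internal(lock) <= 0)
                ;       /* the real loop drops SMT priority (HMT_low) while waiting */
}
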
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 7f98f0e48acb..a587907d77f3 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -113,13 +113,13 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
 
 extern void _raw_read_lock_wait(arch_rwlock_t *lp);
 extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
@@ -128,7 +128,7 @@ extern void _raw_write_lock_wait(arch_rwlock_t *lp);
 extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned int old;
         old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
                 _raw_read_lock_wait(rw);
 }
 
-static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
         unsigned int old;
         old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
                 _raw_read_lock_wait_flags(rw, flags);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned int old, cmp;
 
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
         } while (cmp != old);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                 _raw_write_lock_wait(rw);
 }
 
-static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
+static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
         if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                 _raw_write_lock_wait_flags(rw, flags);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
         unsigned int old;
         old = rw->lock & 0x7fffffffU;
@@ -181,7 +181,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
         return _raw_read_trylock_retry(rw);
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
         if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
                 return 1;
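
The s390 header keeps only the optimistic fast path inline, a single compare-and-swap on the likely-free value, and punts contention to the out-of-line _raw_*_wait() routines in lib/spinlock.c (next hunks). The split, schematically (a sketch; toy_write_lock_wait() is the out-of-line slow path, defined in the sketch after the lib/spinlock.c hunks below, and 1000 is an arbitrary retry budget):

#include <stdatomic.h>

void toy_write_lock_wait(_Atomic unsigned int *lock, int spin_retry);

static inline void toy_write_lock(_Atomic unsigned int *lock)
{
        unsigned int expected = 0;

        /* Fast path: one CAS when the lock is probably free... */
        if (atomic_compare_exchange_strong(lock, &expected, 0x80000000u))
                return;
        /* ...slow path: the retry/yield loop lives out of line. */
        toy_write_lock_wait(lock, 1000);
}
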
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 09fee9a1aa15..10754a375668 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
                         _raw_yield();
                         count = spin_retry;
                 }
-                if (!__raw_read_can_lock(rw))
+                if (!arch_read_can_lock(rw))
                         continue;
                 old = rw->lock & 0x7fffffffU;
                 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
                         _raw_yield();
                         count = spin_retry;
                 }
-                if (!__raw_read_can_lock(rw))
+                if (!arch_read_can_lock(rw))
                         continue;
                 old = rw->lock & 0x7fffffffU;
                 local_irq_disable();
@@ -151,7 +151,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
         int count = spin_retry;
 
         while (count-- > 0) {
-                if (!__raw_read_can_lock(rw))
+                if (!arch_read_can_lock(rw))
                         continue;
                 old = rw->lock & 0x7fffffffU;
                 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
                         _raw_yield();
                         count = spin_retry;
                 }
-                if (!__raw_write_can_lock(rw))
+                if (!arch_write_can_lock(rw))
                         continue;
                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
                         return;
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
                         _raw_yield();
                         count = spin_retry;
                 }
-                if (!__raw_write_can_lock(rw))
+                if (!arch_write_can_lock(rw))
                         continue;
                 local_irq_disable();
                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -202,7 +202,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
         int count = spin_retry;
 
         while (count-- > 0) {
-                if (!__raw_write_can_lock(rw))
+                if (!arch_write_can_lock(rw))
                         continue;
                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
                         return 1;
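
All of these slow paths share one discipline, visible in each hunk above: spin a bounded number of times, yield when the budget runs out (on s390, to the hypervisor via _raw_yield()), and only attempt the compare-and-swap when the can_lock test says it might succeed, keeping the contended cacheline mostly shared. A portable sketch of the write-side wait loop, completing the fast-path sketch given before these hunks (sched_yield() stands in for _raw_yield(); 0x80000000 mirrors the writer bit above):

#include <sched.h>
#include <stdatomic.h>

void toy_write_lock_wait(_Atomic unsigned int *lock, int spin_retry)
{
        int count = spin_retry;
        unsigned int expected;

        for (;;) {
                if (count-- <= 0) {
                        sched_yield();  /* stand-in for _raw_yield() */
                        count = spin_retry;
                }
                /* Cheap read first: don't CAS while the lock looks taken. */
                if (atomic_load_explicit(lock, memory_order_relaxed) != 0)
                        continue;
                expected = 0;
                if (atomic_compare_exchange_strong(lock, &expected, 0x80000000u))
                        return; /* writer bit installed */
        }
}
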
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index 7f3626aac869..bdc0f3b6c56a 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -100,21 +100,21 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)  ((x)->lock > 0)
+#define arch_read_can_lock(x)   ((x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x)  ((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         unsigned long tmp;
 
         __asm__ __volatile__ (
                 "1:                                             \n\t"
-                "movli.l        @%1, %0 ! __raw_read_lock       \n\t"
+                "movli.l        @%1, %0 ! arch_read_lock        \n\t"
                 "cmp/pl         %0                              \n\t"
                 "bf             1b                              \n\t"
                 "add            #-1, %0                         \n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
         );
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
         unsigned long tmp;
 
         __asm__ __volatile__ (
                 "1:                                             \n\t"
-                "movli.l        @%1, %0 ! __raw_read_unlock     \n\t"
+                "movli.l        @%1, %0 ! arch_read_unlock      \n\t"
                 "add            #1, %0                          \n\t"
                 "movco.l        %0, @%1                         \n\t"
                 "bf             1b                              \n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
         );
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
         unsigned long tmp;
 
         __asm__ __volatile__ (
                 "1:                                             \n\t"
-                "movli.l        @%1, %0 ! __raw_write_lock      \n\t"
+                "movli.l        @%1, %0 ! arch_write_lock       \n\t"
                 "cmp/hs         %2, %0                          \n\t"
                 "bf             1b                              \n\t"
                 "sub            %2, %0                          \n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
         );
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
         __asm__ __volatile__ (
-                "mov.l          %1, @%0 ! __raw_write_unlock    \n\t"
+                "mov.l          %1, @%0 ! arch_write_unlock     \n\t"
                 :
                 : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                 : "t", "memory"
         );
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
         unsigned long tmp, oldval;
 
         __asm__ __volatile__ (
                 "1:                                             \n\t"
-                "movli.l        @%2, %0 ! __raw_read_trylock    \n\t"
+                "movli.l        @%2, %0 ! arch_read_trylock     \n\t"
                 "mov            %0, %1                          \n\t"
                 "cmp/pl         %0                              \n\t"
                 "bf             2f                              \n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
         return (oldval > 0);
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
         unsigned long tmp, oldval;
 
         __asm__ __volatile__ (
                 "1:                                             \n\t"
-                "movli.l        @%2, %0 ! __raw_write_trylock   \n\t"
+                "movli.l        @%2, %0 ! arch_write_trylock    \n\t"
                 "mov            %0, %1                          \n\t"
                 "cmp/hs         %3, %0                          \n\t"
                 "bf             2f                              \n\t"
@@ -216,8 +216,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
         return (oldval > (RW_LOCK_BIAS - 1));
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
| 220 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 220 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
| 221 | 221 | ||
| 222 | #define arch_spin_relax(lock) cpu_relax() | 222 | #define arch_spin_relax(lock) cpu_relax() |
| 223 | #define arch_read_relax(lock) cpu_relax() | 223 | #define arch_read_relax(lock) cpu_relax() |
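The sh assembly implements a biased-counter rwlock: the word starts at RW_LOCK_BIAS, each reader takes 1 from it, and a writer takes the whole bias, so lock > 0 means readers may still enter and lock == RW_LOCK_BIAS means the lock is completely free (exactly the two can_lock tests above). A C11 model of the trylock arithmetic, assuming that layout; the movli.l/movco.l pairs are an ll/sc loop, which the compare-exchange loop stands in for:

#include <stdatomic.h>

#define RW_LOCK_BIAS 0x01000000

typedef struct { atomic_int lock; } model_rwlock_t;  /* starts at RW_LOCK_BIAS */

static int model_read_trylock(model_rwlock_t *rw)
{
    int old = atomic_load(&rw->lock);
    /* cmp/pl in the asm: proceed only while the count is positive */
    while (old > 0)
        if (atomic_compare_exchange_weak(&rw->lock, &old, old - 1))
            return 1;
    return 0;
}

static void model_read_unlock(model_rwlock_t *rw)
{
    atomic_fetch_add(&rw->lock, 1);     /* give the reader slot back */
}

static int model_write_trylock(model_rwlock_t *rw)
{
    /* cmp/hs: the full bias must be available; since the count never
     * exceeds the bias, that is an equality test in this model */
    int expected = RW_LOCK_BIAS;
    return atomic_compare_exchange_strong(&rw->lock, &expected, 0);
}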
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 06d37e588fde..7f9b9dba38a6 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h | |||
| @@ -76,7 +76,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
| 76 | * | 76 | * |
| 77 | * Unfortunately this scheme limits us to ~16,000,000 cpus. | 77 | * Unfortunately this scheme limits us to ~16,000,000 cpus. |
| 78 | */ | 78 | */ |
| 79 | static inline void arch_read_lock(arch_rwlock_t *rw) | 79 | static inline void __arch_read_lock(arch_rwlock_t *rw) |
| 80 | { | 80 | { |
| 81 | register arch_rwlock_t *lp asm("g1"); | 81 | register arch_rwlock_t *lp asm("g1"); |
| 82 | lp = rw; | 82 | lp = rw; |
| @@ -89,14 +89,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) | |||
| 89 | : "g2", "g4", "memory", "cc"); | 89 | : "g2", "g4", "memory", "cc"); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | #define __raw_read_lock(lock) \ | 92 | #define arch_read_lock(lock) \ |
| 93 | do { unsigned long flags; \ | 93 | do { unsigned long flags; \ |
| 94 | local_irq_save(flags); \ | 94 | local_irq_save(flags); \ |
| 95 | arch_read_lock(lock); \ | 95 | __arch_read_lock(lock); \ |
| 96 | local_irq_restore(flags); \ | 96 | local_irq_restore(flags); \ |
| 97 | } while(0) | 97 | } while(0) |
| 98 | 98 | ||
| 99 | static inline void arch_read_unlock(arch_rwlock_t *rw) | 99 | static inline void __arch_read_unlock(arch_rwlock_t *rw) |
| 100 | { | 100 | { |
| 101 | register arch_rwlock_t *lp asm("g1"); | 101 | register arch_rwlock_t *lp asm("g1"); |
| 102 | lp = rw; | 102 | lp = rw; |
| @@ -109,14 +109,14 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) | |||
| 109 | : "g2", "g4", "memory", "cc"); | 109 | : "g2", "g4", "memory", "cc"); |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | #define __raw_read_unlock(lock) \ | 112 | #define arch_read_unlock(lock) \ |
| 113 | do { unsigned long flags; \ | 113 | do { unsigned long flags; \ |
| 114 | local_irq_save(flags); \ | 114 | local_irq_save(flags); \ |
| 115 | arch_read_unlock(lock); \ | 115 | __arch_read_unlock(lock); \ |
| 116 | local_irq_restore(flags); \ | 116 | local_irq_restore(flags); \ |
| 117 | } while(0) | 117 | } while(0) |
| 118 | 118 | ||
| 119 | static inline void __raw_write_lock(arch_rwlock_t *rw) | 119 | static inline void arch_write_lock(arch_rwlock_t *rw) |
| 120 | { | 120 | { |
| 121 | register arch_rwlock_t *lp asm("g1"); | 121 | register arch_rwlock_t *lp asm("g1"); |
| 122 | lp = rw; | 122 | lp = rw; |
| @@ -130,7 +130,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) | |||
| 130 | *(volatile __u32 *)&lp->lock = ~0U; | 130 | *(volatile __u32 *)&lp->lock = ~0U; |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | static inline int __raw_write_trylock(arch_rwlock_t *rw) | 133 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
| 134 | { | 134 | { |
| 135 | unsigned int val; | 135 | unsigned int val; |
| 136 | 136 | ||
| @@ -150,7 +150,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) | |||
| 150 | return (val == 0); | 150 | return (val == 0); |
| 151 | } | 151 | } |
| 152 | 152 | ||
| 153 | static inline int arch_read_trylock(arch_rwlock_t *rw) | 153 | static inline int __arch_read_trylock(arch_rwlock_t *rw) |
| 154 | { | 154 | { |
| 155 | register arch_rwlock_t *lp asm("g1"); | 155 | register arch_rwlock_t *lp asm("g1"); |
| 156 | register int res asm("o0"); | 156 | register int res asm("o0"); |
| @@ -165,27 +165,27 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) | |||
| 165 | return res; | 165 | return res; |
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | #define __raw_read_trylock(lock) \ | 168 | #define arch_read_trylock(lock) \ |
| 169 | ({ unsigned long flags; \ | 169 | ({ unsigned long flags; \ |
| 170 | int res; \ | 170 | int res; \ |
| 171 | local_irq_save(flags); \ | 171 | local_irq_save(flags); \ |
| 172 | res = arch_read_trylock(lock); \ | 172 | res = __arch_read_trylock(lock); \ |
| 173 | local_irq_restore(flags); \ | 173 | local_irq_restore(flags); \ |
| 174 | res; \ | 174 | res; \ |
| 175 | }) | 175 | }) |
| 176 | 176 | ||
| 177 | #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) | 177 | #define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0) |
| 178 | 178 | ||
| 179 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | 179 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
| 180 | #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) | 180 | #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) |
| 181 | #define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) | 181 | #define arch_write_lock_flags(rw, flags) arch_write_lock(rw) |
| 182 | 182 | ||
| 183 | #define arch_spin_relax(lock) cpu_relax() | 183 | #define arch_spin_relax(lock) cpu_relax() |
| 184 | #define arch_read_relax(lock) cpu_relax() | 184 | #define arch_read_relax(lock) cpu_relax() |
| 185 | #define arch_write_relax(lock) cpu_relax() | 185 | #define arch_write_relax(lock) cpu_relax() |
| 186 | 186 | ||
| 187 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) | 187 | #define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) |
| 188 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 188 | #define arch_write_can_lock(rw) (!(rw)->lock) |
| 189 | 189 | ||
| 190 | #endif /* !(__ASSEMBLY__) */ | 190 | #endif /* !(__ASSEMBLY__) */ |
| 191 | 191 | ||
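The sparc32 word packs both roles into one 32-bit value: the low byte is grabbed with ldstub while the word is being manipulated, and the remaining 24 bits count readers, which is where the & 0xff test and the "~16,000,000 cpus" comment come from. Because that ldstub sequence is not interrupt-safe, the public arch_* macros bracket the renamed __arch_* helpers with local_irq_save()/restore(). A sketch of the can_lock tests under that (assumed) layout:

/* Assumed sparc32 layout: bits 31..8 count readers, bits 7..0 are
 * non-zero while a writer holds the lock or the word is in flux. */
#define WRITER_MASK 0xffu
#define READER_ONE  0x100u   /* one reader, counted above the low byte */

static int model_read_can_lock(unsigned int lock)
{
    return (lock & WRITER_MASK) == 0;   /* arch_read_can_lock() */
}

static int model_write_can_lock(unsigned int lock)
{
    return lock == 0;                   /* no readers, no writer */
}

/* 24 bits of reader count is the source of the ~16M (2^24) CPU limit
 * the comment in the diff mentions. */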
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 2b22d7f2c2fb..073936a8b275 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h | |||
| @@ -210,17 +210,17 @@ static int inline arch_write_trylock(arch_rwlock_t *lock) | |||
| 210 | return result; | 210 | return result; |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | #define __raw_read_lock(p) arch_read_lock(p) | 213 | #define arch_read_lock(p) arch_read_lock(p) |
| 214 | #define __raw_read_lock_flags(p, f) arch_read_lock(p) | 214 | #define arch_read_lock_flags(p, f) arch_read_lock(p) |
| 215 | #define __raw_read_trylock(p) arch_read_trylock(p) | 215 | #define arch_read_trylock(p) arch_read_trylock(p) |
| 216 | #define __raw_read_unlock(p) arch_read_unlock(p) | 216 | #define arch_read_unlock(p) arch_read_unlock(p) |
| 217 | #define __raw_write_lock(p) arch_write_lock(p) | 217 | #define arch_write_lock(p) arch_write_lock(p) |
| 218 | #define __raw_write_lock_flags(p, f) arch_write_lock(p) | 218 | #define arch_write_lock_flags(p, f) arch_write_lock(p) |
| 219 | #define __raw_write_unlock(p) arch_write_unlock(p) | 219 | #define arch_write_unlock(p) arch_write_unlock(p) |
| 220 | #define __raw_write_trylock(p) arch_write_trylock(p) | 220 | #define arch_write_trylock(p) arch_write_trylock(p) |
| 221 | 221 | ||
| 222 | #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) | 222 | #define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) |
| 223 | #define __raw_write_can_lock(rw) (!(rw)->lock) | 223 | #define arch_write_can_lock(rw) (!(rw)->lock) |
| 224 | 224 | ||
| 225 | #define arch_spin_relax(lock) cpu_relax() | 225 | #define arch_spin_relax(lock) cpu_relax() |
| 226 | #define arch_read_relax(lock) cpu_relax() | 226 | #define arch_read_relax(lock) cpu_relax() |
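The sparc64 result looks suspicious: #define arch_read_lock(p) arch_read_lock(p) maps a macro onto an inline function of the same name. It is well defined, though, because the preprocessor never re-expands a macro's own name while expanding it (C99/C11 6.10.3.4), so the replacement text resolves to the inline function. A standalone demonstration:

#include <stdio.h>

static inline int arch_read_trylock(int *p)
{
    return *p >= 0;          /* stand-in body for the demonstration */
}

/* Self-referential, like the sparc64 defines above. */
#define arch_read_trylock(p) arch_read_trylock(p)

int main(void)
{
    int v = 1;
    /* The macro expands exactly once; its own name inside the
     * replacement list is not rescanned, so this calls the function. */
    printf("%d\n", arch_read_trylock(&v));
    return 0;
}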
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 99cb86e843a0..3089f70c0c52 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
| @@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) | |||
| 232 | * read_can_lock - would read_trylock() succeed? | 232 | * read_can_lock - would read_trylock() succeed? |
| 233 | * @lock: the rwlock in question. | 233 | * @lock: the rwlock in question. |
| 234 | */ | 234 | */ |
| 235 | static inline int __raw_read_can_lock(arch_rwlock_t *lock) | 235 | static inline int arch_read_can_lock(arch_rwlock_t *lock) |
| 236 | { | 236 | { |
| 237 | return (int)(lock)->lock > 0; | 237 | return (int)(lock)->lock > 0; |
| 238 | } | 238 | } |
| @@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(arch_rwlock_t *lock) | |||
| 241 | * write_can_lock - would write_trylock() succeed? | 241 | * write_can_lock - would write_trylock() succeed? |
| 242 | * @lock: the rwlock in question. | 242 | * @lock: the rwlock in question. |
| 243 | */ | 243 | */ |
| 244 | static inline int __raw_write_can_lock(arch_rwlock_t *lock) | 244 | static inline int arch_write_can_lock(arch_rwlock_t *lock) |
| 245 | { | 245 | { |
| 246 | return (lock)->lock == RW_LOCK_BIAS; | 246 | return (lock)->lock == RW_LOCK_BIAS; |
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | static inline void __raw_read_lock(arch_rwlock_t *rw) | 249 | static inline void arch_read_lock(arch_rwlock_t *rw) |
| 250 | { | 250 | { |
| 251 | asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" | 251 | asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" |
| 252 | "jns 1f\n" | 252 | "jns 1f\n" |
| @@ -255,7 +255,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) | |||
| 255 | ::LOCK_PTR_REG (rw) : "memory"); | 255 | ::LOCK_PTR_REG (rw) : "memory"); |
| 256 | } | 256 | } |
| 257 | 257 | ||
| 258 | static inline void __raw_write_lock(arch_rwlock_t *rw) | 258 | static inline void arch_write_lock(arch_rwlock_t *rw) |
| 259 | { | 259 | { |
| 260 | asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" | 260 | asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" |
| 261 | "jz 1f\n" | 261 | "jz 1f\n" |
| @@ -264,7 +264,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) | |||
| 264 | ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); | 264 | ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); |
| 265 | } | 265 | } |
| 266 | 266 | ||
| 267 | static inline int __raw_read_trylock(arch_rwlock_t *lock) | 267 | static inline int arch_read_trylock(arch_rwlock_t *lock) |
| 268 | { | 268 | { |
| 269 | atomic_t *count = (atomic_t *)lock; | 269 | atomic_t *count = (atomic_t *)lock; |
| 270 | 270 | ||
| @@ -274,7 +274,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *lock) | |||
| 274 | return 0; | 274 | return 0; |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | static inline int __raw_write_trylock(arch_rwlock_t *lock) | 277 | static inline int arch_write_trylock(arch_rwlock_t *lock) |
| 278 | { | 278 | { |
| 279 | atomic_t *count = (atomic_t *)lock; | 279 | atomic_t *count = (atomic_t *)lock; |
| 280 | 280 | ||
| @@ -284,19 +284,19 @@ static inline int __raw_write_trylock(arch_rwlock_t *lock) | |||
| 284 | return 0; | 284 | return 0; |
| 285 | } | 285 | } |
| 286 | 286 | ||
| 287 | static inline void __raw_read_unlock(arch_rwlock_t *rw) | 287 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
| 288 | { | 288 | { |
| 289 | asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); | 289 | asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); |
| 290 | } | 290 | } |
| 291 | 291 | ||
| 292 | static inline void __raw_write_unlock(arch_rwlock_t *rw) | 292 | static inline void arch_write_unlock(arch_rwlock_t *rw) |
| 293 | { | 293 | { |
| 294 | asm volatile(LOCK_PREFIX "addl %1, %0" | 294 | asm volatile(LOCK_PREFIX "addl %1, %0" |
| 295 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); | 295 | : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); |
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) | 298 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
| 299 | #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) | 299 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
| 300 | 300 | ||
| 301 | #define arch_spin_relax(lock) cpu_relax() | 301 | #define arch_spin_relax(lock) cpu_relax() |
| 302 | #define arch_read_relax(lock) cpu_relax() | 302 | #define arch_read_relax(lock) cpu_relax() |
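The x86 fast paths run the same biased arithmetic directly on the lock word: a reader subtracts 1 and the out-of-line slow path recovers when the result went negative, while a writer subtracts the whole RW_LOCK_BIAS. The trylocks do identical arithmetic through the atomic_t cast shown in the diff; a C11 model of that logic (a sketch of the semantics, not of the LOCK-prefixed asm):

#include <stdatomic.h>

#define RW_LOCK_BIAS 0x01000000

/* arch_read_trylock(): take a reader slot, back out if none was free. */
static int model_read_trylock(atomic_int *count)
{
    if (atomic_fetch_sub(count, 1) - 1 >= 0)   /* atomic_dec_return() >= 0 */
        return 1;
    atomic_fetch_add(count, 1);                /* undo the failed claim */
    return 0;
}

/* arch_write_trylock(): succeed only if the whole bias was available. */
static int model_write_trylock(atomic_int *count)
{
    if (atomic_fetch_sub(count, RW_LOCK_BIAS) == RW_LOCK_BIAS)
        return 1;                              /* count dropped to zero */
    atomic_fetch_add(count, RW_LOCK_BIAS);     /* readers/writer inside */
    return 0;
}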
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index 73785b0bd6b9..5725b034defe 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h | |||
| @@ -38,20 +38,20 @@ do { \ | |||
| 38 | extern int _raw_write_trylock(rwlock_t *lock); | 38 | extern int _raw_write_trylock(rwlock_t *lock); |
| 39 | extern void _raw_write_unlock(rwlock_t *lock); | 39 | extern void _raw_write_unlock(rwlock_t *lock); |
| 40 | #else | 40 | #else |
| 41 | # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) | 41 | # define _raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) |
| 42 | # define _raw_read_lock_flags(lock, flags) \ | 42 | # define _raw_read_lock_flags(lock, flags) \ |
| 43 | __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) | 43 | arch_read_lock_flags(&(lock)->raw_lock, *(flags)) |
| 44 | # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) | 44 | # define _raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) |
| 45 | # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) | 45 | # define _raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock) |
| 46 | # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) | 46 | # define _raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock) |
| 47 | # define _raw_write_lock_flags(lock, flags) \ | 47 | # define _raw_write_lock_flags(lock, flags) \ |
| 48 | __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) | 48 | arch_write_lock_flags(&(lock)->raw_lock, *(flags)) |
| 49 | # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) | 49 | # define _raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) |
| 50 | # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) | 50 | # define _raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock) |
| 51 | #endif | 51 | #endif |
| 52 | 52 | ||
| 53 | #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) | 53 | #define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) |
| 54 | #define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) | 54 | #define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock) |
| 55 | 55 | ||
| 56 | /* | 56 | /* |
| 57 | * Define the various rw_lock methods. Note we define these | 57 | * Define the various rw_lock methods. Note we define these |
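This header is the one place where the generic _raw_* layer binds to the per-architecture implementations, so the rename here is what makes every arch_* definition above reachable. Roughly, for a non-debug SMP build:

/* Call layering after this patch (sketch):
 *
 *   read_lock(&l)                        public API
 *     -> _raw_read_lock(&l)              generic layer: preemption and
 *                                        lockdep bookkeeping
 *       -> arch_read_lock(&l.raw_lock)   per-arch fast path (asm above)
 *
 * With CONFIG_DEBUG_SPINLOCK the middle step instead goes through the
 * out-of-line checkers in lib/spinlock_debug.c at the end of this diff.
 */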
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 1d3bcc3cf7c6..b14f6a91e19f 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h | |||
| @@ -49,12 +49,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
| 49 | /* | 49 | /* |
| 50 | * Read-write spinlocks. No debug version. | 50 | * Read-write spinlocks. No debug version. |
| 51 | */ | 51 | */ |
| 52 | #define __raw_read_lock(lock) do { (void)(lock); } while (0) | 52 | #define arch_read_lock(lock) do { (void)(lock); } while (0) |
| 53 | #define __raw_write_lock(lock) do { (void)(lock); } while (0) | 53 | #define arch_write_lock(lock) do { (void)(lock); } while (0) |
| 54 | #define __raw_read_trylock(lock) ({ (void)(lock); 1; }) | 54 | #define arch_read_trylock(lock) ({ (void)(lock); 1; }) |
| 55 | #define __raw_write_trylock(lock) ({ (void)(lock); 1; }) | 55 | #define arch_write_trylock(lock) ({ (void)(lock); 1; }) |
| 56 | #define __raw_read_unlock(lock) do { (void)(lock); } while (0) | 56 | #define arch_read_unlock(lock) do { (void)(lock); } while (0) |
| 57 | #define __raw_write_unlock(lock) do { (void)(lock); } while (0) | 57 | #define arch_write_unlock(lock) do { (void)(lock); } while (0) |
| 58 | 58 | ||
| 59 | #else /* DEBUG_SPINLOCK */ | 59 | #else /* DEBUG_SPINLOCK */ |
| 60 | #define arch_spin_is_locked(lock) ((void)(lock), 0) | 60 | #define arch_spin_is_locked(lock) ((void)(lock), 0) |
| @@ -67,8 +67,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) | |||
| 67 | 67 | ||
| 68 | #define arch_spin_is_contended(lock) (((void)(lock), 0)) | 68 | #define arch_spin_is_contended(lock) (((void)(lock), 0)) |
| 69 | 69 | ||
| 70 | #define __raw_read_can_lock(lock) (((void)(lock), 1)) | 70 | #define arch_read_can_lock(lock) (((void)(lock), 1)) |
| 71 | #define __raw_write_can_lock(lock) (((void)(lock), 1)) | 71 | #define arch_write_can_lock(lock) (((void)(lock), 1)) |
| 72 | 72 | ||
| 73 | #define arch_spin_unlock_wait(lock) \ | 73 | #define arch_spin_unlock_wait(lock) \ |
| 74 | do { cpu_relax(); } while (arch_spin_is_locked(lock)) | 74 | do { cpu_relax(); } while (arch_spin_is_locked(lock)) |
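On uniprocessor builds there is nothing to exclude against, so every rwlock operation above collapses to a no-op; the (void)(lock) keeps the argument evaluated and referenced, which preserves side effects and silences unused-variable warnings. A small illustration (the macro name is hypothetical):

#define model_read_trylock(lock) ({ (void)(lock); 1; })  /* UP stub */

int poll_next(int *p)
{
    /* (void)(lock) still evaluates the argument, so p is advanced
     * exactly once even though no locking code is emitted. */
    return model_read_trylock(p++);
}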
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 3f72f10d9cb0..0cea0bf6114e 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c | |||
| @@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock) | |||
| 176 | 176 | ||
| 177 | for (;;) { | 177 | for (;;) { |
| 178 | for (i = 0; i < loops; i++) { | 178 | for (i = 0; i < loops; i++) { |
| 179 | if (__raw_read_trylock(&lock->raw_lock)) | 179 | if (arch_read_trylock(&lock->raw_lock)) |
| 180 | return; | 180 | return; |
| 181 | __delay(1); | 181 | __delay(1); |
| 182 | } | 182 | } |
| @@ -196,12 +196,12 @@ static void __read_lock_debug(rwlock_t *lock) | |||
| 196 | void _raw_read_lock(rwlock_t *lock) | 196 | void _raw_read_lock(rwlock_t *lock) |
| 197 | { | 197 | { |
| 198 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | 198 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); |
| 199 | __raw_read_lock(&lock->raw_lock); | 199 | arch_read_lock(&lock->raw_lock); |
| 200 | } | 200 | } |
| 201 | 201 | ||
| 202 | int _raw_read_trylock(rwlock_t *lock) | 202 | int _raw_read_trylock(rwlock_t *lock) |
| 203 | { | 203 | { |
| 204 | int ret = __raw_read_trylock(&lock->raw_lock); | 204 | int ret = arch_read_trylock(&lock->raw_lock); |
| 205 | 205 | ||
| 206 | #ifndef CONFIG_SMP | 206 | #ifndef CONFIG_SMP |
| 207 | /* | 207 | /* |
| @@ -215,7 +215,7 @@ int _raw_read_trylock(rwlock_t *lock) | |||
| 215 | void _raw_read_unlock(rwlock_t *lock) | 215 | void _raw_read_unlock(rwlock_t *lock) |
| 216 | { | 216 | { |
| 217 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); | 217 | RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); |
| 218 | __raw_read_unlock(&lock->raw_lock); | 218 | arch_read_unlock(&lock->raw_lock); |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | static inline void debug_write_lock_before(rwlock_t *lock) | 221 | static inline void debug_write_lock_before(rwlock_t *lock) |
| @@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock) | |||
| 251 | 251 | ||
| 252 | for (;;) { | 252 | for (;;) { |
| 253 | for (i = 0; i < loops; i++) { | 253 | for (i = 0; i < loops; i++) { |
| 254 | if (__raw_write_trylock(&lock->raw_lock)) | 254 | if (arch_write_trylock(&lock->raw_lock)) |
| 255 | return; | 255 | return; |
| 256 | __delay(1); | 256 | __delay(1); |
| 257 | } | 257 | } |
| @@ -271,13 +271,13 @@ static void __write_lock_debug(rwlock_t *lock) | |||
| 271 | void _raw_write_lock(rwlock_t *lock) | 271 | void _raw_write_lock(rwlock_t *lock) |
| 272 | { | 272 | { |
| 273 | debug_write_lock_before(lock); | 273 | debug_write_lock_before(lock); |
| 274 | __raw_write_lock(&lock->raw_lock); | 274 | arch_write_lock(&lock->raw_lock); |
| 275 | debug_write_lock_after(lock); | 275 | debug_write_lock_after(lock); |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | int _raw_write_trylock(rwlock_t *lock) | 278 | int _raw_write_trylock(rwlock_t *lock) |
| 279 | { | 279 | { |
| 280 | int ret = __raw_write_trylock(&lock->raw_lock); | 280 | int ret = arch_write_trylock(&lock->raw_lock); |
| 281 | 281 | ||
| 282 | if (ret) | 282 | if (ret) |
| 283 | debug_write_lock_after(lock); | 283 | debug_write_lock_after(lock); |
| @@ -293,5 +293,5 @@ int _raw_write_trylock(rwlock_t *lock) | |||
| 293 | void _raw_write_unlock(rwlock_t *lock) | 293 | void _raw_write_unlock(rwlock_t *lock) |
| 294 | { | 294 | { |
| 295 | debug_write_unlock(lock); | 295 | debug_write_unlock(lock); |
| 296 | __raw_write_unlock(&lock->raw_lock); | 296 | arch_write_unlock(&lock->raw_lock); |
| 297 | } | 297 | } |
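The debug build routes everything through these out-of-line wrappers: validate the magic value, then poll arch_*_trylock() with a bounded delay loop so a stuck lock produces a diagnostic instead of a silent hang. The shape, condensed (the budget and the reporting step are illustrative):

/* Condensed shape of __read_lock_debug()/__write_lock_debug(). */
static void model_lock_debug(int (*trylock)(void *), void *lock,
                             unsigned long loops)
{
    for (;;) {
        unsigned long i;

        for (i = 0; i < loops; i++) {
            if (trylock(lock))
                return;
            /* __delay(1): short calibrated busy-wait between polls */
        }
        /* budget exhausted: the kernel prints a "lockup suspected"
         * report (lock address, owner CPU) here, then tries again */
    }
}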
