Diffstat (limited to 'include/asm-arm/spinlock.h')
-rw-r--r--	include/asm-arm/spinlock.h	|  50
1 file changed, 17 insertions(+), 33 deletions(-)
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 1f906d09b688..cb4906b45555 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -16,21 +16,14 @@
  * Unlocked value: 0
  * Locked value: 1
  */
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
 
-#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
+#define __raw_spin_is_locked(x)		((x)->lock != 0)
+#define __raw_spin_unlock_wait(lock) \
+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
-#define spin_is_locked(x)	((x)->lock != 0)
-#define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -47,7 +40,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 	smp_mb();
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	unsigned long tmp;
 
@@ -67,7 +60,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 	}
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	smp_mb();
 
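The spinlock hunks above are a pure interface rename: the per-architecture spinlock_t typedef (with its CONFIG_PREEMPT break_lock field) and the SPIN_LOCK_UNLOCKED / spin_lock_init / spin_is_locked definitions move out of the arch header, which now only implements __raw_* operations on raw_spinlock_t for the generic spinlock layer to wrap. The hunk context elides the ARM assembly bodies, so as a rough illustration of the semantics those bodies provide, here is a minimal C11 sketch; the demo_* names are hypothetical and the real kernel code uses ldrex/strex assembly, not compiler atomics. The rwlock hunks follow below.

/* A minimal C11 illustration, NOT the kernel's implementation: the real
 * __raw_spin_* bodies (elided by the hunk context) are ldrex/strex asm.
 * All demo_* names are hypothetical. */
#include <stdatomic.h>

typedef struct { atomic_uint lock; } demo_raw_spinlock_t;

static inline void demo_raw_spin_lock(demo_raw_spinlock_t *l)
{
	unsigned int old;
	do {
		old = 0;	/* expect "unlocked" (0), try to swing it to 1 */
	} while (!atomic_compare_exchange_weak_explicit(&l->lock, &old, 1,
			memory_order_acquire, memory_order_relaxed));
	/* acquire ordering plays the role of the smp_mb() after locking */
}

static inline int demo_raw_spin_trylock(demo_raw_spinlock_t *l)
{
	unsigned int old = 0;
	return atomic_compare_exchange_strong_explicit(&l->lock, &old, 1,
			memory_order_acquire, memory_order_relaxed);
}

static inline void demo_raw_spin_unlock(demo_raw_spinlock_t *l)
{
	/* release ordering mirrors the smp_mb() before the unlocking store */
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}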
@@ -80,23 +73,14 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
 
 /*
  * RWLOCKS
- */
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
-#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)
-#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
-
-/*
+ *
+ *
  * Write locks are easy - we just set bit 31.  When unlocking, we can
  * just write zero since the lock is exclusively held.
  */
-static inline void _raw_write_lock(rwlock_t *rw)
+#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
+
+static inline void __raw_write_lock(rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -113,7 +97,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
 	smp_mb();
 }
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -133,7 +117,7 @@ static inline int _raw_write_trylock(rwlock_t *rw)
 	}
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	smp_mb();
 
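As the comment block retained in the hunk above says, write locks set bit 31 and are exclusive, so unlocking is a plain store of zero. A hedged C11 sketch of that bit-31 encoding (demo_* names again hypothetical, standing in for the elided ldrex/strex loops):

/* Sketch of the bit-31 write-lock encoding described in the comment
 * above; demo_* names are hypothetical, not kernel API. */
#include <stdatomic.h>

#define DEMO_WRITE_BIT	0x80000000u	/* bit 31: a writer holds the lock */

typedef struct { atomic_uint lock; } demo_raw_rwlock_t;

static inline void demo_raw_write_lock(demo_raw_rwlock_t *rw)
{
	unsigned int old;
	do {
		old = 0;	/* writer needs the word idle: no readers, no writer */
	} while (!atomic_compare_exchange_weak_explicit(&rw->lock, &old,
			DEMO_WRITE_BIT, memory_order_acquire, memory_order_relaxed));
}

static inline int demo_raw_write_trylock(demo_raw_rwlock_t *rw)
{
	unsigned int old = 0;
	return atomic_compare_exchange_strong_explicit(&rw->lock, &old,
			DEMO_WRITE_BIT, memory_order_acquire, memory_order_relaxed);
}

static inline void demo_raw_write_unlock(demo_raw_rwlock_t *rw)
{
	/* the writer is exclusive, so a plain store of zero suffices */
	atomic_store_explicit(&rw->lock, 0, memory_order_release);
}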
@@ -156,7 +140,7 @@ static inline void _raw_write_unlock(rwlock_t *rw)
  * currently active.  However, we know we won't have any write
  * locks.
  */
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
@@ -173,7 +157,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
 	smp_mb();
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
@@ -190,6 +174,6 @@ static inline void _raw_read_unlock(rwlock_t *rw)
 	: "cc");
 }
 
-#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock)	generic__raw_read_trylock(lock)
 
 #endif	/* __ASM_SPINLOCK_H */
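Read locks take the complementary path: a reader bumps a count in the low bits and may only do so while bit 31 is clear, which is a plausible reading of the partially elided comment above ("we know we won't have any write locks" while readers are active). Continuing the hypothetical demo_raw_rwlock_t sketch:

/* Continuation of the demo_raw_rwlock_t sketch: readers count in
 * bits 0..30 and may only enter while the write bit is clear. */
static inline void demo_raw_read_lock(demo_raw_rwlock_t *rw)
{
	unsigned int old;
	for (;;) {
		old = atomic_load_explicit(&rw->lock, memory_order_relaxed);
		if (old & DEMO_WRITE_BIT)
			continue;	/* a writer is active: keep spinning */
		/* take a reader reference by bumping the count */
		if (atomic_compare_exchange_weak_explicit(&rw->lock, &old,
				old + 1, memory_order_acquire, memory_order_relaxed))
			return;
	}
}

static inline void demo_raw_read_unlock(demo_raw_rwlock_t *rw)
{
	/* drop our reader reference; release orders the critical section */
	atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
}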
