Diffstat (limited to 'include/asm-ia64/spinlock.h')
 -rw-r--r--   include/asm-ia64/spinlock.h   69
 1 file changed, 26 insertions(+), 43 deletions(-)
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index d2430aa0d49d..5b78611411c3 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -17,28 +17,20 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-typedef struct {
-	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-#define spin_lock_init(x) ((x)->lock = 0)
+#define __raw_spin_lock_init(x) ((x)->lock = 0)
 
 #ifdef ASM_SUPPORTED
 /*
  * Try to get the lock. If we fail to get the lock, make a non-standard call to
  * ia64_spinlock_contention(). We do not use a normal call because that would force all
- * callers of spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that spin_lock() marks "clobbered".
+ * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
  */
 
 #define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
 
 static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
 {
 	register volatile unsigned int *ptr asm ("r31") = &lock->lock;
 
@@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
 #endif
 }
 
-#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 
 /* Unlock by doing an ordered store and releasing the cacheline with nta */
-static inline void _raw_spin_unlock(spinlock_t *x) {
+static inline void __raw_spin_unlock(raw_spinlock_t *x) {
 	barrier();
 	asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
 }
 
 #else /* !ASM_SUPPORTED */
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-# define _raw_spin_lock(x) \
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+# define __raw_spin_lock(x) \
 do { \
 	__u32 *ia64_spinlock_ptr = (__u32 *) (x); \
 	__u64 ia64_spinlock_val; \
@@ -117,29 +109,20 @@ do { \
 	} while (ia64_spinlock_val); \
 	} \
 } while (0)
-#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
+#define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
 #endif /* !ASM_SUPPORTED */
 
-#define spin_is_locked(x) ((x)->lock != 0)
-#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
-
-typedef struct {
-	volatile unsigned int read_counter : 24;
-	volatile unsigned int write_lock : 8;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+#define __raw_spin_is_locked(x) ((x)->lock != 0)
+#define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
+#define __raw_spin_unlock_wait(lock) \
+	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-#define read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
+#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
 
-#define _raw_read_lock(rw) \
+#define __raw_read_lock(rw) \
 do { \
-	rwlock_t *__read_lock_ptr = (rw); \
+	raw_rwlock_t *__read_lock_ptr = (rw); \
 	\
 	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
 		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -148,14 +131,14 @@ do { \
 	} \
 } while (0)
 
-#define _raw_read_unlock(rw) \
+#define __raw_read_unlock(rw) \
 do { \
-	rwlock_t *__read_lock_ptr = (rw); \
+	raw_rwlock_t *__read_lock_ptr = (rw); \
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
 } while (0)
 
 #ifdef ASM_SUPPORTED
-#define _raw_write_lock(rw) \
+#define __raw_write_lock(rw) \
 do { \
 	__asm__ __volatile__ ( \
 		"mov ar.ccv = r0\n" \
@@ -170,7 +153,7 @@ do { \
 		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
 } while(0)
 
-#define _raw_write_trylock(rw) \
+#define __raw_write_trylock(rw) \
 ({ \
 	register long result; \
 	\
@@ -182,7 +165,7 @@ do { \
 	(result == 0); \
 })
 
-static inline void _raw_write_unlock(rwlock_t *x)
+static inline void __raw_write_unlock(raw_rwlock_t *x)
 {
 	u8 *y = (u8 *)x;
 	barrier();
@@ -191,7 +174,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define _raw_write_lock(l) \
+#define __raw_write_lock(l) \
 ({ \
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
 	__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -202,7 +185,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
 	} while (ia64_val); \
 })
 
-#define _raw_write_trylock(rw) \
+#define __raw_write_trylock(rw) \
 ({ \
 	__u64 ia64_val; \
 	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -210,7 +193,7 @@ static inline void _raw_write_unlock(rwlock_t *x)
 	(ia64_val == 0); \
 })
 
-static inline void _raw_write_unlock(rwlock_t *x)
+static inline void __raw_write_unlock(raw_rwlock_t *x)
 {
 	barrier();
 	x->write_lock = 0;
@@ -218,6 +201,6 @@ static inline void _raw_write_unlock(rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #endif /* _ASM_IA64_SPINLOCK_H */
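
The !ASM_SUPPORTED fallback above spells out the locking protocol this header relies on: spin read-only until the lock word is 0, claim it with an acquire-ordered compare-and-exchange (cmpxchg_acq), and release it with an ordered store of 0 (st4.rel). A minimal user-space analogue using C11 atomics is sketched below; the names (toy_spinlock, toy_spin_lock, toy_spin_unlock) are illustrative only and are not part of the kernel API.

#include <stdatomic.h>

/* Illustrative sketch of the cmpxchg-acquire / store-release protocol
 * used by this header.  Not the kernel's implementation. */
typedef struct { atomic_uint lock; } toy_spinlock;

static void toy_spin_lock(toy_spinlock *l)
{
	unsigned int expected;

	for (;;) {
		/* Spin read-only until the lock word looks free... */
		while (atomic_load_explicit(&l->lock, memory_order_relaxed) != 0)
			;
		/* ...then try to claim it with an acquire cmpxchg (0 -> 1). */
		expected = 0;
		if (atomic_compare_exchange_weak_explicit(&l->lock, &expected, 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return;
	}
}

static void toy_spin_unlock(toy_spinlock *l)
{
	/* Release store of 0, analogous to "st4.rel.nta [lock] = r0". */
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}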
