author		Ingo Molnar <mingo@elte.hu>	2009-12-16 12:33:49 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-16 12:33:49 -0500
commit		ee1156c11a1121e118b0a7f2dec240f0d421b1fd (patch)
tree		b8771cc5a9758af9d7410fc519227c036c222130 /arch/ia64/include/asm/spinlock.h
parent		b9f8fcd55bbdb037e5332dbdb7b494f0b70861ac (diff)
parent		8bea8672edfca7ec5f661cafb218f1205863b343 (diff)
Merge branch 'linus' into sched/urgent
Conflicts:
kernel/sched_idletask.c
Merge reason: resolve the conflicts, pick up latest changes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/ia64/include/asm/spinlock.h')
-rw-r--r--	arch/ia64/include/asm/spinlock.h	76
1 file changed, 38 insertions(+), 38 deletions(-)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516d..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-#define __raw_spin_lock_init(x)	((x)->lock = 0)
+#define arch_spin_lock_init(x)	((x)->lock = 0)
 
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
@@ -38,7 +38,7 @@
 #define TICKET_BITS	15
 #define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int *p = (int *)&lock->lock, ticket, serve;
 
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	}
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->lock);
 
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return 0;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
 
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	int *p = (int *)&lock->lock, ticket;
 
@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
 	}
 }
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock_wait(lock);
 }
 
-#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)		(*(volatile int *)(rw) == 0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 
-#define __raw_read_lock(rw) \
+#define arch_read_lock(rw) \
 do { \
-	raw_rwlock_t *__read_lock_ptr = (rw); \
+	arch_rwlock_t *__read_lock_ptr = (rw); \
 	\
 	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
 		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -188,16 +188,16 @@ do { \
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_unlock(rw) \
+#define arch_read_unlock(rw) \
 do { \
-	raw_rwlock_t *__read_lock_ptr = (rw); \
+	arch_rwlock_t *__read_lock_ptr = (rw); \
 	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
 } while (0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
 ({ \
 	register long result; \
 	\
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
 	(result == 0); \
 })
 
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	u8 *y = (u8 *)x;
 	barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
 
-#define __raw_write_lock(l) \
+#define arch_write_lock(l) \
 ({ \
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
 	__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 	} while (ia64_val); \
 })
 
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
 ({ \
 	__u64 ia64_val; \
 	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 	(ia64_val == 0); \
 })
 
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
 {
 	barrier();
 	x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
 {
 	union {
-		raw_rwlock_t lock;
+		arch_rwlock_t lock;
 		__u32 word;
 	} old, new;
 	old.lock = new.lock = *x;
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* _ASM_IA64_SPINLOCK_H */
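
For context on what the renamed wrappers arch_spin_is_locked() and arch_spin_is_contended() forward to: the ticket-lock word packs a "next ticket" counter and a "now serving" counter, and the lock is held whenever the two halves differ. The stand-alone C sketch below mirrors the two expressions visible in the diff; the demo_* names, the TICKET_SHIFT value of 16, and the exact field placement are assumptions made purely for illustration and are not the layout the ia64 header actually uses.

#include <stdio.h>

/* Demo layout only: low 16 bits = next ticket to hand out,
 * bits 16 and up = ticket now being served.  TICKET_SHIFT = 16 is an
 * assumption for this sketch; the kernel header defines its own value. */
#define TICKET_SHIFT	16
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

typedef struct { unsigned int lock; } demo_spinlock_t;

/* Mirrors __ticket_spin_is_locked(): held when "next" and "serving" differ. */
static int demo_is_locked(const demo_spinlock_t *l)
{
	unsigned int tmp = l->lock;
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

/* Mirrors __ticket_spin_is_contended(): more than one ticket outstanding. */
static int demo_is_contended(const demo_spinlock_t *l)
{
	unsigned int tmp = l->lock;
	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

int main(void)
{
	demo_spinlock_t l = { 0 };	/* next == serving: lock is free */
	printf("free:      locked=%d contended=%d\n",
	       demo_is_locked(&l), demo_is_contended(&l));

	l.lock = 1;			/* one ticket taken, none served yet */
	printf("held:      locked=%d contended=%d\n",
	       demo_is_locked(&l), demo_is_contended(&l));

	l.lock = 3;			/* three tickets taken, none served yet */
	printf("contended: locked=%d contended=%d\n",
	       demo_is_locked(&l), demo_is_contended(&l));
	return 0;
}

Built with a plain cc, the sketch prints locked=0/contended=0 for the free word, locked=1/contended=0 with one outstanding ticket, and locked=1/contended=1 once more than one ticket is queued, which is the behaviour the renamed arch_* wrappers expose to the generic spinlock code.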