about | summary | refs | log | tree | commit | diff | stats
path: root/arch/ia64
diff options
context:
space:
mode:
author:    Robin Holt <holt@sgi.com>                            2009-04-02 19:59:47 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>       2009-04-02 22:05:11 -0400
commit2d09cde985702503970d7cc18d762fae17e1cf88 (patch)
treec296357ef9a7b0194da1471a1ab5c2b8270c258e /arch/ia64
parentf5f7eac41db827a47b2163330eecd7bb55ae9f12 (diff)
ia64: implement interrupt-enabling rwlocks
Implement __raw_read_lock_flags and __raw_write_lock_flags for the ia64
architecture.

[kosaki.motohiro@jp.fujitsu.com: typo fix]
Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
Signed-off-by: Robin Holt <holt@sgi.com>
Cc: <linux-arch@vger.kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/include/asm/spinlock.h   80
1 file changed, 63 insertions(+), 17 deletions(-)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 0a619618b2fa..13ab71576bc7 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -120,6 +120,38 @@ do { \
 #define __raw_read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
 #define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
 
+#ifdef ASM_SUPPORTED
+
+static __always_inline void
+__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+{
+	__asm__ __volatile__ (
+		"tbit.nz p6, p0 = %1,%2\n"
+		"br.few 3f\n"
+		"1:\n"
+		"fetchadd4.rel r2 = [%0], -1;;\n"
+		"(p6) ssm psr.i\n"
+		"2:\n"
+		"hint @pause\n"
+		"ld4 r2 = [%0];;\n"
+		"cmp4.lt p7,p0 = r2, r0\n"
+		"(p7) br.cond.spnt.few 2b\n"
+		"(p6) rsm psr.i\n"
+		";;\n"
+		"3:\n"
+		"fetchadd4.acq r2 = [%0], 1;;\n"
+		"cmp4.lt p7,p0 = r2, r0\n"
+		"(p7) br.cond.spnt.few 1b\n"
+		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+		: "p6", "p7", "r2", "memory");
+}
+
+#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+
+#else /* !ASM_SUPPORTED */
+
+#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+
 #define __raw_read_lock(rw)						\
 do {									\
 	raw_rwlock_t *__read_lock_ptr = (rw);				\
@@ -131,6 +163,8 @@ do { \
 	}								\
 } while (0)
 
+#endif /* !ASM_SUPPORTED */
+
 #define __raw_read_unlock(rw)						\
 do {									\
 	raw_rwlock_t *__read_lock_ptr = (rw);				\
@@ -138,20 +172,33 @@ do { \
 } while (0)
 
 #ifdef ASM_SUPPORTED
-#define __raw_write_lock(rw)						\
-do {									\
-	__asm__ __volatile__ (						\
-		"mov ar.ccv = r0\n"					\
-		"dep r29 = -1, r0, 31, 1;;\n"				\
-		"1:\n"							\
-		"ld4 r2 = [%0];;\n"					\
-		"cmp4.eq p0,p7 = r0,r2\n"				\
-		"(p7) br.cond.spnt.few 1b \n"				\
-		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"		\
-		"cmp4.eq p0,p7 = r0, r2\n"				\
-		"(p7) br.cond.spnt.few 1b;;\n"				\
-		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");	\
-} while(0)
+
+static __always_inline void
+__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+{
+	__asm__ __volatile__ (
+		"tbit.nz p6, p0 = %1, %2\n"
+		"mov ar.ccv = r0\n"
+		"dep r29 = -1, r0, 31, 1\n"
+		"br.few 3f;;\n"
+		"1:\n"
+		"(p6) ssm psr.i\n"
+		"2:\n"
+		"hint @pause\n"
+		"ld4 r2 = [%0];;\n"
+		"cmp4.eq p0,p7 = r0, r2\n"
+		"(p7) br.cond.spnt.few 2b\n"
+		"(p6) rsm psr.i\n"
+		";;\n"
+		"3:\n"
+		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
+		"cmp4.eq p0,p7 = r0, r2\n"
+		"(p7) br.cond.spnt.few 1b;;\n"
+		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
+}
+
+#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
 
 #define __raw_write_trylock(rw)						\
 ({									\
@@ -174,6 +221,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
+#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+
 #define __raw_write_lock(l)						\
 ({									\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);	\
@@ -213,9 +262,6 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
-
 #define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()