aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2007-10-18 06:06:51 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-18 17:37:29 -0400
commit44086d5286c57c86622dee37684decf32355aa7c (patch)
tree7660cc81afd36f3724cab197bc97860e3c20fc80 /include
parent7c29ca5b8d13287ed67d2863f4c5f7bfc1a15279 (diff)
alpha: lock bitops
Alpha can avoid one mb when acquiring a lock with test_and_set_bit_lock.

[bunk@kernel.org: alpha bitops.h must #include <asm/barrier.h>]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--include/asm-alpha/bitops.h43
1 files changed, 42 insertions, 1 deletions
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index f1bbe6cf0e84..381b4f5b4d5d 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -2,6 +2,7 @@
 #define _ALPHA_BITOPS_H
 
 #include <asm/compiler.h>
+#include <asm/barrier.h>
 
 /*
  * Copyright 1994, Linus Torvalds.
@@ -69,6 +70,13 @@ clear_bit(unsigned long nr, volatile void * addr)
 	:"Ir" (1UL << (nr & 31)), "m" (*m));
 }
 
+static inline void
+clear_bit_unlock(unsigned long nr, volatile void * addr)
+{
+	smp_mb();
+	clear_bit(nr, addr);
+}
+
 /*
  * WARNING: non atomic version.
  */
@@ -81,6 +89,13 @@ __clear_bit(unsigned long nr, volatile void * addr)
 }
 
 static inline void
+__clear_bit_unlock(unsigned long nr, volatile void * addr)
+{
+	smp_mb();
+	__clear_bit(nr, addr);
+}
+
+static inline void
 change_bit(unsigned long nr, volatile void * addr)
 {
 	unsigned long temp;
@@ -139,6 +154,33 @@ test_and_set_bit(unsigned long nr, volatile void *addr)
 	return oldbit != 0;
 }
 
+static inline int
+test_and_set_bit_lock(unsigned long nr, volatile void *addr)
+{
+	unsigned long oldbit;
+	unsigned long temp;
+	int *m = ((int *) addr) + (nr >> 5);
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%4\n"
+	"	and %0,%3,%2\n"
+	"	bne %2,2f\n"
+	"	xor %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,3f\n"
+	"2:\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
+	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
+
+	return oldbit != 0;
+}
+
 /*
  * WARNING: non atomic version.
  */
@@ -376,7 +418,6 @@ static inline unsigned int hweight8(unsigned int w)
 #else
 #include <asm-generic/bitops/hweight.h>
 #endif
-#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */
 