author	Andrew Morton <akpm@linux-foundation.org>	2008-05-14 19:10:41 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-25 02:51:31 -0400
commit	5136dea5734cfddbc6d7ccb7ead85a3ac7ce3de2 (patch)
tree	f6fe41531da78f36c40130bc32cf33ca1e47ab87 /include/asm-x86/bitops.h
parent	75d3bce2fc0a80f435fe12f2c9ed2632c8ac29e4 (diff)
x86: bitops take an unsigned long *
All (or most) other architectures do this.  So should x86.  Fix.

Cc: Andrea Arcangeli <andrea@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include/asm-x86/bitops.h')
-rw-r--r--	include/asm-x86/bitops.h | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)
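
Background note, not part of the commit: kernel bitmaps are stored as arrays of unsigned long, and the old 'volatile void *' prototypes let a caller pass any pointer type without a diagnostic. A minimal sketch of what the stricter prototype buys; MY_READY and my_flags are hypothetical names, not from this patch:

#include <linux/bitops.h>

#define MY_READY 0			/* hypothetical bit number */

static unsigned long my_flags[1];	/* bitmap storage is unsigned long */

static void mark_ready(void)
{
	set_bit(MY_READY, my_flags);	/* matches volatile unsigned long * */

	/*
	 * Under the old 'volatile void *' prototype, passing e.g. a u32 *
	 * compiled silently even though the generic bitmap layout assumes
	 * unsigned long words; with this patch such a call draws a
	 * compiler warning.
	 */
}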
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index ee4b3ead6a43..7d2494bdc660 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -43,7 +43,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(int nr, volatile void *addr)
+static inline void set_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile(LOCK_PREFIX "bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
@@ -57,7 +57,7 @@ static inline void set_bit(int nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile void *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
@@ -72,7 +72,7 @@ static inline void __set_bit(int nr, volatile void *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile void *addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile(LOCK_PREFIX "btr %1,%0" : ADDR : "Ir" (nr));
 }
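
As the hunk's comment says, clear_bit() is atomic but implies no memory barrier; a hedged usage sketch of the documented pairing with smp_mb__after_clear_bit() and a bit waitqueue (MY_PENDING and the state word are illustrative, not from this file):

#include <linux/bitops.h>
#include <linux/wait.h>

#define MY_PENDING 0			/* hypothetical bit number */

static unsigned long state;

static void finish_work(void)
{
	clear_bit(MY_PENDING, &state);	/* atomic RMW, no barrier implied */
	smp_mb__after_clear_bit();	/* order the clear before the wakeup */
	wake_up_bit(&state, MY_PENDING); /* wake anyone sleeping on the bit */
}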
@@ -85,13 +85,13 @@ static inline void clear_bit(int nr, volatile void *addr)
  * clear_bit() is atomic and implies release semantics before the memory
  * operation. It can be used for an unlock.
  */
-static inline void clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
 {
 	barrier();
 	clear_bit(nr, addr);
 }
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
@@ -108,7 +108,7 @@ static inline void __clear_bit(int nr, volatile void *addr)
  * No memory barrier is required here, because x86 cannot reorder stores past
  * older loads. Same principle as spin_unlock.
  */
-static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
+static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
 {
 	barrier();
 	__clear_bit(nr, addr);
@@ -126,7 +126,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile void *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -140,7 +140,7 @@ static inline void __change_bit(int nr, volatile void *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(int nr, volatile void *addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
 {
 	asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -153,7 +153,7 @@ static inline void change_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -170,7 +170,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile void *addr)
+static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
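
test_and_set_bit_lock() and clear_bit_unlock() pair up as an acquire/release bit lock; a minimal sketch under assumed names (MY_LOCK_BIT and lock_word are illustrative, not from this file):

#include <linux/bitops.h>
#include <asm/processor.h>		/* cpu_relax() */

#define MY_LOCK_BIT 0			/* hypothetical lock bit */

static unsigned long lock_word;

static void my_lock(void)
{
	/* acquire: returns nonzero while another holder owns the bit */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &lock_word))
		cpu_relax();
}

static void my_unlock(void)
{
	/* release: clear_bit_unlock() orders prior stores before the clear */
	clear_bit_unlock(MY_LOCK_BIT, &lock_word);
}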
@@ -184,7 +184,7 @@ static inline int test_and_set_bit_lock(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile void *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -203,7 +203,7 @@ static inline int __test_and_set_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(int nr, volatile void *addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -223,7 +223,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
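
The non-atomic double-underscore variants are only safe when something else serializes access to the bitmap; an illustrative sketch using a hypothetical spinlock:

#include <linux/bitops.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(map_lock);	/* hypothetical external lock */
static unsigned long map[4];		/* 4 * BITS_PER_LONG bits */

static int claim_bit(int nr)
{
	int was_set;

	spin_lock(&map_lock);
	/* non-atomic RMW is fine here: map_lock serializes all users */
	was_set = __test_and_clear_bit(nr, map);
	spin_unlock(&map_lock);

	return was_set;
}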
@@ -235,7 +235,7 @@ static inline int __test_and_clear_bit(int nr, volatile void *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -255,7 +255,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(int nr, volatile void *addr)
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
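
Because test_and_change_bit() atomically flips the bit and returns its prior value, two racing callers can never observe the same old value; a small illustrative sketch (parity is a hypothetical word):

#include <linux/bitops.h>

static unsigned long parity;

/* Atomically flip bit nr and return the value it had before the flip. */
static int toggle_and_report(int nr)
{
	return test_and_change_bit(nr, &parity);
}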
@@ -266,13 +266,13 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 	return oldbit;
 }
 
-static inline int constant_test_bit(int nr, const volatile void *addr)
+static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
 }
 
-static inline int variable_test_bit(int nr, volatile const void *addr)
+static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
 {
 	int oldbit;
 