Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/bitops.h      | 46
-rw-r--r--  arch/x86/include/asm/sync_bitops.h | 24
2 files changed, 39 insertions(+), 31 deletions(-)
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 6dfd0195bb55..41639ce8fd63 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,14 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h>
 
+#if BITS_PER_LONG == 32
+# define _BITOPS_LONG_SHIFT 5
+#elif BITS_PER_LONG == 64
+# define _BITOPS_LONG_SHIFT 6
+#else
+# error "Unexpected BITS_PER_LONG"
+#endif
+
 #define BIT_64(n) (U64_C(1) << (n))
 
 /*
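For orientation (not part of the patch): _BITOPS_LONG_SHIFT is log2(BITS_PER_LONG), so a bit number splits into a word index and an in-word offset with one shift and one mask. A minimal sketch with hypothetical helper names:

/* Sketch: decompose a non-negative bit number. On a 64-bit build,
 * _BITOPS_LONG_SHIFT == 6, so bit 200 lands in word 200 >> 6 == 3
 * at in-word offset 200 & 63 == 8. */
static inline unsigned long bitop_word(long nr)
{
        return nr >> _BITOPS_LONG_SHIFT;            /* nr / BITS_PER_LONG */
}

static inline unsigned long bitop_mask(long nr)
{
        return 1UL << (nr & (BITS_PER_LONG - 1));   /* nr % BITS_PER_LONG */
}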
@@ -59,7 +67,7 @@
  * restricted to acting on a single-word quantity.
  */
 static __always_inline void
-set_bit(unsigned int nr, volatile unsigned long *addr)
+set_bit(long nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "orb %1,%0"
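When nr is a compile-time constant, the fast path above sets the bit with a byte-wide locked OR instead of a bts; the hunk's context cuts off before the operands. A plain-C sketch of the byte addressing involved (helper name hypothetical, and without the lock prefix the real asm carries):

static inline void set_bit_const_sketch(long nr, volatile unsigned long *addr)
{
        volatile unsigned char *byte = (volatile unsigned char *)addr + (nr >> 3);

        *byte |= 1 << (nr & 7);     /* kernel: LOCK_PREFIX "orb" on this byte */
}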
@@ -81,7 +89,7 @@ set_bit(unsigned int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static inline void __set_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
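For reference, the non-atomic __set_bit above amounts to a single word-sized OR; a sketch using the new shift constant (name suffixed to mark it as illustration):

static inline void __set_bit_sketch(long nr, volatile unsigned long *addr)
{
        addr[nr >> _BITOPS_LONG_SHIFT] |= 1UL << (nr & (BITS_PER_LONG - 1));
}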
@@ -97,7 +105,7 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * in order to ensure changes are visible on other processors.
  */
 static __always_inline void
-clear_bit(int nr, volatile unsigned long *addr)
+clear_bit(long nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -118,13 +126,13 @@ clear_bit(int nr, volatile unsigned long *addr)
  * clear_bit() is atomic and implies release semantics before the memory
  * operation. It can be used for an unlock.
  */
-static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
+static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	barrier();
 	clear_bit(nr, addr);
 }
 
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
@@ -141,7 +149,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * No memory barrier is required here, because x86 cannot reorder stores past
  * older loads. Same principle as spin_unlock.
  */
-static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
+static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
 	barrier();
 	__clear_bit(nr, addr);
@@ -159,7 +167,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static inline void __change_bit(long nr, volatile unsigned long *addr)
 {
 	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -173,7 +181,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(long nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "xorb %1,%0"
@@ -194,7 +202,7 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -212,7 +220,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  * This is the same as test_and_set_bit on x86.
  */
 static __always_inline int
-test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
@@ -226,7 +234,7 @@ test_and_set_bit_lock(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -245,7 +253,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -272,7 +280,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
  * accessed from a hypervisor on the same CPU if running in a VM: don't change
  * this without also updating arch/x86/kernel/kvm.c
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -284,7 +292,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -304,7 +312,7 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
@@ -315,13 +323,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return oldbit;
 }
 
-static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
 {
-	return ((1UL << (nr % BITS_PER_LONG)) &
-		(addr[nr / BITS_PER_LONG])) != 0;
+	return ((1UL << (nr & (BITS_PER_LONG-1))) &
+		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
-static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
+static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
 {
 	int oldbit;
 
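Why constant_test_bit's body changed along with its prototype: for a signed long, C's division and modulo round toward zero, so nr % BITS_PER_LONG and nr / BITS_PER_LONG force the compiler to emit sign fix-ups for the (invalid) negative case, while the explicit mask and shift say exactly what is meant. A small user-space check of the equivalence over valid, non-negative bit numbers, assuming a 64-bit build:

#include <assert.h>

#define BITS_PER_LONG (8 * (long)sizeof(long))
#define _BITOPS_LONG_SHIFT 6    /* assumes BITS_PER_LONG == 64 */

int main(void)
{
        for (long nr = 0; nr < 4096; nr++) {
                assert((nr % BITS_PER_LONG) == (nr & (BITS_PER_LONG - 1)));
                assert((nr / BITS_PER_LONG) == (nr >> _BITOPS_LONG_SHIFT));
        }
        return 0;
}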
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 9d09b4073b60..05af3b31d522 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -26,9 +26,9 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_set_bit(int nr, volatile unsigned long *addr)
+static inline void sync_set_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; btsl %1,%0"
+	asm volatile("lock; bts %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -44,9 +44,9 @@ static inline void sync_set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
+static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; btrl %1,%0"
+	asm volatile("lock; btr %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -61,9 +61,9 @@ static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_change_bit(int nr, volatile unsigned long *addr)
+static inline void sync_change_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; btcl %1,%0"
+	asm volatile("lock; btc %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -77,11 +77,11 @@ static inline void sync_change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+	asm volatile("lock; bts %2,%1\n\tsbbl %0,%0"
 		     : "=r" (oldbit), "+m" (ADDR)
 		     : "Ir" (nr) : "memory");
 	return oldbit;
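The sbbl %0,%0 following the locked bts is the usual carry-to-mask idiom: bts leaves the old bit value in CF, and subtracting a register from itself with borrow yields 0 when CF was clear and -1 (all ones) when it was set. A non-atomic C model of the result, for reference only (the real code does the test and the set in one locked instruction):

static inline int sync_test_and_set_bit_model(long nr, volatile unsigned long *addr)
{
        volatile unsigned long *word = addr + (nr >> _BITOPS_LONG_SHIFT);
        unsigned long mask = 1UL << (nr & (BITS_PER_LONG - 1));
        int oldbit = (*word & mask) ? -1 : 0;   /* what sbb materializes */

        *word |= mask;
        return oldbit;
}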
@@ -95,11 +95,11 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+	asm volatile("lock; btr %2,%1\n\tsbbl %0,%0"
 		     : "=r" (oldbit), "+m" (ADDR)
 		     : "Ir" (nr) : "memory");
 	return oldbit;
@@ -113,11 +113,11 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+	asm volatile("lock; btc %2,%1\n\tsbbl %0,%0"
 		     : "=r" (oldbit), "+m" (ADDR)
 		     : "Ir" (nr) : "memory");
 	return oldbit;
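Unlike the regular bitops, whose LOCK_PREFIX can be patched away on uniprocessor kernels, the sync_ variants hard-code the lock prefix so they stay atomic even against agents outside this kernel, such as a hypervisor sharing the page. A usage sketch with a made-up shared bitmap:

extern unsigned long shared_flags[4];   /* hypothetical page shared with a hypervisor */

/* Returns 1 if this CPU claimed the slot, 0 if it was already taken. */
static inline int claim_slot(long slot)
{
        return !sync_test_and_set_bit(slot, shared_flags);
}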