Diffstat (limited to 'include/asm-x86/bitops_64.h')
-rw-r--r--   include/asm-x86/bitops_64.h   427
1 file changed, 427 insertions, 0 deletions
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
new file mode 100644
index 000000000000..d4dbbe5f7bd9
--- /dev/null
+++ b/include/asm-x86/bitops_64.h
@@ -0,0 +1,427 @@
#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <asm/alternative.h>

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define ADDR "=m" (*(volatile long *) addr)
#else
#define ADDR "+m" (*(volatile long *) addr)
#endif

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:ADDR
		:"dIr" (nr) : "memory");
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__ volatile(
		"btsl %1,%0"
		:ADDR
		:"dIr" (nr) : "memory");
}
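
/*
 * Usage sketch (illustrative only; the example_* names are hypothetical):
 * use set_bit() when other CPUs may touch the same word concurrently,
 * and __set_bit() when the word is private or already locked.
 */
static __inline__ void example_mark_pending(volatile unsigned long *flags)
{
	set_bit(0, flags);		/* atomic read-modify-write */
}
static __inline__ void example_init_mask(unsigned long *mask)
{
	__set_bit(5, mask);		/* non-atomic: caller owns *mask */
}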

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:ADDR
		:"dIr" (nr));
}

static __inline__ void __clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:ADDR
		:"dIr" (nr));
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
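
/*
 * Usage sketch (illustrative; example_bit_unlock() is a hypothetical
 * name): releasing a hand-rolled bit lock. The barrier orders the
 * critical section's stores before the bit is cleared, as the
 * clear_bit() comment above prescribes.
 */
static __inline__ void example_bit_unlock(volatile unsigned long *word)
{
	smp_mb__before_clear_bit();
	clear_bit(0, word);
}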

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:ADDR
		:"dIr" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:ADDR
		:"dIr" (nr));
}
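
/*
 * Usage sketch (illustrative; hypothetical helper): several CPUs may
 * call this concurrently and each call still inverts bit 1 exactly once.
 */
static __inline__ void example_toggle_parity(volatile unsigned long *flags)
{
	change_bit(1, flags);
}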

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}
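
/*
 * Usage sketch (illustrative; hypothetical helper): a trylock built on
 * test_and_set_bit(). Returns nonzero iff the caller observed the bit
 * clear and therefore owns it now.
 */
static __inline__ int example_bit_trylock(volatile unsigned long *word)
{
	return !test_and_set_bit(0, word);
}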

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}
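
/*
 * Usage sketch (illustrative; hypothetical helper): consume a "work
 * pending" flag exactly once. If producer and consumer race, only the
 * caller that actually clears the set bit sees a nonzero return.
 */
static __inline__ int example_take_pending(volatile unsigned long *flags)
{
	return test_and_clear_bit(0, flags);
}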

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr));
	return oldbit;
}

/* WARNING: non-atomic and can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),ADDR
		:"dIr" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (*(volatile long *)addr),"dIr" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr),(addr)) : \
	 variable_test_bit((nr),(addr)))
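
/*
 * Usage sketch (illustrative; hypothetical helper): a literal @nr makes
 * __builtin_constant_p(nr) true and test_bit() reduces to the pure C
 * constant_test_bit(); a runtime @nr goes through the btl-based
 * variable_test_bit().
 */
static __inline__ int example_is_marked(const volatile unsigned long *map, int nr)
{
	return test_bit(nr, map);	/* runtime nr -> variable_test_bit() */
}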

#undef ADDR

extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
extern long find_next_zero_bit (const unsigned long * addr, long size, long offset);
extern long find_first_bit(const unsigned long * addr, unsigned long size);
extern long find_next_bit(const unsigned long * addr, long size, long offset);
/* Return index of first bit set in val, or max when no bit is set. */
static inline unsigned long __scanbit(unsigned long val, unsigned long max)
{
	asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
	return val;
}

#define find_first_bit(addr,size) \
	((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
	  (__scanbit(*(unsigned long *)addr,(size))) : \
	  find_first_bit(addr,size)))

#define find_next_bit(addr,size,off) \
	((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
	  ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
	  find_next_bit(addr,size,off)))

#define find_first_zero_bit(addr,size) \
	((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
	  (__scanbit(~*(unsigned long *)addr,(size))) : \
	  find_first_zero_bit(addr,size)))

#define find_next_zero_bit(addr,size,off) \
	((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
	  ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
	  find_next_zero_bit(addr,size,off)))
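
/*
 * Usage sketch (illustrative; hypothetical helper): walk every set bit
 * with the find_first_bit()/find_next_bit() pair. When @size is a
 * compile-time constant <= BITS_PER_LONG, each call above collapses
 * into a single bsfq via __scanbit().
 */
static inline unsigned long example_count_set(const unsigned long *map,
					      unsigned long size)
{
	unsigned long n = 0, i;

	for (i = find_first_bit(map, size); i < size;
	     i = find_next_bit(map, size, i + 1))
		n++;
	return n;
}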

/*
 * Find a string of zero bits in a bitmap. Returns -1 when not found.
 */
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);

static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
				  int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, bitmap);
		i++;
	}
}

static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
				      int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__clear_bit(i, bitmap);
		i++;
	}
}
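
/*
 * Usage sketch (illustrative; hypothetical helper) of the pattern these
 * string helpers serve, e.g. an IOMMU-style page allocator: find a run
 * of @len clear bits, mark it busy, and return its start (or -1).
 */
static inline long example_alloc_range(unsigned long *map, long nbits, int len)
{
	long i = find_next_zero_string(map, 0, nbits, len);

	if (i != -1)
		set_bit_string(map, i, len);
	return i;
}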

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
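
/*
 * Illustrative values: ffz(0x0003UL) == 2 (lowest clear bit);
 * ffz(~0UL) is undefined, hence the guard in this hypothetical helper.
 */
static __inline__ unsigned long example_first_free(unsigned long busy)
{
	return (busy == ~0UL) ? ~0UL : ffz(busy);
}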

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}

/*
 * __fls: find last bit set.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __fls(unsigned long word)
{
	__asm__("bsrq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
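
/*
 * Illustrative values: for word == 0x90, __ffs(word) == 4 and
 * __fls(word) == 7. Both are undefined for word == 0, so this
 * hypothetical helper guards that case.
 */
static __inline__ unsigned long example_highest_set(unsigned long word)
{
	return word ? __fls(word) : 0;	/* 0 here means "no bit"; caller beware */
}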

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=&r" (r) : "rm" (x), "r" (-1));	/* earlyclobber keeps %0 distinct from the -1 input */
	return r+1;
}
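
/*
 * Illustrative values: ffs(0) == 0, ffs(1) == 1, ffs(0x8000) == 16.
 * The result is 1-based, unlike __ffs(); the cmovzl substitutes -1
 * when bsfl finds no set bit, so r+1 yields 0 for x == 0.
 */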

/**
 * fls64 - find last bit set in 64 bit word
 * @x: the word to search
 *
 * This is defined the same way as fls.
 */
static __inline__ int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __fls(x) + 1;
}

/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static __inline__ int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=&r" (r) : "rm" (x), "rm" (-1));
	return r+1;
}
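
/*
 * Illustrative values: fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32;
 * fls64(0) == 0 and fls64(1ULL << 40) == 41, via __fls() + 1.
 */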

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock,nr,addr) \
		test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
		test_and_clear_bit((nr),(unsigned long*)addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _X86_64_BITOPS_H */