Diffstat (limited to 'include/asm-ia64/bitops.h')
-rw-r--r--	include/asm-ia64/bitops.h	410
1 file changed, 410 insertions(+), 0 deletions(-)
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
new file mode 100644
index 000000000000..925d54cee475
--- /dev/null
+++ b/include/asm-ia64/bitops.h
@@ -0,0 +1,410 @@
#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
 *	    scheduler patch
 */

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/intrinsics.h>

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 *
 * The address must be (at least) "long" aligned.
 * Note that there are drivers (e.g., eepro100) that use these operations to operate on
 * hw-defined data-structures, so we can't easily change these operations to force a
 * bigger alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
static __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__set_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}
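
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the bit number decomposes as nr >> 5 (which 32-bit word) and nr & 31
 * (which bit within that word), so on a zeroed bitmap of two words:
 *
 *	__u32 map[2] = { 0, 0 };
 *	set_bit(37, map);	// word 1, bit 5: atomic cmpxchg loop
 *	__set_bit(3, map);	// word 0, bit 3: plain read-modify-write
 *	// now map[0] == 0x00000008 and map[1] == 0x00000020.
 */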

/*
 * clear_bit() has "acquire" semantics.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}
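
/*
 * Lock-release sketch (illustrative only): because clear_bit() provides
 * acquire rather than release ordering, an unlock path must insert its
 * own barrier before dropping the lock bit:
 *
 *	owner->count++;			// protected update
 *	smp_mb__before_clear_bit();	// order the update before the release
 *	clear_bit(LOCK_BIT, &owner->flags);
 *
 * LOCK_BIT, owner, count and flags are made-up names for the sketch.
 */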

/**
 * __clear_bit - Clears a bit in memory (non-atomic version)
 * @nr: the bit to clear
 * @addr: the address to start counting from
 */
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
	volatile __u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	*p &= ~m;
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void
__change_bit (int nr, volatile void *addr)
{
	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}
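
/*
 * Sketch of a simple test-and-set lock built on this primitive
 * (illustrative only; the kernel's real spinlocks live elsewhere):
 *
 *	while (test_and_set_bit(0, &lock_word))
 *		cpu_relax();		// bit was already set: spin
 *	// ...critical section...
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &lock_word);	// release
 *
 * lock_word is a made-up variable; cmpxchg_acq supplies the acquire
 * ordering needed on the way into the critical section.
 */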

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_set_bit (int nr, volatile void *addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p |= m;
	return oldbitset;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int
__test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 *p = (__u32 *) addr + (nr >> 5);
	__u32 m = 1 << (nr & 31);
	int oldbitset = (*p & m) != 0;

	*p &= ~m;
	return oldbitset;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}

/*
 * WARNING: non-atomic version of test_and_change_bit().
 */
static __inline__ int
__test_and_change_bit (int nr, void *addr)
{
	__u32 old, bit = (1 << (nr & 31));
	__u32 *m = (__u32 *) addr + (nr >> 5);

	old = *m;
	*m = old ^ bit;
	return (old & bit) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int
test_bit (int nr, const volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}

/**
 * ffz - find the first zero bit in a long word
 * @x: The long word to find the bit in
 *
 * Returns the bit-number (0..63) of the first (least significant) zero bit.
 * Undefined if no zero exists, so code should check against ~0UL first...
 */
static inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt(x & (~x - 1));
	return result;
}

/**
 * __ffs - find first bit in word.
 * @x: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long
__ffs (unsigned long x)
{
	unsigned long result;

	result = ia64_popcnt((x-1) & ~x);
	return result;
}
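
/*
 * Worked example (illustrative only) of the popcnt trick used by both
 * ffz() and __ffs(): (x-1) & ~x keeps exactly the bits below the lowest
 * set bit, so counting them yields its index.  For x = 0x50:
 *
 *	x - 1      = 0x4f	(..01001111)
 *	~x         = 0x...af	(..10101111)
 *	(x-1) & ~x = 0x0f, and ia64_popcnt(0x0f) == 4 == __ffs(0x50).
 *
 * ffz() is the same computation aimed at the lowest *zero* bit:
 * ffz(0x57) == ia64_popcnt(0x57 & (~0x57 - 1)) == ia64_popcnt(0x07) == 3.
 */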

#ifdef __KERNEL__

/*
 * ia64_fls - find the bit-number (0..63) of the last (most significant)
 * set bit in a 64-bit quantity
 * @x: The value to search
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	long double d = x;
	long exp;

	exp = ia64_getf_exp(d);
	return exp - 0xffff;
}
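
/*
 * How the floating-point trick works (illustrative note): converting x
 * to long double normalizes it, so the biased exponent read back by
 * getf.exp is 0xffff (the register format's exponent bias) plus the
 * index of the most significant set bit.  E.g., for x = 0x50 the power
 * of two at or below it is 2^6, the exponent field reads 0xffff + 6,
 * and ia64_fls(0x50) == 6.  The result is undefined for x == 0.
 */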

static inline int
fls (int x)
{
	/* fls() is 1-based: fls(1) == 1, fls(0x80000000) == 32, fls(0) == 0. */
	if (!x)
		return 0;
	return ia64_fls((unsigned int) x) + 1;
}

/*
 * ffs: find first bit set.  This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
 * "int" values only and the result value is the bit number + 1.  ffs(0) is defined to
 * return zero.
 */
#define ffs(x)	__builtin_ffs(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
static __inline__ unsigned long
hweight64 (unsigned long x)
{
	unsigned long result;
	result = ia64_popcnt(x);
	return result;
}

#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful)
#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x)  (unsigned int) hweight64((x) & 0xfful)
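
/*
 * Illustration (not part of the original header): all four variants
 * reduce to a single popcnt after masking, e.g.:
 *
 *	hweight64(0xffffffffffffffffUL)	== 64
 *	hweight32(0xf0f0f0f0)		== 16
 *	hweight8(0xa5)			== 4	// 0b10100101
 */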

#endif /* __KERNEL__ */

extern int __find_next_zero_bit (const void *addr, unsigned long size,
			unsigned long offset);
extern int __find_next_bit(const void *addr, unsigned long size,
			unsigned long offset);

#define find_next_zero_bit(addr, size, offset) \
			__find_next_zero_bit((addr), (size), (offset))
#define find_next_bit(addr, size, offset) \
			__find_next_bit((addr), (size), (offset))

/*
 * The optimizer actually does good code for this case..
 */
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)

#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
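
/*
 * Typical iteration pattern over a bitmap (illustrative only; `map' and
 * NBITS are made-up names):
 *
 *	int bit;
 *	for (bit = find_first_bit(map, NBITS);
 *	     bit < NBITS;
 *	     bit = find_next_bit(map, NBITS, bit + 1)) {
 *		// `bit' is the index of a set bit
 *	}
 */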

#ifdef __KERNEL__

#define ext2_set_bit			test_and_set_bit
#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
#define ext2_clear_bit			test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

static inline int
sched_find_first_bit (unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return 64 + __ffs(b[1]);
	return __ffs(b[2]) + 128;
}
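
/*
 * Context (illustrative): the O(1) scheduler's priority bitmap spans
 * three 64-bit words, and the caller guarantees at least one bit is
 * set, so this unrolled scan avoids a generic find_first_bit() call:
 *
 *	unsigned long prio_map[3] = { 0, 0x10, 0 };	// bit 68 set
 *	// sched_find_first_bit(prio_map) == 64 + __ffs(0x10) == 68
 */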

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */