| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
| commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
| tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-v850/bitops.h | |
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-v850/bitops.h')
-rw-r--r-- | include/asm-v850/bitops.h | 355
1 file changed, 355 insertions, 0 deletions
diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h
new file mode 100644
index 000000000000..7c4ecaf5151c
--- /dev/null
+++ b/include/asm-v850/bitops.h
@@ -0,0 +1,355 @@
/*
 * include/asm-v850/bitops.h -- Bit operations
 *
 * Copyright (C) 2001,02,03,04 NEC Electronics Corporation
 * Copyright (C) 2001,02,03,04 Miles Bader <miles@gnu.org>
 * Copyright (C) 1992 Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#ifndef __V850_BITOPS_H__
#define __V850_BITOPS_H__


#include <linux/config.h>
#include <linux/compiler.h>	/* unlikely */
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* interrupt enable/disable */


#ifdef __KERNEL__

/*
 * The __ functions are not atomic
 */

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
extern __inline__ unsigned long ffz (unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
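
/* For example, ffz (0x0000000b) == 2 (binary 1011 -- bit 2 is the lowest
 * clear bit) and ffz (0x000000ff) == 8.  As the comment above warns, the
 * result is meaningless for ~0UL; this C loop would simply shift the word
 * empty and return 32 on a 32-bit word.
 */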


/* In the following constant-bit-op macros, a "g" constraint is used when
   we really need an integer ("i" constraint). This is to avoid
   warnings/errors from the compiler in the case where the associated
   operand _isn't_ an integer, and shouldn't produce bogus assembly because
   use of that form is protected by a guard statement that checks for
   constants, and should otherwise be removed by the optimizer. This
   _usually_ works -- however, __builtin_constant_p returns true for a
   variable with a known constant value too, and unfortunately gcc will
   happily put the variable in a register and use the register for the "g"
   constraint'd asm operand. To avoid the latter problem, we add a
   constant offset to the operand and subtract it back in the asm code;
   forcing gcc to do arithmetic on the value is usually enough to get it
   to use a real constant value. This is horrible, and ultimately
   unreliable too, but it seems to work for now (hopefully gcc will offer
   us more control in the future, so we can do a better job). */

#define __const_bit_op(op, nr, addr)					\
  ({ __asm__ (op " (%0 - 0x123), %1"					\
	      :: "g" (((nr) & 0x7) + 0x123),				\
		 "m" (*((char *)(addr) + ((nr) >> 3)))			\
	      : "memory"); })
#define __var_bit_op(op, nr, addr)					\
  ({ int __nr = (nr);							\
     __asm__ (op " %0, [%1]"						\
	      :: "r" (__nr & 0x7),					\
		 "r" ((char *)(addr) + (__nr >> 3))			\
	      : "memory"); })
#define __bit_op(op, nr, addr)						\
  ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF)		\
   ? __const_bit_op (op, nr, addr)					\
   : __var_bit_op (op, nr, addr))

#define __set_bit(nr, addr)		__bit_op ("set1", nr, addr)
#define __clear_bit(nr, addr)		__bit_op ("clr1", nr, addr)
#define __change_bit(nr, addr)		__bit_op ("not1", nr, addr)

/* The bit instructions used by `non-atomic' variants are actually atomic. */
#define set_bit				__set_bit
#define clear_bit			__clear_bit
#define change_bit			__change_bit
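
/* Usage sketch (`map' and `i' are illustrative names, not part of this
 * header):
 *
 *	static unsigned long map[2];	-- a 64-bit bitmap
 *	set_bit (5, map);		-- constant nr
 *	clear_bit (37, map);		-- bit 5 of map[1]
 *	change_bit (i, map);		-- variable nr
 *
 * A compile-time constant `nr' no larger than 0x7FFFF takes the
 * __const_bit_op path, encoding the bit number directly in the set1, clr1,
 * or not1 instruction; anything else falls back to __var_bit_op, which
 * passes the bit number in a register.  Both forms are single
 * read-modify-write bit instructions, which is why set_bit and friends can
 * simply alias their double-underscore variants.
 */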


#define __const_tns_bit_op(op, nr, addr)				\
  ({ int __tns_res;							\
     __asm__ __volatile__ (						\
	     "tst1 (%1 - 0x123), %2; setf nz, %0; " op " (%1 - 0x123), %2" \
	     : "=&r" (__tns_res)					\
	     : "g" (((nr) & 0x7) + 0x123),				\
	       "m" (*((char *)(addr) + ((nr) >> 3)))			\
	     : "memory");						\
     __tns_res;								\
  })
#define __var_tns_bit_op(op, nr, addr)					\
  ({ int __nr = (nr);							\
     int __tns_res;							\
     __asm__ __volatile__ (						\
	     "tst1 %1, [%2]; setf nz, %0; " op " %1, [%2]"		\
	     : "=&r" (__tns_res)					\
	     : "r" (__nr & 0x7),					\
	       "r" ((char *)(addr) + (__nr >> 3))			\
	     : "memory");						\
     __tns_res;								\
  })
#define __tns_bit_op(op, nr, addr)					\
  ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF)		\
   ? __const_tns_bit_op (op, nr, addr)					\
   : __var_tns_bit_op (op, nr, addr))
#define __tns_atomic_bit_op(op, nr, addr)				\
  ({ int __tns_atomic_res, __tns_atomic_flags;				\
     local_irq_save (__tns_atomic_flags);				\
     __tns_atomic_res = __tns_bit_op (op, nr, addr);			\
     local_irq_restore (__tns_atomic_flags);				\
     __tns_atomic_res;							\
  })

#define __test_and_set_bit(nr, addr)	__tns_bit_op ("set1", nr, addr)
#define test_and_set_bit(nr, addr)	__tns_atomic_bit_op ("set1", nr, addr)

#define __test_and_clear_bit(nr, addr)	__tns_bit_op ("clr1", nr, addr)
#define test_and_clear_bit(nr, addr)	__tns_atomic_bit_op ("clr1", nr, addr)

#define __test_and_change_bit(nr, addr)	__tns_bit_op ("not1", nr, addr)
#define test_and_change_bit(nr, addr)	__tns_atomic_bit_op ("not1", nr, addr)
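
/* Usage sketch (`flag' is an illustrative name): the test-and-* macros
 * return the bit's previous value, so
 *
 *	static unsigned long flag;
 *	if (!test_and_set_bit (0, &flag))
 *		... the bit was clear and we have now set it ...
 *
 * The plain test_and_* names wrap the tst1/setf/set1 (or clr1/not1)
 * sequence in local_irq_save/local_irq_restore so the read-modify-write
 * cannot be interrupted; the __test_and_* forms omit that protection.
 */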


#define __const_test_bit(nr, addr)					\
  ({ int __test_bit_res;						\
     __asm__ __volatile__ ("tst1 (%1 - 0x123), %2; setf nz, %0"		\
			   : "=r" (__test_bit_res)			\
			   : "g" (((nr) & 0x7) + 0x123),		\
			     "m" (*((const char *)(addr) + ((nr) >> 3)))); \
     __test_bit_res;							\
  })
extern __inline__ int __test_bit (int nr, const void *addr)
{
	int res;
	__asm__ __volatile__ ("tst1 %1, [%2]; setf nz, %0"
			      : "=r" (res)
			      : "r" (nr & 0x7), "r" (addr + (nr >> 3)));
	return res;
}
#define test_bit(nr,addr)						\
  ((__builtin_constant_p (nr) && (unsigned)(nr) <= 0x7FFFF)		\
   ? __const_test_bit ((nr), (addr))					\
   : __test_bit ((nr), (addr)))


/* clear_bit doesn't provide any barrier for the compiler. */
#define smp_mb__before_clear_bit()	barrier ()
#define smp_mb__after_clear_bit()	barrier ()


#define find_first_zero_bit(addr, size) \
  find_next_zero_bit ((addr), (size), 0)

extern __inline__ int find_next_zero_bit (void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = * (p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~ (tmp = * (p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

 found_first:
	tmp |= ~0UL << size;	/* treat bits beyond `size' as ones */
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
 found_middle:
	return result + ffz (tmp);
}
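
/* Example: find_first_zero_bit scans 32-bit words from bit 0 upward, so
 * with the illustrative bitmap
 *
 *	unsigned long map[2] = { 0xffffffff, 0xfffffff3 };
 *
 * find_first_zero_bit (map, 64) == 34 and
 * find_next_zero_bit (map, 64, 35) == 35; when no zero bit lies below
 * `size', a value >= size is returned instead.
 */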


/* This is the same as generic_ffs, but we can't use that because it's
   inline and the #include order mucks things up. */
static inline int generic_ffs_for_find_next_bit(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}

/*
 * Find next one bit in a bitmap reasonably efficiently.
 */
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

 found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
 found_middle:
	/* generic_ffs_for_find_next_bit is one-based (like ffs), so subtract
	   one to yield the conventional zero-based bit number. */
	return result + generic_ffs_for_find_next_bit(tmp) - 1;
}
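
/* Example (illustrative values, using the conventional zero-based result):
 *
 *	unsigned long map[2] = { 0x00000000, 0x00000300 };
 *
 * find_next_bit (map, 64, 0) == 40 and find_next_bit (map, 64, 41) == 41;
 * a return value >= size means no set bit was found in range.
 */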

/*
 * find_first_bit - find the first set bit in a memory region
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)


#define ffs(x) generic_ffs (x)
#define fls(x) generic_fls (x)
/* __ffs is the zero-based counterpart of ffs (undefined for zero), so it
   is ffs minus one rather than a plain alias. */
#define __ffs(x) (ffs (x) - 1)

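
/* For example, with x == 0x28 (binary 101000):
 *
 *	ffs (x)   == 4		-- one-based index of the lowest set bit
 *	__ffs (x) == 3		-- zero-based index of the lowest set bit
 *	fls (x)   == 6		-- one-based index of the highest set bit
 *
 * ffs (0) and fls (0) are 0; __ffs (0) is undefined.
 */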

/*
 * This is just `generic_ffs' from <linux/bitops.h>, except that it assumes
 * that at least one bit is set, and returns the real index of the bit
 * (rather than the bit index + 1, like ffs does).
 */
static inline int sched_ffs(int x)
{
	int r = 0;

	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	unsigned offs = 0;
	while (! *b) {
		b++;
		offs += 32;
	}
	return sched_ffs (*b) + offs;
}
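
/* Example: the scheduler's 140-bit priority bitmap occupies five 32-bit
 * words here.  If only priority 100 is marked -- b[0..2] == 0,
 * b[3] == (1 << 4) -- the loop skips the three zero words and returns
 * sched_ffs (b[3]) + 96 == 100.  At least one bit must be set, exactly as
 * the comment above requires.
 */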

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight32(x) generic_hweight32 (x)
#define hweight16(x) generic_hweight16 (x)
#define hweight8(x) generic_hweight8 (x)
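
/* E.g. hweight32 (0xf0f0f0f0) == 16, hweight16 (0x00ff) == 8 and
 * hweight8 (0x55) == 4; these all expand to the generic_hweightN helpers.
 */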

#define ext2_set_bit			test_and_set_bit
#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
#define ext2_clear_bit			test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit		test_and_set_bit
#define minix_set_bit			set_bit
#define minix_test_and_clear_bit	test_and_clear_bit
#define minix_test_bit			test_bit
#define minix_find_first_zero_bit	find_first_zero_bit

#endif /* __KERNEL__ */

#endif /* __V850_BITOPS_H__ */