 arch/v850/Kconfig         |   6 +
 include/asm-v850/bitops.h | 220 +-
 2 files changed, 17 insertions(+), 209 deletions(-)
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index e7fc3e500342..37ec644603ab 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -16,6 +16,12 @@ config RWSEM_GENERIC_SPINLOCK
 config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default n
+config GENERIC_FIND_NEXT_BIT
+	bool
+	default y
+config GENERIC_HWEIGHT
+	bool
+	default y
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
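
Aside, not part of the patch: CONFIG_GENERIC_FIND_NEXT_BIT and CONFIG_GENERIC_HWEIGHT let the architecture drop its private bit-search and population-count code and pick up the shared implementations instead. A minimal sketch of the interface v850 now relies on, assuming the 2.6.17-era generic headers; the out-of-line bodies come from the common lib/ code when these symbols are set:

/* Sketch only, not part of this patch: roughly what the generic headers
 * declare once GENERIC_FIND_NEXT_BIT and GENERIC_HWEIGHT are enabled.
 * The bodies are provided by the shared lib/ code, not per-arch copies.
 */
extern unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset);
extern unsigned long find_next_zero_bit(const unsigned long *addr,
					unsigned long size, unsigned long offset);

extern unsigned int hweight32(unsigned int w);
extern unsigned int hweight16(unsigned int w);
extern unsigned int hweight8(unsigned int w);
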
diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h
index 44d596e792e7..1f6fd5ab4177 100644
--- a/include/asm-v850/bitops.h
+++ b/include/asm-v850/bitops.h
@@ -22,25 +22,11 @@
 
 #ifdef __KERNEL__
 
-/*
- * The __ functions are not atomic
- */
+#include <asm-generic/bitops/ffz.h>
 
 /*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
+ * The __ functions are not atomic
  */
-static inline unsigned long ffz (unsigned long word)
-{
-	unsigned long result = 0;
-
-	while (word & 1) {
-		result++;
-		word >>= 1;
-	}
-	return result;
-}
-
 
 /* In the following constant-bit-op macros, a "g" constraint is used when
    we really need an integer ("i" constraint).  This is to avoid
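
Aside, not part of the patch: the open-coded ffz() removed above is now supplied by the generic header. If memory serves, the 2.6.17-era <asm-generic/bitops/ffz.h> expresses find-first-zero via __ffs on the complemented word, which matches the removed loop for any word other than ~0UL (ffz stays undefined there, as before):

/* Sketch of the generic replacement (assumed 2.6.17-era header contents).
 * The lowest clear bit of x is the lowest set bit of ~x, so e.g.
 * ffz(0x07) == __ffs(~0x07) == __ffs(0xfffffff8) == 3.
 */
#define ffz(x)  __ffs(~(x))
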
@@ -153,203 +139,19 @@ static inline int __test_bit (int nr, const void *addr)
 #define smp_mb__before_clear_bit()	barrier ()
 #define smp_mb__after_clear_bit()	barrier ()
 
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
 
-#define find_first_zero_bit(addr, size) \
-  find_next_zero_bit ((addr), (size), 0)
-
-static inline int find_next_zero_bit(const void *addr, int size, int offset)
-{
-	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
-	unsigned long result = offset & ~31UL;
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 31UL;
-	if (offset) {
-		tmp = * (p++);
-		tmp |= ~0UL >> (32-offset);
-		if (size < 32)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= 32;
-		result += 32;
-	}
-	while (size & ~31UL) {
-		if (~ (tmp = * (p++)))
-			goto found_middle;
-		result += 32;
-		size -= 32;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
- found_first:
-	tmp |= ~0UL << size;
- found_middle:
-	return result + ffz (tmp);
-}
-
-
-/* This is the same as generic_ffs, but we can't use that because it's
-   inline and the #include order mucks things up. */
-static inline int generic_ffs_for_find_next_bit(int x)
-{
-	int r = 1;
-
-	if (!x)
-		return 0;
-	if (!(x & 0xffff)) {
-		x >>= 16;
-		r += 16;
-	}
-	if (!(x & 0xff)) {
-		x >>= 8;
-		r += 8;
-	}
-	if (!(x & 0xf)) {
-		x >>= 4;
-		r += 4;
-	}
-	if (!(x & 3)) {
-		x >>= 2;
-		r += 2;
-	}
-	if (!(x & 1)) {
-		x >>= 1;
-		r += 1;
-	}
-	return r;
-}
-
-/*
- * Find next one bit in a bitmap reasonably efficiently.
- */
-static __inline__ unsigned long find_next_bit(const unsigned long *addr,
-	unsigned long size, unsigned long offset)
-{
-	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
-	unsigned int result = offset & ~31UL;
-	unsigned int tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset &= 31UL;
-	if (offset) {
-		tmp = *p++;
-		tmp &= ~0UL << offset;
-		if (size < 32)
-			goto found_first;
-		if (tmp)
-			goto found_middle;
-		size -= 32;
-		result += 32;
-	}
-	while (size >= 32) {
-		if ((tmp = *p++) != 0)
-			goto found_middle;
-		result += 32;
-		size -= 32;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp &= ~0UL >> (32 - size);
-	if (tmp == 0UL)		/* Are any bits set? */
-		return result + size;	/* Nope. */
-found_middle:
-	return result + generic_ffs_for_find_next_bit(tmp);
-}
-
-/*
- * find_first_bit - find the first set bit in a memory region
- */
-#define find_first_bit(addr, size) \
-	find_next_bit((addr), (size), 0)
-
-
-#define ffs(x) generic_ffs (x)
-#define fls(x) generic_fls (x)
-#define fls64(x) generic_fls64(x)
-#define __ffs(x) ffs(x)
-
-
-/*
- * This is just `generic_ffs' from <linux/bitops.h>, except that it assumes
- * that at least one bit is set, and returns the real index of the bit
- * (rather than the bit index + 1, like ffs does).
- */
-static inline int sched_ffs(int x)
-{
-	int r = 0;
-
-	if (!(x & 0xffff)) {
-		x >>= 16;
-		r += 16;
-	}
-	if (!(x & 0xff)) {
-		x >>= 8;
-		r += 8;
-	}
-	if (!(x & 0xf)) {
-		x >>= 4;
-		r += 4;
-	}
-	if (!(x & 3)) {
-		x >>= 2;
-		r += 2;
-	}
-	if (!(x & 1)) {
-		x >>= 1;
-		r += 1;
-	}
-	return r;
-}
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is set.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-	unsigned offs = 0;
-	while (! *b) {
-		b++;
-		offs += 32;
-	}
-	return sched_ffs (*b) + offs;
-}
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-#define hweight32(x) generic_hweight32 (x)
-#define hweight16(x) generic_hweight16 (x)
-#define hweight8(x) generic_hweight8 (x)
-
-#define ext2_set_bit   __test_and_set_bit
+#include <asm-generic/bitops/ext2-non-atomic.h>
 #define ext2_set_bit_atomic(l,n,a)  test_and_set_bit(n,a)
-#define ext2_clear_bit __test_and_clear_bit
 #define ext2_clear_bit_atomic(l,n,a)  test_and_clear_bit(n,a)
-#define ext2_test_bit  test_bit
-#define ext2_find_first_zero_bit  find_first_zero_bit
-#define ext2_find_next_zero_bit   find_next_zero_bit
 
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit    __test_and_set_bit
-#define minix_set_bit             __set_bit
-#define minix_test_and_clear_bit  __test_and_clear_bit
-#define minix_test_bit            test_bit
-#define minix_find_first_zero_bit find_first_zero_bit
+#include <asm-generic/bitops/minix.h>
 
 #endif /* __KERNEL__ */
 
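
For context, not part of the patch: callers see no API change, since the generic headers keep the same names the removed v850-private code provided. A hypothetical caller, just to illustrate the surviving interface:

/* Hypothetical example, not from the kernel tree.  find_first_zero_bit()
 * is still available after the conversion; it now comes from the generic
 * headers, backed by the shared lib/ implementation selected by the
 * Kconfig change above.
 */
static int first_free_slot(const unsigned long *map, int nbits)
{
	int bit = find_first_zero_bit(map, nbits);
	return (bit < nbits) ? bit : -1;	/* -1 when the map is full */
}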