-rw-r--r--	arch/s390/Kconfig                 1
-rw-r--r--	arch/s390/include/asm/bitops.h  600
-rw-r--r--	arch/s390/kernel/Makefile         2
-rw-r--r--	arch/s390/kernel/bitmap.c        48
-rw-r--r--	arch/s390/lib/Makefile            2
-rw-r--r--	arch/s390/lib/find.c             77
6 files changed, 126 insertions(+), 604 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 7143793859fa..a02177fb5ec1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -99,6 +99,7 @@ config S390
 	select CLONE_BACKWARDS2
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CPU_DEVICES if !SMP
+	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL_OLD
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 16df62dde094..e5ca8598c06b 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -1,10 +1,40 @@
 /*
- * S390 version
- * Copyright IBM Corp. 1999
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Copyright IBM Corp. 1999,2013
  *
- * Derived from "include/asm-i386/bitops.h"
- * Copyright (C) 1992, Linus Torvalds
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *
+ * The description below was taken in large parts from the powerpc
+ * bitops header file:
+ * Within a word, bits are numbered LSB first. Lot's of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for an
+ * s390x system the bits end up numbered:
+ * |63..............0|127............64|191...........128|255...........196|
+ * and on s390:
+ * |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps, these work on similar bit arrays layouts, but
+ * byte-oriented:
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
+ * number field needs to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * We also have special functions which work with an MSB0 encoding:
+ * on an s390x system the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
+ * number field needs to be reversed compared to the LSB0 encoded bit
+ * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
  *
  */
 
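As a quick illustration of the remapping described in the new comment (not part of the patch; the helper names are made up), on a 64-bit system both conversions are single XORs:

#include <stdio.h>

/* little-endian (byte-oriented) bit number -> native LSB0 bit number:
 * flip bits 3-5 of the bit number, i.e. XOR with 0x38 */
static unsigned long le_to_lsb0(unsigned long nr)
{
	return nr ^ 0x38;
}

/* MSB0 ("left") bit number -> native LSB0 bit number within one
 * 64-bit word: flip bits 0-5, i.e. XOR with 0x3f */
static unsigned long msb0_to_lsb0(unsigned long nr)
{
	return nr ^ 0x3f;
}

int main(void)
{
	printf("%lu\n", le_to_lsb0(0));   /* LE bit 0 is native bit 56 */
	printf("%lu\n", msb0_to_lsb0(0)); /* MSB0 bit 0 is native bit 63 */
	return 0;
}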
@@ -18,46 +48,6 @@
 #include <linux/typecheck.h>
 #include <linux/compiler.h>
 
-/*
- * 32 bit bitops format:
- * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
- * bit 32 is the LSB of *(addr+4). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
- * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- *
- * 64 bit bitops format:
- * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
- * bit 64 is the LSB of *(addr+8). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- * 3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- * 2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
- * 0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- * 7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
- * 6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
- * 5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
- * 4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- */
-
-/* bitmap tables from arch/s390/kernel/bitmap.c */
-extern const char _zb_findmap[];
-extern const char _sb_findmap[];
-
 #ifndef CONFIG_64BIT
 
 #define __BITOPS_OR "or"
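The rationale in the removed comment - generic code mixes open-coded "flags |= (1 << bitnr)" with set_bit(bitnr, flags), so both must agree that bit 0 is the LSB - can be seen in a small user-space sketch (a stand-in for set_bit(), not the kernel helper):

#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

/* minimal LSB0 set_bit() stand-in: bit 0 is the LSB of word 0,
 * bit BITS_PER_LONG is the LSB of word 1 */
static void set_bit_lsb0(unsigned long nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	unsigned long flags = 0, bitmap[2] = { 0, 0 };

	flags |= (1UL << 5);	 /* open-coded, as generic code does */
	set_bit_lsb0(5, bitmap); /* bitop interface */
	assert(flags == bitmap[0]);
	return 0;
}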
@@ -310,522 +300,24 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
 }
 
 /*
- * Optimized find bit helper functions.
- */
-
-/**
- * __ffz_word_loop - find byte offset of first long != -1UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
- */
-static inline unsigned long __ffz_word_loop(const unsigned long *addr,
-					    unsigned long size)
-{
-	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
-	unsigned long bytes = 0;
-
-	asm volatile(
-#ifndef CONFIG_64BIT
-		"	ahi	%1,-1\n"
-		"	sra	%1,5\n"
-		"	jz	1f\n"
-		"0:	c	%2,0(%0,%3)\n"
-		"	jne	1f\n"
-		"	la	%0,4(%0)\n"
-		"	brct	%1,0b\n"
-		"1:\n"
-#else
-		"	aghi	%1,-1\n"
-		"	srag	%1,%1,6\n"
-		"	jz	1f\n"
-		"0:	cg	%2,0(%0,%3)\n"
-		"	jne	1f\n"
-		"	la	%0,8(%0)\n"
-		"	brct	%1,0b\n"
-		"1:\n"
-#endif
-		: "+&a" (bytes), "+&d" (size)
-		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
-		: "cc" );
-	return bytes;
-}
-
-/**
- * __ffs_word_loop - find byte offset of first long != 0UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
- */
-static inline unsigned long __ffs_word_loop(const unsigned long *addr,
-					    unsigned long size)
-{
-	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
-	unsigned long bytes = 0;
-
-	asm volatile(
-#ifndef CONFIG_64BIT
-		"	ahi	%1,-1\n"
-		"	sra	%1,5\n"
-		"	jz	1f\n"
-		"0:	c	%2,0(%0,%3)\n"
-		"	jne	1f\n"
-		"	la	%0,4(%0)\n"
-		"	brct	%1,0b\n"
-		"1:\n"
-#else
-		"	aghi	%1,-1\n"
-		"	srag	%1,%1,6\n"
-		"	jz	1f\n"
-		"0:	cg	%2,0(%0,%3)\n"
-		"	jne	1f\n"
-		"	la	%0,8(%0)\n"
-		"	brct	%1,0b\n"
-		"1:\n"
-#endif
-		: "+&a" (bytes), "+&a" (size)
-		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
-		: "cc" );
-	return bytes;
-}
-
-/**
- * __ffz_word - add number of the first unset bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for unset bits
- */
-static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
-{
-#ifdef CONFIG_64BIT
-	if ((word & 0xffffffff) == 0xffffffff) {
-		word >>= 32;
-		nr += 32;
-	}
-#endif
-	if ((word & 0xffff) == 0xffff) {
-		word >>= 16;
-		nr += 16;
-	}
-	if ((word & 0xff) == 0xff) {
-		word >>= 8;
-		nr += 8;
-	}
-	return nr + _zb_findmap[(unsigned char) word];
-}
-
-/**
- * __ffs_word - add number of the first set bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for set bits
- */
-static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
-{
-#ifdef CONFIG_64BIT
-	if ((word & 0xffffffff) == 0) {
-		word >>= 32;
-		nr += 32;
-	}
-#endif
-	if ((word & 0xffff) == 0) {
-		word >>= 16;
-		nr += 16;
-	}
-	if ((word & 0xff) == 0) {
-		word >>= 8;
-		nr += 8;
-	}
-	return nr + _sb_findmap[(unsigned char) word];
-}
-
-
-/**
- * __load_ulong_be - load big endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_be(const unsigned long *p,
-					    unsigned long offset)
-{
-	p = (unsigned long *)((unsigned long) p + offset);
-	return *p;
-}
-
-/**
- * __load_ulong_le - load little endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_le(const unsigned long *p,
-					    unsigned long offset)
-{
-	unsigned long word;
-
-	p = (unsigned long *)((unsigned long) p + offset);
-#ifndef CONFIG_64BIT
-	asm volatile(
-		"	ic	%0,%O1(%R1)\n"
-		"	icm	%0,2,%O1+1(%R1)\n"
-		"	icm	%0,4,%O1+2(%R1)\n"
-		"	icm	%0,8,%O1+3(%R1)"
-		: "=&d" (word) : "Q" (*p) : "cc");
-#else
-	asm volatile(
-		"	lrvg	%0,%1"
-		: "=d" (word) : "m" (*p) );
-#endif
-	return word;
-}
-
-/*
- * The various find bit functions.
- */
-
-/*
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
-	return __ffz_word(0, word);
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs (unsigned long word)
-{
-	return __ffs_word(0, word);
-}
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static inline int ffs(int x)
-{
-	if (!x)
-		return 0;
-	return __ffs_word(1, x);
-}
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned long find_first_zero_bit(const unsigned long *addr,
-						unsigned long size)
-{
-	unsigned long bytes, bits;
-
-	if (!size)
-		return 0;
-	bytes = __ffz_word_loop(addr, size);
-	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
-	return (bits < size) ? bits : size;
-}
-#define find_first_zero_bit find_first_zero_bit
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned long find_first_bit(const unsigned long * addr,
-					   unsigned long size)
-{
-	unsigned long bytes, bits;
-
-	if (!size)
-		return 0;
-	bytes = __ffs_word_loop(addr, size);
-	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
-	return (bits < size) ? bits : size;
-}
-#define find_first_bit find_first_bit
-
-/*
- * Big endian variant whichs starts bit counting from left using
- * the flogr (find leftmost one) instruction.
+ * ATTENTION:
+ * find_first_bit_left() and find_next_bit_left() use MSB0 encoding.
  */
-static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
-{
-	register unsigned long bit asm("2") = val;
-	register unsigned long out asm("3");
-
-	asm volatile (
-		"	.insn	rre,0xb9830000,%[bit],%[bit]\n"
-		: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
-	return nr + bit;
-}
-
-/*
- * 64 bit special left bitops format:
- * order in memory:
- * 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
- * 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
- * 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
- * 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
- * after that follows the next long with bit numbers
- * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
- * 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
- * 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
- * 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
- * The reason for this bit ordering is the fact that
- * the hardware sets bits in a bitmap starting at bit 0
- * and we don't want to scan the bitmap from the 'wrong
- * end'.
- */
-static inline unsigned long find_first_bit_left(const unsigned long *addr,
-						unsigned long size)
-{
-	unsigned long bytes, bits;
-
-	if (!size)
-		return 0;
-	bytes = __ffs_word_loop(addr, size);
-	bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
-	return (bits < size) ? bits : size;
-}
+unsigned long find_first_bit_left(const unsigned long *addr, unsigned long size);
+unsigned long find_next_bit_left(const unsigned long *addr, unsigned long size,
+				 unsigned long offset);
 
-static inline int find_next_bit_left(const unsigned long *addr,
-				     unsigned long size,
-				     unsigned long offset)
-{
-	const unsigned long *p;
-	unsigned long bit, set;
-
-	if (offset >= size)
-		return size;
-	bit = offset & (BITS_PER_LONG - 1);
-	offset -= bit;
-	size -= offset;
-	p = addr + offset / BITS_PER_LONG;
-	if (bit) {
-		set = __flo_word(0, *p & (~0UL >> bit));
-		if (set >= size)
-			return size + offset;
-		if (set < BITS_PER_LONG)
-			return set + offset;
-		offset += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-		p++;
-	}
-	return offset + find_first_bit_left(p, size);
-}
-
-#define for_each_set_bit_left(bit, addr, size)				\
-	for ((bit) = find_first_bit_left((addr), (size));		\
-	     (bit) < (size);						\
-	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_left_cont(bit, addr, size)			\
-	for ((bit) = find_next_bit_left((addr), (size), (bit));	\
-	     (bit) < (size);						\
-	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline int find_next_zero_bit (const unsigned long * addr,
-				      unsigned long size,
-				      unsigned long offset)
-{
-	const unsigned long *p;
-	unsigned long bit, set;
-
-	if (offset >= size)
-		return size;
-	bit = offset & (BITS_PER_LONG - 1);
-	offset -= bit;
-	size -= offset;
-	p = addr + offset / BITS_PER_LONG;
-	if (bit) {
-		/*
-		 * __ffz_word returns BITS_PER_LONG
-		 * if no zero bit is present in the word.
-		 */
-		set = __ffz_word(bit, *p >> bit);
-		if (set >= size)
-			return size + offset;
-		if (set < BITS_PER_LONG)
-			return set + offset;
-		offset += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-		p++;
-	}
-	return offset + find_first_zero_bit(p, size);
-}
-#define find_next_zero_bit find_next_zero_bit
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline int find_next_bit (const unsigned long * addr,
-				 unsigned long size,
-				 unsigned long offset)
-{
-	const unsigned long *p;
-	unsigned long bit, set;
-
-	if (offset >= size)
-		return size;
-	bit = offset & (BITS_PER_LONG - 1);
-	offset -= bit;
-	size -= offset;
-	p = addr + offset / BITS_PER_LONG;
-	if (bit) {
-		/*
-		 * __ffs_word returns BITS_PER_LONG
-		 * if no one bit is present in the word.
-		 */
-		set = __ffs_word(0, *p & (~0UL << bit));
-		if (set >= size)
-			return size + offset;
-		if (set < BITS_PER_LONG)
-			return set + offset;
-		offset += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-		p++;
-	}
-	return offset + find_first_bit(p, size);
-}
-#define find_next_bit find_next_bit
-
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-	return find_first_bit(b, 140);
-}
-
-#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffs.h>
 #include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/fls64.h>
-
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
-
-/*
- * ATTENTION: intel byte ordering convention for ext2 and minix !!
- * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
- * bit 32 is the LSB of (addr+4).
- * That combined with the little endian byte order of Intel gives the
- * following bit order in memory:
- * 07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
- * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
- */
-
-static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
-{
-	unsigned long bytes, bits;
-
-	if (!size)
-		return 0;
-	bytes = __ffz_word_loop(vaddr, size);
-	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
-	return (bits < size) ? bits : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
-					unsigned long offset)
-{
-	unsigned long *addr = vaddr, *p;
-	unsigned long bit, set;
-
-	if (offset >= size)
-		return size;
-	bit = offset & (BITS_PER_LONG - 1);
-	offset -= bit;
-	size -= offset;
-	p = addr + offset / BITS_PER_LONG;
-	if (bit) {
-		/*
-		 * s390 version of ffz returns BITS_PER_LONG
-		 * if no zero bit is present in the word.
-		 */
-		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
-		if (set >= size)
-			return size + offset;
-		if (set < BITS_PER_LONG)
-			return set + offset;
-		offset += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-		p++;
-	}
-	return offset + find_first_zero_bit_le(p, size);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
-{
-	unsigned long bytes, bits;
-
-	if (!size)
-		return 0;
-	bytes = __ffs_word_loop(vaddr, size);
-	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
-	return (bits < size) ? bits : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline int find_next_bit_le(void *vaddr, unsigned long size,
-				   unsigned long offset)
-{
-	unsigned long *addr = vaddr, *p;
-	unsigned long bit, set;
-
-	if (offset >= size)
-		return size;
-	bit = offset & (BITS_PER_LONG - 1);
-	offset -= bit;
-	size -= offset;
-	p = addr + offset / BITS_PER_LONG;
-	if (bit) {
-		/*
-		 * s390 version of ffz returns BITS_PER_LONG
-		 * if no zero bit is present in the word.
-		 */
-		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
-		if (set >= size)
-			return size + offset;
-		if (set < BITS_PER_LONG)
-			return set + offset;
-		offset += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-		p++;
-	}
-	return offset + find_first_bit_le(p, size);
-}
-#define find_next_bit_le find_next_bit_le
-
+#include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/le.h>
-
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
 
 #endif /* _S390_BITOPS_H */
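For reference, the removed __ffz_word()/__ffs_word() helpers narrowed the search word by halves and finished with a byte-wide lookup in the _zb_findmap[]/_sb_findmap[] tables. A rough plain-C sketch of the 64-bit __ffz_word() (illustration only, with the table lookup replaced by a loop):

/* returns nr + position of the first zero bit in word (nr + 64 if none) */
static unsigned long ffz_word_sketch(unsigned long nr, unsigned long word)
{
	unsigned int i;

	if ((word & 0xffffffff) == 0xffffffff) {	/* lower half all ones? */
		word >>= 32;
		nr += 32;
	}
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	for (i = 0; i < 8; i++)		/* stands in for _zb_findmap[word & 0xff] */
		if (!(word & (1UL << i)))
			break;
	return nr + i;
}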
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4bb2a4656163..2403303cfed7 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 
-obj-y	:= bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y	:= traps.o time.o process.o base.o early.o setup.o vtime.o
 obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y	+= debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
 obj-y	+= sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c
deleted file mode 100644
index 5cdb813fd77f..000000000000
--- a/arch/s390/kernel/bitmap.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
- * See include/asm/{bitops.h|posix_types.h} for details
- *
- * Copyright IBM Corp. 1999, 2009
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
- */
-
-#include <linux/bitops.h>
-#include <linux/module.h>
-
-const char _zb_findmap[] = {
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-	0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
-EXPORT_SYMBOL(_zb_findmap);
-
-const char _sb_findmap[] = {
-	8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-	4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
-EXPORT_SYMBOL(_sb_findmap);
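The deleted tables are plain lookup data: _zb_findmap[i] is the position of the first zero bit in byte i (8 if the byte is 0xff), and _sb_findmap[i] is the position of the first set bit (8 if the byte is 0). A throwaway generator sketch (illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	int i, zb, sb;

	for (i = 0; i < 256; i++) {
		for (zb = 0; zb < 8; zb++)	/* first zero bit in byte i */
			if (!(i & (1 << zb)))
				break;
		for (sb = 0; sb < 8; sb++)	/* first set bit in byte i */
			if (i & (1 << sb))
				break;
		printf("_zb_findmap[%d] = %d, _sb_findmap[%d] = %d\n",
		       i, zb, i, sb);
	}
	return 0;
}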
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 20b0e97a7df2..e506c5fb6f6a 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
+lib-y += delay.o string.o uaccess_std.o uaccess_pt.o find.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
 obj-$(CONFIG_64BIT) += mem64.o
 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c
new file mode 100644
index 000000000000..8963929b06b9
--- /dev/null
+++ b/arch/s390/lib/find.c
@@ -0,0 +1,77 @@
+/*
+ * MSB0 numbered special bitops handling.
+ *
+ * On s390x the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The reason for this bit numbering is the fact that the hardware sets bits
+ * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
+ * from the 'wrong end'.
+ */
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+
+unsigned long find_first_bit_left(const unsigned long *addr, unsigned long size)
+{
+	const unsigned long *p = addr;
+	unsigned long result = 0;
+	unsigned long tmp;
+
+	while (size & ~(BITS_PER_LONG - 1)) {
+		if ((tmp = *(p++)))
+			goto found;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
+	if (!tmp)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found:
+	return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_first_bit_left);
+
+unsigned long find_next_bit_left(const unsigned long *addr, unsigned long size,
+				 unsigned long offset)
+{
+	const unsigned long *p = addr + (offset / BITS_PER_LONG);
+	unsigned long result = offset & ~(BITS_PER_LONG - 1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= (~0UL >> offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+found_first:
+	tmp &= (~0UL << (BITS_PER_LONG - size));
+	if (!tmp)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_next_bit_left);
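A usage sketch for the two exported functions (hypothetical caller, mirroring the for_each_set_bit_left() macro that the old header provided):

/* walk all set bits of an MSB0 bitmap, e.g. one filled in by hardware
 * starting at bit 0, the leftmost bit of the first word */
static void walk_left_bits(const unsigned long *bitmap, unsigned long bits)
{
	unsigned long bit;

	for (bit = find_first_bit_left(bitmap, bits);
	     bit < bits;
	     bit = find_next_bit_left(bitmap, bits, bit + 1))
		handle_bit(bit);	/* hypothetical per-bit handler */
}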