author	Dmitry Torokhov <dtor_core@ameritech.net>	2006-04-02 00:08:05 -0500
committer	Dmitry Torokhov <dtor_core@ameritech.net>	2006-04-02 00:08:05 -0500
commit	95d465fd750897ab32462a6702fbfe1b122cbbc0 (patch)
tree	65c38b2f11c51bb6932e44dd6c92f15b0091abfe /lib
parent	642fde17dceceb56c7ba2762733ac688666ae657 (diff)
parent	683aa4012f53b2ada0f430487e05d37b0d94e90a (diff)
Manual merge with Linus.

Conflicts:
	arch/powerpc/kernel/setup-common.c
	drivers/input/keyboard/hil_kbd.c
	drivers/input/mouse/hil_ptr.c
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	31
-rw-r--r--	lib/Makefile	3
-rw-r--r--	lib/bitmap.c	182
-rw-r--r--	lib/cpumask.c	45
-rw-r--r--	lib/extable.c	1
-rw-r--r--	lib/find_next_bit.c	177
-rw-r--r--	lib/hweight.c	53
-rw-r--r--	lib/kobject.c	60
-rw-r--r--	lib/kobject_uevent.c	2
-rw-r--r--	lib/kref.c	7
-rw-r--r--	lib/radix-tree.c	49
-rw-r--r--	lib/reed_solomon/reed_solomon.c	11
-rw-r--r--	lib/string.c	1
-rw-r--r--	lib/swiotlb.c	32
14 files changed, 485 insertions(+), 169 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a314e663d517..d57fd9181b18 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -78,13 +78,17 @@ config SCHEDSTATS
 	  this adds.
 
 config DEBUG_SLAB
-	bool "Debug memory allocations"
+	bool "Debug slab memory allocations"
 	depends on DEBUG_KERNEL && SLAB
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
 	  memory. This can make kmalloc/kfree-intensive workloads much slower.
 
+config DEBUG_SLAB_LEAK
+	bool "Memory leak debugging"
+	depends on DEBUG_SLAB
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT
@@ -153,22 +157,9 @@ config DEBUG_INFO
 
 	  If unsure, say N.
 
-config DEBUG_IOREMAP
-	bool "Enable ioremap() debugging"
-	depends on DEBUG_KERNEL && PARISC
-	help
-	  Enabling this option will cause the kernel to distinguish between
-	  ioremapped and physical addresses. It will print a backtrace (at
-	  most one every 10 seconds), hopefully allowing you to see which
-	  drivers need work. Fixing all these problems is a prerequisite
-	  for turning on USE_HPPA_IOREMAP. The warnings are harmless;
-	  the kernel has enough information to fix the broken drivers
-	  automatically, but we'd like to make it more efficient by not
-	  having to do that.
-
 config DEBUG_FS
 	bool "Debug Filesystem"
-	depends on DEBUG_KERNEL && SYSFS
+	depends on SYSFS
 	help
 	  debugfs is a virtual file system that kernel developers use to put
 	  debugging files into. Enable this option to be able to read and
@@ -195,6 +186,16 @@ config FRAME_POINTER
 	  some architectures or if you use external debuggers.
 	  If you don't debug the kernel, you can say N.
 
+config UNWIND_INFO
+	bool "Compile the kernel with frame unwind information"
+	depends on !IA64
+	depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || SPARC64 || V850)
+	help
+	  If you say Y here the resulting kernel image will be slightly larger
+	  but not slower, and it will give very useful debugging information.
+	  If you don't debug the kernel, you can say N, but we may not be able
+	  to solve problems without frame unwind information or frame pointers.
+
 config FORCED_INLINING
 	bool "Force gcc to inline functions marked 'inline'"
 	depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 648b2c1242fd..b830c9a15541 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -7,6 +7,8 @@ lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
 	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
 	 sha1.o
 
+lib-$(CONFIG_SMP) += cpumask.o
+
 lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
 
 obj-y += sort.o parser.o halfmd4.o iomap_copy.o
@@ -21,6 +23,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
+lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 48e708381d44..ed2ae3b0cd06 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -253,33 +253,18 @@ int __bitmap_subset(const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_subset);
 
-#if BITS_PER_LONG == 32
 int __bitmap_weight(const unsigned long *bitmap, int bits)
 {
 	int k, w = 0, lim = bits/BITS_PER_LONG;
 
 	for (k = 0; k < lim; k++)
-		w += hweight32(bitmap[k]);
+		w += hweight_long(bitmap[k]);
 
 	if (bits % BITS_PER_LONG)
-		w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
+		w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
 
 	return w;
 }
-#else
-int __bitmap_weight(const unsigned long *bitmap, int bits)
-{
-	int k, w = 0, lim = bits/BITS_PER_LONG;
-
-	for (k = 0; k < lim; k++)
-		w += hweight64(bitmap[k]);
-
-	if (bits % BITS_PER_LONG)
-		w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
-
-	return w;
-}
-#endif
 EXPORT_SYMBOL(__bitmap_weight);
 
 /*
@@ -676,84 +661,143 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
 }
 EXPORT_SYMBOL(bitmap_bitremap);
 
-/**
- * bitmap_find_free_region - find a contiguous aligned mem region
- * @bitmap: an array of unsigned longs corresponding to the bitmap
- * @bits: number of bits in the bitmap
- * @order: region size to find (size is actually 1<<order)
+/*
+ * Common code for bitmap_*_region() routines.
+ *	bitmap: array of unsigned longs corresponding to the bitmap
+ *	pos: the beginning of the region
+ *	order: region size (log base 2 of number of bits)
+ *	reg_op: operation(s) to perform on that region of bitmap
  *
- * This is used to allocate a memory region from a bitmap. The idea is
- * that the region has to be 1<<order sized and 1<<order aligned (this
- * makes the search algorithm much faster).
+ * Can set, verify and/or release a region of bits in a bitmap,
+ * depending on which combination of REG_OP_* flag bits is set.
  *
- * The region is marked as set bits in the bitmap if a free one is
- * found.
+ * A region of a bitmap is a sequence of bits in the bitmap, of
+ * some size '1 << order' (a power of two), aligned to that same
+ * '1 << order' power of two.
  *
- * Returns either beginning of region or negative error
+ * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
+ * Returns 0 in all other cases and reg_ops.
  */
-int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
-{
-	unsigned long mask;
-	int pages = 1 << order;
-	int i;
 
-	if(pages > BITS_PER_LONG)
-		return -EINVAL;
+enum {
+	REG_OP_ISFREE,		/* true if region is all zero bits */
+	REG_OP_ALLOC,		/* set all bits in region */
+	REG_OP_RELEASE,		/* clear all bits in region */
+};
 
-	/* make a mask of the order */
-	mask = (1ul << (pages - 1));
+static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+{
+	int nbits_reg;		/* number of bits in region */
+	int index;		/* index first long of region in bitmap */
+	int offset;		/* bit offset region in bitmap[index] */
+	int nlongs_reg;		/* num longs spanned by region in bitmap */
+	int nbitsinlong;	/* num bits of region in each spanned long */
+	unsigned long mask;	/* bitmask for one long of region */
+	int i;			/* scans bitmap by longs */
+	int ret = 0;		/* return value */
+
+	/*
+	 * Either nlongs_reg == 1 (for small orders that fit in one long)
+	 * or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
+	 */
+	nbits_reg = 1 << order;
+	index = pos / BITS_PER_LONG;
+	offset = pos - (index * BITS_PER_LONG);
+	nlongs_reg = BITS_TO_LONGS(nbits_reg);
+	nbitsinlong = min(nbits_reg, BITS_PER_LONG);
+
+	/*
+	 * Can't do "mask = (1UL << nbitsinlong) - 1", as that
+	 * overflows if nbitsinlong == BITS_PER_LONG.
+	 */
+	mask = (1UL << (nbitsinlong - 1));
 	mask += mask - 1;
+	mask <<= offset;
 
-	/* run up the bitmap pages bits at a time */
-	for (i = 0; i < bits; i += pages) {
-		int index = i/BITS_PER_LONG;
-		int offset = i - (index * BITS_PER_LONG);
-		if((bitmap[index] & (mask << offset)) == 0) {
-			/* set region in bimap */
-			bitmap[index] |= (mask << offset);
-			return i;
+	switch (reg_op) {
+	case REG_OP_ISFREE:
+		for (i = 0; i < nlongs_reg; i++) {
+			if (bitmap[index + i] & mask)
+				goto done;
 		}
+		ret = 1;	/* all bits in region free (zero) */
+		break;
+
+	case REG_OP_ALLOC:
+		for (i = 0; i < nlongs_reg; i++)
+			bitmap[index + i] |= mask;
+		break;
+
+	case REG_OP_RELEASE:
+		for (i = 0; i < nlongs_reg; i++)
+			bitmap[index + i] &= ~mask;
+		break;
 	}
-	return -ENOMEM;
+done:
+	return ret;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ *	@bitmap: array of unsigned longs corresponding to the bitmap
+ *	@bits: number of bits in the bitmap
+ *	@order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one).  Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Return the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+{
+	int pos;		/* scans bitmap by regions of size order */
+
+	for (pos = 0; pos < bits; pos += (1 << order))
+		if (__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+			break;
+	if (pos == bits)
+		return -ENOMEM;
+	__reg_op(bitmap, pos, order, REG_OP_ALLOC);
+	return pos;
 }
 EXPORT_SYMBOL(bitmap_find_free_region);
 
 /**
  * bitmap_release_region - release allocated bitmap region
- * @bitmap: a pointer to the bitmap
- * @pos: the beginning of the region
- * @order: the order of the bits to release (number is 1<<order)
+ *	@bitmap: array of unsigned longs corresponding to the bitmap
+ *	@pos: beginning of bit region to release
+ *	@order: region size (log base 2 of number of bits) to release
  *
  * This is the complement to __bitmap_find_free_region and releases
  * the found region (by clearing it in the bitmap).
+ *
+ * No return value.
  */
 void bitmap_release_region(unsigned long *bitmap, int pos, int order)
 {
-	int pages = 1 << order;
-	unsigned long mask = (1ul << (pages - 1));
-	int index = pos/BITS_PER_LONG;
-	int offset = pos - (index * BITS_PER_LONG);
-	mask += mask - 1;
-	bitmap[index] &= ~(mask << offset);
+	__reg_op(bitmap, pos, order, REG_OP_RELEASE);
 }
 EXPORT_SYMBOL(bitmap_release_region);
 
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ *	@bitmap: array of unsigned longs corresponding to the bitmap
+ *	@pos: beginning of bit region to allocate
+ *	@order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Return 0 on success, or -EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
 int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
 {
-	int pages = 1 << order;
-	unsigned long mask = (1ul << (pages - 1));
-	int index = pos/BITS_PER_LONG;
-	int offset = pos - (index * BITS_PER_LONG);
-
-	/* We don't do regions of pages > BITS_PER_LONG.  The
-	 * algorithm would be a simple look for multiple zeros in the
-	 * array, but there's no driver today that needs this.  If you
-	 * trip this BUG(), you get to code it... */
-	BUG_ON(pages > BITS_PER_LONG);
-	mask += mask - 1;
-	if (bitmap[index] & (mask << offset))
+	if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
 		return -EBUSY;
-	bitmap[index] |= (mask << offset);
+	__reg_op(bitmap, pos, order, REG_OP_ALLOC);
 	return 0;
 }
 EXPORT_SYMBOL(bitmap_allocate_region);
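
The invariant behind all three region routines is unchanged: an order-N region is 1 << N bits long and aligned on a 1 << N bit boundary, which is why the scan in bitmap_find_free_region() can step by whole regions. A minimal caller sketch (the bitmap name and sizes are illustrative, not part of this patch):

	DECLARE_BITMAP(pool, 64);
	int pos;

	/* order 3: an 8-bit region, so pos comes back as 0, 8, 16, ... */
	pos = bitmap_find_free_region(pool, 64, 3);
	if (pos < 0)
		return pos;			/* -ENOMEM: nothing free */
	/* ... bits pos..pos+7 now belong to this caller ... */
	bitmap_release_region(pool, pos, 3);	/* hand the region back */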
diff --git a/lib/cpumask.c b/lib/cpumask.c
new file mode 100644
index 000000000000..3a67dc5ada7d
--- /dev/null
+++ b/lib/cpumask.c
@@ -0,0 +1,45 @@
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+
+int __first_cpu(const cpumask_t *srcp)
+{
+	return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
+}
+EXPORT_SYMBOL(__first_cpu);
+
+int __next_cpu(int n, const cpumask_t *srcp)
+{
+	return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
+}
+EXPORT_SYMBOL(__next_cpu);
+
+/*
+ * Find the highest possible smp_processor_id()
+ *
+ * Note: if we're prepared to assume that cpu_possible_map never changes
+ * (reasonable) then this function should cache its return value.
+ */
+int highest_possible_processor_id(void)
+{
+	unsigned int cpu;
+	unsigned highest = 0;
+
+	for_each_cpu_mask(cpu, cpu_possible_map)
+		highest = cpu;
+	return highest;
+}
+EXPORT_SYMBOL(highest_possible_processor_id);
+
+int __any_online_cpu(const cpumask_t *mask)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, *mask) {
+		if (cpu_online(cpu))
+			break;
+	}
+	return cpu;
+}
+EXPORT_SYMBOL(__any_online_cpu);
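
These helpers exist so the cpumask iteration macros stay out of line on SMP builds; for_each_cpu_mask() of this era expands to a first_cpu()/next_cpu() loop. A usage sketch (mask contents illustrative):

	cpumask_t mask = CPU_MASK_ALL;
	int cpu;

	for (cpu = first_cpu(mask); cpu < NR_CPUS; cpu = next_cpu(cpu, mask))
		printk("cpu %d is in the mask\n", cpu);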
diff --git a/lib/extable.c b/lib/extable.c
index 18df57c029df..01c08b5836f5 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -1,5 +1,4 @@
 /*
- * lib/extable.c
  * Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c.
  *
  * Copyright (C) 2004 Paul Mackerras, IBM Corp.
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index c05b4b19cf6c..bda0d71a2514 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -11,48 +11,171 @@
 
 #include <linux/bitops.h>
 #include <linux/module.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
 
-int find_next_bit(const unsigned long *addr, int size, int offset)
+#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+			    unsigned long offset)
 {
-	const unsigned long *base;
-	const int NBITS = sizeof(*addr) * 8;
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
 	unsigned long tmp;
 
-	base = addr;
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
 	if (offset) {
-		int suboffset;
+		tmp = *(p++);
+		tmp &= (~0UL << offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
 
-		addr += offset / NBITS;
+found_first:
+	tmp &= (~0UL >> (BITS_PER_LONG - size));
+	if (tmp == 0UL)		/* Are any bits set? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + __ffs(tmp);
+}
 
-		suboffset = offset % NBITS;
-		if (suboffset) {
-			tmp = *addr;
-			tmp >>= suboffset;
-			if (tmp)
-				goto finish;
-		}
+EXPORT_SYMBOL(find_next_bit);
 
-		addr++;
+/*
+ * This implementation of find_{first,next}_zero_bit was stolen from
+ * Linus' asm-alpha/bitops.h.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+				 unsigned long offset)
+{
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG-1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset %= BITS_PER_LONG;
+	if (offset) {
+		tmp = *(p++);
+		tmp |= ~0UL >> (BITS_PER_LONG - offset);
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
+	}
+	while (size & ~(BITS_PER_LONG-1)) {
+		if (~(tmp = *(p++)))
+			goto found_middle;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
 	}
+	if (!size)
+		return result;
+	tmp = *p;
+
+found_first:
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size;	/* Nope. */
+found_middle:
+	return result + ffz(tmp);
+}
+
+EXPORT_SYMBOL(find_next_zero_bit);
 
-	while ((tmp = *addr) == 0)
-		addr++;
+#ifdef __BIG_ENDIAN
 
-	offset = (addr - base) * NBITS;
+/* include/linux/byteorder does not support "unsigned long" type */
+static inline unsigned long ext2_swabp(const unsigned long * x)
+{
+#if BITS_PER_LONG == 64
+	return (unsigned long) __swab64p((u64 *) x);
+#elif BITS_PER_LONG == 32
+	return (unsigned long) __swab32p((u32 *) x);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+/* include/linux/byteorder doesn't support "unsigned long" type */
+static inline unsigned long ext2_swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+	return (unsigned long) __swab64((u64) y);
+#elif BITS_PER_LONG == 32
+	return (unsigned long) __swab32((u32) y);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
 
- finish:
-	/* count the remaining bits without using __ffs() since that takes a 32-bit arg */
-	while (!(tmp & 0xff)) {
-		offset += 8;
-		tmp >>= 8;
+unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned
+		long size, unsigned long offset)
+{
+	const unsigned long *p = addr + BITOP_WORD(offset);
+	unsigned long result = offset & ~(BITS_PER_LONG - 1);
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= (BITS_PER_LONG - 1UL);
+	if (offset) {
+		tmp = ext2_swabp(p++);
+		tmp |= (~0UL >> (BITS_PER_LONG - offset));
+		if (size < BITS_PER_LONG)
+			goto found_first;
+		if (~tmp)
+			goto found_middle;
+		size -= BITS_PER_LONG;
+		result += BITS_PER_LONG;
 	}
 
-	while (!(tmp & 1)) {
-		offset++;
-		tmp >>= 1;
+	while (size & ~(BITS_PER_LONG - 1)) {
+		if (~(tmp = *(p++)))
+			goto found_middle_swap;
+		result += BITS_PER_LONG;
+		size -= BITS_PER_LONG;
 	}
+	if (!size)
+		return result;
+	tmp = ext2_swabp(p);
+found_first:
+	tmp |= ~0UL << size;
+	if (tmp == ~0UL)	/* Are any bits zero? */
+		return result + size; /* Nope. Skip ffz */
+found_middle:
+	return result + ffz(tmp);
 
-	return offset;
+found_middle_swap:
+	return result + ffz(ext2_swab(tmp));
 }
 
-EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(generic_find_next_zero_le_bit);
+
+#endif /* __BIG_ENDIAN */
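
Both scanners share the same contract: they return @size when no qualifying bit remains, so callers loop until the return value hits the limit. A sketch of the usual set-bit walk (the map contents are illustrative):

	unsigned long map[2] = { 0x11UL, 0x4UL };	/* bits 0, 4 and 66 set */
	unsigned long bit;

	for (bit = find_next_bit(map, 128, 0); bit < 128;
	     bit = find_next_bit(map, 128, bit + 1))
		printk("bit %lu is set\n", bit);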
diff --git a/lib/hweight.c b/lib/hweight.c
new file mode 100644
index 000000000000..438257671708
--- /dev/null
+++ b/lib/hweight.c
@@ -0,0 +1,53 @@
+#include <linux/module.h>
+#include <asm/types.h>
+
+/**
+ * hweightN - returns the hamming weight of a N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+unsigned int hweight32(unsigned int w)
+{
+	unsigned int res = w - ((w >> 1) & 0x55555555);
+	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+	res = (res + (res >> 4)) & 0x0F0F0F0F;
+	res = res + (res >> 8);
+	return (res + (res >> 16)) & 0x000000FF;
+}
+EXPORT_SYMBOL(hweight32);
+
+unsigned int hweight16(unsigned int w)
+{
+	unsigned int res = w - ((w >> 1) & 0x5555);
+	res = (res & 0x3333) + ((res >> 2) & 0x3333);
+	res = (res + (res >> 4)) & 0x0F0F;
+	return (res + (res >> 8)) & 0x00FF;
+}
+EXPORT_SYMBOL(hweight16);
+
+unsigned int hweight8(unsigned int w)
+{
+	unsigned int res = w - ((w >> 1) & 0x55);
+	res = (res & 0x33) + ((res >> 2) & 0x33);
+	return (res + (res >> 4)) & 0x0F;
+}
+EXPORT_SYMBOL(hweight8);
+
+unsigned long hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+	return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+	__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+	res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+	res = res + (res >> 8);
+	res = res + (res >> 16);
+	return (res + (res >> 32)) & 0x00000000000000FFul;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+EXPORT_SYMBOL(hweight64);
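
Each masking step merges adjacent bit counts into fields twice as wide. Tracing hweight8(0xA5), where 0xA5 = 1010 0101 has four bits set:

	unsigned int w = 0xA5;
	unsigned int res = w - ((w >> 1) & 0x55);	/* 0101 0101: pair counts 1,1,1,1 */
	res = (res & 0x33) + ((res >> 2) & 0x33);	/* 0010 0010: nibble counts 2,2 */
	res = (res + (res >> 4)) & 0x0F;		/* 0000 0100 = 4 */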
diff --git a/lib/kobject.c b/lib/kobject.c
index efe67fa96a71..25204a41a9b0 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -194,6 +194,17 @@ int kobject_add(struct kobject * kobj)
 		unlink(kobj);
 		if (parent)
 			kobject_put(parent);
+
+		/* be noisy on error issues */
+		if (error == -EEXIST)
+			printk("kobject_add failed for %s with -EEXIST, "
+			       "don't try to register things with the "
+			       "same name in the same directory.\n",
+			       kobject_name(kobj));
+		else
+			printk("kobject_add failed for %s (%d)\n",
+			       kobject_name(kobj), error);
+		dump_stack();
 	}
 
 	return error;
@@ -207,18 +218,13 @@ int kobject_add(struct kobject * kobj)
 
 int kobject_register(struct kobject * kobj)
 {
-	int error = 0;
+	int error = -EINVAL;
 	if (kobj) {
 		kobject_init(kobj);
 		error = kobject_add(kobj);
-		if (error) {
-			printk("kobject_register failed for %s (%d)\n",
-			       kobject_name(kobj),error);
-			dump_stack();
-		} else
+		if (!error)
 			kobject_uevent(kobj, KOBJ_ADD);
-	} else
-		error = -EINVAL;
+	}
 	return error;
 }
 
@@ -379,6 +385,44 @@ void kobject_put(struct kobject * kobj)
 }
 
 
+static void dir_release(struct kobject *kobj)
+{
+	kfree(kobj);
+}
+
+static struct kobj_type dir_ktype = {
+	.release	= dir_release,
+	.sysfs_ops	= NULL,
+	.default_attrs	= NULL,
+};
+
+/**
+ *	kobject_add_dir - add sub directory of object.
+ *	@parent:	object in which a directory is created.
+ *	@name:	directory name.
+ *
+ *	Add a plain directory object as child of given object.
+ */
+struct kobject *kobject_add_dir(struct kobject *parent, const char *name)
+{
+	struct kobject *k;
+
+	if (!parent)
+		return NULL;
+
+	k = kzalloc(sizeof(*k), GFP_KERNEL);
+	if (!k)
+		return NULL;
+
+	k->parent = parent;
+	k->ktype = &dir_ktype;
+	kobject_set_name(k, name);
+	kobject_register(k);
+
+	return k;
+}
+EXPORT_SYMBOL_GPL(kobject_add_dir);
+
 /**
  * kset_init - initialize a kset for use
  * @k: kset
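
kobject_add_dir() registers a bare kobject whose only job is to show up as a sysfs subdirectory; dir_ktype supplies just enough to free it again. A hypothetical caller sketch (the parent kobject and the "holders" name are illustrative):

	struct kobject *dir;

	dir = kobject_add_dir(&dev->kobj, "holders");
	if (!dir)
		return -ENOMEM;
	/* ... */
	kobject_unregister(dir);	/* drops the directory again */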
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 086a0c6e888e..982226daf939 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -26,6 +26,8 @@
 #define NUM_ENVP	32	/* number of env pointers */
 
 #if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
+u64 uevent_seqnum;
+char uevent_helper[UEVENT_HELPER_PATH_LEN] = "/sbin/hotplug";
 static DEFINE_SPINLOCK(sequence_lock);
 static struct sock *uevent_sock;
 
diff --git a/lib/kref.c b/lib/kref.c
index 0d07cc31c818..4a467faf1367 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -52,7 +52,12 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
 	WARN_ON(release == NULL);
 	WARN_ON(release == (void (*)(struct kref *))kfree);
 
-	if (atomic_dec_and_test(&kref->refcount)) {
+	/*
+	 * if current count is one, we are the last user and can release object
+	 * right now, avoiding an atomic operation on 'refcount'
+	 */
+	if ((atomic_read(&kref->refcount) == 1) ||
+	    (atomic_dec_and_test(&kref->refcount))) {
 		release(kref);
 		return 1;
 	}
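
The new fast path reads the counter first: if it is already 1, the caller holds the only reference, nobody else can legally re-acquire it, and the atomic decrement can be skipped. The embedding pattern this serves looks like the following sketch (struct and names hypothetical):

	struct foo {
		struct kref ref;
		/* ... payload ... */
	};

	static void foo_release(struct kref *kref)
	{
		kfree(container_of(kref, struct foo, ref));
	}

	static void foo_put(struct foo *f)
	{
		kref_put(&f->ref, foo_release);	/* release fires on the last put */
	}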
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1e5b17dc7e3d..7097bb239e40 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -37,7 +37,6 @@
 #else
 #define RADIX_TREE_MAP_SHIFT	3	/* For more stressful testing */
 #endif
-#define RADIX_TREE_TAGS		2
 
 #define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
 #define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)
@@ -48,7 +47,7 @@
 struct radix_tree_node {
 	unsigned int	count;
 	void		*slots[RADIX_TREE_MAP_SIZE];
-	unsigned long	tags[RADIX_TREE_TAGS][RADIX_TREE_TAG_LONGS];
+	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
 struct radix_tree_path {
@@ -135,17 +134,20 @@ out:
 	return ret;
 }
 
-static inline void tag_set(struct radix_tree_node *node, int tag, int offset)
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+		int offset)
 {
 	__set_bit(offset, node->tags[tag]);
 }
 
-static inline void tag_clear(struct radix_tree_node *node, int tag, int offset)
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+		int offset)
 {
 	__clear_bit(offset, node->tags[tag]);
 }
 
-static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+		int offset)
 {
 	return test_bit(offset, node->tags[tag]);
 }
@@ -154,7 +156,7 @@ static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
  * Returns 1 if any slot in the node has this tag set.
  * Otherwise returns 0.
  */
-static inline int any_tag_set(struct radix_tree_node *node, int tag)
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
 {
 	int idx;
 	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
@@ -180,7 +182,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 {
 	struct radix_tree_node *node;
 	unsigned int height;
-	char tags[RADIX_TREE_TAGS];
+	char tags[RADIX_TREE_MAX_TAGS];
 	int tag;
 
 	/* Figure out what the height should be. */
@@ -197,7 +199,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 	 * Prepare the tag status of the top-level node for propagation
 	 * into the newly-pushed top-level node(s)
 	 */
-	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 		tags[tag] = 0;
 		if (any_tag_set(root->rnode, tag))
 			tags[tag] = 1;
@@ -211,7 +213,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 		node->slots[0] = root->rnode;
 
 		/* Propagate the aggregated tag info into the new root */
-		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 			if (tags[tag])
 				tag_set(node, tag, 0);
 		}
@@ -349,14 +351,15 @@ EXPORT_SYMBOL(radix_tree_lookup);
  * @index:	index key
  * @tag:	tag index
  *
- * Set the search tag corresponging to @index in the radix tree.  From
+ * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
+ * corresponding to @index in the radix tree.  From
  * the root all the way down to the leaf node.
  *
  * Returns the address of the tagged item.  Setting a tag on a not-present
  * item is a bug.
  */
 void *radix_tree_tag_set(struct radix_tree_root *root,
-			unsigned long index, int tag)
+			unsigned long index, unsigned int tag)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *slot;
@@ -390,7 +393,8 @@ EXPORT_SYMBOL(radix_tree_tag_set);
  * @index:	index key
  * @tag:	tag index
  *
- * Clear the search tag corresponging to @index in the radix tree.  If
+ * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
+ * corresponding to @index in the radix tree.  If
  * this causes the leaf node to have no tags set then clear the tag in the
  * next-to-leaf node, etc.
  *
@@ -398,7 +402,7 @@ EXPORT_SYMBOL(radix_tree_tag_set);
  * has the same return value and semantics as radix_tree_lookup().
  */
 void *radix_tree_tag_clear(struct radix_tree_root *root,
-			unsigned long index, int tag)
+			unsigned long index, unsigned int tag)
 {
 	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
 	struct radix_tree_node *slot;
@@ -450,7 +454,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
  * radix_tree_tag_get - get a tag on a radix tree node
  * @root:	radix tree root
  * @index:	index key
- * @tag:	tag index
+ * @tag:	tag index (< RADIX_TREE_MAX_TAGS)
  *
  * Return values:
  *
@@ -459,7 +463,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
  *  -1: tag present, unset
  */
 int radix_tree_tag_get(struct radix_tree_root *root,
-			unsigned long index, int tag)
+			unsigned long index, unsigned int tag)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *slot;
@@ -592,7 +596,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
  */
 static unsigned int
 __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
-	unsigned int max_items, unsigned long *next_index, int tag)
+	unsigned int max_items, unsigned long *next_index, unsigned int tag)
 {
 	unsigned int nr_found = 0;
 	unsigned int shift;
@@ -646,7 +650,7 @@ out:
  * @results:	where the results of the lookup are placed
  * @first_index:	start the lookup from this key
  * @max_items:	place up to this many items at *results
- * @tag:	the tag index
+ * @tag:	the tag index (< RADIX_TREE_MAX_TAGS)
  *
  * Performs an index-ascending scan of the tree for present items which
  * have the tag indexed by @tag set.  Places the items at *@results and
@@ -654,7 +658,8 @@ out:
  */
 unsigned int
 radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
-		unsigned long first_index, unsigned int max_items, int tag)
+		unsigned long first_index, unsigned int max_items,
+		unsigned int tag)
 {
 	const unsigned long max_index = radix_tree_maxindex(root->height);
 	unsigned long cur_index = first_index;
@@ -716,7 +721,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	struct radix_tree_node *slot;
 	unsigned int height, shift;
 	void *ret = NULL;
-	char tags[RADIX_TREE_TAGS];
+	char tags[RADIX_TREE_MAX_TAGS];
 	int nr_cleared_tags;
 	int tag;
 	int offset;
@@ -751,7 +756,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	 * Clear all tags associated with the just-deleted item
 	 */
 	nr_cleared_tags = 0;
-	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 		tags[tag] = 1;
 		if (tag_get(pathp->node, tag, pathp->offset)) {
 			tag_clear(pathp->node, tag, pathp->offset);
@@ -763,7 +768,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	}
 
 	for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
-		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 			if (tags[tag])
 				continue;
 
@@ -801,7 +806,7 @@ EXPORT_SYMBOL(radix_tree_delete);
  * @root:	radix tree root
  * @tag:	tag to test
  */
-int radix_tree_tagged(struct radix_tree_root *root, int tag)
+int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
 {
 	struct radix_tree_node *rnode;
 	rnode = root->rnode;
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index f5fef948a415..f8ac9fa95de1 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -44,12 +44,13 @@
 #include <linux/module.h>
 #include <linux/rslib.h>
 #include <linux/slab.h>
+#include <linux/mutex.h>
 #include <asm/semaphore.h>
 
 /* This list holds all currently allocated rs control structures */
 static LIST_HEAD (rslist);
 /* Protection for the list */
-static DECLARE_MUTEX(rslistlock);
+static DEFINE_MUTEX(rslistlock);
 
 /**
  * rs_init - Initialize a Reed-Solomon codec
@@ -161,7 +162,7 @@ errrs:
  */
 void free_rs(struct rs_control *rs)
 {
-	down(&rslistlock);
+	mutex_lock(&rslistlock);
 	rs->users--;
 	if(!rs->users) {
 		list_del(&rs->list);
@@ -170,7 +171,7 @@ void free_rs(struct rs_control *rs)
 		kfree(rs->genpoly);
 		kfree(rs);
 	}
-	up(&rslistlock);
+	mutex_unlock(&rslistlock);
 }
 
 /**
@@ -201,7 +202,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
 	if (nroots < 0 || nroots >= (1<<symsize))
 		return NULL;
 
-	down(&rslistlock);
+	mutex_lock(&rslistlock);
 
 	/* Walk through the list and look for a matching entry */
 	list_for_each(tmp, &rslist) {
@@ -228,7 +229,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
 		list_add(&rs->list, &rslist);
 	}
 out:
-	up(&rslistlock);
+	mutex_unlock(&rslistlock);
 	return rs;
 }
 
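
rslistlock only serializes lookup and refcounting on the shared codec list, so the caller-visible lifecycle is unchanged by the mutex conversion. A sketch (the codec parameters are illustrative, in init_rs()'s symsize/gfpoly/fcr/prim/nroots order):

	struct rs_control *rs;

	rs = init_rs(10, 0x409, 0, 1, 4);	/* 10-bit symbols, 4 ecc roots */
	if (!rs)
		return -ENOMEM;
	/* ... encode_rs8()/decode_rs8() using rs ... */
	free_rs(rs);				/* drops the list reference */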
diff --git a/lib/string.c b/lib/string.c
index 037a48acedbb..b3c28a3f6332 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -403,7 +403,6 @@ char *strpbrk(const char *cs, const char *ct)
 	}
 	return NULL;
 }
-EXPORT_SYMBOL(strpbrk);
 #endif
 
 #ifndef __HAVE_ARCH_STRSEP
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 0af497b6b9a8..10625785eefd 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -296,8 +296,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	else
 		stride = 1;
 
-	if (!nslots)
-		BUG();
+	BUG_ON(!nslots);
 
 	/*
 	 * Find suitable number of IO TLB entries size that will fit this
@@ -416,14 +415,14 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
 			memcpy(buffer, dma_addr, size);
-		else if (dir != DMA_TO_DEVICE)
-			BUG();
+		else
+			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
 			memcpy(dma_addr, buffer, size);
-		else if (dir != DMA_FROM_DEVICE)
-			BUG();
+		else
+			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
 	default:
 		BUG();
@@ -529,8 +528,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 	unsigned long dev_addr = virt_to_phys(ptr);
 	void *map;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	/*
 	 * If the pointer passed in happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
@@ -592,8 +590,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
@@ -616,8 +613,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
@@ -648,8 +644,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 {
 	char *dma_addr = phys_to_virt(dev_addr) + offset;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
@@ -696,8 +691,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 	unsigned long dev_addr;
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
@@ -730,8 +724,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 {
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
@@ -753,8 +746,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 {
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
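
All nine swiotlb hunks are the same mechanical conversion. BUG_ON() is essentially (paraphrasing the asm-generic definition of this era):

	#define BUG_ON(condition) do { if (unlikely((condition) != 0)) BUG(); } while (0)

so the behavior is identical; the assertion just reads as a single line and the condition stays next to the BUG.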