Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug   16
-rw-r--r--  lib/Makefile         2
-rw-r--r--  lib/bitmap.c       163
-rw-r--r--  lib/cpumask.c       45
-rw-r--r--  lib/radix-tree.c    49
-rw-r--r--  lib/swiotlb.c       32
6 files changed, 212 insertions, 95 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a314e663d517..7e70ab13e191 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -78,13 +78,17 @@ config SCHEDSTATS
 	  this adds.
 
 config DEBUG_SLAB
-	bool "Debug memory allocations"
+	bool "Debug slab memory allocations"
 	depends on DEBUG_KERNEL && SLAB
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
 	  memory. This can make kmalloc/kfree-intensive workloads much slower.
 
+config DEBUG_SLAB_LEAK
+	bool "Memory leak debugging"
+	depends on DEBUG_SLAB
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT
@@ -195,6 +199,16 @@ config FRAME_POINTER
 	  some architectures or if you use external debuggers.
 	  If you don't debug the kernel, you can say N.
 
+config UNWIND_INFO
+	bool "Compile the kernel with frame unwind information"
+	depends on !IA64
+	depends on !MODULES || !(MIPS || PARISC || PPC || SUPERH || SPARC64 || V850)
+	help
+	  If you say Y here the resulting kernel image will be slightly larger
+	  but not slower, and it will give very useful debugging information.
+	  If you don't debug the kernel, you can say N, but we may not be able
+	  to solve problems without frame unwind information or frame pointers.
+
 config FORCED_INLINING
 	bool "Force gcc to inline functions marked 'inline'"
 	depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 648b2c1242fd..f827e3c24ec0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -7,6 +7,8 @@ lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
 	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
 	 sha1.o
 
+lib-$(CONFIG_SMP) += cpumask.o
+
 lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
 
 obj-y += sort.o parser.o halfmd4.o iomap_copy.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 48e708381d44..8acab0e176ef 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -676,84 +676,143 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
 }
 EXPORT_SYMBOL(bitmap_bitremap);
 
-/**
- * bitmap_find_free_region - find a contiguous aligned mem region
- *	@bitmap: an array of unsigned longs corresponding to the bitmap
- *	@bits: number of bits in the bitmap
- *	@order: region size to find (size is actually 1<<order)
- *
- * This is used to allocate a memory region from a bitmap.  The idea is
- * that the region has to be 1<<order sized and 1<<order aligned (this
- * makes the search algorithm much faster).
- *
- * The region is marked as set bits in the bitmap if a free one is
- * found.
- *
- * Returns either beginning of region or negative error
- */
-int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
-{
-	unsigned long mask;
-	int pages = 1 << order;
-	int i;
-
-	if(pages > BITS_PER_LONG)
-		return -EINVAL;
-
-	/* make a mask of the order */
-	mask = (1ul << (pages - 1));
-	mask += mask - 1;
-
-	/* run up the bitmap pages bits at a time */
-	for (i = 0; i < bits; i += pages) {
-		int index = i/BITS_PER_LONG;
-		int offset = i - (index * BITS_PER_LONG);
-		if((bitmap[index] & (mask << offset)) == 0) {
-			/* set region in bimap */
-			bitmap[index] |= (mask << offset);
-			return i;
-		}
-	}
-	return -ENOMEM;
-}
-EXPORT_SYMBOL(bitmap_find_free_region);
-
-/**
- * bitmap_release_region - release allocated bitmap region
- *	@bitmap: a pointer to the bitmap
- *	@pos: the beginning of the region
- *	@order: the order of the bits to release (number is 1<<order)
- *
- * This is the complement to __bitmap_find_free_region and releases
- * the found region (by clearing it in the bitmap).
- */
-void bitmap_release_region(unsigned long *bitmap, int pos, int order)
-{
-	int pages = 1 << order;
-	unsigned long mask = (1ul << (pages - 1));
-	int index = pos/BITS_PER_LONG;
-	int offset = pos - (index * BITS_PER_LONG);
-	mask += mask - 1;
-	bitmap[index] &= ~(mask << offset);
-}
-EXPORT_SYMBOL(bitmap_release_region);
-
-int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
-{
-	int pages = 1 << order;
-	unsigned long mask = (1ul << (pages - 1));
-	int index = pos/BITS_PER_LONG;
-	int offset = pos - (index * BITS_PER_LONG);
-
-	/* We don't do regions of pages > BITS_PER_LONG.  The
-	 * algorithm would be a simple look for multiple zeros in the
-	 * array, but there's no driver today that needs this.  If you
-	 * trip this BUG(), you get to code it... */
-	BUG_ON(pages > BITS_PER_LONG);
-	mask += mask - 1;
-	if (bitmap[index] & (mask << offset))
-		return -EBUSY;
-	bitmap[index] |= (mask << offset);
-	return 0;
-}
-EXPORT_SYMBOL(bitmap_allocate_region);
+/*
+ * Common code for bitmap_*_region() routines.
+ *	bitmap: array of unsigned longs corresponding to the bitmap
+ *	pos: the beginning of the region
+ *	order: region size (log base 2 of number of bits)
+ *	reg_op: operation(s) to perform on that region of bitmap
+ *
+ * Can set, verify and/or release a region of bits in a bitmap,
+ * depending on which combination of REG_OP_* flag bits is set.
+ *
+ * A region of a bitmap is a sequence of bits in the bitmap, of
+ * some size '1 << order' (a power of two), aligned to that same
+ * '1 << order' power of two.
+ *
+ * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
+ * Returns 0 in all other cases and reg_ops.
+ */
+
+enum {
+	REG_OP_ISFREE,		/* true if region is all zero bits */
+	REG_OP_ALLOC,		/* set all bits in region */
+	REG_OP_RELEASE,		/* clear all bits in region */
+};
+
+static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
+{
+	int nbits_reg;		/* number of bits in region */
+	int index;		/* index first long of region in bitmap */
+	int offset;		/* bit offset region in bitmap[index] */
+	int nlongs_reg;		/* num longs spanned by region in bitmap */
+	int nbitsinlong;	/* num bits of region in each spanned long */
+	unsigned long mask;	/* bitmask for one long of region */
+	int i;			/* scans bitmap by longs */
+	int ret = 0;		/* return value */
+
+	/*
+	 * Either nlongs_reg == 1 (for small orders that fit in one long)
+	 * or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
+	 */
+	nbits_reg = 1 << order;
+	index = pos / BITS_PER_LONG;
+	offset = pos - (index * BITS_PER_LONG);
+	nlongs_reg = BITS_TO_LONGS(nbits_reg);
+	nbitsinlong = min(nbits_reg, BITS_PER_LONG);
+
+	/*
+	 * Can't do "mask = (1UL << nbitsinlong) - 1", as that
+	 * overflows if nbitsinlong == BITS_PER_LONG.
+	 */
+	mask = (1UL << (nbitsinlong - 1));
+	mask += mask - 1;
+	mask <<= offset;
+
+	switch (reg_op) {
+	case REG_OP_ISFREE:
+		for (i = 0; i < nlongs_reg; i++) {
+			if (bitmap[index + i] & mask)
+				goto done;
+		}
+		ret = 1;	/* all bits in region free (zero) */
+		break;
+
+	case REG_OP_ALLOC:
+		for (i = 0; i < nlongs_reg; i++)
+			bitmap[index + i] |= mask;
+		break;
+
+	case REG_OP_RELEASE:
+		for (i = 0; i < nlongs_reg; i++)
+			bitmap[index + i] &= ~mask;
+		break;
+	}
+done:
+	return ret;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ *	@bitmap: array of unsigned longs corresponding to the bitmap
+ *	@bits: number of bits in the bitmap
+ *	@order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one).  Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Return the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+{
+	int pos;		/* scans bitmap by regions of size order */
+
+	for (pos = 0; pos < bits; pos += (1 << order))
+		if (__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+			break;
+	if (pos == bits)
+		return -ENOMEM;
+	__reg_op(bitmap, pos, order, REG_OP_ALLOC);
+	return pos;
+}
+EXPORT_SYMBOL(bitmap_find_free_region);
+
+/**
+ * bitmap_release_region - release allocated bitmap region
+ *	@bitmap: array of unsigned longs corresponding to the bitmap
+ *	@pos: beginning of bit region to release
+ *	@order: region size (log base 2 of number of bits) to release
+ *
+ * This is the complement to __bitmap_find_free_region and releases
+ * the found region (by clearing it in the bitmap).
+ *
+ * No return value.
+ */
+void bitmap_release_region(unsigned long *bitmap, int pos, int order)
+{
+	__reg_op(bitmap, pos, order, REG_OP_RELEASE);
+}
+EXPORT_SYMBOL(bitmap_release_region);
+
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ *	@bitmap: array of unsigned longs corresponding to the bitmap
+ *	@pos: beginning of bit region to allocate
+ *	@order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Return 0 on success, or -EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
+int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+{
+	if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+		return -EBUSY;
+	__reg_op(bitmap, pos, order, REG_OP_ALLOC);
+	return 0;
+}
+EXPORT_SYMBOL(bitmap_allocate_region);
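
For illustration only (not part of the patch): a minimal userspace sketch of the overflow-safe mask computation that __reg_op() documents above. The helper name region_mask and the demo values are hypothetical.

/* Sketch: build a mask of 'nbitsinlong' one-bits without the undefined
 * behaviour of '1UL << BITS_PER_LONG' that the naive form would hit. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG ((int)(sizeof(unsigned long) * CHAR_BIT))

static unsigned long region_mask(int nbitsinlong)
{
	/* "mask = (1UL << nbitsinlong) - 1" overflows when
	 * nbitsinlong == BITS_PER_LONG, so build it in two steps. */
	unsigned long mask = 1UL << (nbitsinlong - 1);
	mask += mask - 1;
	return mask;
}

int main(void)
{
	printf("%lx\n", region_mask(4));             /* prints f */
	printf("%lx\n", region_mask(BITS_PER_LONG)); /* all ones, no UB */
	return 0;
}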
diff --git a/lib/cpumask.c b/lib/cpumask.c
new file mode 100644
index 000000000000..3a67dc5ada7d
--- /dev/null
+++ b/lib/cpumask.c
@@ -0,0 +1,45 @@
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+
+int __first_cpu(const cpumask_t *srcp)
+{
+	return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
+}
+EXPORT_SYMBOL(__first_cpu);
+
+int __next_cpu(int n, const cpumask_t *srcp)
+{
+	return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
+}
+EXPORT_SYMBOL(__next_cpu);
+
+/*
+ * Find the highest possible smp_processor_id()
+ *
+ * Note: if we're prepared to assume that cpu_possible_map never changes
+ * (reasonable) then this function should cache its return value.
+ */
+int highest_possible_processor_id(void)
+{
+	unsigned int cpu;
+	unsigned highest = 0;
+
+	for_each_cpu_mask(cpu, cpu_possible_map)
+		highest = cpu;
+	return highest;
+}
+EXPORT_SYMBOL(highest_possible_processor_id);
+
+int __any_online_cpu(const cpumask_t *mask)
+{
+	int cpu;
+
+	for_each_cpu_mask(cpu, *mask) {
+		if (cpu_online(cpu))
+			break;
+	}
+	return cpu;
+}
+EXPORT_SYMBOL(__any_online_cpu);
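
For illustration only (not part of the patch): a minimal userspace sketch of the iteration pattern that for_each_cpu_mask() builds from __first_cpu()/__next_cpu() above. NR_CPUS, the 64-bit mask, and next_cpu here are hypothetical stand-ins.

#include <stdio.h>

#define NR_CPUS 64

/* Like __next_cpu(): first set bit after n, clamped to NR_CPUS. */
static int next_cpu(int n, unsigned long long mask)
{
	int cpu;

	for (cpu = n + 1; cpu < NR_CPUS; cpu++)
		if (mask & (1ULL << cpu))
			return cpu;
	return NR_CPUS;	/* the min_t(int, NR_CPUS, ...) clamp */
}

int main(void)
{
	unsigned long long online = 0x15ULL;	/* CPUs 0, 2, 4 set */
	int cpu;

	for (cpu = next_cpu(-1, online); cpu < NR_CPUS;
	     cpu = next_cpu(cpu, online))
		printf("cpu %d\n", cpu);	/* prints 0, 2, 4 */
	return 0;
}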
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1e5b17dc7e3d..7097bb239e40 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -37,7 +37,6 @@
 #else
 #define RADIX_TREE_MAP_SHIFT	3	/* For more stressful testing */
 #endif
-#define RADIX_TREE_TAGS		2
 
 #define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
 #define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)
@@ -48,7 +47,7 @@
 struct radix_tree_node {
 	unsigned int	count;
 	void		*slots[RADIX_TREE_MAP_SIZE];
-	unsigned long	tags[RADIX_TREE_TAGS][RADIX_TREE_TAG_LONGS];
+	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
 struct radix_tree_path {
@@ -135,17 +134,20 @@ out:
 	return ret;
 }
 
-static inline void tag_set(struct radix_tree_node *node, int tag, int offset)
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+		int offset)
 {
 	__set_bit(offset, node->tags[tag]);
 }
 
-static inline void tag_clear(struct radix_tree_node *node, int tag, int offset)
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+		int offset)
 {
 	__clear_bit(offset, node->tags[tag]);
 }
 
-static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+		int offset)
 {
 	return test_bit(offset, node->tags[tag]);
 }
@@ -154,7 +156,7 @@ static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
  * Returns 1 if any slot in the node has this tag set.
  * Otherwise returns 0.
  */
-static inline int any_tag_set(struct radix_tree_node *node, int tag)
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
 {
 	int idx;
 	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
@@ -180,7 +182,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 {
 	struct radix_tree_node *node;
 	unsigned int height;
-	char tags[RADIX_TREE_TAGS];
+	char tags[RADIX_TREE_MAX_TAGS];
 	int tag;
 
 	/* Figure out what the height should be. */
@@ -197,7 +199,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 	 * Prepare the tag status of the top-level node for propagation
 	 * into the newly-pushed top-level node(s)
 	 */
-	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 		tags[tag] = 0;
 		if (any_tag_set(root->rnode, tag))
 			tags[tag] = 1;
@@ -211,7 +213,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 		node->slots[0] = root->rnode;
 
 		/* Propagate the aggregated tag info into the new root */
-		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 			if (tags[tag])
 				tag_set(node, tag, 0);
 		}
@@ -349,14 +351,15 @@ EXPORT_SYMBOL(radix_tree_lookup);
  *	@index:		index key
  *	@tag:		tag index
  *
- * Set the search tag corresponging to @index in the radix tree.  From
+ * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
+ * corresponding to @index in the radix tree.  From
  * the root all the way down to the leaf node.
  *
  * Returns the address of the tagged item.  Setting a tag on a not-present
  * item is a bug.
  */
 void *radix_tree_tag_set(struct radix_tree_root *root,
-			unsigned long index, int tag)
+			unsigned long index, unsigned int tag)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *slot;
@@ -390,7 +393,8 @@ EXPORT_SYMBOL(radix_tree_tag_set);
  *	@index:		index key
  *	@tag:		tag index
  *
- * Clear the search tag corresponging to @index in the radix tree.  If
+ * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
+ * corresponding to @index in the radix tree.  If
  * this causes the leaf node to have no tags set then clear the tag in the
  * next-to-leaf node, etc.
  *
@@ -398,7 +402,7 @@ EXPORT_SYMBOL(radix_tree_tag_set);
  * has the same return value and semantics as radix_tree_lookup().
  */
 void *radix_tree_tag_clear(struct radix_tree_root *root,
-			unsigned long index, int tag)
+			unsigned long index, unsigned int tag)
 {
 	struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
 	struct radix_tree_node *slot;
@@ -450,7 +454,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
  * radix_tree_tag_get - get a tag on a radix tree node
  *	@root:		radix tree root
  *	@index:		index key
- *	@tag:		tag index
+ *	@tag:		tag index (< RADIX_TREE_MAX_TAGS)
  *
  * Return values:
  *
@@ -459,7 +463,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
  *  -1: tag present, unset
  */
 int radix_tree_tag_get(struct radix_tree_root *root,
-			unsigned long index, int tag)
+			unsigned long index, unsigned int tag)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *slot;
@@ -592,7 +596,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
  */
 static unsigned int
 __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
-	unsigned int max_items, unsigned long *next_index, int tag)
+	unsigned int max_items, unsigned long *next_index, unsigned int tag)
 {
 	unsigned int nr_found = 0;
 	unsigned int shift;
@@ -646,7 +650,7 @@ out:
  *	@results:	where the results of the lookup are placed
  *	@first_index:	start the lookup from this key
  *	@max_items:	place up to this many items at *results
- *	@tag:		the tag index
+ *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
  *
  * Performs an index-ascending scan of the tree for present items which
  * have the tag indexed by @tag set.  Places the items at *@results and
@@ -654,7 +658,8 @@ out:
  */
 unsigned int
 radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
-		unsigned long first_index, unsigned int max_items, int tag)
+		unsigned long first_index, unsigned int max_items,
+		unsigned int tag)
 {
 	const unsigned long max_index = radix_tree_maxindex(root->height);
 	unsigned long cur_index = first_index;
@@ -716,7 +721,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	struct radix_tree_node *slot;
 	unsigned int height, shift;
 	void *ret = NULL;
-	char tags[RADIX_TREE_TAGS];
+	char tags[RADIX_TREE_MAX_TAGS];
 	int nr_cleared_tags;
 	int tag;
 	int offset;
@@ -751,7 +756,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	 * Clear all tags associated with the just-deleted item
 	 */
 	nr_cleared_tags = 0;
-	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 		tags[tag] = 1;
 		if (tag_get(pathp->node, tag, pathp->offset)) {
 			tag_clear(pathp->node, tag, pathp->offset);
@@ -763,7 +768,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	}
 
 	for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
-		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 			if (tags[tag])
 				continue;
 
@@ -801,7 +806,7 @@ EXPORT_SYMBOL(radix_tree_delete);
  *	@root:		radix tree root
  *	@tag:		tag to test
  */
-int radix_tree_tagged(struct radix_tree_root *root, int tag)
+int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
 {
 	struct radix_tree_node *rnode;
 	rnode = root->rnode;
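
For illustration only (not part of the patch): a minimal userspace sketch of the per-node tag storage that this rename (RADIX_TREE_TAGS to RADIX_TREE_MAX_TAGS) covers — one small bitmap per tag, one bit per slot. The struct and sizes here are hypothetical simplifications of the kernel's node layout.

#include <stdio.h>
#include <string.h>

#define MAP_SIZE  64	/* slots per node (illustrative) */
#define MAX_TAGS  2	/* e.g. dirty and writeback */
#define BPL       (8 * (int)sizeof(unsigned long))
#define TAG_LONGS (MAP_SIZE / BPL)

struct node {
	unsigned long tags[MAX_TAGS][TAG_LONGS];
};

/* Like tag_set()/tag_get() above: set or test one slot's bit. */
static void tag_set(struct node *n, unsigned int tag, int offset)
{
	n->tags[tag][offset / BPL] |= 1UL << (offset % BPL);
}

static int tag_get(struct node *n, unsigned int tag, int offset)
{
	return !!(n->tags[tag][offset / BPL] & (1UL << (offset % BPL)));
}

int main(void)
{
	struct node n;

	memset(&n, 0, sizeof(n));
	tag_set(&n, 0, 5);	/* mark slot 5 with tag 0 */
	printf("%d %d\n", tag_get(&n, 0, 5), tag_get(&n, 1, 5)); /* 1 0 */
	return 0;
}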
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 0af497b6b9a8..10625785eefd 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -296,8 +296,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	else
 		stride = 1;
 
-	if (!nslots)
-		BUG();
+	BUG_ON(!nslots);
 
 	/*
 	 * Find suitable number of IO TLB entries size that will fit this
@@ -416,14 +415,14 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
 			memcpy(buffer, dma_addr, size);
-		else if (dir != DMA_TO_DEVICE)
-			BUG();
+		else
+			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
 			memcpy(dma_addr, buffer, size);
-		else if (dir != DMA_FROM_DEVICE)
-			BUG();
+		else
+			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
 	default:
 		BUG();
@@ -529,8 +528,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 	unsigned long dev_addr = virt_to_phys(ptr);
 	void *map;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	/*
 	 * If the pointer passed in happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
@@ -592,8 +590,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
@@ -616,8 +613,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
@@ -648,8 +644,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 {
 	char *dma_addr = phys_to_virt(dev_addr) + offset;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
@@ -696,8 +691,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 	unsigned long dev_addr;
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
@@ -730,8 +724,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 {
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
@@ -753,8 +746,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 {
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
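
For illustration only (not part of the patch): a minimal userspace sketch of the open-coded-check to BUG_ON() rewrite applied throughout swiotlb.c above. assert() stands in for the kernel's BUG(); the function name map_single_check is hypothetical.

#include <assert.h>

#define BUG_ON(cond) assert(!(cond))	/* rough userspace analogue */

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE,
			  DMA_FROM_DEVICE, DMA_NONE };

static void map_single_check(enum dma_data_direction dir)
{
	/* Before: if (dir == DMA_NONE) BUG(); */
	BUG_ON(dir == DMA_NONE);	/* After: one line, same effect */
}

int main(void)
{
	map_single_check(DMA_TO_DEVICE);	/* passes silently */
	return 0;
}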