author     Len Brown <len.brown@intel.com>   2006-01-27 17:18:29 -0500
committer  Len Brown <len.brown@intel.com>   2006-01-27 17:18:29 -0500
commit     292dd876ee765c478b27c93cc51e93a558ed58bf (patch)
tree       5b740e93253295baee2a9c414a6c66d03d44a9ef /lib
parent     d4ec6c7cc9a15a7a529719bc3b84f46812f9842e (diff)
parent     9fdb62af92c741addbea15545f214a6e89460865 (diff)
Pull release into acpica branch
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                  38
-rw-r--r--  lib/bitmap.c                       89
-rw-r--r--  lib/dec_and_lock.c                 49
-rw-r--r--  lib/find_next_bit.c                 3
-rw-r--r--  lib/radix-tree.c                  143
-rw-r--r--  lib/spinlock_debug.c               18
-rw-r--r--  lib/swiotlb.c                       2
-rw-r--r--  lib/zlib_deflate/deflate.c          6
-rw-r--r--  lib/zlib_deflate/deflate_syms.c     2
-rw-r--r--  lib/zlib_inflate/infblock.c         4
-rw-r--r--  lib/zlib_inflate/infblock.h         4
-rw-r--r--  lib/zlib_inflate/inflate_syms.c     2
-rw-r--r--  lib/zlib_inflate/inflate_sync.c     4
13 files changed, 198 insertions(+), 166 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 80598cfd728c..a314e663d517 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -9,15 +9,9 @@ config PRINTK_TIME
 	  in kernel startup.
 
 
-config DEBUG_KERNEL
-	bool "Kernel debugging"
-	help
-	  Say Y here if you are developing drivers or trying to debug and
-	  identify kernel problems.
-
 config MAGIC_SYSRQ
 	bool "Magic SysRq key"
-	depends on DEBUG_KERNEL && !UML
+	depends on !UML
 	help
 	  If you say Y here, you will have some control over the system even
 	  if the system crashes for example during kernel debugging (e.g., you
@@ -29,6 +23,12 @@ config MAGIC_SYSRQ
 	  keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
 	  unless you really know what this hack does.
 
+config DEBUG_KERNEL
+	bool "Kernel debugging"
+	help
+	  Say Y here if you are developing drivers or trying to debug and
+	  identify kernel problems.
+
 config LOG_BUF_SHIFT
 	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
 	range 12 21
@@ -79,7 +79,7 @@ config SCHEDSTATS
 
 config DEBUG_SLAB
 	bool "Debug memory allocations"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && SLAB
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -95,6 +95,14 @@ config DEBUG_PREEMPT
 	  if kernel code uses it in a preemption-unsafe way. Also, the kernel
 	  will detect preemption count underflows.
 
+config DEBUG_MUTEXES
+	bool "Mutex debugging, deadlock detection"
+	default y
+	depends on DEBUG_KERNEL
+	help
+	  This allows mutex semantics violations and mutex related deadlocks
+	  (lockups) to be detected and reported automatically.
+
 config DEBUG_SPINLOCK
 	bool "Spinlock debugging"
 	depends on DEBUG_KERNEL
@@ -187,6 +195,20 @@ config FRAME_POINTER
 	  some architectures or if you use external debuggers.
 	  If you don't debug the kernel, you can say N.
 
+config FORCED_INLINING
+	bool "Force gcc to inline functions marked 'inline'"
+	depends on DEBUG_KERNEL
+	default y
+	help
+	  This option determines if the kernel forces gcc to inline the functions
+	  developers have marked 'inline'. Doing so takes away freedom from gcc to
+	  do what it thinks is best, which is desirable for the gcc 3.x series of
+	  compilers. The gcc 4.x series have a rewritten inlining algorithm and
+	  disabling this option will generate a smaller kernel there. Hopefully
+	  this algorithm is so good that allowing gcc4 to make the decision can
+	  become the default in the future, until then this option is there to
+	  test gcc for this.
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
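A note on how FORCED_INLINING typically takes effect: the option itself only gates a macro in the compiler headers, which this diff does not show. Assuming the usual gcc-era wiring (an assumption, not part of this commit), the mechanism looks roughly like:

	/* Sketch of the assumed mechanism -- not part of this diff. */
	#ifdef CONFIG_FORCED_INLINING
	# define inline	inline __attribute__((always_inline))
	#endif

With the attribute in force, gcc must inline even where its own heuristics would decline to; without it, gcc 4.x is free to pick, which is why disabling the option can shrink the kernel there.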
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 23d3b1147fe9..48e708381d44 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -519,7 +519,7 @@ EXPORT_SYMBOL(bitmap_parselist);
  *
  * Map the bit at position @pos in @buf (of length @bits) to the
  * ordinal of which set bit it is. If it is not set or if @pos
- * is not a valid bit position, map to zero (0).
+ * is not a valid bit position, map to -1.
  *
  * If for example, just bits 4 through 7 are set in @buf, then @pos
  * values 4 through 7 will get mapped to 0 through 3, respectively,
@@ -531,18 +531,19 @@ EXPORT_SYMBOL(bitmap_parselist);
  */
 static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
 {
-	int ord = 0;
+	int i, ord;
 
-	if (pos >= 0 && pos < bits) {
-		int i;
+	if (pos < 0 || pos >= bits || !test_bit(pos, buf))
+		return -1;
 
-		for (i = find_first_bit(buf, bits);
-		     i < pos;
-		     i = find_next_bit(buf, bits, i + 1))
-			ord++;
-		if (i > pos)
-			ord = 0;
-	}
+	i = find_first_bit(buf, bits);
+	ord = 0;
+	while (i < pos) {
+		i = find_next_bit(buf, bits, i + 1);
+		ord++;
+	}
+	BUG_ON(i != pos);
+
 	return ord;
 }
 
@@ -553,11 +554,12 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
  * @bits: number of valid bit positions in @buf
  *
  * Map the ordinal offset of bit @ord in @buf to its position in @buf.
- * If @ord is not the ordinal offset of a set bit in @buf, map to zero (0).
+ * Value of @ord should be in range 0 <= @ord < weight(buf), else
+ * results are undefined.
  *
  * If for example, just bits 4 through 7 are set in @buf, then @ord
  * values 0 through 3 will get mapped to 4 through 7, respectively,
- * and all other @ord valuds will get mapped to 0.  When @ord value 3
+ * and all other @ord values return undefined values.  When @ord value 3
  * gets mapped to (returns) @pos value 7 in this example, that means
  * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
  *
@@ -583,8 +585,8 @@ static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
 
 /**
  * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
- * @src: subset to be remapped
  * @dst: remapped result
+ * @src: subset to be remapped
  * @old: defines domain of map
  * @new: defines range of map
  * @bits: number of bits in each of these bitmaps
@@ -596,49 +598,42 @@ static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
  * weight of @old, map the position of the n-th set bit in @old to
  * the position of the m-th set bit in @new, where m == n % w.
  *
- * If either of the @old and @new bitmaps are empty, or if@src and @dst
- * point to the same location, then this routine does nothing.
+ * If either of the @old and @new bitmaps are empty, or if @src and
+ * @dst point to the same location, then this routine copies @src
+ * to @dst.
 *
- * The positions of unset bits in @old are mapped to the position of
- * the first set bit in @new.
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identity map).
 *
 * Apply the above specified mapping to @src, placing the result in
 * @dst, clearing any bits previously set in @dst.
 *
- * The resulting value of @dst will have either the same weight as
- * @src, or less weight in the general case that the mapping wasn't
- * injective due to the weight of @new being less than that of @old.
- * The resulting value of @dst will never have greater weight than
- * that of @src, except perhaps in the case that one of the above
- * conditions was not met and this routine just returned.
- *
 * For example, lets say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
- * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
- * bit positions to 12 (the first set bit in @new.  So if say @src
- * comes into this routine with bits 1, 5 and 7 set, then @dst should
- * leave with bits 12, 13 and 15 set.
+ * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
+ * bit positions unchanged.  So if say @src comes into this routine
+ * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
+ * 13 and 15 set.
 */
 void bitmap_remap(unsigned long *dst, const unsigned long *src,
 		const unsigned long *old, const unsigned long *new,
 		int bits)
 {
-	int s;
+	int oldbit, w;
 
-	if (bitmap_weight(old, bits) == 0)
-		return;
-	if (bitmap_weight(new, bits) == 0)
-		return;
 	if (dst == src)		/* following doesn't handle inplace remaps */
 		return;
-
 	bitmap_zero(dst, bits);
-	for (s = find_first_bit(src, bits);
-	     s < bits;
-	     s = find_next_bit(src, bits, s + 1)) {
-		int x = bitmap_pos_to_ord(old, s, bits);
-		int y = bitmap_ord_to_pos(new, x, bits);
-		set_bit(y, dst);
+
+	w = bitmap_weight(new, bits);
+	for (oldbit = find_first_bit(src, bits);
+	     oldbit < bits;
+	     oldbit = find_next_bit(src, bits, oldbit + 1)) {
+		int n = bitmap_pos_to_ord(old, oldbit, bits);
+		if (n < 0 || w == 0)
+			set_bit(oldbit, dst);	/* identity map */
+		else
+			set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
 	}
 }
 EXPORT_SYMBOL(bitmap_remap);
@@ -657,8 +652,8 @@ EXPORT_SYMBOL(bitmap_remap);
 * weight of @old, map the position of the n-th set bit in @old to
 * the position of the m-th set bit in @new, where m == n % w.
 *
- * The positions of unset bits in @old are mapped to the position of
- * the first set bit in @new.
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identity map).
 *
 * Apply the above specified mapping to bit position @oldbit, returning
 * the new bit position.
@@ -666,14 +661,18 @@ EXPORT_SYMBOL(bitmap_remap);
 * For example, lets say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
- * bit positions to 12 (the first set bit in @new.  So if say @oldbit
- * is 5, then this routine returns 13.
+ * bit positions unchanged.  So if say @oldbit is 5, then this routine
+ * returns 13.
 */
 int bitmap_bitremap(int oldbit, const unsigned long *old,
 				const unsigned long *new, int bits)
 {
-	int x = bitmap_pos_to_ord(old, oldbit, bits);
-	return bitmap_ord_to_pos(new, x, bits);
+	int w = bitmap_weight(new, bits);
+	int n = bitmap_pos_to_ord(old, oldbit, bits);
+	if (n < 0 || w == 0)
+		return oldbit;
+	else
+		return bitmap_ord_to_pos(new, n % w, bits);
 }
 EXPORT_SYMBOL(bitmap_bitremap);
 
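The new identity-map semantics can be restated as a short hypothetical caller; the bitmap size and bit values below are invented for illustration, mirroring the example in the comment above:

	DECLARE_BITMAP(src, 16);
	DECLARE_BITMAP(dst, 16);
	DECLARE_BITMAP(old, 16);
	DECLARE_BITMAP(new, 16);
	int i;

	bitmap_zero(src, 16);
	bitmap_zero(old, 16);
	bitmap_zero(new, 16);
	set_bit(1, src);
	set_bit(5, src);
	set_bit(7, src);
	for (i = 4; i <= 7; i++)
		set_bit(i, old);	/* domain of the map: bits 4-7 */
	for (i = 12; i <= 15; i++)
		set_bit(i, new);	/* range of the map: bits 12-15 */

	bitmap_remap(dst, src, old, new, 16);
	/* dst now has bits 1, 13 and 15 set: bits 5 and 7 map to 13
	 * and 15, while bit 1 is outside @old and maps to itself. */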
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 305a9663aee3..a65c31455541 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,47 +1,11 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
-#include <asm/system.h>
 
-#ifdef __HAVE_ARCH_CMPXCHG
 /*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-#else
-/*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
- *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
@@ -52,21 +16,20 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 *
 * because the spin-lock and the decrement must be
 * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
 */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+#ifdef CONFIG_SMP
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+#endif
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
 	spin_unlock(lock);
 	return 0;
 }
-#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
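The rewrite keeps the classic usage contract. A minimal sketch of a caller, with hypothetical structure and lock names, showing why the decrement and the lock must be one atomic step:

	/* Drop a reference; if it was the last one, the list lock is
	 * held on return and the object can be torn down safely. */
	static void my_obj_put(struct my_obj *obj)
	{
		if (atomic_dec_and_lock(&obj->refcount, &my_obj_lock)) {
			list_del(&obj->list);
			spin_unlock(&my_obj_lock);
			kfree(obj);
		}
	}

atomic_dec_and_lock() here is the <linux/spinlock.h> wrapper around the _atomic_dec_and_lock() shown above.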
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index d08302d2a42c..c05b4b19cf6c 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -10,6 +10,7 @@
 */
 
 #include <linux/bitops.h>
+#include <linux/module.h>
 
 int find_next_bit(const unsigned long *addr, int size, int offset)
 {
@@ -53,3 +54,5 @@ int find_next_bit(const unsigned long *addr, int size, int offset)
 
 	return offset;
 }
+
+EXPORT_SYMBOL(find_next_bit);
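Exporting the symbol lets modules use the standard open-coded walk over set bits; a sketch, assuming a bitmap 'mask' of 'size' bits:

	int bit;

	for (bit = find_first_bit(mask, size);
	     bit < size;
	     bit = find_next_bit(mask, size, bit + 1)) {
		/* 'bit' is the position of the next set bit */
	}

This is the same loop shape the bitmap.c change above uses internally.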
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 88511c3805ad..c0bd4a914803 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -137,18 +137,31 @@ out:
 
 static inline void tag_set(struct radix_tree_node *node, int tag, int offset)
 {
-	if (!test_bit(offset, &node->tags[tag][0]))
-		__set_bit(offset, &node->tags[tag][0]);
+	__set_bit(offset, node->tags[tag]);
 }
 
 static inline void tag_clear(struct radix_tree_node *node, int tag, int offset)
 {
-	__clear_bit(offset, &node->tags[tag][0]);
+	__clear_bit(offset, node->tags[tag]);
 }
 
 static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
 {
-	return test_bit(offset, &node->tags[tag][0]);
+	return test_bit(offset, node->tags[tag]);
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, int tag)
+{
+	int idx;
+	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+		if (node->tags[tag][idx])
+			return 1;
+	}
+	return 0;
 }
 
 /*
@@ -185,15 +198,9 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 	 * into the newly-pushed top-level node(s)
 	 */
 	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
-		int idx;
-
 		tags[tag] = 0;
-		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (root->rnode->tags[tag][idx]) {
-				tags[tag] = 1;
-				break;
-			}
-		}
+		if (any_tag_set(root->rnode, tag))
+			tags[tag] = 1;
 	}
 
 	do {
@@ -246,7 +253,7 @@ int radix_tree_insert(struct radix_tree_root *root,
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
 	offset = 0;			/* uninitialised var warning */
-	while (height > 0) {
+	do {
 		if (slot == NULL) {
 			/* Have to add a child node. */
 			if (!(slot = radix_tree_node_alloc(root)))
@@ -264,18 +271,16 @@ int radix_tree_insert(struct radix_tree_root *root,
 		slot = node->slots[offset];
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
-	}
+	} while (height > 0);
 
 	if (slot != NULL)
 		return -EEXIST;
 
-	if (node) {
-		node->count++;
-		node->slots[offset] = item;
-		BUG_ON(tag_get(node, 0, offset));
-		BUG_ON(tag_get(node, 1, offset));
-	} else
-		root->rnode = item;
+	BUG_ON(!node);
+	node->count++;
+	node->slots[offset] = item;
+	BUG_ON(tag_get(node, 0, offset));
+	BUG_ON(tag_get(node, 1, offset));
 
 	return 0;
 }
@@ -367,7 +372,8 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
 		int offset;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		tag_set(slot, tag, offset);
+		if (!tag_get(slot, tag, offset))
+			tag_set(slot, tag, offset);
 		slot = slot->slots[offset];
 		BUG_ON(slot == NULL);
 		shift -= RADIX_TREE_MAP_SHIFT;
@@ -427,13 +433,11 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 		goto out;
 
 	do {
-		int idx;
-
+		if (!tag_get(pathp->node, tag, pathp->offset))
+			goto out;
 		tag_clear(pathp->node, tag, pathp->offset);
-		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (pathp->node->tags[tag][idx])
-				goto out;
-		}
+		if (any_tag_set(pathp->node, tag))
+			goto out;
 		pathp--;
 	} while (pathp->node);
 out:
@@ -674,6 +678,29 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
 
 /**
+ *	radix_tree_shrink    -    shrink height of a radix tree to minimal
+ *	@root		radix tree root
+ */
+static inline void radix_tree_shrink(struct radix_tree_root *root)
+{
+	/* try to shrink tree height */
+	while (root->height > 1 &&
+			root->rnode->count == 1 &&
+			root->rnode->slots[0]) {
+		struct radix_tree_node *to_free = root->rnode;
+
+		root->rnode = to_free->slots[0];
+		root->height--;
+		/* must only free zeroed nodes into the slab */
+		tag_clear(to_free, 0, 0);
+		tag_clear(to_free, 1, 0);
+		to_free->slots[0] = NULL;
+		to_free->count = 0;
+		radix_tree_node_free(to_free);
+	}
+}
+
+/**
 * radix_tree_delete    -    delete an item from a radix tree
 * @root:		radix tree root
 * @index:		index key
@@ -691,6 +718,8 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	void *ret = NULL;
 	char tags[RADIX_TREE_TAGS];
 	int nr_cleared_tags;
+	int tag;
+	int offset;
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
@@ -701,16 +730,14 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	slot = root->rnode;
 
 	for ( ; height > 0; height--) {
-		int offset;
-
 		if (slot == NULL)
 			goto out;
 
+		pathp++;
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		pathp[1].offset = offset;
-		pathp[1].node = slot;
+		pathp->offset = offset;
+		pathp->node = slot;
 		slot = slot->slots[offset];
-		pathp++;
 		shift -= RADIX_TREE_MAP_SHIFT;
 	}
 
@@ -723,35 +750,39 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	/*
 	 * Clear all tags associated with the just-deleted item
 	 */
-	memset(tags, 0, sizeof(tags));
-	do {
-		int tag;
+	nr_cleared_tags = 0;
+	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		if (tag_get(pathp->node, tag, pathp->offset)) {
+			tag_clear(pathp->node, tag, pathp->offset);
+			tags[tag] = 0;
+			nr_cleared_tags++;
+		} else
+			tags[tag] = 1;
+	}
 
-	nr_cleared_tags = RADIX_TREE_TAGS;
+	for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
 		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
-			int idx;
-
 			if (tags[tag])
 				continue;
 
 			tag_clear(pathp->node, tag, pathp->offset);
-
-			for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-				if (pathp->node->tags[tag][idx]) {
-					tags[tag] = 1;
-					nr_cleared_tags--;
-					break;
-				}
+			if (any_tag_set(pathp->node, tag)) {
+				tags[tag] = 1;
+				nr_cleared_tags--;
 			}
 		}
-		pathp--;
-	} while (pathp->node && nr_cleared_tags);
+	}
 
 	/* Now free the nodes we do not need anymore */
 	for (pathp = orig_pathp; pathp->node; pathp--) {
 		pathp->node->slots[pathp->offset] = NULL;
-		if (--pathp->node->count)
+		pathp->node->count--;
+
+		if (pathp->node->count) {
+			if (pathp->node == root->rnode)
+				radix_tree_shrink(root);
 			goto out;
+		}
 
 		/* Node with zero slots in use so free it */
 		radix_tree_node_free(pathp->node);
@@ -770,15 +801,11 @@ EXPORT_SYMBOL(radix_tree_delete);
 */
 int radix_tree_tagged(struct radix_tree_root *root, int tag)
 {
-	int idx;
-
-	if (!root->rnode)
+	struct radix_tree_node *rnode;
+	rnode = root->rnode;
+	if (!rnode)
 		return 0;
-	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-		if (root->rnode->tags[tag][idx])
-			return 1;
-	}
-	return 0;
+	return any_tag_set(rnode, tag);
 }
 EXPORT_SYMBOL(radix_tree_tagged);
 
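Taken together, the reworked API can be exercised as in this hypothetical sequence (the index, tag number and item pointer are invented for illustration; error handling elided):

	static RADIX_TREE(my_tree, GFP_KERNEL);

	if (radix_tree_insert(&my_tree, 42, my_item) == 0) {
		radix_tree_tag_set(&my_tree, 42, 0);	/* tag 0: e.g. "dirty" */

		if (radix_tree_tagged(&my_tree, 0))
			radix_tree_tag_clear(&my_tree, 42, 0);

		/* deletion may now also shrink the tree height */
		radix_tree_delete(&my_tree, 42);
	}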
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index dcd4be9bd4e5..c8bb8cc899d7 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -19,10 +19,11 @@ static void spin_bug(spinlock_t *lock, const char *msg)
 	if (xchg(&print_once, 0)) {
 		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
 			owner = lock->owner;
-		printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
+		printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
 			msg, raw_smp_processor_id(),
 			current->comm, current->pid);
-		printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
+		printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+				".owner_cpu: %d\n",
 			lock, lock->magic,
 			owner ? owner->comm : "<none>",
 			owner ? owner->pid : -1,
@@ -78,7 +79,8 @@ static void __spin_lock_debug(spinlock_t *lock)
 		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
+			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
+					"%s/%d, %p\n",
 				raw_smp_processor_id(), current->comm,
 				current->pid, lock);
 			dump_stack();
@@ -120,8 +122,8 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 	static long print_once = 1;
 
 	if (xchg(&print_once, 0)) {
-		printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
-			raw_smp_processor_id(), current->comm,
+		printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
+			msg, raw_smp_processor_id(), current->comm,
 			current->pid, lock);
 		dump_stack();
 #ifdef CONFIG_SMP
@@ -149,7 +151,8 @@ static void __read_lock_debug(rwlock_t *lock)
 		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
+			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
+					"%s/%d, %p\n",
 				raw_smp_processor_id(), current->comm,
 				current->pid, lock);
 			dump_stack();
@@ -221,7 +224,8 @@ static void __write_lock_debug(rwlock_t *lock)
 		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
+			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
+					"%s/%d, %p\n",
 				raw_smp_processor_id(), current->comm,
 				current->pid, lock);
 			dump_stack();
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 3b482052f403..0af497b6b9a8 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -463,7 +463,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 */
 		dma_addr_t handle;
 		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(handle))
+		if (swiotlb_dma_mapping_error(handle))
			return NULL;
 
 		ret = phys_to_virt(handle);
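For reference, the corrected pairing in a hypothetical caller (device and buffer names invented; only the two functions named in this hunk are assumed):

	dma_addr_t handle;

	handle = swiotlb_map_single(hwdev, buffer, size, DMA_TO_DEVICE);
	if (swiotlb_dma_mapping_error(handle))
		return NULL;	/* mapping failed, nothing to unmap */

The point of the fix is that swiotlb allocations must be checked with swiotlb's own error predicate rather than the generic dma_mapping_error().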
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index ad9a1bf4fc63..1653dd9bb01a 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -255,6 +255,7 @@ int zlib_deflateInit2_(
 }
 
 /* ========================================================================= */
+#if 0
 int zlib_deflateSetDictionary(
     z_streamp strm,
     const Byte *dictionary,
@@ -297,6 +298,7 @@ int zlib_deflateSetDictionary(
     if (hash_head) hash_head = 0;  /* to make compiler happy */
     return Z_OK;
 }
+#endif /* 0 */
 
 /* ========================================================================= */
 int zlib_deflateReset(
@@ -330,6 +332,7 @@ int zlib_deflateReset(
 }
 
 /* ========================================================================= */
+#if 0
 int zlib_deflateParams(
     z_streamp strm,
     int level,
@@ -365,6 +368,7 @@ int zlib_deflateParams(
     s->strategy = strategy;
     return err;
 }
+#endif /* 0 */
 
 /* =========================================================================
 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
@@ -572,6 +576,7 @@ int zlib_deflateEnd(
 /* =========================================================================
 * Copy the source state to the destination state.
 */
+#if 0
 int zlib_deflateCopy (
     z_streamp dest,
     z_streamp source
@@ -624,6 +629,7 @@ int zlib_deflateCopy (
     return Z_OK;
 #endif
 }
+#endif /* 0 */
 
 /* ===========================================================================
 * Read a new buffer from the current input stream, update the adler32
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
index 5985b28c8e30..767b573d1ef6 100644
--- a/lib/zlib_deflate/deflate_syms.c
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -16,6 +16,4 @@ EXPORT_SYMBOL(zlib_deflateInit_);
 EXPORT_SYMBOL(zlib_deflateInit2_);
 EXPORT_SYMBOL(zlib_deflateEnd);
 EXPORT_SYMBOL(zlib_deflateReset);
-EXPORT_SYMBOL(zlib_deflateCopy);
-EXPORT_SYMBOL(zlib_deflateParams);
 MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/infblock.c b/lib/zlib_inflate/infblock.c
index 50f21ca4ef7f..c16cdeff51aa 100644
--- a/lib/zlib_inflate/infblock.c
+++ b/lib/zlib_inflate/infblock.c
@@ -338,6 +338,7 @@ int zlib_inflate_blocks_free(
 }
 
 
+#if 0
 void zlib_inflate_set_dictionary(
     inflate_blocks_statef *s,
     const Byte *d,
@@ -347,15 +348,18 @@ void zlib_inflate_set_dictionary(
   memcpy(s->window, d, n);
   s->read = s->write = s->window + n;
 }
+#endif /* 0 */
 
 
 /* Returns true if inflate is currently at the end of a block generated
 * by Z_SYNC_FLUSH or Z_FULL_FLUSH.
 * IN assertion: s != NULL
 */
+#if 0
 int zlib_inflate_blocks_sync_point(
     inflate_blocks_statef *s
 )
 {
   return s->mode == LENS;
 }
+#endif /* 0 */
diff --git a/lib/zlib_inflate/infblock.h b/lib/zlib_inflate/infblock.h
index f5221ddf6054..ceee60b5107c 100644
--- a/lib/zlib_inflate/infblock.h
+++ b/lib/zlib_inflate/infblock.h
@@ -33,12 +33,16 @@ extern int zlib_inflate_blocks_free (
     inflate_blocks_statef *,
     z_streamp);
 
+#if 0
 extern void zlib_inflate_set_dictionary (
     inflate_blocks_statef *s,
     const Byte *d,   /* dictionary */
     uInt n);         /* dictionary length */
+#endif /* 0 */
 
+#if 0
 extern int zlib_inflate_blocks_sync_point (
     inflate_blocks_statef *s);
+#endif /* 0 */
 
 #endif /* _INFBLOCK_H */
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c
index aa1b08189121..ef49738f57ec 100644
--- a/lib/zlib_inflate/inflate_syms.c
+++ b/lib/zlib_inflate/inflate_syms.c
@@ -15,8 +15,6 @@ EXPORT_SYMBOL(zlib_inflate);
 EXPORT_SYMBOL(zlib_inflateInit_);
 EXPORT_SYMBOL(zlib_inflateInit2_);
 EXPORT_SYMBOL(zlib_inflateEnd);
-EXPORT_SYMBOL(zlib_inflateSync);
 EXPORT_SYMBOL(zlib_inflateReset);
-EXPORT_SYMBOL(zlib_inflateSyncPoint);
 EXPORT_SYMBOL(zlib_inflateIncomp);
 MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/inflate_sync.c b/lib/zlib_inflate/inflate_sync.c
index e07bdb21f55c..61411ff89d61 100644
--- a/lib/zlib_inflate/inflate_sync.c
+++ b/lib/zlib_inflate/inflate_sync.c
@@ -7,6 +7,7 @@
 #include "infblock.h"
 #include "infutil.h"
 
+#if 0
 int zlib_inflateSync(
     z_streamp z
 )
@@ -57,6 +58,7 @@ int zlib_inflateSync(
     z->state->mode = BLOCKS;
     return Z_OK;
 }
+#endif /* 0 */
 
 
 /* Returns true if inflate is currently at the end of a block generated
@@ -66,6 +68,7 @@ int zlib_inflateSync(
 * decompressing, PPP checks that at the end of input packet, inflate is
 * waiting for these length bytes.
 */
+#if 0
 int zlib_inflateSyncPoint(
     z_streamp z
 )
@@ -74,6 +77,7 @@ int zlib_inflateSyncPoint(
         return Z_STREAM_ERROR;
     return zlib_inflate_blocks_sync_point(z->state->blocks);
 }
+#endif /* 0 */
 
 /*
 * This subroutine adds the data at next_in/avail_in to the output history