author		Dave Kleikamp <shaggy@austin.ibm.com>	2006-01-24 15:34:47 -0500
committer	Dave Kleikamp <shaggy@austin.ibm.com>	2006-01-24 15:34:47 -0500
commit		0a0fc0ddbe732779366ab6b1b879f62195e65967 (patch)
tree		7b42490a676cf39ae0691b6859ecf7fd410f229b /lib
parent		4d5dbd0945d9e0833dd7964a3d6ee33157f7cc7a (diff)
parent		3ee68c4af3fd7228c1be63254b9f884614f9ebb2 (diff)
Merge with /home/shaggy/git/linus-clean/
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	43
-rw-r--r--	lib/bitmap.c	89
-rw-r--r--	lib/dec_and_lock.c	49
-rw-r--r--	lib/find_next_bit.c	3
-rw-r--r--	lib/genalloc.c	14
-rw-r--r--	lib/klist.c	2
-rw-r--r--	lib/kobject.c	4
-rw-r--r--	lib/kobject_uevent.c	349
-rw-r--r--	lib/radix-tree.c	143
-rw-r--r--	lib/spinlock_debug.c	34
-rw-r--r--	lib/swiotlb.c	10
-rw-r--r--	lib/zlib_deflate/deflate.c	6
-rw-r--r--	lib/zlib_deflate/deflate_syms.c	2
-rw-r--r--	lib/zlib_inflate/infblock.c	4
-rw-r--r--	lib/zlib_inflate/infblock.h	4
-rw-r--r--	lib/zlib_inflate/inflate_syms.c	2
-rw-r--r--	lib/zlib_inflate/inflate_sync.c	4
17 files changed, 344 insertions(+), 418 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 156822e3cc..a314e663d5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -9,15 +9,9 @@ config PRINTK_TIME
 	  in kernel startup.
 
 
-config DEBUG_KERNEL
-	bool "Kernel debugging"
-	help
-	  Say Y here if you are developing drivers or trying to debug and
-	  identify kernel problems.
-
 config MAGIC_SYSRQ
 	bool "Magic SysRq key"
-	depends on DEBUG_KERNEL && !UML
+	depends on !UML
 	help
 	  If you say Y here, you will have some control over the system even
 	  if the system crashes for example during kernel debugging (e.g., you
@@ -29,10 +23,16 @@ config MAGIC_SYSRQ
 	  keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
 	  unless you really know what this hack does.
 
+config DEBUG_KERNEL
+	bool "Kernel debugging"
+	help
+	  Say Y here if you are developing drivers or trying to debug and
+	  identify kernel problems.
+
 config LOG_BUF_SHIFT
 	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
 	range 12 21
-	default 17 if ARCH_S390
+	default 17 if S390
 	default 16 if X86_NUMAQ || IA64
 	default 15 if SMP
 	default 14
@@ -79,7 +79,7 @@ config SCHEDSTATS
 
 config DEBUG_SLAB
 	bool "Debug memory allocations"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && SLAB
 	help
 	  Say Y here to have the kernel do limited verification on memory
 	  allocation as well as poisoning memory on free to catch use of freed
@@ -95,6 +95,14 @@ config DEBUG_PREEMPT
 	  if kernel code uses it in a preemption-unsafe way. Also, the kernel
 	  will detect preemption count underflows.
 
+config DEBUG_MUTEXES
+	bool "Mutex debugging, deadlock detection"
+	default y
+	depends on DEBUG_KERNEL
+	help
+	  This allows mutex semantics violations and mutex related deadlocks
+	  (lockups) to be detected and reported automatically.
+
 config DEBUG_SPINLOCK
 	bool "Spinlock debugging"
 	depends on DEBUG_KERNEL
@@ -172,7 +180,8 @@ config DEBUG_VM
 	bool "Debug VM"
 	depends on DEBUG_KERNEL
 	help
-	  Enable this to debug the virtual-memory system.
+	  Enable this to turn on extended checks in the virtual-memory system
+	  that may impact performance.
 
 	  If unsure, say N.
 
@@ -186,6 +195,20 @@ config FRAME_POINTER
 	  some architectures or if you use external debuggers.
 	  If you don't debug the kernel, you can say N.
 
+config FORCED_INLINING
+	bool "Force gcc to inline functions marked 'inline'"
+	depends on DEBUG_KERNEL
+	default y
+	help
+	  This option determines if the kernel forces gcc to inline the functions
+	  developers have marked 'inline'. Doing so takes away freedom from gcc to
+	  do what it thinks is best, which is desirable for the gcc 3.x series of
+	  compilers. The gcc 4.x series have a rewritten inlining algorithm and
+	  disabling this option will generate a smaller kernel there. Hopefully
+	  this algorithm is so good that allowing gcc4 to make the decision can
+	  become the default in the future, until then this option is there to
+	  test gcc for this.
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
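A note on FORCED_INLINING: the help text above describes behaviour that lives in the compiler headers rather than in this file. As a hedged sketch of the mechanism (modelled on include/linux/compiler-gcc4.h of this kernel generation; treat the exact header contents as an assumption), the option redefines 'inline' to carry gcc's always_inline attribute:

/* sketch of the CONFIG_FORCED_INLINING wiring; assumed, not quoted */
#ifdef CONFIG_FORCED_INLINING
# undef inline
# undef __inline__
# undef __inline
# define inline		inline		__attribute__((always_inline))
# define __inline__	__inline__	__attribute__((always_inline))
# define __inline	__inline	__attribute__((always_inline))
#endif

With the option disabled, gcc 4.x is free to ignore the keyword, which is what produces the smaller kernels the help text mentions.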
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 23d3b1147f..48e708381d 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -519,7 +519,7 @@ EXPORT_SYMBOL(bitmap_parselist);
  *
  * Map the bit at position @pos in @buf (of length @bits) to the
  * ordinal of which set bit it is.  If it is not set or if @pos
- * is not a valid bit position, map to zero (0).
+ * is not a valid bit position, map to -1.
  *
  * If for example, just bits 4 through 7 are set in @buf, then @pos
  * values 4 through 7 will get mapped to 0 through 3, respectively,
@@ -531,18 +531,19 @@ EXPORT_SYMBOL(bitmap_parselist);
  */
 static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
 {
-	int ord = 0;
+	int i, ord;
 
-	if (pos >= 0 && pos < bits) {
-		int i;
+	if (pos < 0 || pos >= bits || !test_bit(pos, buf))
+		return -1;
 
-		for (i = find_first_bit(buf, bits);
-		     i < pos;
-		     i = find_next_bit(buf, bits, i + 1))
-			ord++;
-		if (i > pos)
-			ord = 0;
+	i = find_first_bit(buf, bits);
+	ord = 0;
+	while (i < pos) {
+		i = find_next_bit(buf, bits, i + 1);
+		ord++;
 	}
+	BUG_ON(i != pos);
+
 	return ord;
 }
 
@@ -553,11 +554,12 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
  * @bits: number of valid bit positions in @buf
  *
  * Map the ordinal offset of bit @ord in @buf to its position in @buf.
- * If @ord is not the ordinal offset of a set bit in @buf, map to zero (0).
+ * Value of @ord should be in range 0 <= @ord < weight(buf), else
+ * results are undefined.
  *
  * If for example, just bits 4 through 7 are set in @buf, then @ord
  * values 0 through 3 will get mapped to 4 through 7, respectively,
- * and all other @ord valuds will get mapped to 0.  When @ord value 3
+ * and all other @ord values return undefined values.  When @ord value 3
  * gets mapped to (returns) @pos value 7 in this example, that means
  * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
  *
@@ -583,8 +585,8 @@ static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
 
 /**
  * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
- *	@src: subset to be remapped
 *	@dst: remapped result
+ *	@src: subset to be remapped
 *	@old: defines domain of map
 *	@new: defines range of map
 *	@bits: number of bits in each of these bitmaps
@@ -596,49 +598,42 @@ static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
 * weight of @old, map the position of the n-th set bit in @old to
 * the position of the m-th set bit in @new, where m == n % w.
 *
- * If either of the @old and @new bitmaps are empty, or if@src and @dst
- * point to the same location, then this routine does nothing.
+ * If either of the @old and @new bitmaps are empty, or if @src and
+ * @dst point to the same location, then this routine copies @src
+ * to @dst.
 *
- * The positions of unset bits in @old are mapped to the position of
- * the first set bit in @new.
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identity map).
 *
 * Apply the above specified mapping to @src, placing the result in
 * @dst, clearing any bits previously set in @dst.
 *
- * The resulting value of @dst will have either the same weight as
- * @src, or less weight in the general case that the mapping wasn't
- * injective due to the weight of @new being less than that of @old.
- * The resulting value of @dst will never have greater weight than
- * that of @src, except perhaps in the case that one of the above
- * conditions was not met and this routine just returned.
- *
 * For example, lets say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
- * bit positions to 12 (the first set bit in @new.  So if say @src
- * comes into this routine with bits 1, 5 and 7 set, then @dst should
- * leave with bits 12, 13 and 15 set.
+ * bit positions unchanged.  So if say @src comes into this routine
+ * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
+ * 13 and 15 set.
 */
 void bitmap_remap(unsigned long *dst, const unsigned long *src,
 		const unsigned long *old, const unsigned long *new,
 		int bits)
 {
-	int s;
+	int oldbit, w;
 
-	if (bitmap_weight(old, bits) == 0)
-		return;
-	if (bitmap_weight(new, bits) == 0)
-		return;
 	if (dst == src)		/* following doesn't handle inplace remaps */
 		return;
-
 	bitmap_zero(dst, bits);
-	for (s = find_first_bit(src, bits);
-			s < bits;
-			s = find_next_bit(src, bits, s + 1)) {
-		int x = bitmap_pos_to_ord(old, s, bits);
-		int y = bitmap_ord_to_pos(new, x, bits);
-		set_bit(y, dst);
+
+	w = bitmap_weight(new, bits);
+	for (oldbit = find_first_bit(src, bits);
+	     oldbit < bits;
+	     oldbit = find_next_bit(src, bits, oldbit + 1)) {
+		int n = bitmap_pos_to_ord(old, oldbit, bits);
+		if (n < 0 || w == 0)
+			set_bit(oldbit, dst);	/* identity map */
+		else
+			set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
 	}
 }
 EXPORT_SYMBOL(bitmap_remap);
@@ -657,8 +652,8 @@ EXPORT_SYMBOL(bitmap_remap);
 * weight of @old, map the position of the n-th set bit in @old to
 * the position of the m-th set bit in @new, where m == n % w.
 *
- * The positions of unset bits in @old are mapped to the position of
- * the first set bit in @new.
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identity map).
 *
 * Apply the above specified mapping to bit position @oldbit, returning
 * the new bit position.
@@ -666,14 +661,18 @@ EXPORT_SYMBOL(bitmap_remap);
 * For example, lets say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
- * bit positions to 12 (the first set bit in @new.  So if say @oldbit
- * is 5, then this routine returns 13.
+ * bit positions unchanged.  So if say @oldbit is 5, then this routine
+ * returns 13.
 */
 int bitmap_bitremap(int oldbit, const unsigned long *old,
 				const unsigned long *new, int bits)
 {
-	int x = bitmap_pos_to_ord(old, oldbit, bits);
-	return bitmap_ord_to_pos(new, x, bits);
+	int w = bitmap_weight(new, bits);
+	int n = bitmap_pos_to_ord(old, oldbit, bits);
+	if (n < 0 || w == 0)
+		return oldbit;
+	else
+		return bitmap_ord_to_pos(new, n % w, bits);
 }
 EXPORT_SYMBOL(bitmap_bitremap);
 
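The reworked bitmap_remap()/bitmap_bitremap() semantics are easiest to verify in code. A minimal sketch of the documented example (the test function is hypothetical; only <linux/bitmap.h> facilities are assumed): @old covers bits 4..7, @new covers bits 12..15, so set bits inside @old translate and everything else passes through as the identity map:

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>

/* hypothetical self-test mirroring the example in the comments above */
static void bitmap_remap_example(void)
{
	DECLARE_BITMAP(src, 32);
	DECLARE_BITMAP(dst, 32);
	DECLARE_BITMAP(oldmap, 32);
	DECLARE_BITMAP(newmap, 32);
	int i;

	bitmap_zero(src, 32);
	bitmap_zero(oldmap, 32);
	bitmap_zero(newmap, 32);

	for (i = 4; i <= 7; i++)
		__set_bit(i, oldmap);	/* domain of the map: bits 4..7 */
	for (i = 12; i <= 15; i++)
		__set_bit(i, newmap);	/* range of the map: bits 12..15 */

	__set_bit(1, src);	/* not in @old: identity-mapped to 1 */
	__set_bit(5, src);	/* 2nd set bit of @old -> 2nd of @new (13) */
	__set_bit(7, src);	/* 4th set bit of @old -> 4th of @new (15) */

	bitmap_remap(dst, src, oldmap, newmap, 32);
	/* dst now holds bits 1, 13 and 15, as the comment promises */

	BUG_ON(bitmap_bitremap(5, oldmap, newmap, 32) != 13);
	BUG_ON(bitmap_bitremap(1, oldmap, newmap, 32) != 1);	/* identity */
}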
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 305a9663ae..a65c314555 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,47 +1,11 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
-#include <asm/system.h>
 
-#ifdef __HAVE_ARCH_CMPXCHG
 /*
  * This is an implementation of the notion of "decrement a
  * reference count, and return locked if it decremented to zero".
  *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-#else
-/*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
- *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
@@ -52,21 +16,20 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 *
 * because the spin-lock and the decrement must be
 * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed.  Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
 */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+#ifdef CONFIG_SMP
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+#endif
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
 	spin_unlock(lock);
 	return 0;
 }
-#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
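For callers nothing changes: this function still backs the atomic_dec_and_lock() wrapper, and the usual pattern is a release path that must hold a list lock while dropping the final reference. A minimal sketch under that assumption (struct and function names are hypothetical):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/* hypothetical refcounted object living on a spinlock-protected list */
struct item {
	atomic_t		refcount;
	struct list_head	link;
};

static DEFINE_SPINLOCK(item_list_lock);

static void item_put(struct item *it)
{
	/*
	 * On SMP, atomic_add_unless() handles every put except the last
	 * one without touching the lock; only the final reference takes
	 * item_list_lock, which is still held while the item is unlinked.
	 */
	if (atomic_dec_and_lock(&it->refcount, &item_list_lock)) {
		list_del(&it->link);
		spin_unlock(&item_list_lock);
		kfree(it);
	}
}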
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index d08302d2a4..c05b4b19cf 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -10,6 +10,7 @@
 */
 
 #include <linux/bitops.h>
+#include <linux/module.h>
 
 int find_next_bit(const unsigned long *addr, int size, int offset)
 {
@@ -53,3 +54,5 @@ int find_next_bit(const unsigned long *addr, int size, int offset)
 
 	return offset;
 }
+
+EXPORT_SYMBOL(find_next_bit);
diff --git a/lib/genalloc.c b/lib/genalloc.c
index d6d30d2e71..9ce0a6a3b8 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -95,12 +95,10 @@ unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
 	if (size > max_chunk_size)
 		return 0;
 
-	i = 0;
-
 	size = max(size, 1 << ALLOC_MIN_SHIFT);
-	s = roundup_pow_of_two(size);
-
-	j = i;
+	i = fls(size - 1);
+	s = 1 << i;
+	j = i -= ALLOC_MIN_SHIFT;
 
 	spin_lock_irqsave(&poolp->lock, flags);
 	while (!h[j].next) {
@@ -153,10 +151,10 @@ void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
 	if (size > max_chunk_size)
 		return;
 
-	i = 0;
-
 	size = max(size, 1 << ALLOC_MIN_SHIFT);
-	s = roundup_pow_of_two(size);
+	i = fls(size - 1);
+	s = 1 << i;
+	i -= ALLOC_MIN_SHIFT;
 
 	a = ptr;
 
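Both hunks replace roundup_pow_of_two() with an open-coded fls(), which yields the rounded size and the free-list index in one step: fls(size - 1) equals ceil(log2(size)) for size >= 2, and size has already been clamped to at least 1 << ALLOC_MIN_SHIFT. A short worked check (the standalone helper is hypothetical):

#include <linux/bitops.h>

/*
 * fls(n) returns the 1-based position of the most significant set bit:
 *
 *   size = 4096: fls(4095) = 12, so s = 1 << 12 = 4096 (already a power of two)
 *   size = 4100: fls(4099) = 13, so s = 1 << 13 = 8192 (rounded up)
 *
 * A hypothetical helper making the same computation explicit:
 */
static unsigned long round_size_to_pow2(int size, int *order)
{
	*order = fls(size - 1);		/* == ceil(log2(size)) for size >= 2 */
	return 1UL << *order;
}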
diff --git a/lib/klist.c b/lib/klist.c
index bb2f3551d5..9c94f0b163 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -199,6 +199,8 @@ void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_
 	i->i_klist = k;
 	i->i_head = &k->k_list;
 	i->i_cur = n;
+	if (n)
+		kref_get(&n->n_ref);
 }
 
 EXPORT_SYMBOL_GPL(klist_iter_init_node);
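The added kref_get() means an iterator now owns a reference on its starting node, closing a window where that node could be released between klist_iter_init_node() and the first klist_next(). A hedged usage sketch (the walker function is hypothetical):

#include <linux/klist.h>

/* hypothetical walk that starts in the middle of a klist */
static void example_walk_from(struct klist *k, struct klist_node *start)
{
	struct klist_iter iter;
	struct klist_node *n;

	/* as of this patch, the iterator takes its own reference on @start */
	klist_iter_init_node(k, &iter, start);

	while ((n = klist_next(&iter))) {
		/* ... examine n; the iterator pins the current node ... */
	}
	klist_iter_exit(&iter);
}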
diff --git a/lib/kobject.c b/lib/kobject.c
index a181abed89..7a0e680949 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -207,7 +207,7 @@ int kobject_register(struct kobject * kobj)
 				kobject_name(kobj),error);
 			dump_stack();
 		} else
-			kobject_hotplug(kobj, KOBJ_ADD);
+			kobject_uevent(kobj, KOBJ_ADD);
 	} else
 		error = -EINVAL;
 	return error;
@@ -312,7 +312,7 @@ void kobject_del(struct kobject * kobj)
 void kobject_unregister(struct kobject * kobj)
 {
 	pr_debug("kobject %s: unregistering\n",kobject_name(kobj));
-	kobject_hotplug(kobj, KOBJ_REMOVE);
+	kobject_uevent(kobj, KOBJ_REMOVE);
 	kobject_del(kobj);
 	kobject_put(kobj);
 }
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 3ab375411e..f56e27ae9d 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -19,14 +19,16 @@
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
 #include <linux/string.h>
-#include <linux/kobject_uevent.h>
 #include <linux/kobject.h>
 #include <net/sock.h>
 
-#define BUFFER_SIZE	1024	/* buffer for the hotplug env */
+#define BUFFER_SIZE	1024	/* buffer for the variables */
 #define NUM_ENVP	32	/* number of env pointers */
 
-#if defined(CONFIG_KOBJECT_UEVENT) || defined(CONFIG_HOTPLUG)
+#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
+static DEFINE_SPINLOCK(sequence_lock);
+static struct sock *uevent_sock;
+
 static char *action_to_string(enum kobject_action action)
 {
 	switch (action) {
@@ -36,10 +38,6 @@ static char *action_to_string(enum kobject_action action)
 		return "remove";
 	case KOBJ_CHANGE:
 		return "change";
-	case KOBJ_MOUNT:
-		return "mount";
-	case KOBJ_UMOUNT:
-		return "umount";
 	case KOBJ_OFFLINE:
 		return "offline";
 	case KOBJ_ONLINE:
@@ -48,306 +46,183 @@ static char *action_to_string(enum kobject_action action)
 		return NULL;
 	}
 }
-#endif
-
-#ifdef CONFIG_KOBJECT_UEVENT
-static struct sock *uevent_sock;
 
 /**
- * send_uevent - notify userspace by sending event through netlink socket
+ * kobject_uevent - notify userspace by sending an uevent
 *
- * @signal: signal name
- * @obj: object path (kobject)
- * @envp: possible hotplug environment to pass with the message
- * @gfp_mask:
- */
-static int send_uevent(const char *signal, const char *obj,
-		       char **envp, gfp_t gfp_mask)
-{
-	struct sk_buff *skb;
-	char *pos;
-	int len;
-
-	if (!uevent_sock)
-		return -EIO;
-
-	len = strlen(signal) + 1;
-	len += strlen(obj) + 1;
-
-	/* allocate buffer with the maximum possible message size */
-	skb = alloc_skb(len + BUFFER_SIZE, gfp_mask);
-	if (!skb)
-		return -ENOMEM;
-
-	pos = skb_put(skb, len);
-	sprintf(pos, "%s@%s", signal, obj);
-
-	/* copy the environment key by key to our continuous buffer */
-	if (envp) {
-		int i;
-
-		for (i = 2; envp[i]; i++) {
-			len = strlen(envp[i]) + 1;
-			pos = skb_put(skb, len);
-			strcpy(pos, envp[i]);
-		}
-	}
-
-	NETLINK_CB(skb).dst_group = 1;
-	return netlink_broadcast(uevent_sock, skb, 0, 1, gfp_mask);
-}
-
-static int do_kobject_uevent(struct kobject *kobj, enum kobject_action action,
-			     struct attribute *attr, gfp_t gfp_mask)
-{
-	char *path;
-	char *attrpath;
-	char *signal;
-	int len;
-	int rc = -ENOMEM;
-
-	path = kobject_get_path(kobj, gfp_mask);
-	if (!path)
-		return -ENOMEM;
-
-	signal = action_to_string(action);
-	if (!signal)
-		return -EINVAL;
-
-	if (attr) {
-		len = strlen(path);
-		len += strlen(attr->name) + 2;
-		attrpath = kmalloc(len, gfp_mask);
-		if (!attrpath)
-			goto exit;
-		sprintf(attrpath, "%s/%s", path, attr->name);
-		rc = send_uevent(signal, attrpath, NULL, gfp_mask);
-		kfree(attrpath);
-	} else
-		rc = send_uevent(signal, path, NULL, gfp_mask);
-
-exit:
-	kfree(path);
-	return rc;
-}
-
-/**
- * kobject_uevent - notify userspace by sending event through netlink socket
- *
- * @signal: signal name
- * @kobj: struct kobject that the event is happening to
- * @attr: optional struct attribute the event belongs to
- */
-int kobject_uevent(struct kobject *kobj, enum kobject_action action,
-		   struct attribute *attr)
-{
-	return do_kobject_uevent(kobj, action, attr, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(kobject_uevent);
-
-int kobject_uevent_atomic(struct kobject *kobj, enum kobject_action action,
-			  struct attribute *attr)
-{
-	return do_kobject_uevent(kobj, action, attr, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(kobject_uevent_atomic);
-
-static int __init kobject_uevent_init(void)
-{
-	uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL,
-					    THIS_MODULE);
-
-	if (!uevent_sock) {
-		printk(KERN_ERR
-		       "kobject_uevent: unable to create netlink socket!\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-postcore_initcall(kobject_uevent_init);
-
-#else
-static inline int send_uevent(const char *signal, const char *obj,
-			      char **envp, int gfp_mask)
-{
-	return 0;
-}
-
-#endif /* CONFIG_KOBJECT_UEVENT */
-
-
-#ifdef CONFIG_HOTPLUG
-char hotplug_path[HOTPLUG_PATH_LEN] = "/sbin/hotplug";
-u64 hotplug_seqnum;
-static DEFINE_SPINLOCK(sequence_lock);
-
-/**
- * kobject_hotplug - notify userspace by executing /sbin/hotplug
- *
- * @action: action that is happening (usually "ADD" or "REMOVE")
+ * @action: action that is happening (usually KOBJ_ADD and KOBJ_REMOVE)
 * @kobj: struct kobject that the action is happening to
 */
-void kobject_hotplug(struct kobject *kobj, enum kobject_action action)
+void kobject_uevent(struct kobject *kobj, enum kobject_action action)
 {
-	char *argv [3];
-	char **envp = NULL;
-	char *buffer = NULL;
-	char *seq_buff;
+	char **envp;
+	char *buffer;
 	char *scratch;
+	const char *action_string;
+	const char *devpath = NULL;
+	const char *subsystem;
+	struct kobject *top_kobj;
+	struct kset *kset;
+	struct kset_uevent_ops *uevent_ops;
+	u64 seq;
+	char *seq_buff;
 	int i = 0;
 	int retval;
-	char *kobj_path = NULL;
-	const char *name = NULL;
-	char *action_string;
-	u64 seq;
-	struct kobject *top_kobj = kobj;
-	struct kset *kset;
-	static struct kset_hotplug_ops null_hotplug_ops;
-	struct kset_hotplug_ops *hotplug_ops = &null_hotplug_ops;
 
-	/* If this kobj does not belong to a kset,
-	   try to find a parent that does. */
+	pr_debug("%s\n", __FUNCTION__);
+
+	action_string = action_to_string(action);
+	if (!action_string)
+		return;
+
+	/* search the kset we belong to */
+	top_kobj = kobj;
 	if (!top_kobj->kset && top_kobj->parent) {
 		do {
 			top_kobj = top_kobj->parent;
 		} while (!top_kobj->kset && top_kobj->parent);
 	}
-
-	if (top_kobj->kset)
-		kset = top_kobj->kset;
-	else
+	if (!top_kobj->kset)
 		return;
 
-	if (kset->hotplug_ops)
-		hotplug_ops = kset->hotplug_ops;
+	kset = top_kobj->kset;
+	uevent_ops = kset->uevent_ops;
 
-	/* If the kset has a filter operation, call it.
-	   Skip the event, if the filter returns zero. */
-	if (hotplug_ops->filter) {
-		if (!hotplug_ops->filter(kset, kobj))
+	/* skip the event, if the filter returns zero. */
+	if (uevent_ops && uevent_ops->filter)
+		if (!uevent_ops->filter(kset, kobj))
 			return;
-	}
 
-	pr_debug ("%s\n", __FUNCTION__);
-
-	action_string = action_to_string(action);
-	if (!action_string)
-		return;
-
-	envp = kmalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL);
+	/* environment index */
+	envp = kzalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL);
 	if (!envp)
 		return;
-	memset (envp, 0x00, NUM_ENVP * sizeof (char *));
 
+	/* environment values */
 	buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
 	if (!buffer)
 		goto exit;
 
-	if (hotplug_ops->name)
-		name = hotplug_ops->name(kset, kobj);
-	if (name == NULL)
-		name = kobject_name(&kset->kobj);
+	/* complete object path */
+	devpath = kobject_get_path(kobj, GFP_KERNEL);
+	if (!devpath)
+		goto exit;
 
-	argv [0] = hotplug_path;
-	argv [1] = (char *)name;	/* won't be changed but 'const' has to go */
-	argv [2] = NULL;
+	/* originating subsystem */
+	if (uevent_ops && uevent_ops->name)
+		subsystem = uevent_ops->name(kset, kobj);
+	else
+		subsystem = kobject_name(&kset->kobj);
 
-	/* minimal command environment */
-	envp [i++] = "HOME=/";
-	envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+	/* event environment for helper process only */
+	envp[i++] = "HOME=/";
+	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
 
+	/* default keys */
 	scratch = buffer;
-
 	envp [i++] = scratch;
 	scratch += sprintf(scratch, "ACTION=%s", action_string) + 1;
-
-	kobj_path = kobject_get_path(kobj, GFP_KERNEL);
-	if (!kobj_path)
-		goto exit;
-
 	envp [i++] = scratch;
-	scratch += sprintf (scratch, "DEVPATH=%s", kobj_path) + 1;
-
+	scratch += sprintf (scratch, "DEVPATH=%s", devpath) + 1;
 	envp [i++] = scratch;
-	scratch += sprintf(scratch, "SUBSYSTEM=%s", name) + 1;
+	scratch += sprintf(scratch, "SUBSYSTEM=%s", subsystem) + 1;
 
-	/* reserve space for the sequence,
-	 * put the real one in after the hotplug call */
+	/* just reserve the space, overwrite it after kset call has returned */
 	envp[i++] = seq_buff = scratch;
 	scratch += strlen("SEQNUM=18446744073709551616") + 1;
 
-	if (hotplug_ops->hotplug) {
-		/* have the kset specific function add its stuff */
-		retval = hotplug_ops->hotplug (kset, kobj,
+	/* let the kset specific function add its stuff */
+	if (uevent_ops && uevent_ops->uevent) {
+		retval = uevent_ops->uevent(kset, kobj,
 				  &envp[i], NUM_ENVP - i, scratch,
 				  BUFFER_SIZE - (scratch - buffer));
 		if (retval) {
-			pr_debug ("%s - hotplug() returned %d\n",
+			pr_debug ("%s - uevent() returned %d\n",
 				  __FUNCTION__, retval);
 			goto exit;
 		}
 	}
 
+	/* we will send an event, request a new sequence number */
 	spin_lock(&sequence_lock);
-	seq = ++hotplug_seqnum;
+	seq = ++uevent_seqnum;
 	spin_unlock(&sequence_lock);
 	sprintf(seq_buff, "SEQNUM=%llu", (unsigned long long)seq);
 
-	pr_debug ("%s: %s %s seq=%llu %s %s %s %s %s\n",
-		  __FUNCTION__, argv[0], argv[1], (unsigned long long)seq,
-		  envp[0], envp[1], envp[2], envp[3], envp[4]);
-
-	send_uevent(action_string, kobj_path, envp, GFP_KERNEL);
+	/* send netlink message */
+	if (uevent_sock) {
+		struct sk_buff *skb;
+		size_t len;
+
+		/* allocate message with the maximum possible size */
+		len = strlen(action_string) + strlen(devpath) + 2;
+		skb = alloc_skb(len + BUFFER_SIZE, GFP_KERNEL);
+		if (skb) {
+			/* add header */
+			scratch = skb_put(skb, len);
+			sprintf(scratch, "%s@%s", action_string, devpath);
+
+			/* copy keys to our continuous event payload buffer */
+			for (i = 2; envp[i]; i++) {
+				len = strlen(envp[i]) + 1;
+				scratch = skb_put(skb, len);
+				strcpy(scratch, envp[i]);
+			}
+
+			NETLINK_CB(skb).dst_group = 1;
+			netlink_broadcast(uevent_sock, skb, 0, 1, GFP_KERNEL);
+		}
+	}
 
-	if (!hotplug_path[0])
-		goto exit;
+	/* call uevent_helper, usually only enabled during early boot */
+	if (uevent_helper[0]) {
+		char *argv [3];
 
-	retval = call_usermodehelper (argv[0], argv, envp, 0);
-	if (retval)
-		pr_debug ("%s - call_usermodehelper returned %d\n",
-			  __FUNCTION__, retval);
+		argv [0] = uevent_helper;
+		argv [1] = (char *)subsystem;
+		argv [2] = NULL;
+		call_usermodehelper (argv[0], argv, envp, 0);
+	}
 
 exit:
-	kfree(kobj_path);
+	kfree(devpath);
 	kfree(buffer);
 	kfree(envp);
 	return;
 }
-EXPORT_SYMBOL(kobject_hotplug);
+EXPORT_SYMBOL_GPL(kobject_uevent);
 
 /**
- * add_hotplug_env_var - helper for creating hotplug environment variables
+ * add_uevent_var - helper for creating event variables
 * @envp: Pointer to table of environment variables, as passed into
- * hotplug() method.
+ * uevent() method.
 * @num_envp: Number of environment variable slots available, as
- * passed into hotplug() method.
+ * passed into uevent() method.
 * @cur_index: Pointer to current index into @envp.  It should be
- * initialized to 0 before the first call to add_hotplug_env_var(),
+ * initialized to 0 before the first call to add_uevent_var(),
 * and will be incremented on success.
 * @buffer: Pointer to buffer for environment variables, as passed
- * into hotplug() method.
- * @buffer_size: Length of @buffer, as passed into hotplug() method.
+ * into uevent() method.
+ * @buffer_size: Length of @buffer, as passed into uevent() method.
 * @cur_len: Pointer to current length of space used in @buffer.
 * Should be initialized to 0 before the first call to
- * add_hotplug_env_var(), and will be incremented on success.
+ * add_uevent_var(), and will be incremented on success.
 * @format: Format for creating environment variable (of the form
 * "XXX=%x") for snprintf().
 *
 * Returns 0 if environment variable was added successfully or -ENOMEM
 * if no space was available.
 */
-int add_hotplug_env_var(char **envp, int num_envp, int *cur_index,
+int add_uevent_var(char **envp, int num_envp, int *cur_index,
 		   char *buffer, int buffer_size, int *cur_len,
 		   const char *format, ...)
 {
 	va_list args;
 
 	/*
 	 * We check against num_envp - 1 to make sure there is at
-	 * least one slot left after we return, since the hotplug
-	 * method needs to set the last slot to NULL.
+	 * least one slot left after we return, since kobject_uevent()
+	 * needs to set the last slot to NULL.
 	 */
 	if (*cur_index >= num_envp - 1)
 		return -ENOMEM;
@@ -366,6 +241,22 @@ int add_hotplug_env_var(char **envp, int num_envp, int *cur_index,
 	(*cur_index)++;
 	return 0;
 }
-EXPORT_SYMBOL(add_hotplug_env_var);
+EXPORT_SYMBOL_GPL(add_uevent_var);
+
+static int __init kobject_uevent_init(void)
+{
+	uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL,
+					    THIS_MODULE);
+
+	if (!uevent_sock) {
+		printk(KERN_ERR
+		       "kobject_uevent: unable to create netlink socket!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+postcore_initcall(kobject_uevent_init);
 
 #endif /* CONFIG_HOTPLUG */
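The add_uevent_var() calling convention is unchanged from the hotplug era: a kset's uevent() callback receives the envp table, the shared string buffer and running index/length counters, and must leave the final envp slot NULL for kobject_uevent(). A minimal sketch of such a callback (the subsystem is hypothetical):

#include <linux/kobject.h>

static int example_uevent(struct kset *kset, struct kobject *kobj,
			  char **envp, int num_envp,
			  char *buffer, int buffer_size)
{
	int i = 0;
	int len = 0;
	int retval;

	/* appends one "KEY=value" string; bumps i and len on success */
	retval = add_uevent_var(envp, num_envp, &i,
				buffer, buffer_size, &len,
				"EXAMPLE_NAME=%s", kobject_name(kobj));
	if (retval)
		return retval;

	envp[i] = NULL;	/* kobject_uevent() depends on the terminator */
	return 0;
}

static struct kset_uevent_ops example_uevent_ops = {
	.uevent	= example_uevent,
};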
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 88511c3805..c0bd4a9148 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -137,18 +137,31 @@ out:
 
 static inline void tag_set(struct radix_tree_node *node, int tag, int offset)
 {
-	if (!test_bit(offset, &node->tags[tag][0]))
-		__set_bit(offset, &node->tags[tag][0]);
+	__set_bit(offset, node->tags[tag]);
 }
 
 static inline void tag_clear(struct radix_tree_node *node, int tag, int offset)
 {
-	__clear_bit(offset, &node->tags[tag][0]);
+	__clear_bit(offset, node->tags[tag]);
 }
 
 static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
 {
-	return test_bit(offset, &node->tags[tag][0]);
+	return test_bit(offset, node->tags[tag]);
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, int tag)
+{
+	int idx;
+	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+		if (node->tags[tag][idx])
+			return 1;
+	}
+	return 0;
 }
 
 /*
@@ -185,15 +198,9 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 	 * into the newly-pushed top-level node(s)
 	 */
 	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
-		int idx;
-
 		tags[tag] = 0;
-		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (root->rnode->tags[tag][idx]) {
-				tags[tag] = 1;
-				break;
-			}
-		}
+		if (any_tag_set(root->rnode, tag))
+			tags[tag] = 1;
 	}
 
 	do {
@@ -246,7 +253,7 @@ int radix_tree_insert(struct radix_tree_root *root,
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
 	offset = 0;			/* uninitialised var warning */
-	while (height > 0) {
+	do {
 		if (slot == NULL) {
 			/* Have to add a child node. */
 			if (!(slot = radix_tree_node_alloc(root)))
@@ -264,18 +271,16 @@ int radix_tree_insert(struct radix_tree_root *root,
 		slot = node->slots[offset];
 		shift -= RADIX_TREE_MAP_SHIFT;
 		height--;
-	}
+	} while (height > 0);
 
 	if (slot != NULL)
 		return -EEXIST;
 
-	if (node) {
-		node->count++;
-		node->slots[offset] = item;
-		BUG_ON(tag_get(node, 0, offset));
-		BUG_ON(tag_get(node, 1, offset));
-	} else
-		root->rnode = item;
+	BUG_ON(!node);
+	node->count++;
+	node->slots[offset] = item;
+	BUG_ON(tag_get(node, 0, offset));
+	BUG_ON(tag_get(node, 1, offset));
 
 	return 0;
 }
@@ -367,7 +372,8 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
 		int offset;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		tag_set(slot, tag, offset);
+		if (!tag_get(slot, tag, offset))
+			tag_set(slot, tag, offset);
 		slot = slot->slots[offset];
 		BUG_ON(slot == NULL);
 		shift -= RADIX_TREE_MAP_SHIFT;
@@ -427,13 +433,11 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 		goto out;
 
 	do {
-		int idx;
-
+		if (!tag_get(pathp->node, tag, pathp->offset))
+			goto out;
 		tag_clear(pathp->node, tag, pathp->offset);
-		for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-			if (pathp->node->tags[tag][idx])
-				goto out;
-		}
+		if (any_tag_set(pathp->node, tag))
+			goto out;
 		pathp--;
 	} while (pathp->node);
 out:
@@ -674,6 +678,29 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
 
 /**
+ *	radix_tree_shrink    -    shrink height of a radix tree to minimal
+ *	@root		radix tree root
+ */
+static inline void radix_tree_shrink(struct radix_tree_root *root)
+{
+	/* try to shrink tree height */
+	while (root->height > 1 &&
+			root->rnode->count == 1 &&
+			root->rnode->slots[0]) {
+		struct radix_tree_node *to_free = root->rnode;
+
+		root->rnode = to_free->slots[0];
+		root->height--;
+		/* must only free zeroed nodes into the slab */
+		tag_clear(to_free, 0, 0);
+		tag_clear(to_free, 1, 0);
+		to_free->slots[0] = NULL;
+		to_free->count = 0;
+		radix_tree_node_free(to_free);
+	}
+}
+
+/**
 *	radix_tree_delete    -    delete an item from a radix tree
 *	@root:		radix tree root
 *	@index:		index key
@@ -691,6 +718,8 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	void *ret = NULL;
 	char tags[RADIX_TREE_TAGS];
 	int nr_cleared_tags;
+	int tag;
+	int offset;
 
 	height = root->height;
 	if (index > radix_tree_maxindex(height))
@@ -701,16 +730,14 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	slot = root->rnode;
 
 	for ( ; height > 0; height--) {
-		int offset;
-
 		if (slot == NULL)
 			goto out;
 
+		pathp++;
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
-		pathp[1].offset = offset;
-		pathp[1].node = slot;
+		pathp->offset = offset;
+		pathp->node = slot;
 		slot = slot->slots[offset];
-		pathp++;
 		shift -= RADIX_TREE_MAP_SHIFT;
 	}
 
@@ -723,35 +750,39 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 	/*
 	 * Clear all tags associated with the just-deleted item
 	 */
-	memset(tags, 0, sizeof(tags));
-	do {
-		int tag;
+	nr_cleared_tags = 0;
+	for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+		if (tag_get(pathp->node, tag, pathp->offset)) {
+			tag_clear(pathp->node, tag, pathp->offset);
+			tags[tag] = 0;
+			nr_cleared_tags++;
+		} else
+			tags[tag] = 1;
+	}
 
-		nr_cleared_tags = RADIX_TREE_TAGS;
+	for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
 		for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
-			int idx;
-
 			if (tags[tag])
 				continue;
 
 			tag_clear(pathp->node, tag, pathp->offset);
-
-			for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-				if (pathp->node->tags[tag][idx]) {
-					tags[tag] = 1;
-					nr_cleared_tags--;
-					break;
-				}
+			if (any_tag_set(pathp->node, tag)) {
+				tags[tag] = 1;
+				nr_cleared_tags--;
 			}
 		}
-		pathp--;
-	} while (pathp->node && nr_cleared_tags);
+	}
 
 	/* Now free the nodes we do not need anymore */
 	for (pathp = orig_pathp; pathp->node; pathp--) {
 		pathp->node->slots[pathp->offset] = NULL;
-		if (--pathp->node->count)
+		pathp->node->count--;
+
+		if (pathp->node->count) {
+			if (pathp->node == root->rnode)
+				radix_tree_shrink(root);
 			goto out;
+		}
 
 		/* Node with zero slots in use so free it */
 		radix_tree_node_free(pathp->node);
@@ -770,15 +801,11 @@ EXPORT_SYMBOL(radix_tree_delete);
 */
 int radix_tree_tagged(struct radix_tree_root *root, int tag)
 {
-	int idx;
-
-	if (!root->rnode)
+	struct radix_tree_node *rnode;
+	rnode = root->rnode;
+	if (!rnode)
 		return 0;
-	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-		if (root->rnode->tags[tag][idx])
-			return 1;
-	}
-	return 0;
+	return any_tag_set(rnode, tag);
 }
 EXPORT_SYMBOL(radix_tree_tagged);
 
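radix_tree_shrink() only runs from the delete path, and only while the root has height > 1 with a single occupied slot 0, i.e. when every remaining index fits in a shorter tree. A hedged sketch of a sequence that triggers it (hypothetical test; error handling omitted):

#include <linux/radix-tree.h>
#include <linux/gfp.h>

static RADIX_TREE(example_tree, GFP_KERNEL);	/* hypothetical test tree */

static void radix_shrink_example(void *a, void *b)
{
	radix_tree_insert(&example_tree, 0, a);		/* fits in height 1 */
	radix_tree_insert(&example_tree, 100, b);	/* forces height 2 */

	/*
	 * Removing index 100 leaves one entry, reachable entirely through
	 * slot 0 of the root node, so radix_tree_delete() now notices the
	 * condition and radix_tree_shrink() drops the height back to 1.
	 */
	radix_tree_delete(&example_tree, 100);
}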
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 906ad101ea..c8bb8cc899 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -19,9 +19,11 @@ static void spin_bug(spinlock_t *lock, const char *msg)
 	if (xchg(&print_once, 0)) {
 		if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
 			owner = lock->owner;
-		printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
-			msg, smp_processor_id(), current->comm, current->pid);
-		printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
+		printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
+			msg, raw_smp_processor_id(),
+			current->comm, current->pid);
+		printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+			".owner_cpu: %d\n",
 			lock, lock->magic,
 			owner ? owner->comm : "<none>",
 			owner ? owner->pid : -1,
@@ -77,9 +79,10 @@ static void __spin_lock_debug(spinlock_t *lock)
 			/* lockup suspected: */
 			if (print_once) {
 				print_once = 0;
-				printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
-					smp_processor_id(), current->comm, current->pid,
-					lock);
+				printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
+					"%s/%d, %p\n",
+					raw_smp_processor_id(), current->comm,
+					current->pid, lock);
 				dump_stack();
 			}
 		}
@@ -119,8 +122,9 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 	static long print_once = 1;
 
 	if (xchg(&print_once, 0)) {
-		printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
-			smp_processor_id(), current->comm, current->pid, lock);
+		printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
+			msg, raw_smp_processor_id(), current->comm,
+			current->pid, lock);
 		dump_stack();
 #ifdef CONFIG_SMP
 	/*
@@ -147,9 +151,10 @@ static void __read_lock_debug(rwlock_t *lock)
 			/* lockup suspected: */
 			if (print_once) {
 				print_once = 0;
-				printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
-					smp_processor_id(), current->comm, current->pid,
-					lock);
+				printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
+					"%s/%d, %p\n",
+					raw_smp_processor_id(), current->comm,
+					current->pid, lock);
 				dump_stack();
 			}
 		}
@@ -219,9 +224,10 @@ static void __write_lock_debug(rwlock_t *lock)
 			/* lockup suspected: */
 			if (print_once) {
 				print_once = 0;
-				printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
-					smp_processor_id(), current->comm, current->pid,
-					lock);
+				printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
+					"%s/%d, %p\n",
+					raw_smp_processor_id(), current->comm,
+					current->pid, lock);
 				dump_stack();
 			}
 		}
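The spinlock_debug.c hunks above make two changes to every splat: the messages gain a KERN_EMERG log level, so they stay visible even when the console log level filters out default-priority printk output, and smp_processor_id() becomes raw_smp_processor_id(). The raw_ variant skips the preemption-safety check that smp_processor_id() performs under CONFIG_DEBUG_PREEMPT, which matters here because a lock-debug report can fire from any context and should not trigger a second warning while printing the first. A minimal sketch of the resulting reporting pattern (an illustrative helper, not a function from this commit):

	static void report_lock_bug(const char *kind, const char *msg,
				    void *lock)
	{
		/* raw_smp_processor_id(): no CONFIG_DEBUG_PREEMPT check,
		 * so it is safe in the middle of an error path. */
		printk(KERN_EMERG "BUG: %s %s on CPU#%d, %s/%d, %p\n",
		       kind, msg, raw_smp_processor_id(),
		       current->comm, current->pid, lock);
		dump_stack();
	}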
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 57216f3544..0af497b6b9 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -142,8 +142,7 @@ swiotlb_init_with_default_size (size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs *
-				(1 << IO_TLB_SHIFT), 0x100000000);
+	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
@@ -464,7 +463,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	 */
 	dma_addr_t handle;
 	handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-	if (dma_mapping_error(handle))
+	if (swiotlb_dma_mapping_error(handle))
 		return NULL;
 
 	ret = phys_to_virt(handle);
@@ -704,8 +703,9 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		dev_addr = virt_to_phys(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
-			sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir));
-			if (!sg->dma_address) {
+			void *map = map_single(hwdev, addr, sg->length, dir);
+			sg->dma_address = virt_to_bus(map);
+			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
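Three distinct swiotlb.c changes above: boot-time allocation reverts to plain alloc_bootmem_low_pages(), dropping the _limit variant and its 4 GB cap argument; swiotlb_alloc_coherent() calls swiotlb's own swiotlb_dma_mapping_error() rather than the generic dma_mapping_error() wrapper; and swiotlb_map_sg() fixes a genuine error-handling bug. The old map_sg code derived sg->dma_address from the return of map_single() and then tested sg->dma_address for zero, but the address computed from a NULL pointer is not guaranteed to be zero, so an allocation failure could slip through unnoticed. The fix keeps the raw pointer and tests it directly. A sketch of the corrected check, with variable names as in the hunk and the surrounding scatterlist loop elided:

	void *map = map_single(hwdev, addr, sg->length, dir);
	sg->dma_address = virt_to_bus(map);	/* meaningless if map == NULL... */
	if (!map) {				/* ...so test the pointer, not the address */
		/* callers of map_sg are expected to handle this */
		swiotlb_full(hwdev, sg->length, dir, 0);
	}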
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index ad9a1bf4fc..1653dd9bb0 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -255,6 +255,7 @@ int zlib_deflateInit2_(
 }
 
 /* ========================================================================= */
+#if 0
 int zlib_deflateSetDictionary(
     z_streamp strm,
     const Byte *dictionary,
@@ -297,6 +298,7 @@ int zlib_deflateSetDictionary(
     if (hash_head) hash_head = 0;  /* to make compiler happy */
     return Z_OK;
 }
+#endif /* 0 */
 
 /* ========================================================================= */
 int zlib_deflateReset(
@@ -330,6 +332,7 @@ int zlib_deflateReset(
 }
 
 /* ========================================================================= */
+#if 0
 int zlib_deflateParams(
     z_streamp strm,
     int level,
@@ -365,6 +368,7 @@ int zlib_deflateParams(
     s->strategy = strategy;
     return err;
 }
+#endif /* 0 */
 
 /* =========================================================================
  * Put a short in the pending buffer. The 16-bit value is put in MSB order.
@@ -572,6 +576,7 @@ int zlib_deflateEnd(
 /* =========================================================================
  * Copy the source state to the destination state.
  */
+#if 0
 int zlib_deflateCopy (
     z_streamp dest,
     z_streamp source
@@ -624,6 +629,7 @@ int zlib_deflateCopy (
     return Z_OK;
 #endif
 }
+#endif /* 0 */
 
 /* ===========================================================================
  * Read a new buffer from the current input stream, update the adler32
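The zlib changes from here onward all follow one pattern: entry points with no in-tree callers (zlib_deflateSetDictionary, zlib_deflateParams, zlib_deflateCopy above, and the inflate-side zlib_inflateSync and zlib_inflateSyncPoint further below) are bracketed with #if 0 rather than deleted, so the kernel's copy stays easy to diff against stock zlib while the dead code drops out of the image. The matching EXPORT_SYMBOL() lines must go at the same time, as the next file shows, because exporting a symbol whose definition was compiled out breaks the build. A minimal illustration of the pattern (hypothetical function, not from this commit):

	#if 0	/* the preprocessor removes the body entirely */
	int unused_helper(int x)
	{
		return x * 2;
	}
	#endif /* 0 */
	/* EXPORT_SYMBOL(unused_helper); would now fail to link,
	 * so the export is removed together with the function. */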
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
index 5985b28c8e..767b573d1e 100644
--- a/lib/zlib_deflate/deflate_syms.c
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -16,6 +16,4 @@ EXPORT_SYMBOL(zlib_deflateInit_);
 EXPORT_SYMBOL(zlib_deflateInit2_);
 EXPORT_SYMBOL(zlib_deflateEnd);
 EXPORT_SYMBOL(zlib_deflateReset);
-EXPORT_SYMBOL(zlib_deflateCopy);
-EXPORT_SYMBOL(zlib_deflateParams);
 MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/infblock.c b/lib/zlib_inflate/infblock.c
index 50f21ca4ef..c16cdeff51 100644
--- a/lib/zlib_inflate/infblock.c
+++ b/lib/zlib_inflate/infblock.c
@@ -338,6 +338,7 @@ int zlib_inflate_blocks_free(
 }
 
 
+#if 0
 void zlib_inflate_set_dictionary(
     inflate_blocks_statef *s,
     const Byte *d,
@@ -347,15 +348,18 @@ void zlib_inflate_set_dictionary(
   memcpy(s->window, d, n);
   s->read = s->write = s->window + n;
 }
+#endif /* 0 */
 
 
 /* Returns true if inflate is currently at the end of a block generated
  * by Z_SYNC_FLUSH or Z_FULL_FLUSH.
  * IN assertion: s != NULL
  */
+#if 0
 int zlib_inflate_blocks_sync_point(
     inflate_blocks_statef *s
 )
 {
   return s->mode == LENS;
 }
+#endif /* 0 */
diff --git a/lib/zlib_inflate/infblock.h b/lib/zlib_inflate/infblock.h
index f5221ddf60..ceee60b510 100644
--- a/lib/zlib_inflate/infblock.h
+++ b/lib/zlib_inflate/infblock.h
@@ -33,12 +33,16 @@ extern int zlib_inflate_blocks_free (
     inflate_blocks_statef *,
     z_streamp);
 
+#if 0
 extern void zlib_inflate_set_dictionary (
     inflate_blocks_statef *s,
     const Byte *d,  /* dictionary */
     uInt  n);       /* dictionary length */
+#endif /* 0 */
 
+#if 0
 extern int zlib_inflate_blocks_sync_point (
     inflate_blocks_statef *s);
+#endif /* 0 */
 
 #endif /* _INFBLOCK_H */
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c
index aa1b081891..ef49738f57 100644
--- a/lib/zlib_inflate/inflate_syms.c
+++ b/lib/zlib_inflate/inflate_syms.c
@@ -15,8 +15,6 @@ EXPORT_SYMBOL(zlib_inflate);
 EXPORT_SYMBOL(zlib_inflateInit_);
 EXPORT_SYMBOL(zlib_inflateInit2_);
 EXPORT_SYMBOL(zlib_inflateEnd);
-EXPORT_SYMBOL(zlib_inflateSync);
 EXPORT_SYMBOL(zlib_inflateReset);
-EXPORT_SYMBOL(zlib_inflateSyncPoint);
 EXPORT_SYMBOL(zlib_inflateIncomp);
 MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/inflate_sync.c b/lib/zlib_inflate/inflate_sync.c
index e07bdb21f5..61411ff89d 100644
--- a/lib/zlib_inflate/inflate_sync.c
+++ b/lib/zlib_inflate/inflate_sync.c
@@ -7,6 +7,7 @@
 #include "infblock.h"
 #include "infutil.h"
 
+#if 0
 int zlib_inflateSync(
     z_streamp z
 )
@@ -57,6 +58,7 @@ int zlib_inflateSync(
   z->state->mode = BLOCKS;
   return Z_OK;
 }
+#endif /* 0 */
 
 
 /* Returns true if inflate is currently at the end of a block generated
@@ -66,6 +68,7 @@ int zlib_inflateSync(
  * decompressing, PPP checks that at the end of input packet, inflate is
  * waiting for these length bytes.
  */
+#if 0
 int zlib_inflateSyncPoint(
     z_streamp z
 )
@@ -74,6 +77,7 @@ int zlib_inflateSyncPoint(
     return Z_STREAM_ERROR;
   return zlib_inflate_blocks_sync_point(z->state->blocks);
 }
+#endif /* 0 */
 
 /*
  * This subroutine adds the data at next_in/avail_in to the output history
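zlib_inflateSync, which resynchronizes a stream after corrupt input by scanning for the next full-flush point, and zlib_inflateSyncPoint are compiled out here along with their exports in inflate_syms.c above; zlib_inflateIncomp, whose comment begins at the end of this hunk, remains exported, presumably as the surviving PPP helper. For reference, stock zlib callers use the now-disabled resync entry point roughly as follows (illustrative only; no such caller exists in this tree, or the function would not be disabled):

	int ret = zlib_inflate(&stream, Z_SYNC_FLUSH);
	if (ret == Z_DATA_ERROR) {
		/* skip input until the next full-flush point, then retry */
		ret = zlib_inflateSync(&stream);
	}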